Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/speakup/i18n.c4
-rw-r--r--drivers/android/binder.c228
-rw-r--r--drivers/android/binder_alloc.c15
-rw-r--r--drivers/android/binder_alloc.h8
-rw-r--r--drivers/android/binder_internal.h24
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/block/xen-blkback/common.h1
-rw-r--r--drivers/block/xen-blkback/xenbus.c38
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/bus/mhi/core/boot.c64
-rw-r--r--drivers/bus/mhi/core/debugfs.c2
-rw-r--r--drivers/bus/mhi/core/init.c72
-rw-r--r--drivers/bus/mhi/core/internal.h20
-rw-r--r--drivers/bus/mhi/core/main.c416
-rw-r--r--drivers/bus/mhi/core/pm.c119
-rw-r--r--drivers/bus/mhi/pci_generic.c330
-rw-r--r--drivers/char/applicom.c2
-rw-r--r--drivers/char/hw_random/ba431-rng.c16
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c27
-rw-r--r--drivers/char/hw_random/cctrng.c20
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/hw_random/intel-rng.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c14
-rw-r--r--drivers/char/hw_random/pic32-rng.c3
-rw-r--r--drivers/char/hw_random/xiphera-trng.c4
-rw-r--r--drivers/char/lp.c4
-rw-r--r--drivers/char/mwave/tp3780i.c6
-rw-r--r--drivers/char/mwave/tp3780i.h2
-rw-r--r--drivers/char/random.c21
-rw-r--r--drivers/char/tpm/eventlog/acpi.c33
-rw-r--r--drivers/char/tpm/eventlog/common.c3
-rw-r--r--drivers/char/tpm/eventlog/efi.c29
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c1
-rw-r--r--drivers/char/virtio_console.c23
-rw-r--r--drivers/clocksource/arm_arch_timer.c23
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c8
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c28
-rw-r--r--drivers/clocksource/hyperv_timer.c251
-rw-r--r--drivers/clocksource/ingenic-ost.c9
-rw-r--r--drivers/clocksource/ingenic-timer.c2
-rw-r--r--drivers/clocksource/sh_cmt.c5
-rw-r--r--drivers/clocksource/timer-atmel-tcb.c4
-rw-r--r--drivers/clocksource/timer-fsl-ftm.c2
-rw-r--r--drivers/clocksource/timer-microchip-pit64b.c2
-rw-r--r--drivers/clocksource/timer-npcm7xx.c1
-rw-r--r--drivers/clocksource/timer-of.c4
-rw-r--r--drivers/clocksource/timer-pistachio.c4
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c157
-rw-r--r--drivers/clocksource/timer-vf-pit.c2
-rw-r--r--drivers/crypto/allwinner/Kconfig14
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c2
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c23
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c2
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c9
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c3
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c11
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c12
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c4
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c12
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c18
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h4
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h8
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.h18
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.h2
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-cipher.c6
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c3
-rw-r--r--drivers/crypto/atmel-ecc.c30
-rw-r--r--drivers/crypto/atmel-i2c.c2
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/atmel-tdes.c1
-rw-r--r--drivers/crypto/bcm/cipher.c7
-rw-r--r--drivers/crypto/bcm/spu.c16
-rw-r--r--drivers/crypto/bcm/spu2.c43
-rw-r--r--drivers/crypto/bcm/util.c4
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c3
-rw-r--r--drivers/crypto/caam/caampkc.c3
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c9
-rw-r--r--drivers/crypto/cavium/zip/common.h1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c3
-rw-r--r--drivers/crypto/ccp/ccp-dev.c12
-rw-r--r--drivers/crypto/ccp/ccp-ops.c1
-rw-r--r--drivers/crypto/ccp/sev-dev.c6
-rw-r--r--drivers/crypto/ccp/sp-dev.c12
-rw-r--r--drivers/crypto/ccp/sp-dev.h15
-rw-r--r--drivers/crypto/ccp/sp-pci.c1
-rw-r--r--drivers/crypto/ccp/tee-dev.c57
-rw-r--r--drivers/crypto/ccp/tee-dev.h20
-rw-r--r--drivers/crypto/ccree/cc_driver.c4
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c32
-rw-r--r--drivers/crypto/chelsio/chcr_core.c5
-rw-r--r--drivers/crypto/chelsio/chcr_core.h1
-rw-r--r--drivers/crypto/geode-aes.c4
-rw-r--r--drivers/crypto/hisilicon/Kconfig2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h18
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c921
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c158
-rw-r--r--drivers/crypto/hisilicon/qm.c396
-rw-r--r--drivers/crypto/hisilicon/qm.h29
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c2
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c13
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.h2
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h10
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c137
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.h6
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c267
-rw-r--r--drivers/crypto/hisilicon/sgl.c37
-rw-r--r--drivers/crypto/hisilicon/trng/trng.c13
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h50
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c698
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c99
-rw-r--r--drivers/crypto/img-hash.c3
-rw-r--r--drivers/crypto/inside-secure/safexcel.c2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c7
-rw-r--r--drivers/crypto/keembay/keembay-ocs-aes-core.c8
-rw-r--r--drivers/crypto/keembay/keembay-ocs-hcu-core.c8
-rw-r--r--drivers/crypto/keembay/ocs-hcu.c8
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h10
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c14
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c8
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c33
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c144
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c2
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c2
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c2
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c2
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c2
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c2
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c4
-rw-r--r--drivers/crypto/nx/nx-sha256.c2
-rw-r--r--drivers/crypto/nx/nx-sha512.c2
-rw-r--r--drivers/crypto/nx/nx.c5
-rw-r--r--drivers/crypto/nx/nx_debugfs.c2
-rw-r--r--drivers/crypto/omap-aes.c7
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c1
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c1
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c1
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.c25
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.h13
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_hw_data.c40
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_hw_data.h14
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c29
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c1
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf2pf_msg.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c17
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c32
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c4
-rw-r--r--drivers/crypto/qce/cipher.h1
-rw-r--r--drivers/crypto/qce/common.c25
-rw-r--r--drivers/crypto/qce/common.h3
-rw-r--r--drivers/crypto/qce/sha.c143
-rw-r--r--drivers/crypto/qce/skcipher.c69
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c2
-rw-r--r--drivers/crypto/s5p-sss.c17
-rw-r--r--drivers/crypto/sa2ul.c143
-rw-r--r--drivers/crypto/sa2ul.h4
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c4
-rw-r--r--drivers/crypto/stm32/stm32-hash.c8
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c5
-rw-r--r--drivers/crypto/ux500/cryp/cryp.h2
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c10
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.c2
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.h4
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irqp.h4
-rw-r--r--drivers/crypto/ux500/cryp/cryp_p.h15
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c18
-rw-r--r--drivers/crypto/vmx/aes.c2
-rw-r--r--drivers/crypto/vmx/aes_cbc.c2
-rw-r--r--drivers/crypto/vmx/aes_ctr.c2
-rw-r--r--drivers/crypto/vmx/aes_xts.c2
-rw-r--r--drivers/crypto/vmx/ghash.c2
-rw-r--r--drivers/crypto/vmx/vmx.c2
-rw-r--r--drivers/cxl/mem.c152
-rw-r--r--drivers/dax/bus.c6
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dw/Kconfig2
-rw-r--r--drivers/dma/idxd/device.c65
-rw-r--r--drivers/dma/idxd/idxd.h3
-rw-r--r--drivers/dma/idxd/init.c11
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/idxd/sysfs.c19
-rw-r--r--drivers/dma/plx_dma.c18
-rw-r--r--drivers/dma/tegra20-apb-dma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c31
-rw-r--r--drivers/extcon/extcon-max8997.c4
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c99
-rw-r--r--drivers/extcon/extcon-sm5502.c22
-rw-r--r--drivers/firewire/ohci.c4
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/efi/libstub/Makefile3
-rw-r--r--drivers/firmware/google/gsmi.c14
-rw-r--r--drivers/fpga/Kconfig9
-rw-r--r--drivers/fpga/dfl-afu-error.c10
-rw-r--r--drivers/fpga/dfl-afu-main.c35
-rw-r--r--drivers/fpga/dfl-afu.h2
-rw-r--r--drivers/fpga/dfl-pci.c18
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c45
-rw-r--r--drivers/fpga/xilinx-spi.c24
-rw-r--r--drivers/gpio/gpio-omap.c9
-rw-r--r--drivers/gpio/gpiolib-sysfs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c3
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c14
-rw-r--r--drivers/greybus/es2.c8
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c40
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h1
-rw-r--r--drivers/hid/hid-alps.c1
-rw-r--r--drivers/hid/hid-asus.c3
-rw-r--r--drivers/hid/hid-cp2112.c22
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hv/channel.c103
-rw-r--r--drivers/hv/channel_mgmt.c86
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv.c152
-rw-r--r--drivers/hv/hv_balloon.c89
-rw-r--r--drivers/hv/hv_trace.h15
-rw-r--r--drivers/hv/ring_buffer.c10
-rw-r--r--drivers/hv/vmbus_drv.c93
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c1
-rw-r--r--drivers/hwtracing/intel_th/core.c2
-rw-r--r--drivers/hwtracing/intel_th/gth.c4
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h8
-rw-r--r--drivers/hwtracing/intel_th/msu.c2
-rw-r--r--drivers/hwtracing/intel_th/pci.c12
-rw-r--r--drivers/hwtracing/intel_th/pti.c4
-rw-r--r--drivers/hwtracing/stm/p_sys-t.c6
-rw-r--r--drivers/hwtracing/stm/policy.c5
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c4
-rw-r--r--drivers/input/joystick/n64joy.c4
-rw-r--r--drivers/input/keyboard/nspire-keypad.c56
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h1
-rw-r--r--drivers/input/touchscreen/elants_i2c.c5
-rw-r--r--drivers/input/touchscreen/s6sy761.c4
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c4
-rw-r--r--drivers/interconnect/qcom/sdm660.c923
-rw-r--r--drivers/interconnect/qcom/sm8350.c633
-rw-r--r--drivers/interconnect/qcom/sm8350.h168
-rw-r--r--drivers/irqchip/Kconfig18
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c4
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c2
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c2
-rw-r--r--drivers/irqchip/irq-gic-v2m.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c10
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c10
-rw-r--r--drivers/irqchip/irq-gic-v4.c27
-rw-r--r--drivers/irqchip/irq-hip04.c4
-rw-r--r--drivers/irqchip/irq-idt3243x.c124
-rw-r--r--drivers/irqchip/irq-jcore-aic.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c2
-rw-r--r--drivers/irqchip/irq-mbigen.c4
-rw-r--r--drivers/irqchip/irq-meson-gpio.c2
-rw-r--r--drivers/irqchip/irq-mst-intc.c98
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c2
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-sifive-plic.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c7
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-tb10x.c1
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c2
-rw-r--r--drivers/irqchip/irq-vic.c4
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c161
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c2
-rw-r--r--drivers/md/dm-verity-fec.c11
-rw-r--r--drivers/md/dm-verity-fec.h1
-rw-r--r--drivers/mfd/intel_pmt.c112
-rw-r--r--drivers/misc/Kconfig19
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot.c3
-rw-r--r--drivers/misc/cxl/context.c2
-rw-r--r--drivers/misc/cxl/fault.c2
-rw-r--r--drivers/misc/dw-xdata-pcie.c420
-rw-r--r--drivers/misc/genwqe/card_ddcb.c10
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c12
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c368
-rw-r--r--drivers/misc/habanalabs/common/context.c14
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c224
-rw-r--r--drivers/misc/habanalabs/common/device.c221
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c238
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h184
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c28
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c35
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c10
-rw-r--r--drivers/misc/habanalabs/common/irq.c56
-rw-r--r--drivers/misc/habanalabs/common/memory.c182
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c3
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c52
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c33
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c357
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h3
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c8
-rw-r--r--drivers/misc/habanalabs/goya/goya.c140
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h2
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h99
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h219
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi.h2
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h2
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h43
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h14
-rw-r--r--drivers/misc/habanalabs/include/goya/goya.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_async_events.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_fw_if.h11
-rw-r--r--drivers/misc/kgdbts.c74
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c27
-rw-r--r--drivers/misc/lkdtm/bugs.c17
-rw-r--r--drivers/misc/lkdtm/core.c1
-rw-r--r--drivers/misc/lkdtm/lkdtm.h1
-rw-r--r--drivers/misc/mei/hw-me-regs.h1
-rw-r--r--drivers/misc/mei/pci-me.c1
-rw-r--r--drivers/misc/pvpanic/Kconfig27
-rw-r--r--drivers/misc/pvpanic/Makefile8
-rw-r--r--drivers/misc/pvpanic/pvpanic-mmio.c (renamed from drivers/misc/pvpanic.c)89
-rw-r--r--drivers/misc/pvpanic/pvpanic-pci.c125
-rw-r--r--drivers/misc/pvpanic/pvpanic.c113
-rw-r--r--drivers/misc/pvpanic/pvpanic.h21
-rw-r--r--drivers/misc/sgi-xp/xp_main.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c2
-rw-r--r--drivers/misc/uacce/uacce.c2
-rw-r--r--drivers/misc/vmw_balloon.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c5
-rw-r--r--drivers/most/most_cdev.c6
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c4
-rw-r--r--drivers/mux/gpio.c19
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c30
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c102
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c6
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c14
-rw-r--r--drivers/net/ethernet/jme.c10
-rw-r--r--drivers/net/ethernet/jme.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c84
-rw-r--r--drivers/net/geneve.c6
-rw-r--r--drivers/net/phy/marvell.c32
-rw-r--r--drivers/net/vrf.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c12
-rw-r--r--drivers/nvdimm/bus.c14
-rw-r--r--drivers/nvdimm/pmem.c37
-rw-r--r--drivers/nvdimm/region_devs.c16
-rw-r--r--drivers/nvmem/Kconfig10
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/brcm_nvram.c78
-rw-r--r--drivers/nvmem/core.c95
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c2
-rw-r--r--drivers/nvmem/qfprom.c44
-rw-r--r--drivers/nvmem/snvs_lpgpr.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c4
-rw-r--r--drivers/perf/arm-cci.c12
-rw-r--r--drivers/perf/arm-ccn.c31
-rw-r--r--drivers/perf/arm-cmn.c22
-rw-r--r--drivers/perf/arm_dmc620_pmu.c2
-rw-r--r--drivers/perf/arm_dsu_pmu.c5
-rw-r--r--drivers/perf/arm_pmu_platform.c54
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c36
-rw-r--r--drivers/perf/arm_spe_pmu.c3
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c7
-rw-r--r--drivers/perf/hisilicon/Makefile3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c348
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c301
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c355
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c500
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c79
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h20
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c530
-rw-r--r--drivers/perf/qcom_l2_pmu.c2
-rw-r--r--drivers/perf/qcom_l3_pmu.c4
-rw-r--r--drivers/perf/thunderx2_pmu.c4
-rw-r--r--drivers/perf/xgene_pmu.c4
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/broadcom/Kconfig2
-rw-r--r--drivers/phy/cadence/Kconfig2
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c419
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c475
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c2
-rw-r--r--drivers/phy/hisilicon/phy-hix5hd2-sata.c2
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c4
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c2
-rw-r--r--drivers/phy/marvell/Kconfig12
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c384
-rw-r--r--drivers/phy/microchip/Kconfig13
-rw-r--r--drivers/phy/microchip/Makefile6
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c2513
-rw-r--r--drivers/phy/microchip/sparx5_serdes.h136
-rw-r--r--drivers/phy/microchip/sparx5_serdes_regs.h2695
-rw-r--r--drivers/phy/phy-core.c30
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c528
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h77
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c1
-rw-r--r--drivers/phy/ralink/phy-mt7621-pci.c6
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c1
-rw-r--r--drivers/phy/st/Kconfig1
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c65
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c449
-rw-r--r--drivers/phy/ti/phy-tusb1210.c27
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c2
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c58
-rw-r--r--drivers/pinctrl/core.c14
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c6
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c4
-rw-r--r--drivers/platform/surface/Kconfig69
-rw-r--r--drivers/platform/surface/Makefile3
-rw-r--r--drivers/platform/surface/aggregator/controller.c16
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c626
-rw-r--r--drivers/platform/surface/surface_dtx.c1289
-rw-r--r--drivers/platform/surface/surface_platform_profile.c190
-rw-r--r--drivers/platform/surface/surfacepro3_button.c2
-rw-r--r--drivers/platform/x86/Kconfig26
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/adv_swbutton.c121
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c5
-rw-r--r--drivers/platform/x86/classmate-laptop.c2
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c4
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c2
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c3
-rw-r--r--drivers/platform/x86/dell/dell-wmi-descriptor.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c3
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c3
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c34
-rw-r--r--drivers/platform/x86/dell/dell-wmi.c3
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c203
-rw-r--r--drivers/platform/x86/hp-wmi.c101
-rw-r--r--drivers/platform/x86/intel-vbtn.c3
-rw-r--r--drivers/platform/x86/intel-wmi-sbl-fw-update.c3
-rw-r--r--drivers/platform/x86/intel-wmi-thunderbolt.c3
-rw-r--r--drivers/platform/x86/intel_chtdc_ti_pwrbtn.c2
-rw-r--r--drivers/platform/x86/intel_pmc_core.c493
-rw-r--r--drivers/platform/x86/intel_pmc_core.h53
-rw-r--r--drivers/platform/x86/intel_pmt_class.c46
-rw-r--r--drivers/platform/x86/intel_pmt_class.h1
-rw-r--r--drivers/platform/x86/intel_pmt_telemetry.c20
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c33
-rw-r--r--drivers/platform/x86/lg-laptop.c2
-rw-r--r--drivers/platform/x86/panasonic-laptop.c2
-rw-r--r--drivers/platform/x86/pmc_atom.c28
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c256
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c37
-rw-r--r--drivers/platform/x86/wmi-bmof.c3
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/platform/x86/xo15-ebook.c6
-rw-r--r--drivers/pps/clients/pps-gpio.c108
-rw-r--r--drivers/scsi/libiscsi.c26
-rw-r--r--drivers/scsi/libsas/sas_ata.c9
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c18
-rw-r--r--drivers/sh/intc/core.c49
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c3
-rw-r--r--drivers/soundwire/Makefile2
-rw-r--r--drivers/soundwire/bus.c100
-rw-r--r--drivers/soundwire/bus.h2
-rw-r--r--drivers/soundwire/bus_type.c15
-rw-r--r--drivers/soundwire/cadence_master.c16
-rw-r--r--drivers/soundwire/dmi-quirks.c96
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c15
-rw-r--r--drivers/soundwire/intel.c24
-rw-r--r--drivers/soundwire/intel_init.c9
-rw-r--r--drivers/soundwire/qcom.c652
-rw-r--r--drivers/soundwire/slave.c9
-rw-r--r--drivers/soundwire/stream.c28
-rw-r--r--drivers/uio/Kconfig17
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_dfl.c66
-rw-r--r--drivers/vdpa/mlx5/core/mr.c4
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vhost/vdpa.c6
-rw-r--r--drivers/video/fbdev/core/fbcmap.c8
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/virt/acrn/vm.c2
-rw-r--r--drivers/w1/slaves/w1_ds2780.c2
-rw-r--r--drivers/w1/slaves/w1_ds2781.c2
-rw-r--r--drivers/w1/slaves/w1_ds2805.c15
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c16
-rw-r--r--drivers/w1/slaves/w1_therm.c7
-rw-r--r--drivers/xen/Kconfig31
-rw-r--r--drivers/xen/Makefile3
-rw-r--r--drivers/xen/pcpu.c35
-rw-r--r--drivers/xen/time.c3
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c446
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c475
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c6
-rw-r--r--drivers/xen/xen-pciback/vpci.c7
-rw-r--r--drivers/xen/xen-stub.c90
527 files changed, 25182 insertions(+), 5723 deletions(-)
diff --git a/drivers/accessibility/speakup/i18n.c b/drivers/accessibility/speakup/i18n.c
index ee240d36f947..46bd50f3c3a4 100644
--- a/drivers/accessibility/speakup/i18n.c
+++ b/drivers/accessibility/speakup/i18n.c
@@ -548,12 +548,10 @@ ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length)
if ((index < MSG_FIRST_INDEX) || (index >= MSG_LAST_INDEX))
return -EINVAL;
- newstr = kmalloc(length + 1, GFP_KERNEL);
+ newstr = kmemdup_nul(text, length, GFP_KERNEL);
if (!newstr)
return -ENOMEM;
- memcpy(newstr, text, length);
- newstr[length] = '\0';
if (index >= MSG_FORMATTED_START &&
index <= MSG_FORMATTED_END &&
!fmt_validate(speakup_default_msgs[index], newstr)) {
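
The hunk above replaces an open-coded allocate/copy/terminate sequence with kmemdup_nul(), which allocates length + 1 bytes, copies the source, and NUL-terminates in a single call. A minimal standalone sketch of the equivalence (illustrative helper, not the speakup code):

#include <linux/slab.h>
#include <linux/string.h>

/* Open-coded equivalent of kmemdup_nul(text, length, GFP_KERNEL). */
static char *copy_and_terminate(const char *text, size_t length)
{
	char *newstr = kmalloc(length + 1, GFP_KERNEL);

	if (!newstr)
		return NULL;
	memcpy(newstr, text, length);
	newstr[length] = '\0';	/* kmemdup_nul() appends this NUL itself */
	return newstr;
}
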
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c119736ca56a..63d2c4339689 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1506,6 +1506,12 @@ static void binder_free_transaction(struct binder_transaction *t)
if (target_proc) {
binder_inner_proc_lock(target_proc);
+ target_proc->outstanding_txns--;
+ if (target_proc->outstanding_txns < 0)
+ pr_warn("%s: Unexpected outstanding_txns %d\n",
+ __func__, target_proc->outstanding_txns);
+ if (!target_proc->outstanding_txns && target_proc->is_frozen)
+ wake_up_interruptible_all(&target_proc->freeze_wait);
if (t->buffer)
t->buffer->transaction = NULL;
binder_inner_proc_unlock(target_proc);
@@ -2331,10 +2337,11 @@ static int binder_fixup_parent(struct binder_transaction *t,
* If the @thread parameter is not NULL, the transaction is always queued
* to the waitlist of that specific thread.
*
- * Return: true if the transactions was successfully queued
- * false if the target process or thread is dead
+ * Return: 0 if the transaction was successfully queued
+ * BR_DEAD_REPLY if the target process or thread is dead
+ * BR_FROZEN_REPLY if the target process or thread is frozen
*/
-static bool binder_proc_transaction(struct binder_transaction *t,
+static int binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
@@ -2353,11 +2360,16 @@ static bool binder_proc_transaction(struct binder_transaction *t,
}
binder_inner_proc_lock(proc);
+ if (proc->is_frozen) {
+ proc->sync_recv |= !oneway;
+ proc->async_recv |= oneway;
+ }
- if (proc->is_dead || (thread && thread->is_dead)) {
+ if ((proc->is_frozen && !oneway) || proc->is_dead ||
+ (thread && thread->is_dead)) {
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return false;
+ return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
}
if (!thread && !pending_async)
@@ -2373,10 +2385,11 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+ proc->outstanding_txns++;
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return true;
+ return 0;
}
/**
@@ -3007,19 +3020,25 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_object_type;
}
}
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ if (t->buffer->oneway_spam_suspect)
+ tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
+ else
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
- if (target_thread->is_dead) {
+ if (target_thread->is_dead || target_proc->is_frozen) {
+ return_error = target_thread->is_dead ?
+ BR_DEAD_REPLY : BR_FROZEN_REPLY;
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+ target_proc->outstanding_txns++;
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_free_transaction(in_reply_to);
@@ -3038,7 +3057,9 @@ static void binder_transaction(struct binder_proc *proc,
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
- if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ return_error = binder_proc_transaction(t,
+ target_proc, target_thread);
+ if (return_error) {
binder_inner_proc_lock(proc);
binder_pop_transaction_ilocked(thread, t);
binder_inner_proc_unlock(proc);
@@ -3048,7 +3069,8 @@ static void binder_transaction(struct binder_proc *proc,
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
binder_enqueue_thread_work(thread, tcomplete);
- if (!binder_proc_transaction(t, target_proc, NULL))
+ return_error = binder_proc_transaction(t, target_proc, NULL);
+ if (return_error)
goto err_dead_proc_or_thread;
}
if (target_thread)
@@ -3065,7 +3087,6 @@ static void binder_transaction(struct binder_proc *proc,
return;
err_dead_proc_or_thread:
- return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
@@ -3696,7 +3717,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
binder_inner_proc_lock(proc);
list_del_init(&thread->waiting_thread_node);
if (signal_pending(current)) {
- ret = -ERESTARTSYS;
+ ret = -EINTR;
break;
}
}
@@ -3875,9 +3896,14 @@ retry:
binder_stat_br(proc, thread, cmd);
} break;
- case BINDER_WORK_TRANSACTION_COMPLETE: {
+ case BINDER_WORK_TRANSACTION_COMPLETE:
+ case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
+ if (proc->oneway_spam_detection_enabled &&
+ w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
+ cmd = BR_ONEWAY_SPAM_SUSPECT;
+ else
+ cmd = BR_TRANSACTION_COMPLETE;
binder_inner_proc_unlock(proc);
- cmd = BR_TRANSACTION_COMPLETE;
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
if (put_user(cmd, (uint32_t __user *)ptr))
@@ -4298,6 +4324,9 @@ static void binder_free_proc(struct binder_proc *proc)
BUG_ON(!list_empty(&proc->todo));
BUG_ON(!list_empty(&proc->delivered_death));
+ if (proc->outstanding_txns)
+ pr_warn("%s: Unexpected outstanding_txns %d\n",
+ __func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
kfree(proc->context->name);
@@ -4359,6 +4388,7 @@ static int binder_thread_release(struct binder_proc *proc,
(t->to_thread == thread) ? "in" : "out");
if (t->to_thread == thread) {
+ thread->proc->outstanding_txns--;
t->to_proc = NULL;
t->to_thread = NULL;
if (t->buffer) {
@@ -4609,6 +4639,76 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
return 0;
}
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
+ struct binder_proc *target_proc)
+{
+ int ret = 0;
+
+ if (!info->enable) {
+ binder_inner_proc_lock(target_proc);
+ target_proc->sync_recv = false;
+ target_proc->async_recv = false;
+ target_proc->is_frozen = false;
+ binder_inner_proc_unlock(target_proc);
+ return 0;
+ }
+
+ /*
+ * Freezing the target. Prevent new transactions by
+ * setting frozen state. If timeout specified, wait
+ * for transactions to drain.
+ */
+ binder_inner_proc_lock(target_proc);
+ target_proc->sync_recv = false;
+ target_proc->async_recv = false;
+ target_proc->is_frozen = true;
+ binder_inner_proc_unlock(target_proc);
+
+ if (info->timeout_ms > 0)
+ ret = wait_event_interruptible_timeout(
+ target_proc->freeze_wait,
+ (!target_proc->outstanding_txns),
+ msecs_to_jiffies(info->timeout_ms));
+
+ if (!ret && target_proc->outstanding_txns)
+ ret = -EAGAIN;
+
+ if (ret < 0) {
+ binder_inner_proc_lock(target_proc);
+ target_proc->is_frozen = false;
+ binder_inner_proc_unlock(target_proc);
+ }
+
+ return ret;
+}
+
+static int binder_ioctl_get_freezer_info(
+ struct binder_frozen_status_info *info)
+{
+ struct binder_proc *target_proc;
+ bool found = false;
+
+ info->sync_recv = 0;
+ info->async_recv = 0;
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid == info->pid) {
+ found = true;
+ binder_inner_proc_lock(target_proc);
+ info->sync_recv |= target_proc->sync_recv;
+ info->async_recv |= target_proc->async_recv;
+ binder_inner_proc_unlock(target_proc);
+ }
+ }
+ mutex_unlock(&binder_procs_lock);
+
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -4727,6 +4827,96 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_FREEZE: {
+ struct binder_freeze_info info;
+ struct binder_proc **target_procs = NULL, *target_proc;
+ int target_procs_count = 0, i = 0;
+
+ ret = 0;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid == info.pid)
+ target_procs_count++;
+ }
+
+ if (target_procs_count == 0) {
+ mutex_unlock(&binder_procs_lock);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ target_procs = kcalloc(target_procs_count,
+ sizeof(struct binder_proc *),
+ GFP_KERNEL);
+
+ if (!target_procs) {
+ mutex_unlock(&binder_procs_lock);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid != info.pid)
+ continue;
+
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+
+ target_procs[i++] = target_proc;
+ }
+ mutex_unlock(&binder_procs_lock);
+
+ for (i = 0; i < target_procs_count; i++) {
+ if (ret >= 0)
+ ret = binder_ioctl_freeze(&info,
+ target_procs[i]);
+
+ binder_proc_dec_tmpref(target_procs[i]);
+ }
+
+ kfree(target_procs);
+
+ if (ret < 0)
+ goto err;
+ break;
+ }
+ case BINDER_GET_FROZEN_INFO: {
+ struct binder_frozen_status_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_freezer_info(&info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
+ uint32_t enable;
+
+ if (copy_from_user(&enable, ubuf, sizeof(enable))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ binder_inner_proc_lock(proc);
+ proc->oneway_spam_detection_enabled = (bool)enable;
+ binder_inner_proc_unlock(proc);
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -4736,7 +4926,7 @@ err:
if (thread)
thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
- if (ret && ret != -ERESTARTSYS)
+ if (ret && ret != -EINTR)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
@@ -4823,6 +5013,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->freeze_wait);
proc->default_priority = task_nice(current);
/* binderfs stashes devices in i_private */
if (is_binderfs_device(nodp)) {
@@ -5035,6 +5226,9 @@ static void binder_deferred_release(struct binder_proc *proc)
proc->tmp_ref++;
proc->is_dead = true;
+ proc->is_frozen = false;
+ proc->sync_recv = false;
+ proc->async_recv = false;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
@@ -5385,7 +5579,9 @@ static const char * const binder_return_strings[] = {
"BR_FINISHED",
"BR_DEAD_BINDER",
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- "BR_FAILED_REPLY"
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
};
static const char * const binder_command_strings[] = {
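
The freeze support above hinges on wait_event_interruptible_timeout(), which returns 0 on timeout, a positive remaining-jiffies count on success, and -ERESTARTSYS if interrupted; that is why binder_ioctl_freeze() maps "!ret && outstanding_txns" to -EAGAIN and unfreezes on any negative return. From userspace the new ioctls would be driven roughly as below — a hedged sketch assuming the UAPI definitions from <linux/android/binder.h> (binder_freeze_info with pid/enable/timeout_ms and binder_frozen_status_info with pid/sync_recv/async_recv, matching the fields the kernel code reads) and an already-open binder fd:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Freeze (or thaw) every binder_proc belonging to pid, giving pending
 * sync transactions 100 ms to drain; -EAGAIN means they did not. */
static int set_frozen(int binder_fd, __u32 pid, __u32 enable)
{
	struct binder_freeze_info info = {
		.pid = pid,
		.enable = enable,
		.timeout_ms = 100,
	};

	return ioctl(binder_fd, BINDER_FREEZE, &info);
}

/* Ask whether the frozen process has received transactions since. */
static int query_frozen(int binder_fd, __u32 pid)
{
	struct binder_frozen_status_info info = { .pid = pid };

	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) < 0)
		return -1;
	printf("sync_recv=%u async_recv=%u\n", info.sync_recv, info.async_recv);
	return 0;
}
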
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 7caf74ad2405..340515f54498 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -338,7 +338,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
return vma;
}
-static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
/*
* Find the amount and size of buffers allocated by the current caller;
@@ -366,13 +366,19 @@ static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
/*
* Warn if this pid has more than 50 transactions, or more than 50% of
- * async space (which is 25% of total buffer size).
+ * async space (which is 25% of total buffer size). Oneway spam is only
+ * detected when the threshold is exceeded.
*/
if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
alloc->pid, pid, num_buffers, total_alloc_size);
+ if (!alloc->oneway_spam_detected) {
+ alloc->oneway_spam_detected = true;
+ return true;
+ }
}
+ return false;
}
static struct binder_buffer *binder_alloc_new_buf_locked(
@@ -525,6 +531,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = pid;
+ buffer->oneway_spam_suspect = false;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
@@ -536,7 +543,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
* of async space left (which is less than 10% of total
* buffer size).
*/
- debug_low_async_space_locked(alloc, pid);
+ buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+ } else {
+ alloc->oneway_spam_detected = false;
}
}
return buffer;
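
The oneway_spam_detected flag makes reporting edge-triggered: debug_low_async_space_locked() returns true only for the first allocation that crosses the threshold (more than 50 outstanding buffers from one pid, or async allocations totalling more than a quarter of the buffer size), and a later synchronous allocation rearms the latch once the pool has presumably recovered. The latch in isolation (names are illustrative, not the driver's API):

#include <stdbool.h>

struct spam_latch {
	bool detected;	/* mirrors alloc->oneway_spam_detected */
};

/* Called for async allocations made while async space runs low;
 * returns true only once per spam episode. */
static bool note_async_pressure(struct spam_latch *l, bool over_threshold)
{
	if (over_threshold && !l->detected) {
		l->detected = true;	/* flag this buffer as suspect */
		return true;
	}
	return false;
}

/* Called for sync allocations: pool is healthy again, rearm. */
static void note_sync_alloc(struct spam_latch *l)
{
	l->detected = false;
}
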
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 6e8e001381af..7dea57a84c79 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -26,6 +26,8 @@ struct binder_transaction;
* @clear_on_free: %true if buffer must be zeroed after use
* @allow_user_free: %true if user is allowed to free buffer
* @async_transaction: %true if buffer is in use for an async txn
+ * @oneway_spam_suspect: %true if total async allocate size just exceed
+ * spamming detect threshold
* @debug_id: unique ID for debugging
* @transaction: pointer to associated struct binder_transaction
* @target_node: struct binder_node associated with this buffer
@@ -45,7 +47,8 @@ struct binder_buffer {
unsigned clear_on_free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
- unsigned debug_id:28;
+ unsigned oneway_spam_suspect:1;
+ unsigned debug_id:27;
struct binder_transaction *transaction;
@@ -87,6 +90,8 @@ struct binder_lru_page {
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @oneway_spam_detected: %true if oneway spam detection fired, clear that
+ * flag once the async buffer has returned to a healthy state
*
* Bookkeeping structure for per-proc address space management for binder
* buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -107,6 +112,7 @@ struct binder_alloc {
uint32_t buffer_free;
int pid;
size_t pages_high;
+ bool oneway_spam_detected;
};
#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 6cd79011e35d..810c0b84d3f8 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -155,7 +155,7 @@ enum binder_stat_types {
};
struct binder_stats {
- atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
@@ -174,6 +174,7 @@ struct binder_work {
enum binder_work_type {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
@@ -367,9 +368,22 @@ struct binder_ref {
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
* (protected by binder_deferred_lock)
+ * @outstanding_txns: number of transactions to be transmitted before
+ * processes in freeze_wait are woken up
+ * (protected by @inner_lock)
* @is_dead: process is dead and awaiting free
* when outstanding transactions are cleaned up
* (protected by @inner_lock)
+ * @is_frozen: process is frozen and unable to service
+ * binder transactions
+ * (protected by @inner_lock)
+ * @sync_recv: process received sync transactions since last frozen
+ * (protected by @inner_lock)
+ * @async_recv: process received async transactions since last frozen
+ * (protected by @inner_lock)
+ * @freeze_wait: waitqueue of processes waiting for all outstanding
+ * transactions to be processed
+ * (protected by @inner_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
* @stats: per-process binder statistics
@@ -396,6 +410,8 @@ struct binder_ref {
* @outer_lock: no nesting under innor or node lock
* Lock order: 1) outer, 2) node, 3) inner
* @binderfs_entry: process-specific binderfs log file
+ * @oneway_spam_detection_enabled: process enabled oneway spam detection
+ * or not
*
* Bookkeeping structure for binder processes
*/
@@ -410,7 +426,12 @@ struct binder_proc {
struct task_struct *tsk;
struct hlist_node deferred_work_node;
int deferred_work;
+ int outstanding_txns;
bool is_dead;
+ bool is_frozen;
+ bool sync_recv;
+ bool async_recv;
+ wait_queue_head_t freeze_wait;
struct list_head todo;
struct binder_stats stats;
@@ -426,6 +447,7 @@ struct binder_proc {
spinlock_t inner_lock;
spinlock_t outer_lock;
struct dentry *binderfs_entry;
+ bool oneway_spam_detection_enabled;
};
/**
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b574cce98dc3..422753d52244 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2054,7 +2054,7 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
}
submitted++;
ATM_SKB(skb)->vcc = vcc;
- tasklet_disable(&ENI_DEV(vcc->dev)->task);
+ tasklet_disable_in_atomic(&ENI_DEV(vcc->dev)->task);
res = do_tx(skb);
tasklet_enable(&ENI_DEV(vcc->dev)->task);
if (res == enq_ok) return 0;
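
In this kernel series tasklet_disable() becomes a sleeping wait for a still-running tasklet, so callers that may run in atomic context — as the ATM transmit path here can — must switch to tasklet_disable_in_atomic(), which busy-waits instead. The pattern, as a generic sketch:

#include <linux/interrupt.h>

/* Safe in atomic context: spin-waits for a running tasklet rather
 * than sleeping, as tasklet_disable() now may. */
static void pause_tasklet_atomically(struct tasklet_struct *t)
{
	tasklet_disable_in_atomic(t);
	/* ... touch state shared with the tasklet ... */
	tasklet_enable(t);
}
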
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index b0c71d3a81a0..bda5c815e441 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -313,6 +313,7 @@ struct xen_blkif {
struct work_struct free_work;
unsigned int nr_ring_pages;
+ bool multi_ref;
/* All rings for this device. */
struct xen_blkif_ring *rings;
unsigned int nr_rings;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2aaf690352c..125b22205d38 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
for (i = 0; i < nr_grefs; i++) {
char ring_ref_name[RINGREF_NAME_LEN];
- snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+ if (blkif->multi_ref)
+ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+ else {
+ WARN_ON(i != 0);
+ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
+ }
+
err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
"%u", &ring_ref[i]);
if (err != 1) {
- if (nr_grefs == 1)
- break;
-
err = -EINVAL;
xenbus_dev_fatal(dev, err, "reading %s/%s",
dir, ring_ref_name);
@@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
}
}
- if (err != 1) {
- WARN_ON(nr_grefs != 1);
-
- err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
- &ring_ref[0]);
- if (err != 1) {
- err = -EINVAL;
- xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
- return err;
- }
- }
-
err = -ENOMEM;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
blkif->nr_rings, blkif->blk_protocol, protocol,
blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
- ring_page_order = xenbus_read_unsigned(dev->otherend,
- "ring-page-order", 0);
-
- if (ring_page_order > xen_blkif_max_ring_order) {
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+ &ring_page_order);
+ if (err != 1) {
+ blkif->nr_ring_pages = 1;
+ blkif->multi_ref = false;
+ } else if (ring_page_order <= xen_blkif_max_ring_order) {
+ blkif->nr_ring_pages = 1 << ring_page_order;
+ blkif->multi_ref = true;
+ } else {
err = -EINVAL;
xenbus_dev_fatal(dev, err,
"requested ring page order %d exceed max:%d",
@@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
return err;
}
- blkif->nr_ring_pages = 1 << ring_page_order;
-
if (blkif->nr_rings == 1)
return read_per_ring_refs(&blkif->rings[0], dev->otherend);
else {
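
The connect_ring() rework makes the multi-page negotiation explicit: if the frontend never publishes ring-page-order it is a legacy single-page guest and the reference comes from the unsuffixed "ring-ref" node; a valid order selects "ring-ref%u" nodes and 1 << order pages; an oversized order is rejected. The three outcomes as a condensed standalone model (not the driver's code):

#include <errno.h>
#include <stdbool.h>

struct ring_cfg {
	unsigned int nr_ring_pages;
	bool multi_ref;	/* true: read "ring-ref%u"; false: "ring-ref" */
};

/* order_present mirrors xenbus_scanf() finding "ring-page-order". */
static int negotiate_ring(bool order_present, unsigned int order,
			  unsigned int max_order, struct ring_cfg *cfg)
{
	if (!order_present) {			/* legacy frontend */
		cfg->nr_ring_pages = 1;
		cfg->multi_ref = false;
	} else if (order <= max_order) {
		cfg->nr_ring_pages = 1u << order;
		cfg->multi_ref = true;
	} else {
		return -EINVAL;			/* order exceeds backend max */
	}
	return 0;
}
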
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e1c6798889f4..06c4efd97780 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2397,7 +2397,7 @@ static void blkfront_connect(struct blkfront_info *info)
}
/*
- * physcial-sector-size is a newer field, so old backends may not
+ * physical-sector-size is a newer field, so old backends may not
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index c2546bf229fb..8100cf51cd09 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -389,7 +389,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
- struct image_info *image_info;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
const char *fw_name;
void *buf;
@@ -417,9 +416,9 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
}
}
- /* If device is in pass through, do reset to ready state transition */
- if (mhi_cntrl->ee == MHI_EE_PTHRU)
- goto fw_load_ee_pthru;
+ /* wait for ready on pass through or any other execution environment */
+ if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL)
+ goto fw_load_ready_state;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
@@ -461,9 +460,10 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
goto error_fw_load;
}
- if (mhi_cntrl->ee == MHI_EE_EDL) {
+ /* Wait for ready since EDL image was loaded */
+ if (fw_name == mhi_cntrl->edl_image) {
release_firmware(firmware);
- return;
+ goto fw_load_ready_state;
}
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -488,47 +488,45 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
release_firmware(firmware);
-fw_load_ee_pthru:
+fw_load_ready_state:
/* Transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
-
- if (!mhi_cntrl->fbc_download)
- return;
-
if (ret) {
dev_err(dev, "MHI did not enter READY state\n");
goto error_ready_state;
}
- /* Wait for the SBL event */
- ret = wait_event_timeout(mhi_cntrl->state_event,
- mhi_cntrl->ee == MHI_EE_SBL ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
+ return;
- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
- dev_err(dev, "MHI did not enter SBL\n");
- goto error_ready_state;
+error_ready_state:
+ if (mhi_cntrl->fbc_download) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
}
- /* Start full firmware image download */
- image_info = mhi_cntrl->fbc_image;
+error_fw_load:
+ mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+ wake_up_all(&mhi_cntrl->state_event);
+}
+
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+{
+ struct image_info *image_info = mhi_cntrl->fbc_image;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ if (!image_info)
+ return -EIO;
+
ret = mhi_fw_load_bhie(mhi_cntrl,
/* Vector table is the last entry */
&image_info->mhi_buf[image_info->entries - 1]);
if (ret) {
- dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
- ret);
- goto error_fw_load;
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
+ mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+ wake_up_all(&mhi_cntrl->state_event);
}
- return;
-
-error_ready_state:
- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
- mhi_cntrl->fbc_image = NULL;
-
-error_fw_load:
- mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
- wake_up_all(&mhi_cntrl->state_event);
+ return ret;
}
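
With this split, mhi_fw_load_handler() now ends at the RESET->READY transition for every execution environment, and the full (AMSS) image download moves into the newly exported mhi_download_amss_image(). The expectation — an assumption here, since the call site sits outside this hunk — is that the core's state-transition worker invokes it once the device reports SBL. A hedged sketch of such a caller:

/* Assumed call site in the SBL transition path; -EIO from
 * mhi_download_amss_image() means no fbc_image was allocated. */
static void sbl_transition(struct mhi_controller *mhi_cntrl)
{
	if (!mhi_cntrl->fbc_download)	/* controller handles AMSS itself */
		return;

	if (mhi_download_amss_image(mhi_cntrl))
		dev_err(&mhi_cntrl->mhi_dev->dev, "AMSS download failed\n");
}
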
diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c
index 7d43138ce66d..858d7516410b 100644
--- a/drivers/bus/mhi/core/debugfs.c
+++ b/drivers/bus/mhi/core/debugfs.c
@@ -377,7 +377,7 @@ static struct dentry *mhi_debugfs_root;
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
mhi_cntrl->debugfs_dentry =
- debugfs_create_dir(dev_name(mhi_cntrl->cntrl_dev),
+ debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev),
mhi_debugfs_root);
debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry,
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index be4eebb0971b..c81b377fca8f 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -22,13 +22,14 @@
static DEFINE_IDA(mhi_controller_ida);
const char * const mhi_ee_str[MHI_EE_MAX] = {
- [MHI_EE_PBL] = "PBL",
- [MHI_EE_SBL] = "SBL",
- [MHI_EE_AMSS] = "AMSS",
- [MHI_EE_RDDM] = "RDDM",
- [MHI_EE_WFW] = "WFW",
- [MHI_EE_PTHRU] = "PASS THRU",
- [MHI_EE_EDL] = "EDL",
+ [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
+ [MHI_EE_SBL] = "SECONDARY BOOTLOADER",
+ [MHI_EE_AMSS] = "MISSION MODE",
+ [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
+ [MHI_EE_WFW] = "WLAN FIRMWARE",
+ [MHI_EE_PTHRU] = "PASS THROUGH",
+ [MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
+ [MHI_EE_FP] = "FLASH PROGRAMMER",
[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};
@@ -37,8 +38,9 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
[DEV_ST_TRANSITION_PBL] = "PBL",
[DEV_ST_TRANSITION_READY] = "READY",
[DEV_ST_TRANSITION_SBL] = "SBL",
- [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
- [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+ [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+ [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
@@ -49,24 +51,30 @@ const char * const mhi_state_str[MHI_STATE_MAX] = {
[MHI_STATE_M1] = "M1",
[MHI_STATE_M2] = "M2",
[MHI_STATE_M3] = "M3",
- [MHI_STATE_M3_FAST] = "M3_FAST",
+ [MHI_STATE_M3_FAST] = "M3 FAST",
[MHI_STATE_BHI] = "BHI",
- [MHI_STATE_SYS_ERR] = "SYS_ERR",
+ [MHI_STATE_SYS_ERR] = "SYS ERROR",
+};
+
+const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
+ [MHI_CH_STATE_TYPE_RESET] = "RESET",
+ [MHI_CH_STATE_TYPE_STOP] = "STOP",
+ [MHI_CH_STATE_TYPE_START] = "START",
};
static const char * const mhi_pm_state_str[] = {
[MHI_PM_STATE_DISABLE] = "DISABLE",
- [MHI_PM_STATE_POR] = "POR",
+ [MHI_PM_STATE_POR] = "POWER ON RESET",
[MHI_PM_STATE_M0] = "M0",
[MHI_PM_STATE_M2] = "M2",
[MHI_PM_STATE_M3_ENTER] = "M?->M3",
[MHI_PM_STATE_M3] = "M3",
[MHI_PM_STATE_M3_EXIT] = "M3->M0",
- [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
- [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
- [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
+ [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
+ [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
- [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};
const char *to_mhi_pm_state_str(enum mhi_pm_state state)
@@ -508,8 +516,6 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
/* Setup wake db */
mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
- mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
- mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
mhi_cntrl->wake_set = false;
/* Setup channel db address for each channel in tre_ring */
@@ -552,6 +558,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_ring *buf_ring;
struct mhi_ring *tre_ring;
struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -565,7 +572,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
vfree(buf_ring->base);
buf_ring->base = tre_ring->base = NULL;
+ tre_ring->ctxt_wp = NULL;
chan_ctxt->rbase = 0;
+ chan_ctxt->rlen = 0;
+ chan_ctxt->rp = 0;
+ chan_ctxt->wp = 0;
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ /* Update to all cores */
+ smp_wmb();
}
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -863,12 +882,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
u32 soc_info;
int ret, i;
- if (!mhi_cntrl)
- return -EINVAL;
-
- if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
+ !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
!mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
- !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
+ !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
return -EINVAL;
ret = parse_config(mhi_cntrl, config);
@@ -890,8 +907,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
- mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
- ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
if (!mhi_cntrl->hiprio_wq) {
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
ret = -ENOMEM;
@@ -1083,8 +1099,6 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
}
- mhi_cntrl->pre_init = true;
-
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
@@ -1115,7 +1129,6 @@ void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
}
mhi_deinit_dev_ctxt(mhi_cntrl);
- mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
@@ -1296,7 +1309,8 @@ static int mhi_driver_remove(struct device *dev)
mutex_lock(&mhi_chan->mutex);
- if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
+ if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
+ ch_state[dir] == MHI_CH_STATE_STOP) &&
!mhi_chan->offload_ch)
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
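
The mhi_deinit_chan_ctxt() hunk above now scrubs the whole channel context (rbase, rlen, rp, wp), moves the chcfg state field back to DISABLED with a read-modify-write, and publishes the update with smp_wmb(), since the context memory is shared with the device and other CPUs. The masked-update idiom in isolation, with illustrative mask/shift values standing in for the driver's CHAN_CTX_CHSTATE_* constants:

#include <linux/types.h>
#include <asm/barrier.h>

#define CH_STATE_MASK	0xFFu
#define CH_STATE_SHIFT	0

static void set_ch_state(u32 *chcfg, u32 state)
{
	u32 tmp = *chcfg;

	tmp &= ~CH_STATE_MASK;			/* clear old state bits */
	tmp |= state << CH_STATE_SHIFT;		/* insert the new state */
	*chcfg = tmp;
	smp_wmb();	/* order the update before any later readers */
}
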
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 6f80ec30c0cd..5b9ea66b92dc 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -369,6 +369,18 @@ enum mhi_ch_state {
MHI_CH_STATE_ERROR = 0x5,
};
+enum mhi_ch_state_type {
+ MHI_CH_STATE_TYPE_RESET,
+ MHI_CH_STATE_TYPE_STOP,
+ MHI_CH_STATE_TYPE_START,
+ MHI_CH_STATE_TYPE_MAX,
+};
+
+extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
+#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
+ "INVALID_STATE" : \
+ mhi_ch_state_type_str[(state)])
+
#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
mode != MHI_DB_BRST_ENABLE)
@@ -379,13 +391,15 @@ extern const char * const mhi_ee_str[MHI_EE_MAX];
#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
ee == MHI_EE_EDL)
-#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
+ ee == MHI_EE_FP)
enum dev_st_transition {
DEV_ST_TRANSITION_PBL,
DEV_ST_TRANSITION_READY,
DEV_ST_TRANSITION_SBL,
DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_FP,
DEV_ST_TRANSITION_SYS_ERR,
DEV_ST_TRANSITION_DISABLE,
DEV_ST_TRANSITION_MAX,
@@ -619,6 +633,7 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd);
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
{
return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
@@ -643,6 +658,9 @@ int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 mask,
u32 shift, u32 *out);
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 shift, u32 val, u32 delayus);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val);
void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
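mhi_poll_reg_field(), declared above and implemented in main.c below, gives callers a bounded poll on a register field in place of open-coded wait loops. A minimal usage sketch (example_wait_reset_clear is hypothetical; it mirrors the pm.c caller later in this patch):

static int example_wait_reset_clear(struct mhi_controller *mhi_cntrl)
{
	/* re-read MHICTRL.RESET every 25 ms until it is 0 or timeout_ms elapses */
	return mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				  MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT,
				  0, 25000);
}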
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 4e0131b94056..22acde118bc3 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -4,6 +4,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
@@ -37,6 +38,28 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
return 0;
}
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 shift, u32 val, u32 delayus)
+{
+ int ret;
+ u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift,
+ &out);
+ if (ret)
+ return ret;
+
+ if (out == val)
+ return 0;
+
+ fsleep(delayus);
+ }
+
+ return -ETIMEDOUT;
+}
+
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val)
{
@@ -242,10 +265,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
smp_wmb();
}
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+}
+
int mhi_destroy_device(struct device *dev, void *data)
{
+ struct mhi_chan *ul_chan, *dl_chan;
struct mhi_device *mhi_dev;
struct mhi_controller *mhi_cntrl;
+ enum mhi_ee_type ee = MHI_EE_MAX;
if (dev->bus != &mhi_bus_type)
return 0;
@@ -257,6 +287,17 @@ int mhi_destroy_device(struct device *dev, void *data)
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
return 0;
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+	/*
+	 * If an execution environment is specified, remove only those devices
+	 * whose channels started in it (per the channel ee_mask), as we move
+	 * on to a different execution environment.
+	 */
+ if (data)
+ ee = *(enum mhi_ee_type *)data;
+
/*
* For the suspend and resume case, this function will get called
* without mhi_unregister_controller(). Hence, we need to drop the
@@ -264,11 +305,19 @@ int mhi_destroy_device(struct device *dev, void *data)
* be sure that there will be no instances of mhi_dev left after
* this.
*/
- if (mhi_dev->ul_chan)
- put_device(&mhi_dev->ul_chan->mhi_dev->dev);
+ if (ul_chan) {
+ if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
+ return 0;
- if (mhi_dev->dl_chan)
- put_device(&mhi_dev->dl_chan->mhi_dev->dev);
+ put_device(&ul_chan->mhi_dev->dev);
+ }
+
+ if (dl_chan) {
+ if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
+ return 0;
+
+ put_device(&dl_chan->mhi_dev->dev);
+ }
dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
mhi_dev->name);
@@ -383,7 +432,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
struct mhi_ring *ev_ring = &mhi_event->ring;
- void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ dma_addr_t ptr = er_ctxt->rp;
+ void *dev_rp;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return IRQ_HANDLED;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
/* Only proceed if event ring has pending events */
if (ev_ring->rp == dev_rp)
@@ -407,9 +465,9 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
struct mhi_controller *mhi_cntrl = priv;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- enum mhi_state state = MHI_STATE_MAX;
+ enum mhi_state state;
enum mhi_pm_state pm_state = 0;
- enum mhi_ee_type ee = 0;
+ enum mhi_ee_type ee;
write_lock_irq(&mhi_cntrl->pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -418,11 +476,11 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
}
state = mhi_get_mhi_state(mhi_cntrl);
- ee = mhi_cntrl->ee;
- mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
- dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
- TO_MHI_STATE_STR(state));
+ ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
@@ -431,27 +489,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
}
write_unlock_irq(&mhi_cntrl->pm_lock);
- /* If device supports RDDM don't bother processing SYS error */
- if (mhi_cntrl->rddm_image) {
- /* host may be performing a device power down already */
- if (!mhi_is_active(mhi_cntrl))
- goto exit_intvec;
+ if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
+ goto exit_intvec;
- if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
+ switch (ee) {
+ case MHI_EE_RDDM:
+ /* proceed if power down is not already in progress */
+ if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ mhi_cntrl->ee = ee;
wake_up_all(&mhi_cntrl->state_event);
}
- goto exit_intvec;
- }
-
- if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+ break;
+ case MHI_EE_PBL:
+ case MHI_EE_EDL:
+ case MHI_EE_PTHRU:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+ mhi_cntrl->ee = ee;
wake_up_all(&mhi_cntrl->state_event);
-
- /* For fatal errors, we let controller decide next step */
- if (MHI_IN_PBL(ee))
- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
- else
- mhi_pm_sys_err_handler(mhi_cntrl);
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ default:
+ wake_up_all(&mhi_cntrl->state_event);
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
}
exit_intvec:
@@ -536,6 +597,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info;
u16 xfer_len;
+ if (!is_valid_ring_ptr(tre_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the tre ring\n");
+ break;
+ }
/* Get the TRB this event points to */
ev_tre = mhi_to_virtual(tre_ring, ptr);
@@ -570,8 +636,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
/* notify client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
- if (mhi_chan->dir == DMA_TO_DEVICE)
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
atomic_dec(&mhi_cntrl->pending_pkts);
+ /* Release the reference got from mhi_queue() */
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
/*
* Recycle the buffer if buffer is pre-allocated,
@@ -595,15 +664,15 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
case MHI_EV_CC_OOB:
case MHI_EV_CC_DB_MODE:
{
- unsigned long flags;
+ unsigned long pm_lock_flags;
mhi_chan->db_cfg.db_mode = 1;
- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+ read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
if (tre_ring->wp != tre_ring->rp &&
MHI_DB_ACCESS_VALID(mhi_cntrl)) {
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
}
- read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
break;
}
case MHI_EV_CC_BAD_TRE:
@@ -695,6 +764,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan;
u32 chan;
+ if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the cmd ring\n");
+ return;
+ }
+
cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
@@ -719,6 +794,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 chan;
int count = 0;
+ dma_addr_t ptr = er_ctxt->rp;
/*
* This is a quick check to avoid unnecessary event processing
@@ -728,7 +804,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
return -EIO;
- dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
local_rp = ev_ring->rp;
while (dev_rp != local_rp) {
@@ -771,14 +853,14 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
break;
case MHI_STATE_SYS_ERR:
{
- enum mhi_pm_state new_state;
+ enum mhi_pm_state pm_state;
dev_dbg(dev, "System error detected\n");
write_lock_irq(&mhi_cntrl->pm_lock);
- new_state = mhi_tryset_pm_state(mhi_cntrl,
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
- if (new_state == MHI_PM_SYS_ERR_DETECT)
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
mhi_pm_sys_err_handler(mhi_cntrl);
break;
}
@@ -807,6 +889,9 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
case MHI_EE_AMSS:
st = DEV_ST_TRANSITION_MISSION_MODE;
break;
+ case MHI_EE_FP:
+ st = DEV_ST_TRANSITION_FP;
+ break;
case MHI_EE_RDDM:
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -834,6 +919,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
*/
if (chan < mhi_cntrl->max_chan) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ if (!mhi_chan->configured)
+ break;
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
event_quota--;
}
@@ -845,7 +932,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
- dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ ptr = er_ctxt->rp;
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
count++;
}
@@ -868,11 +963,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
int count = 0;
u32 chan;
struct mhi_chan *mhi_chan;
+ dma_addr_t ptr = er_ctxt->rp;
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
return -EIO;
- dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
local_rp = ev_ring->rp;
while (dev_rp != local_rp && event_quota > 0) {
@@ -886,7 +988,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
* Only process the event ring elements whose channel
* ID is within the maximum supported range.
*/
- if (chan < mhi_cntrl->max_chan) {
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
@@ -900,7 +1003,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
- dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ ptr = er_ctxt->rp;
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
count++;
}
read_lock_bh(&mhi_cntrl->pm_lock);
@@ -996,7 +1107,7 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
if (unlikely(ret)) {
- ret = -ENOMEM;
+ ret = -EAGAIN;
goto exit_unlock;
}
@@ -1004,9 +1115,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
if (unlikely(ret))
goto exit_unlock;
- /* trigger M3 exit if necessary */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
+	/* The packet is queued; take a usage ref to exit M3 if necessary.
+	 * For a host->device buffer, the balancing put is done on buffer
+	 * completion; for a device->host buffer, after ringing the DB.
+	 */
+ mhi_cntrl->runtime_get(mhi_cntrl);
/* Assert dev_wake (to exit/prevent M1/M2)*/
mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1014,12 +1127,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
- if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- ret = -EIO;
- goto exit_unlock;
- }
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ if (dir == DMA_FROM_DEVICE)
+ mhi_cntrl->runtime_put(mhi_cntrl);
exit_unlock:
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
@@ -1162,6 +1274,11 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
break;
+ case MHI_CMD_STOP_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
+ break;
case MHI_CMD_START_CHAN:
cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
@@ -1183,56 +1300,125 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
return 0;
}
-static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
+static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_ch_state_type to_state)
{
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ enum mhi_cmd_type cmd = MHI_CMD_NOP;
int ret;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
-
- dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
- /* no more processing events for this channel */
- mutex_lock(&mhi_chan->mutex);
- write_lock_irq(&mhi_chan->lock);
- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
- mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+ dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
+ TO_CH_STATE_TYPE_STR(to_state));
+
+ switch (to_state) {
+ case MHI_CH_STATE_TYPE_RESET:
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+ mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+ mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+ write_unlock_irq(&mhi_chan->lock);
+ return -EINVAL;
+ }
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
write_unlock_irq(&mhi_chan->lock);
- mutex_unlock(&mhi_chan->mutex);
- return;
+
+ cmd = MHI_CMD_RESET_CHAN;
+ break;
+ case MHI_CH_STATE_TYPE_STOP:
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ return -EINVAL;
+
+ cmd = MHI_CMD_STOP_CHAN;
+ break;
+ case MHI_CH_STATE_TYPE_START:
+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+ mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
+ return -EINVAL;
+
+ cmd = MHI_CMD_START_CHAN;
+ break;
+ default:
+ dev_err(dev, "%d: Channel state update to %s not allowed\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ return -EINVAL;
}
- mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
- write_unlock_irq(&mhi_chan->lock);
+ /* bring host and device out of suspended states */
+ ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+ if (ret)
+ return ret;
+ mhi_cntrl->runtime_get(mhi_cntrl);
reinit_completion(&mhi_chan->completion);
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- goto error_invalid_state;
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
+ if (ret) {
+ dev_err(dev, "%d: Failed to send %s channel command\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ goto exit_channel_update;
}
- mhi_cntrl->wake_toggle(mhi_cntrl);
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+ dev_err(dev,
+ "%d: Failed to receive %s channel command completion\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ ret = -EIO;
+ goto exit_channel_update;
+ }
- mhi_cntrl->runtime_get(mhi_cntrl);
+ ret = 0;
+
+ if (to_state != MHI_CH_STATE_TYPE_RESET) {
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
+ MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ dev_dbg(dev, "%d: Channel state change to %s successful\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+
+exit_channel_update:
mhi_cntrl->runtime_put(mhi_cntrl);
- ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+ mhi_device_put(mhi_cntrl->mhi_dev);
+
+ return ret;
+}
+
+static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+ goto exit_unprepare_channel;
+ }
+
+ /* no more processing events for this channel */
+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+ MHI_CH_STATE_TYPE_RESET);
if (ret)
- goto error_invalid_state;
+ dev_err(dev, "%d: Failed to reset channel, still resetting\n",
+ mhi_chan->chan);
- /* even if it fails we will still reset */
- ret = wait_for_completion_timeout(&mhi_chan->completion,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
- if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
- dev_err(dev,
- "Failed to receive cmd completion, still resetting\n");
+exit_unprepare_channel:
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
-error_invalid_state:
if (!mhi_chan->offload_ch) {
mhi_reset_chan(mhi_cntrl, mhi_chan);
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
}
- dev_dbg(dev, "chan:%d successfully resetted\n", mhi_chan->chan);
+ dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
+
mutex_unlock(&mhi_chan->mutex);
}
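mhi_update_channel_state() above funnels every start/stop/reset through one command-and-completion path, so callers only pick a target state. A hypothetical restart sequence built on it (the channel mutex is held around the transitions, matching the callers above):

static int example_restart_channel(struct mhi_controller *mhi_cntrl,
				   struct mhi_chan *mhi_chan)
{
	int ret;

	mutex_lock(&mhi_chan->mutex);

	/* ENABLED -> STOP, then STOP -> ENABLED, each awaiting completion */
	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
				       MHI_CH_STATE_TYPE_STOP);
	if (!ret)
		ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
					       MHI_CH_STATE_TYPE_START);

	mutex_unlock(&mhi_chan->mutex);

	return ret;
}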
@@ -1240,28 +1426,16 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
int ret = 0;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
-
- dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
+ struct device *dev = &mhi_chan->mhi_dev->dev;
if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
- dev_err(dev,
- "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
- mhi_chan->name);
+ dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
return -ENOTCONN;
}
mutex_lock(&mhi_chan->mutex);
- /* If channel is not in disable state, do not allow it to start */
- if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
- ret = -EIO;
- dev_dbg(dev, "channel: %d is not in disabled state\n",
- mhi_chan->chan);
- goto error_init_chan;
- }
-
	/* Check if the client manages the channel context (offload channels) */
if (!mhi_chan->offload_ch) {
ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
@@ -1269,34 +1443,11 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
goto error_init_chan;
}
- reinit_completion(&mhi_chan->completion);
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- ret = -EIO;
- goto error_pm_state;
- }
-
- mhi_cntrl->wake_toggle(mhi_cntrl);
- read_unlock_bh(&mhi_cntrl->pm_lock);
- mhi_cntrl->runtime_get(mhi_cntrl);
- mhi_cntrl->runtime_put(mhi_cntrl);
-
- ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+ MHI_CH_STATE_TYPE_START);
if (ret)
goto error_pm_state;
- ret = wait_for_completion_timeout(&mhi_chan->completion,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
- if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
- ret = -EIO;
- goto error_pm_state;
- }
-
- write_lock_irq(&mhi_chan->lock);
- mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
- write_unlock_irq(&mhi_chan->lock);
-
/* Pre-allocate buffer for xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1334,9 +1485,6 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
mutex_unlock(&mhi_chan->mutex);
- dev_dbg(dev, "Chan: %d successfully moved to start state\n",
- mhi_chan->chan);
-
return 0;
error_pm_state:
@@ -1350,7 +1498,7 @@ error_init_chan:
error_pre_alloc:
mutex_unlock(&mhi_chan->mutex);
- __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
return ret;
}
@@ -1365,6 +1513,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ev_ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long flags;
+ dma_addr_t ptr;
dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
@@ -1372,7 +1521,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
/* mark all stale events related to channel as STALE event */
spin_lock_irqsave(&mhi_event->lock, flags);
- dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ ptr = er_ctxt->rp;
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ dev_rp = ev_ring->rp;
+ } else {
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ }
local_rp = ev_ring->rp;
while (dev_rp != local_rp) {
@@ -1403,8 +1560,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
while (tre_ring->rp != tre_ring->wp) {
struct mhi_buf_info *buf_info = buf_ring->rp;
- if (mhi_chan->dir == DMA_TO_DEVICE)
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
atomic_dec(&mhi_cntrl->pending_pkts);
+ /* Release the reference got from mhi_queue() */
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
if (!buf_info->pre_mapped)
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
@@ -1467,7 +1627,7 @@ error_open_chan:
if (!mhi_chan)
continue;
- __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
}
return ret;
@@ -1485,7 +1645,7 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
if (!mhi_chan)
continue;
- __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
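One client-visible consequence of the mhi_queue() hunk above: a full transfer ring now yields -EAGAIN instead of -ENOMEM, marking it as a transient condition. A hedged client-side sketch (example_send is hypothetical; mhi_queue_skb() is the existing public API):

static int example_send(struct mhi_device *mhi_dev, struct sk_buff *skb)
{
	int err;

	do {
		err = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
				    MHI_EOT);
		if (err == -EAGAIN)	/* ring full: wait for completions */
			msleep(20);
	} while (err == -EAGAIN);

	return err;
}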
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 681960c72d2a..e2e59a341fef 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -153,35 +153,33 @@ static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
- void __iomem *base = mhi_cntrl->regs;
struct mhi_event *mhi_event;
enum mhi_pm_state cur_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- u32 reset = 1, ready = 0;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
int ret, i;
- /* Wait for RESET to be cleared and READY bit to be set by the device */
- wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT, &reset) ||
- mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
- MHISTATUS_READY_MASK,
- MHISTATUS_READY_SHIFT, &ready) ||
- (!reset && ready),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
/* Check if device entered error state */
if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device link is not accessible\n");
return -EIO;
}
- /* Timeout if device did not transition to ready state */
- if (reset || !ready) {
- dev_err(dev, "Device Ready timeout\n");
- return -ETIMEDOUT;
+ /* Wait for RESET to be cleared and READY bit to be set by the device */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
+ interval_us);
+ if (ret) {
+ dev_err(dev, "Device failed to clear MHI Reset\n");
+ return ret;
+ }
+
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1,
+ interval_us);
+ if (ret) {
+ dev_err(dev, "Device failed to enter MHI Ready\n");
+ return ret;
}
dev_dbg(dev, "Device in READY State\n");
@@ -377,24 +375,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
int i, ret;
dev_dbg(dev, "Processing Mission Mode transition\n");
write_lock_irq(&mhi_cntrl->pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
- mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
- if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+ if (!MHI_IN_MISSION_MODE(ee)) {
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
write_unlock_irq(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event);
return -EIO;
}
+ mhi_cntrl->ee = ee;
write_unlock_irq(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event);
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
+ mhi_destroy_device);
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
/* Force MHI to be in M0 state before continuing */
@@ -560,6 +562,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
enum mhi_pm_state cur_state, prev_state;
+ enum dev_st_transition next_state;
struct mhi_event *mhi_event;
struct mhi_cmd_ctxt *cmd_ctxt;
struct mhi_cmd *mhi_cmd;
@@ -673,7 +676,23 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
er_ctxt->wp = er_ctxt->rbase;
}
- mhi_ready_state_transition(mhi_cntrl);
+ /* Transition to next state */
+ if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ goto exit_sys_error_transition;
+ }
+ next_state = DEV_ST_TRANSITION_PBL;
+ } else {
+ next_state = DEV_ST_TRANSITION_READY;
+ }
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
exit_sys_error_transition:
dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
@@ -742,8 +761,7 @@ void mhi_pm_st_worker(struct work_struct *work)
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
- if (MHI_IN_PBL(mhi_cntrl->ee))
- mhi_fw_load_handler(mhi_cntrl);
+ mhi_fw_load_handler(mhi_cntrl);
break;
case DEV_ST_TRANSITION_SBL:
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -755,10 +773,18 @@ void mhi_pm_st_worker(struct work_struct *work)
* either SBL or AMSS states
*/
mhi_create_devices(mhi_cntrl);
+ if (mhi_cntrl->fbc_download)
+ mhi_download_amss_image(mhi_cntrl);
break;
case DEV_ST_TRANSITION_MISSION_MODE:
mhi_pm_mission_mode_transition(mhi_cntrl);
break;
+ case DEV_ST_TRANSITION_FP:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_FP;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mhi_create_devices(mhi_cntrl);
+ break;
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
break;
@@ -822,7 +848,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
return -EBUSY;
}
- dev_info(dev, "Allowing M3 transition\n");
+ dev_dbg(dev, "Allowing M3 transition\n");
new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
if (new_state != MHI_PM_M3_ENTER) {
write_unlock_irq(&mhi_cntrl->pm_lock);
@@ -836,7 +862,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
/* Set MHI to M3 and wait for completion */
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
write_unlock_irq(&mhi_cntrl->pm_lock);
- dev_info(dev, "Wait for M3 completion\n");
+ dev_dbg(dev, "Waiting for M3 completion\n");
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->dev_state == MHI_STATE_M3 ||
@@ -870,9 +896,9 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
enum mhi_pm_state cur_state;
int ret;
- dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
- to_mhi_pm_state_str(mhi_cntrl->pm_state),
- TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+ dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
return 0;
@@ -880,6 +906,9 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
+ if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
+ return -EINVAL;
+
/* Notify clients about exiting LPM */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
mutex_lock(&itr->mutex);
@@ -1033,13 +1062,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mutex_lock(&mhi_cntrl->pm_mutex);
mhi_cntrl->pm_state = MHI_PM_DISABLE;
- if (!mhi_cntrl->pre_init) {
- /* Setup device context */
- ret = mhi_init_dev_ctxt(mhi_cntrl);
- if (ret)
- goto error_dev_ctxt;
- }
-
ret = mhi_init_irq_setup(mhi_cntrl);
if (ret)
goto error_setup_irq;
@@ -1092,7 +1114,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
&val) ||
!val,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
- if (ret) {
+ if (!ret) {
ret = -EIO;
dev_info(dev, "Failed to reset MHI due to syserr state\n");
goto error_bhi_offset;
@@ -1121,10 +1143,7 @@ error_bhi_offset:
mhi_deinit_free_irq(mhi_cntrl);
error_setup_irq:
- if (!mhi_cntrl->pre_init)
- mhi_deinit_dev_ctxt(mhi_cntrl);
-
-error_dev_ctxt:
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
mutex_unlock(&mhi_cntrl->pm_mutex);
return ret;
@@ -1136,12 +1155,19 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
enum mhi_pm_state cur_state, transition_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_cntrl->pm_state;
+ if (cur_state == MHI_PM_DISABLE) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return; /* Already powered down */
+ }
+
/* If it's not a graceful shutdown, force MHI to linkdown state */
transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
MHI_PM_LD_ERR_FATAL_DETECT;
- mutex_lock(&mhi_cntrl->pm_mutex);
- write_lock_irq(&mhi_cntrl->pm_lock);
cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
if (cur_state != transition_state) {
dev_err(dev, "Failed to move to state: %s from: %s\n",
@@ -1166,15 +1192,6 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
flush_work(&mhi_cntrl->st_worker);
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
-
- if (!mhi_cntrl->pre_init) {
- /* Free all allocated resources */
- if (mhi_cntrl->fbc_image) {
- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
- mhi_cntrl->fbc_image = NULL;
- }
- mhi_deinit_dev_ctxt(mhi_cntrl);
- }
}
EXPORT_SYMBOL_GPL(mhi_power_down);
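The early MHI_PM_DISABLE check added to mhi_power_down() above makes the call idempotent. A hypothetical teardown path may therefore call it from both an error handler and .remove() without tracking whether power-down already ran:

static void example_teardown(struct mhi_controller *mhi_cntrl)
{
	mhi_power_down(mhi_cntrl, true);	/* returns early if already disabled */
	mhi_unprepare_after_power_down(mhi_cntrl);
}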
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 20673a4b4a3c..7c810f02a2ef 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -14,6 +14,7 @@
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
@@ -71,9 +72,9 @@ struct mhi_pci_dev_info {
.doorbell_mode_switch = false, \
}
-#define MHI_EVENT_CONFIG_CTRL(ev_ring) \
+#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
{ \
- .num_elements = 64, \
+ .num_elements = el_count, \
.irq_moderation_ms = 0, \
.irq = (ev_ring) + 1, \
.priority = 1, \
@@ -114,9 +115,69 @@ struct mhi_pci_dev_info {
.doorbell_mode_switch = true, \
}
-#define MHI_EVENT_CONFIG_DATA(ev_ring) \
+#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_SBL), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+	}
+
+#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_SBL), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_FP), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+	}
+
+#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_FP), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
{ \
- .num_elements = 128, \
+ .num_elements = el_count, \
.irq_moderation_ms = 5, \
.irq = (ev_ring) + 1, \
.priority = 1, \
@@ -127,9 +188,9 @@ struct mhi_pci_dev_info {
.offload_channel = false, \
}
-#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
{ \
- .num_elements = 2048, \
+ .num_elements = el_count, \
.irq_moderation_ms = 1, \
.irq = (ev_ring) + 1, \
.priority = 1, \
@@ -150,21 +211,23 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
};
static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
/* first ring is control+data ring */
- MHI_EVENT_CONFIG_CTRL(0),
+ MHI_EVENT_CONFIG_CTRL(0, 64),
/* DIAG dedicated event ring */
- MHI_EVENT_CONFIG_DATA(1),
+ MHI_EVENT_CONFIG_DATA(1, 128),
/* Hardware channels request dedicated hardware event rings */
- MHI_EVENT_CONFIG_HW_DATA(2, 100),
- MHI_EVENT_CONFIG_HW_DATA(3, 101)
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};
-static struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
@@ -173,6 +236,15 @@ static struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.event_cfg = modem_qcom_v1_mhi_events,
};
+static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
+ .name = "qcom-sdx65m",
+ .fw = "qcom/sdx65m/xbl.elf",
+ .edl = "qcom/sdx65m/edl.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32
+};
+
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.name = "qcom-sdx55m",
.fw = "qcom/sdx55m/sbl1.mbn",
@@ -182,15 +254,121 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.dma_data_width = 32
};
+static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
+ .name = "qcom-sdx24",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32
+};
+
+static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+	/* The EDL firmware is a flash programmer exposing the firehose protocol */
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_quectel_em1xx_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_quectel_em1xx_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
+ .ch_cfg = mhi_quectel_em1xx_channels,
+ .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
+ .event_cfg = mhi_quectel_em1xx_events,
+};
+
+static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
+ .name = "quectel-em1xx",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32
+};
+
+static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_foxconn_sdx55_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
+ .ch_cfg = mhi_foxconn_sdx55_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
+ .name = "foxconn-sdx55",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32
+};
+
static const struct pci_device_id mhi_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
+ { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+	/* T99W175 (sdx55), both eSIM and non-eSIM variants */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+	/* DW5930e (sdx55), with eSIM; it's also a T99W175 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+	/* DW5930e (sdx55), non-eSIM; it's also a T99W175 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
enum mhi_pci_device_status {
MHI_PCI_DEV_STARTED,
+ MHI_PCI_DEV_SUSPENDED,
};
struct mhi_pci_device {
@@ -224,12 +402,31 @@ static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
case MHI_CB_FATAL_ERROR:
case MHI_CB_SYS_ERROR:
dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+ pm_runtime_forbid(&pdev->dev);
+ break;
+ case MHI_CB_EE_MISSION_MODE:
+ pm_runtime_allow(&pdev->dev);
break;
default:
break;
}
}
+static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
+{
+ /* no-op */
+}
+
+static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
+{
+ /* no-op */
+}
+
+static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
+{
+ /* no-op */
+}
+
static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
{
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
@@ -330,13 +527,19 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
{
- /* no PM for now */
- return 0;
+	/* The runtime_get() MHI callback means:
+	 * do whatever is needed to leave M3.
+	 */
+ return pm_runtime_get(mhi_cntrl->cntrl_dev);
}
static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
{
- /* no PM for now */
+	/* The runtime_put() MHI callback means:
+	 * the device can now be moved into the M3 state.
+	 */
+ pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
+ pm_runtime_put(mhi_cntrl->cntrl_dev);
}
static void mhi_pci_recovery_work(struct work_struct *work)
@@ -350,6 +553,7 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_warn(&pdev->dev, "device recovery started\n");
del_timer(&mhi_pdev->health_check_timer);
+ pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -357,7 +561,6 @@ static void mhi_pci_recovery_work(struct work_struct *work)
mhi_unprepare_after_power_down(mhi_cntrl);
}
- /* Check if we can recover without full reset */
pci_set_power_state(pdev, PCI_D0);
pci_load_saved_state(pdev, mhi_pdev->pci_state);
pci_restore_state(pdev);
@@ -391,6 +594,10 @@ static void health_check(struct timer_list *t)
struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return;
+
if (!mhi_pci_is_alive(mhi_cntrl)) {
dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
queue_work(system_long_wq, &mhi_pdev->recovery_work);
@@ -433,6 +640,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+ mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+ mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+ mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)
@@ -444,9 +654,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, mhi_pdev);
- /* Have stored pci confspace at hand for restore in sudden PCI error */
+	/* Keep the stored PCI config space at hand for restore after a sudden
+	 * PCI error: cache the state locally and discard the PCI core copy.
+	 */
pci_save_state(pdev);
mhi_pdev->pci_state = pci_store_saved_state(pdev);
+ pci_load_saved_state(pdev, NULL);
pci_enable_pcie_error_reporting(pdev);
@@ -472,6 +685,14 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* start health check */
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ /* Only allow runtime-suspend if PME capable (for wakeup) */
+ if (pci_pme_capable(pdev, PCI_D3hot)) {
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ }
+
return 0;
err_unprepare:
@@ -495,9 +716,19 @@ static void mhi_pci_remove(struct pci_dev *pdev)
mhi_unprepare_after_power_down(mhi_cntrl);
}
+	/* balance the pm_runtime_put_noidle() done at probe time */
+ if (pci_pme_capable(pdev, PCI_D3hot))
+ pm_runtime_get_noresume(&pdev->dev);
+
mhi_unregister_controller(mhi_cntrl);
}
+static void mhi_pci_shutdown(struct pci_dev *pdev)
+{
+ mhi_pci_remove(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
static void mhi_pci_reset_prepare(struct pci_dev *pdev)
{
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
@@ -605,41 +836,59 @@ static const struct pci_error_handlers mhi_pci_err_handler = {
.reset_done = mhi_pci_reset_done,
};
-static int __maybe_unused mhi_pci_suspend(struct device *dev)
+static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return 0;
del_timer(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ mhi_cntrl->ee != MHI_EE_AMSS)
+ goto pci_suspend; /* Nothing to do at MHI level */
+
/* Transition to M3 state */
- mhi_pm_suspend(mhi_cntrl);
+ err = mhi_pm_suspend(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
+ clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
+ return -EBUSY;
+ }
- pci_save_state(pdev);
+pci_suspend:
pci_disable_device(pdev);
pci_wake_from_d3(pdev, true);
- pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int __maybe_unused mhi_pci_resume(struct device *dev)
+static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_set_master(pdev);
+ if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return 0;
err = pci_enable_device(pdev);
if (err)
goto err_recovery;
+ pci_set_master(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ mhi_cntrl->ee != MHI_EE_AMSS)
+ return 0; /* Nothing to do at MHI level */
+
/* Exit M3, transition to M0 state */
err = mhi_pm_resume(mhi_cntrl);
if (err) {
@@ -650,16 +899,44 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
/* Resume health check */
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+	/* May be a remote wakeup (no MHI runtime_get); update last access time */
+ pm_runtime_mark_last_busy(dev);
+
return 0;
err_recovery:
- /* The device may have loose power or crashed, try recovering it */
+	/* Do not return an error here, to avoid corrupting our PCI device
+	 * state; the device likely lost power (d3cold) and simply needs a
+	 * reset via the recovery procedure. Trigger the recovery
+	 * asynchronously so that system suspend exit is not delayed.
+	 */
queue_work(system_long_wq, &mhi_pdev->recovery_work);
+ pm_runtime_mark_last_busy(dev);
- return err;
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+ pm_runtime_disable(dev);
+ return mhi_pci_runtime_suspend(dev);
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+ int ret;
+
+	/* Depending on the platform, the device may have lost power (d3cold);
+	 * we need to resume it now to check its state and recover if necessary.
+	 */
+ ret = mhi_pci_runtime_resume(dev);
+ pm_runtime_enable(dev);
+
+ return ret;
}
static const struct dev_pm_ops mhi_pci_pm_ops = {
+ SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
};
@@ -668,6 +945,7 @@ static struct pci_driver mhi_pci_driver = {
.id_table = mhi_pci_id_table,
.probe = mhi_pci_probe,
.remove = mhi_pci_remove,
+ .shutdown = mhi_pci_shutdown,
.err_handler = &mhi_pci_err_handler,
.driver.pm = &mhi_pci_pm_ops
};
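Supporting one more modem in this driver remains the two-step pattern the SDX65, EM1xx and Foxconn additions above follow: describe the device in an mhi_pci_dev_info and point a PCI ID at it. A hypothetical sketch (the name and IDs are placeholders):

static const struct mhi_pci_dev_info mhi_example_info = {
	.name = "example-modem",
	.config = &modem_qcom_v1_mhiv_config,	/* reuse a stock config */
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32
};

/* plus one line in mhi_pci_id_table:
 *	{ PCI_DEVICE(0x1234, 0xabcd),
 *	  .driver_data = (kernel_ulong_t) &mhi_example_info },
 */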
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 45ac7ab003ce..deb85a334c93 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -836,7 +836,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
Dummy = readb(apbs[IndexCard].RamIO + VERS);
kfree(adgl);
mutex_unlock(&ac_mutex);
- return 0;
+ return ret;
err:
if (warncount) {
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
index 410b50b05e21..5b7ca0416490 100644
--- a/drivers/char/hw_random/ba431-rng.c
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng)
static int ba431_trng_probe(struct platform_device *pdev)
{
struct ba431_trng *ba431;
- struct resource *res;
int ret;
ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
@@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
ba431->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ba431->base = devm_ioremap_resource(&pdev->dev, res);
+ ba431->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ba431->base))
return PTR_ERR(ba431->base);
@@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ba431);
- ret = hwrng_register(&ba431->rng);
+ ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
if (ret) {
dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
return ret;
@@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev)
return 0;
}
-static int ba431_trng_remove(struct platform_device *pdev)
-{
- struct ba431_trng *ba431 = platform_get_drvdata(pdev);
-
- hwrng_unregister(&ba431->rng);
-
- return 0;
-}
-
static const struct of_device_id ba431_trng_dt_ids[] = {
{ .compatible = "silex-insight,ba431-rng", .data = NULL },
{ /* sentinel */ }
@@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = {
.of_match_table = ba431_trng_dt_ids,
},
.probe = ba431_trng_probe,
- .remove = ba431_trng_remove,
};
module_platform_driver(ba431_trng_driver);
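The devm_hwrng_register() conversion above is what allows the whole remove() callback to be dropped: unregistration is now tied to the device lifetime. A minimal hypothetical probe showing the pattern (example_* names are illustrative):

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	memset(buf, 0, max);	/* placeholder for a real hardware FIFO read */
	return max;
}

static int example_rng_probe(struct platform_device *pdev)
{
	struct hwrng *rng;

	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
	if (!rng)
		return -ENOMEM;

	rng->name = pdev->name;
	rng->read = example_rng_read;

	/* unregistered automatically on detach; no .remove needed */
	return devm_hwrng_register(&pdev->dev, rng);
}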
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 1a7c43b43c6b..e7dd457e9b22 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -32,6 +33,7 @@ struct bcm2835_rng_priv {
void __iomem *base;
bool mask_interrupts;
struct clk *clk;
+ struct reset_control *reset;
};
static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
@@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng)
int ret = 0;
u32 val;
- if (!IS_ERR(priv->clk)) {
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
if (priv->mask_interrupts) {
/* mask the interrupt */
@@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng)
/* disable rng hardware */
rng_writel(priv, 0, RNG_CTRL);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
struct bcm2835_rng_of_data {
@@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
/* Clock is optional on most platforms */
- priv->clk = devm_clk_get(dev, NULL);
- if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
priv->rng.name = pdev->name;
priv->rng.init = bcm2835_rng_init;
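The devm_clk_get_optional() conversion above relies on the clock API treating a NULL clock as a valid no-op: when the node has no clock the helper returns NULL rather than an error, clk_prepare_enable(NULL) succeeds, and the IS_ERR() guards around later clock calls can go. The optional reset control behaves the same way. A hedged sketch of the pattern (example_init is hypothetical):

static int example_init(struct device *dev)
{
	struct clk *clk;

	clk = devm_clk_get_optional(dev, NULL);	/* NULL when absent */
	if (IS_ERR(clk))
		return PTR_ERR(clk);		/* e.g. -EPROBE_DEFER */

	return clk_prepare_enable(clk);		/* no-op for a NULL clk */
}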
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 7a293f2147a0..302ffa354c2f 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
static int cctrng_probe(struct platform_device *pdev)
{
- struct resource *req_mem_cc_regs = NULL;
struct cctrng_drvdata *drvdata;
struct device *dev = &pdev->dev;
int rc = 0;
@@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev)
drvdata->circ.buf = (char *)drvdata->data_buf;
- /* Get device resources */
- /* First CC registers space */
- req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Map registers space */
- drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers\n");
return PTR_ERR(drvdata->cc_base);
}
- dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
- req_mem_cc_regs);
- dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
- &req_mem_cc_regs->start, drvdata->cc_base);
-
/* Then IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed getting IRQ resource\n");
+ if (irq < 0)
return irq;
- }
/* parse sampling rate from device tree */
rc = cc_trng_parse_sampling_ratio(drvdata);
@@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev)
atomic_set(&drvdata->pending_hw, 1);
/* registration of the hwrng device */
- rc = hwrng_register(&drvdata->rng);
+ rc = devm_hwrng_register(dev, &drvdata->rng);
if (rc) {
dev_err(dev, "Could not register hwrng device.\n");
goto post_pm_err;
@@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device *pdev)
dev_dbg(dev, "Releasing cctrng resources...\n");
- hwrng_unregister(&drvdata->rng);
-
cc_trng_pm_fini(drvdata);
cc_trng_clk_fini(drvdata);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 8c1c47dd9f46..adb3c2bd7783 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -396,7 +396,7 @@ static ssize_t hwrng_attr_selected_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+ return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
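sysfs_emit() replaces snprintf() above because it encodes the sysfs contract: the buffer is a full page, output must fit within it, and misuse triggers a warning. Shape of the conversion as a hypothetical show() routine:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* bounded to PAGE_SIZE, unlike an arbitrary snprintf() length */
	return sysfs_emit(buf, "%d\n", 1);
}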
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index eb7db27f9f19..d740b8814bf3 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -25,13 +25,13 @@
*/
#include <linux/hw_random.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
#define PFX KBUILD_MODNAME ": "
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 5cc5fc504968..cede9f159102 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -30,8 +30,7 @@
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define RNG_REG_STATUS_RDY (1 << 0)
@@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match);
static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
int irq, err;
- match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
- if (!match) {
- dev_err(dev, "no compatible OF match\n");
- return -EINVAL;
- }
- priv->pdata = match->data;
+ priv->pdata = of_device_get_match_data(dev);
+ if (!priv->pdata)
+ return -ENODEV;
+
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
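of_device_get_match_data() above collapses the of_match_device() lookup and returns the matched .data pointer directly, or NULL when there is none, which is what the new -ENODEV check catches. A hypothetical sketch of the pattern (example_* names are illustrative):

struct example_pdata { u32 quirks; };	/* stand-in per-SoC data */

static const struct example_pdata example_data;

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-rng", .data = &example_data },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_pdata *pdata = of_device_get_match_data(&pdev->dev);

	if (!pdata)	/* no match data: nothing we can drive */
		return -ENODEV;

	return 0;
}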
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index e8210c1715cf..99c8bd0859a1 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
priv->rng.name = pdev->name;
priv->rng.read = pic32_rng_read;
- ret = hwrng_register(&priv->rng);
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
if (ret)
goto err_register;
@@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev)
{
struct pic32_rng *rng = platform_get_drvdata(pdev);
- hwrng_unregister(&rng->rng);
writel(0, rng->base + RNGCON);
clk_disable_unprepare(rng->clk);
return 0;
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
index 7bdab8c8a6a8..2a9fea72b2e0 100644
--- a/drivers/char/hw_random/xiphera-trng.c
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev)
int ret;
struct xiphera_trng *trng;
struct device *dev = &pdev->dev;
- struct resource *res;
trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->mem = devm_ioremap_resource(dev, res);
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
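This hunk and the ba431 one earlier apply the same one-liner: devm_platform_ioremap_resource() fuses platform_get_resource() and devm_ioremap_resource(), so the local struct resource disappears. Sketch of the resulting probe shape (hypothetical names):

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* fetch MEM resource 0 and ioremap it, device-managed */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}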
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 862c2fd933c7..0e22e3b0a04e 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -546,7 +546,7 @@ static int lp_open(struct inode *inode, struct file *file)
}
/* Determine if the peripheral supports ECP mode */
lp_claim_parport_or_block(&lp_table[minor]);
- if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
+ if ((lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
!parport_negotiate(lp_table[minor].dev->port,
IEEE1284_MODE_ECP)) {
printk(KERN_INFO "lp%d: ECP mode\n", minor);
@@ -590,7 +590,7 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
return -ENODEV;
if ((LP_F(minor) & LP_EXIST) == 0)
return -ENODEV;
- switch ( cmd ) {
+ switch (cmd) {
case LPTIME:
if (arg > UINT_MAX / HZ)
return -EINVAL;
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
index 5e1618a76b2a..8588b51202e5 100644
--- a/drivers/char/mwave/tp3780i.c
+++ b/drivers/char/mwave/tp3780i.c
@@ -177,14 +177,10 @@ int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
return retval;
}
-int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData)
+void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData)
{
- int retval = 0;
-
PRINTK_2(TRACE_TP3780I,
"tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
-
- return retval;
}
int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h
index 07685b68538f..8bd976d42fae 100644
--- a/drivers/char/mwave/tp3780i.h
+++ b/drivers/char/mwave/tp3780i.h
@@ -91,7 +91,7 @@ int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
-int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData);
+void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData);
int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0fe9e200e4c8..605969ed0f96 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -500,7 +500,6 @@ struct entropy_store {
unsigned short add_ptr;
unsigned short input_rotate;
int entropy_count;
- unsigned int initialized:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -660,7 +659,7 @@ static void process_random_ready_list(void)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- int entropy_count, orig, has_initialized = 0;
+ int entropy_count, orig;
const int pool_size = r->poolinfo->poolfracbits;
int nfrac = nbits << ENTROPY_SHIFT;
@@ -717,23 +716,14 @@ retry:
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
- if (has_initialized) {
- r->initialized = 1;
- kill_fasync(&fasync, SIGIO, POLL_IN);
- }
-
trace_credit_entropy_bits(r->name, nbits,
entropy_count >> ENTROPY_SHIFT, _RET_IP_);
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
- if (crng_init < 2) {
- if (entropy_bits < 128)
- return;
+ if (crng_init < 2 && entropy_bits >= 128)
crng_reseed(&primary_crng, r);
- entropy_bits = ENTROPY_BITS(r);
- }
}
}
@@ -819,7 +809,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
{
- memcpy(&crng->state[0], "expand 32-byte k", 16);
+ chacha_init_consts(crng->state);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
crng_init_try_arch(crng);
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
@@ -827,7 +817,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
static void __init crng_initialize_primary(struct crng_state *crng)
{
- memcpy(&crng->state[0], "expand 32-byte k", 16);
+ chacha_init_consts(crng->state);
_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
if (crng_init_try_arch_early(crng) && trust_cpu) {
invalidate_batched_entropy();
@@ -1372,8 +1362,7 @@ retry:
}
/*
- * This function does the actual extraction for extract_entropy and
- * extract_entropy_user.
+ * This function does the actual extraction for extract_entropy.
*
* Note: we assume that .poolwords is a multiple of 16 words.
*/
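The chacha_init_consts() change above replaces a byte-wise memcpy of the literal "expand 32-byte k", which only yields the right state words on little-endian machines. To the best of my understanding the helper stores the four ChaCha constant words directly; a sketch of the equivalent:

    #include <linux/types.h>

    /* The "expand 32-byte k" constant as four host-endian u32 words,
     * as the ChaCha specification defines them; storing the words
     * (rather than memcpy'ing the string) is endian-safe. */
    static inline void chacha_consts_sketch(u32 state[4])
    {
            state[0] = 0x61707865;  /* "expa" */
            state[1] = 0x3320646e;  /* "nd 3" */
            state[2] = 0x79622d32;  /* "2-by" */
            state[3] = 0x6b206574;  /* "te k" */
    }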
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 3633ed70f48f..1b18ce5ebab1 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -41,6 +41,27 @@ struct acpi_tcpa {
};
};
+/* Check that the given log is indeed a TPM2 log. */
+static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
+{
+ struct tcg_efi_specid_event_head *efispecid;
+ struct tcg_pcr_event *event_header;
+ int n;
+
+ if (len < sizeof(*event_header))
+ return false;
+ len -= sizeof(*event_header);
+ event_header = bios_event_log;
+
+ if (len < sizeof(*efispecid))
+ return false;
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+ n = memcmp(efispecid->signature, TCG_SPECID_SIG,
+ sizeof(TCG_SPECID_SIG));
+ return n == 0;
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
struct acpi_table_tpm2 *tbl;
struct acpi_tpm2_phy *tpm2_phy;
int format;
+ int ret;
log = &chip->log;
@@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
log->bios_event_log_end = log->bios_event_log + len;
+ ret = -EIO;
virt = acpi_os_map_iomem(start, len);
if (!virt)
goto err;
@@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
+ !tpm_is_tpm2_log(log->bios_event_log, len)) {
+ /* try EFI log next */
+ ret = -ENODEV;
+ goto err;
+ }
+
return format;
err:
kfree(log->bios_event_log);
log->bios_event_log = NULL;
- return -EIO;
+ return ret;
}
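Returning -ENODEV instead of -EIO matters because of how the per-source log readers are chained: -ENODEV means "no log here, try the next source", while other errors abort. A hedged sketch of that dispatch, following the existing tpm_read_log_*() naming (the in-tree dispatcher also falls back to OF):

    struct tpm_chip;
    int tpm_read_log_acpi(struct tpm_chip *chip);   /* in-tree helpers */
    int tpm_read_log_efi(struct tpm_chip *chip);

    static int example_read_log(struct tpm_chip *chip)
    {
            int rc;

            rc = tpm_read_log_acpi(chip);
            if (rc != -ENODEV)      /* found a log, or a hard error */
                    return rc;

            return tpm_read_log_efi(chip);
    }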
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 7460f230bae4..8512ec76d526 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
int log_version;
int rc = 0;
+ if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
+ return;
+
rc = tpm_read_log(chip);
if (rc < 0)
return;
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index 35229e5143ca..e6cb9d525e30 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
{
struct efi_tcg2_final_events_table *final_tbl = NULL;
+ int final_events_log_size = efi_tpm_final_log_size;
struct linux_efi_tpm_eventlog *log_tbl;
struct tpm_bios_log *log;
u32 log_size;
@@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
ret = tpm_log_version;
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
- efi_tpm_final_log_size == 0 ||
+ final_events_log_size == 0 ||
tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
goto out;
final_tbl = memremap(efi.tpm_final_log,
- sizeof(*final_tbl) + efi_tpm_final_log_size,
+ sizeof(*final_tbl) + final_events_log_size,
MEMREMAP_WB);
if (!final_tbl) {
pr_err("Could not map UEFI TPM final log\n");
@@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
goto out;
}
- efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
+ /*
+ * The 'final events log' size excludes the 'final events preboot log'
+ * at its beginning.
+ */
+ final_events_log_size -= log_tbl->final_events_preboot_size;
+ /*
+ * Allocate memory for the 'combined log' where we will append the
+ * 'final events log' to.
+ */
tmp = krealloc(log->bios_event_log,
- log_size + efi_tpm_final_log_size,
+ log_size + final_events_log_size,
GFP_KERNEL);
if (!tmp) {
kfree(log->bios_event_log);
@@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
log->bios_event_log = tmp;
/*
- * Copy any of the final events log that didn't also end up in the
- * main log. Events can be logged in both if events are generated
+ * Append any of the 'final events log' that didn't also end up in the
+ * 'main log'. Events can be logged in both if events are generated
* between GetEventLog() and ExitBootServices().
*/
memcpy((void *)log->bios_event_log + log_size,
final_tbl->events + log_tbl->final_events_preboot_size,
- efi_tpm_final_log_size);
+ final_events_log_size);
+ /*
+ * The size of the 'combined log' is the size of the 'main log' plus
+ * the size of the 'final events log'.
+ */
log->bios_event_log_end = log->bios_event_log +
- log_size + efi_tpm_final_log_size;
+ log_size + final_events_log_size;
out:
memunmap(final_tbl);
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index ec9a65e7887d..f19c227d20f4 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
expected = be32_to_cpup((__be32 *)(buf + 2));
if (expected > buf_len) {
dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
+ rc = -E2BIG;
goto out_err;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 1836cc56e357..59dfd9c421a1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1456,18 +1456,15 @@ static int add_port(struct ports_device *portdev, u32 id)
*/
send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
- if (pdrvdata.debugfs_dir) {
- /*
- * Finally, create the debugfs file that we can use to
- * inspect a port's state at any time
- */
- snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
- port->portdev->vdev->index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
- pdrvdata.debugfs_dir,
- port,
- &port_debugfs_fops);
- }
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
+ port->portdev->vdev->index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port, &port_debugfs_fops);
return 0;
free_inbufs:
@@ -2244,8 +2241,6 @@ static int __init init(void)
}
pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
- if (!pdrvdata.debugfs_dir)
- pr_warn("Error creating debugfs dir for virtio-ports\n");
INIT_LIST_HEAD(&pdrvdata.consoles);
INIT_LIST_HEAD(&pdrvdata.portdevs);
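Dropping the NULL check on debugfs_create_dir() follows debugfs convention: the creation helpers return an ERR_PTR() on failure, and every debugfs API accepts an ERR_PTR parent and degrades to a no-op, so callers are expected not to check. A short sketch:

    #include <linux/debugfs.h>

    static struct dentry *foo_dir;  /* hypothetical */
    static u32 foo_count;

    static void foo_debugfs_init(void)
    {
            /* No error handling by design: a failed create returns an
             * ERR_PTR, and passing that as the parent below simply
             * makes the child creation a no-op as well. */
            foo_dir = debugfs_create_dir("foo", NULL);
            debugfs_create_u32("count", 0444, foo_dir, &foo_count);
    }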
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index d0177824c518..1b885964fb34 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -51,7 +51,7 @@
static unsigned arch_timers_present __initdata;
-static void __iomem *arch_counter_base;
+static void __iomem *arch_counter_base __ro_after_init;
struct arch_timer {
void __iomem *base;
@@ -60,15 +60,16 @@ struct arch_timer {
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
-static u32 arch_timer_rate;
-static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
+static u32 arch_timer_rate __ro_after_init;
+u32 arch_timer_rate1 __ro_after_init;
+static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
static struct clock_event_device __percpu *arch_timer_evt;
-static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
-static bool arch_timer_c3stop;
-static bool arch_timer_mem_use_virtual;
-static bool arch_counter_suspend_stop;
+static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
+static bool arch_timer_c3stop __ro_after_init;
+static bool arch_timer_mem_use_virtual __ro_after_init;
+static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
@@ -76,7 +77,7 @@ static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
static cpumask_t evtstrm_available = CPU_MASK_NONE;
-static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static int __init early_evtstrm_cfg(char *buf)
{
@@ -176,7 +177,7 @@ static notrace u64 arch_counter_get_cntvct(void)
* to exist on arm64. arm doesn't use this before DT is probed so even
* if we don't have the cp15 accessors we won't have a problem.
*/
-u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);
static u64 arch_counter_read(struct clocksource *cs)
@@ -925,7 +926,7 @@ static int validate_timer_rate(void)
* rate was probed first, and don't verify that others match. If the first node
* probed has a clock-frequency property, this overrides the HW register.
*/
-static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
+static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
/* Who has more than one independent system counter? */
if (arch_timer_rate)
@@ -939,7 +940,7 @@ static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
pr_warn("frequency not available\n");
}
-static void arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(unsigned type)
{
pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
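The __ro_after_init annotations above mark variables that are written only while booting; the kernel write-protects their section once init completes, so later corruption faults instead of silently changing timer state. Minimal shape of the idiom:

    #include <linux/cache.h>
    #include <linux/init.h>

    /* Placed in .data..ro_after_init: writable during boot,
     * write-protected once the kernel finishes initializing. */
    static u32 example_rate __ro_after_init;

    static int __init example_setup(void)
    {
            example_rate = 24000000;        /* hypothetical fixed rate */
            return 0;
    }
    early_initcall(example_setup);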
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 996900d017c6..2fc93e46cea3 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -18,7 +18,7 @@
#define RATE_32K 32768
-#define TIMER_MODE_CONTINOUS 0x1
+#define TIMER_MODE_CONTINUOUS 0x1
#define TIMER_DOWNCOUNT_VAL 0xffffffff
#define PRCMU_TIMER_REF 0
@@ -55,13 +55,13 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
/*
* The A9 sub system expects the timer to be configured as
- * a continous looping timer.
+ * a continuous looping timer.
* The PRCMU should configure it but if it for some reason
* don't we do it here.
*/
if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
- TIMER_MODE_CONTINOUS) {
- writel(TIMER_MODE_CONTINOUS,
+ TIMER_MODE_CONTINUOUS) {
+ writel(TIMER_MODE_CONTINUOUS,
clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
writel(TIMER_DOWNCOUNT_VAL,
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 42e7e43b8fcd..3819ef5b7098 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -38,7 +38,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
}
/*
- * Not all implementations use a periphal clock, so don't panic
+ * Not all implementations use a peripheral clock, so don't panic
* if it's not present
*/
pclk = of_clk_get_by_name(np, "pclk");
@@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
return 0;
timer_clk = of_clk_get_by_name(np, "timer");
- if (IS_ERR(timer_clk))
- return PTR_ERR(timer_clk);
+ if (IS_ERR(timer_clk)) {
+ ret = PTR_ERR(timer_clk);
+ goto out_pclk_disable;
+ }
ret = clk_prepare_enable(timer_clk);
if (ret)
- return ret;
+ goto out_timer_clk_put;
*rate = clk_get_rate(timer_clk);
- if (!(*rate))
- return -EINVAL;
+ if (!(*rate)) {
+ ret = -EINVAL;
+ goto out_timer_clk_disable;
+ }
return 0;
+
+out_timer_clk_disable:
+ clk_disable_unprepare(timer_clk);
+out_timer_clk_put:
+ clk_put(timer_clk);
+out_pclk_disable:
+ if (!IS_ERR(pclk)) {
+ clk_disable_unprepare(pclk);
+ clk_put(pclk);
+ }
+ iounmap(*base);
+ return ret;
}
static int __init add_clockevent(struct device_node *event_timer)
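The reworked error handling above is the standard acquire-in-order, release-in-reverse goto ladder: each failure point jumps to a label that undoes exactly what succeeded so far (including the pclk and the mapping that the early returns used to leak). Generic shape, with hypothetical helpers:

    int acquire_a(void);            /* hypothetical resources */
    int acquire_b(void);
    void release_a(void);

    static int example_init(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    return ret;             /* nothing to unwind yet */

            ret = acquire_b();
            if (ret)
                    goto out_release_a;     /* undo only what succeeded */

            return 0;

    out_release_a:
            release_a();
            return ret;
    }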
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 269a691bd2c4..977fd05ac35f 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -18,6 +18,9 @@
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -43,14 +46,13 @@ static u64 hv_sched_clock_offset __ro_after_init;
*/
static bool direct_mode_enabled;
-static int stimer0_irq;
-static int stimer0_vector;
+static int stimer0_irq = -1;
static int stimer0_message_sint;
+static DEFINE_PER_CPU(long, stimer0_evt);
/*
- * ISR for when stimer0 is operating in Direct Mode. Direct Mode
- * does not use VMbus or any VMbus messages, so process here and not
- * in the VMbus driver code.
+ * Common code for stimer0 interrupts coming via Direct Mode or
+ * as a VMbus message.
*/
void hv_stimer0_isr(void)
{
@@ -61,6 +63,16 @@ void hv_stimer0_isr(void)
}
EXPORT_SYMBOL_GPL(hv_stimer0_isr);
+/*
+ * stimer0 interrupt handler for architectures that support
+ * per-cpu interrupts, which also implies Direct Mode.
+ */
+static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
+{
+ hv_stimer0_isr();
+ return IRQ_HANDLED;
+}
+
static int hv_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -68,16 +80,16 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hv_read_reference_counter();
current_tick += delta;
- hv_init_timer(0, current_tick);
+ hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_init_timer(0, 0);
- hv_init_timer_config(0, 0);
- if (direct_mode_enabled)
- hv_disable_stimer0_percpu_irq(stimer0_irq);
+ hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
+ hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+ if (direct_mode_enabled && stimer0_irq >= 0)
+ disable_percpu_irq(stimer0_irq);
return 0;
}
@@ -95,8 +107,9 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
* on the specified hardware vector/IRQ.
*/
timer_cfg.direct_mode = 1;
- timer_cfg.apic_vector = stimer0_vector;
- hv_enable_stimer0_percpu_irq(stimer0_irq);
+ timer_cfg.apic_vector = HYPERV_STIMER0_VECTOR;
+ if (stimer0_irq >= 0)
+ enable_percpu_irq(stimer0_irq, IRQ_TYPE_NONE);
} else {
/*
* When it expires, the timer will generate a VMbus message,
@@ -105,7 +118,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = stimer0_message_sint;
}
- hv_init_timer_config(0, timer_cfg.as_uint64);
+ hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
@@ -169,10 +182,58 @@ int hv_stimer_cleanup(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
+/*
+ * These placeholders are overridden by arch specific code on
+ * architectures that need special setup of the stimer0 IRQ because
+ * they don't support per-cpu IRQs (such as x86/x64).
+ */
+void __weak hv_setup_stimer0_handler(void (*handler)(void))
+{
+};
+
+void __weak hv_remove_stimer0_handler(void)
+{
+};
+
+/* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
+static int hv_setup_stimer0_irq(void)
+{
+ int ret;
+
+ ret = acpi_register_gsi(NULL, HYPERV_STIMER0_VECTOR,
+ ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
+ if (ret < 0) {
+ pr_err("Can't register Hyper-V stimer0 GSI. Error %d", ret);
+ return ret;
+ }
+ stimer0_irq = ret;
+
+ ret = request_percpu_irq(stimer0_irq, hv_stimer0_percpu_isr,
+ "Hyper-V stimer0", &stimer0_evt);
+ if (ret) {
+ pr_err("Can't request Hyper-V stimer0 IRQ %d. Error %d",
+ stimer0_irq, ret);
+ acpi_unregister_gsi(stimer0_irq);
+ stimer0_irq = -1;
+ }
+ return ret;
+}
+
+static void hv_remove_stimer0_irq(void)
+{
+ if (stimer0_irq == -1) {
+ hv_remove_stimer0_handler();
+ } else {
+ free_percpu_irq(stimer0_irq, &stimer0_evt);
+ acpi_unregister_gsi(stimer0_irq);
+ stimer0_irq = -1;
+ }
+}
+
/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
-int hv_stimer_alloc(void)
+int hv_stimer_alloc(bool have_percpu_irqs)
{
- int ret = 0;
+ int ret;
/*
* Synthetic timers are always available except on old versions of
@@ -188,29 +249,37 @@ int hv_stimer_alloc(void)
direct_mode_enabled = ms_hyperv.misc_features &
HV_STIMER_DIRECT_MODE_AVAILABLE;
- if (direct_mode_enabled) {
- ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
- hv_stimer0_isr);
+
+ /*
+ * If Direct Mode isn't enabled, the remainder of the initialization
+ * is done later by hv_stimer_legacy_init()
+ */
+ if (!direct_mode_enabled)
+ return 0;
+
+ if (have_percpu_irqs) {
+ ret = hv_setup_stimer0_irq();
if (ret)
- goto free_percpu;
+ goto free_clock_event;
+ } else {
+ hv_setup_stimer0_handler(hv_stimer0_isr);
+ }
- /*
- * Since we are in Direct Mode, stimer initialization
- * can be done now with a CPUHP value in the same range
- * as other clockevent devices.
- */
- ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
- "clockevents/hyperv/stimer:starting",
- hv_stimer_init, hv_stimer_cleanup);
- if (ret < 0)
- goto free_stimer0_irq;
+ /*
+ * Since we are in Direct Mode, stimer initialization
+ * can be done now with a CPUHP value in the same range
+ * as other clockevent devices.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
+ "clockevents/hyperv/stimer:starting",
+ hv_stimer_init, hv_stimer_cleanup);
+ if (ret < 0) {
+ hv_remove_stimer0_irq();
+ goto free_clock_event;
}
return ret;
-free_stimer0_irq:
- hv_remove_stimer0_irq(stimer0_irq);
- stimer0_irq = 0;
-free_percpu:
+free_clock_event:
free_percpu(hv_clock_event);
hv_clock_event = NULL;
return ret;
@@ -254,23 +323,6 @@ void hv_stimer_legacy_cleanup(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
-
-/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
-void hv_stimer_free(void)
-{
- if (!hv_clock_event)
- return;
-
- if (direct_mode_enabled) {
- cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
- hv_remove_stimer0_irq(stimer0_irq);
- stimer0_irq = 0;
- }
- free_percpu(hv_clock_event);
- hv_clock_event = NULL;
-}
-EXPORT_SYMBOL_GPL(hv_stimer_free);
-
/*
* Do a global cleanup of clockevents for the cases of kexec and
* vmbus exit
@@ -287,12 +339,17 @@ void hv_stimer_global_cleanup(void)
hv_stimer_legacy_cleanup(cpu);
}
- /*
- * If Direct Mode is enabled, the cpuhp teardown callback
- * (hv_stimer_cleanup) will be run on all CPUs to stop the
- * stimers.
- */
- hv_stimer_free();
+ if (!hv_clock_event)
+ return;
+
+ if (direct_mode_enabled) {
+ cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
+ hv_remove_stimer0_irq();
+ stimer0_irq = -1;
+ }
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
+
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
@@ -302,14 +359,6 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
* the other that uses the TSC reference page feature as defined in the
* TLFS. The MSR version is for compatibility with old versions of
* Hyper-V and 32-bit x86. The TSC reference page version is preferred.
- *
- * The Hyper-V clocksource ratings of 250 are chosen to be below the
- * TSC clocksource rating of 300. In configurations where Hyper-V offers
- * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
- * is available and preferred. With the higher rating, it will be the
- * default. On older hardware and Hyper-V versions, the TSC is marked
- * "unstable", so no TSC clocksource is created and the selected Hyper-V
- * clocksource will be the default.
*/
u64 (*hv_read_reference_counter)(void);
@@ -331,7 +380,7 @@ static u64 notrace read_hv_clock_tsc(void)
u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
if (current_tick == U64_MAX)
- hv_get_time_ref_count(current_tick);
+ current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
return current_tick;
}
@@ -352,9 +401,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
u64 tsc_msr;
/* Disable the TSC page */
- hv_get_reference_tsc(tsc_msr);
+ tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= ~BIT_ULL(0);
- hv_set_reference_tsc(tsc_msr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
}
@@ -364,39 +413,44 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
u64 tsc_msr;
/* Re-enable the TSC page */
- hv_get_reference_tsc(tsc_msr);
+ tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= GENMASK_ULL(11, 0);
tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
- hv_set_reference_tsc(tsc_msr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
}
+#ifdef VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
- hv_enable_vdso_clocksource();
+ vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
return 0;
}
+#endif
static struct clocksource hyperv_cs_tsc = {
.name = "hyperv_clocksource_tsc_page",
- .rating = 250,
+ .rating = 500,
.read = read_hv_clock_tsc_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend= suspend_hv_clock_tsc,
.resume = resume_hv_clock_tsc,
+#ifdef VDSO_CLOCKMODE_HVCLOCK
.enable = hv_cs_enable,
+ .vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
+#else
+ .vdso_clock_mode = VDSO_CLOCKMODE_NONE,
+#endif
};
static u64 notrace read_hv_clock_msr(void)
{
- u64 current_tick;
/*
* Read the partition counter to get the current tick count. This count
* is set to 0 when the partition is created and is incremented in
* 100 nanosecond units.
*/
- hv_get_time_ref_count(current_tick);
- return current_tick;
+ return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
}
static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
@@ -412,12 +466,36 @@ static u64 notrace read_hv_sched_clock_msr(void)
static struct clocksource hyperv_cs_msr = {
.name = "hyperv_clocksource_msr",
- .rating = 250,
+ .rating = 500,
.read = read_hv_clock_msr_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+/*
+ * Reference to pv_ops must be inline so objtool
+ * detection of noinstr violations can work correctly.
+ */
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+ /*
+ * We're on an architecture with generic sched clock (not x86/x64).
+ * The Hyper-V sched clock read function returns nanoseconds, not
+ * the normal 100ns units of the Hyper-V synthetic clock.
+ */
+ sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
+}
+#elif defined CONFIG_PARAVIRT
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+ /* We're on x86/x64 *and* using PV ops */
+ paravirt_set_sched_clock(sched_clock);
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
+static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
+#endif /* CONFIG_GENERIC_SCHED_CLOCK */
+
static bool __init hv_init_tsc_clocksource(void)
{
u64 tsc_msr;
@@ -429,6 +507,22 @@ static bool __init hv_init_tsc_clocksource(void)
if (hv_root_partition)
return false;
+ /*
+ * If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
+ * handles frequency and offset changes due to live migration,
+ * pause/resume, and other VM management operations. So lower the
+ * Hyper-V Reference TSC rating, causing the generic TSC to be used.
+ * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
+ * TSC will be preferred over the virtualized ARM64 arch counter.
+ * While the Hyper-V MSR clocksource won't be used since the
+ * Reference TSC clocksource is present, change its rating as
+ * well for consistency.
+ */
+ if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ hyperv_cs_tsc.rating = 250;
+ hyperv_cs_msr.rating = 250;
+ }
+
hv_read_reference_counter = read_hv_clock_tsc;
phys_addr = virt_to_phys(hv_get_tsc_page());
@@ -439,12 +533,11 @@ static bool __init hv_init_tsc_clocksource(void)
* (which already has at least the low 12 bits set to zero since
* it is page aligned). Also set the "enable" bit, which is bit 0.
*/
- hv_get_reference_tsc(tsc_msr);
+ tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= GENMASK_ULL(11, 0);
tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
- hv_set_reference_tsc(tsc_msr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
- hv_set_clocksource_vdso(hyperv_cs_tsc);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
hv_sched_clock_offset = hv_read_reference_counter();
@@ -457,7 +550,7 @@ void __init hv_init_clocksource(void)
{
/*
* Try to set up the TSC page clocksource. If it succeeds, we're
- * done. Otherwise, set up the MSR clocksoruce. At least one of
+ * done. Otherwise, set up the MSR clocksource. At least one of
* these will always be available except on very old versions of
* Hyper-V on x86. In that case we won't have a Hyper-V
* clocksource, but Linux will still run with a clocksource based
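The __weak hv_setup_stimer0_handler()/hv_remove_stimer0_handler() definitions earlier in this file are the usual weak-symbol arch hook: common code ships a no-op default, and an architecture that needs different wiring (here x86/x64, which lacks per-cpu IRQs) provides a strong definition of the same symbol that the linker picks instead. In sketch form:

    #include <linux/compiler.h>

    /* Common code: default that does nothing. */
    void __weak arch_hook(void (*handler)(void))
    {
    }

    /*
     * Arch code elsewhere: a non-weak definition with the identical
     * signature overrides the weak one at link time, e.g.
     *
     *      void arch_hook(void (*handler)(void))
     *      {
     *              saved_handler = handler;        // sketch only
     *      }
     */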
diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
index 029efc2731b4..06d25754e606 100644
--- a/drivers/clocksource/ingenic-ost.c
+++ b/drivers/clocksource/ingenic-ost.c
@@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
return PTR_ERR(ost->regs);
map = device_node_to_regmap(dev->parent->of_node);
- if (!map) {
+ if (IS_ERR(map)) {
dev_err(dev, "regmap not found");
- return -EINVAL;
+ return PTR_ERR(map);
}
ost->clk = devm_clk_get(dev, "ost");
@@ -167,13 +167,14 @@ static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = {
.is64bit = false,
};
-static const struct ingenic_ost_soc_info jz4770_ost_soc_info = {
+static const struct ingenic_ost_soc_info jz4760b_ost_soc_info = {
.is64bit = true,
};
static const struct of_device_id ingenic_ost_of_match[] = {
{ .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, },
- { .compatible = "ingenic,jz4770-ost", .data = &jz4770_ost_soc_info, },
+ { .compatible = "ingenic,jz4760b-ost", .data = &jz4760b_ost_soc_info, },
+ { .compatible = "ingenic,jz4770-ost", .data = &jz4760b_ost_soc_info, },
{ }
};
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 905fd6b163a8..24ed0f1f089b 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -264,6 +264,7 @@ static const struct ingenic_soc_info jz4725b_soc_info = {
static const struct of_device_id ingenic_tcu_of_match[] = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
+ { .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
{ /* sentinel */ }
@@ -358,6 +359,7 @@ err_free_ingenic_tcu:
TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
+TIMER_OF_DECLARE(jz4760_tcu_intc, "ingenic,jz4760-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index c98f8851fd68..d7ed99f0001f 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -339,8 +339,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
SH_CMT16_CMCSR_CKS512);
} else {
- sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
- SH_CMT32_CMCSR_CMTOUT_IE |
+ u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
+ SH_CMT32_CMCSR_CMTOUT_IE : 0;
+ sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
SH_CMT32_CMCSR_CMR_IRQ |
SH_CMT32_CMCSR_CKS_RCLK8);
}
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 787dbebbb432..27af17c99590 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -455,9 +455,9 @@ static int __init tcb_clksrc_init(struct device_node *node)
tcaddr = tc.regs;
if (bits == 32) {
- /* use apropriate function to read 32 bit counter */
+ /* use appropriate function to read 32 bit counter */
clksrc.read = tc_get_cycles32;
- /* setup ony channel 0 */
+ /* setup only channel 0 */
tcb_setup_single_chan(&tc, best_divisor_idx);
tc_sched_clock = tc_sched_clock_read32;
tc_delay_timer.read_current_timer = tc_delay_timer_read32;
diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c
index 12a2ed7cfaff..93f336ec875a 100644
--- a/drivers/clocksource/timer-fsl-ftm.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
@@ -116,7 +116,7 @@ static int ftm_set_next_event(unsigned long delta,
* to the MOD register latches the value into a buffer. The MOD
* register is updated with the value of its write buffer with
* the following scenario:
- * a, the counter source clock is diabled.
+ * a, the counter source clock is disabled.
*/
ftm_counter_disable(priv->clkevt_base);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index ab623b25a47b..cfa4ec7ef396 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -237,7 +237,7 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
break;
}
- /* Use the bigest prescaler if we didn't match one. */
+ /* Use the biggest prescaler if we didn't match one. */
if (*pres == MCHP_PIT64B_PRES_MAX)
*pres = MCHP_PIT64B_PRES_MAX - 1;
}
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
index 9780ffd8010e..a00520cbb660 100644
--- a/drivers/clocksource/timer-npcm7xx.c
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -208,5 +208,6 @@ static int __init npcm7xx_timer_init(struct device_node *np)
return 0;
}
+TIMER_OF_DECLARE(wpcm450, "nuvoton,wpcm450-timer", npcm7xx_timer_init);
TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 572da477c6d3..529cc6a51cdb 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -211,10 +211,10 @@ out_fail:
}
/**
- * timer_of_cleanup - release timer_of ressources
+ * timer_of_cleanup - release timer_of resources
* @to: timer_of structure
*
- * Release the ressources that has been used in timer_of_init().
+ * Release the resources that has been used in timer_of_init().
* This function should be called in init error cases
*/
void __init timer_of_cleanup(struct timer_of *to)
diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c
index a2dd85d0c1d7..6f37181a8c63 100644
--- a/drivers/clocksource/timer-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
@@ -71,7 +71,7 @@ static u64 notrace
pistachio_clocksource_read_cycles(struct clocksource *cs)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
- u32 counter, overflw;
+ u32 counter, overflow;
unsigned long flags;
/*
@@ -80,7 +80,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
*/
raw_spin_lock_irqsave(&pcs->lock, flags);
- overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+ overflow = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
raw_spin_unlock_irqrestore(&pcs->lock, flags);
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 33b3e8aa2cc5..b6f97960d8ee 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -2,6 +2,7 @@
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
struct dmtimer_systimer *t = &clkevt->t;
void __iomem *pend = t->base + t->pend;
- writel_relaxed(0xffffffff - cycles, t->base + t->counter);
while (readl_relaxed(pend) & WP_TCRR)
cpu_relax();
+ writel_relaxed(0xffffffff - cycles, t->base + t->counter);
- writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
while (readl_relaxed(pend) & WP_TCLR)
cpu_relax();
+ writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
return 0;
}
@@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
dmtimer_clockevent_shutdown(evt);
/* Looks like we need to first set the load value separately */
- writel_relaxed(clkevt->period, t->base + t->load);
while (readl_relaxed(pend) & WP_TLDR)
cpu_relax();
+ writel_relaxed(clkevt->period, t->base + t->load);
- writel_relaxed(clkevt->period, t->base + t->counter);
while (readl_relaxed(pend) & WP_TCRR)
cpu_relax();
+ writel_relaxed(clkevt->period, t->base + t->counter);
- writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
- t->base + t->ctrl);
while (readl_relaxed(pend) & WP_TCLR)
cpu_relax();
+ writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+ t->base + t->ctrl);
return 0;
}
@@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
}
-static int __init dmtimer_clockevent_init(struct device_node *np)
+static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
+ struct device_node *np,
+ unsigned int features,
+ const struct cpumask *cpumask,
+ const char *name,
+ int rating)
{
- struct dmtimer_clockevent *clkevt;
struct clock_event_device *dev;
struct dmtimer_systimer *t;
int error;
- clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
- if (!clkevt)
- return -ENOMEM;
-
t = &clkevt->t;
dev = &clkevt->dev;
@@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
* We mostly use cpuidle_coupled with ARM local timers for runtime,
* so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
*/
- dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- dev->rating = 300;
+ dev->features = features;
+ dev->rating = rating;
dev->set_next_event = dmtimer_set_next_event;
dev->set_state_shutdown = dmtimer_clockevent_shutdown;
dev->set_state_periodic = dmtimer_set_periodic;
dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+ dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
dev->tick_resume = dmtimer_clockevent_shutdown;
- dev->cpumask = cpu_possible_mask;
+ dev->cpumask = cpumask;
dev->irq = irq_of_parse_and_map(np, 0);
- if (!dev->irq) {
- error = -ENXIO;
- goto err_out_free;
- }
+ if (!dev->irq)
+ return -ENXIO;
error = dmtimer_systimer_setup(np, &clkevt->t);
if (error)
- goto err_out_free;
+ return error;
clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
@@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
- IRQF_TIMER, "clockevent", clkevt);
+ IRQF_TIMER, name, clkevt);
if (error)
goto err_out_unmap;
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
- pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
- of_find_property(np, "ti,timer-alwon", NULL) ?
+ pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
+ name, of_find_property(np, "ti,timer-alwon", NULL) ?
"always-on " : "", t->rate, np->parent);
- clockevents_config_and_register(dev, t->rate,
- 3, /* Timer internal resynch latency */
+ return 0;
+
+err_out_unmap:
+ iounmap(t->base);
+
+ return error;
+}
+
+static int __init dmtimer_clockevent_init(struct device_node *np)
+{
+ struct dmtimer_clockevent *clkevt;
+ int error;
+
+ clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+ if (!clkevt)
+ return -ENOMEM;
+
+ error = dmtimer_clkevt_init_common(clkevt, np,
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ cpu_possible_mask, "clockevent",
+ 300);
+ if (error)
+ goto err_out_free;
+
+ clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
+ 3, /* Timer internal resync latency */
0xffffffff);
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
- dev->suspend = omap_clockevent_idle;
- dev->resume = omap_clockevent_unidle;
+ clkevt->dev.suspend = omap_clockevent_idle;
+ clkevt->dev.resume = omap_clockevent_unidle;
}
return 0;
-err_out_unmap:
- iounmap(t->base);
-
err_out_free:
kfree(clkevt);
return error;
}
+/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
+static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
+
+static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
+{
+ struct dmtimer_clockevent *clkevt;
+ int error;
+
+ if (!cpu_possible(cpu))
+ return -EINVAL;
+
+ if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
+ !of_property_read_bool(np->parent, "ti,no-idle"))
+ pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
+
+ clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+
+ error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
+ cpumask_of(cpu), "percpu-dmtimer",
+ 500);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/* See TRM for timer internal resynch latency */
+static int omap_dmtimer_starting_cpu(unsigned int cpu)
+{
+ struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+ struct clock_event_device *dev = &clkevt->dev;
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
+ irq_force_affinity(dev->irq, cpumask_of(cpu));
+
+ return 0;
+}
+
+static int __init dmtimer_percpu_timer_startup(void)
+{
+ struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ if (t->sysc) {
+ cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
+ "clockevents/omap/gptimer:starting",
+ omap_dmtimer_starting_cpu, NULL);
+ }
+
+ return 0;
+}
+subsys_initcall(dmtimer_percpu_timer_startup);
+
+static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
+{
+ struct device_node *arm_timer;
+
+ arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+ if (of_device_is_available(arm_timer)) {
+ pr_warn_once("ARM architected timer wrap issue i940 detected\n");
+ return 0;
+ }
+
+ if (pa == 0x48034000) /* dra7 dmtimer3 */
+ return dmtimer_percpu_timer_init(np, 0);
+ else if (pa == 0x48036000) /* dra7 dmtimer4 */
+ return dmtimer_percpu_timer_init(np, 1);
+
+ return 0;
+}
+
/* Clocksource */
static struct dmtimer_clocksource *
to_dmtimer_clocksource(struct clocksource *cs)
@@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
if (clockevent == pa)
return dmtimer_clockevent_init(np);
+ if (of_machine_is_compatible("ti,dra7"))
+ return dmtimer_percpu_quirk_init(np, pa);
+
return 0;
}
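The reordered writes in dmtimer_set_next_event() and dmtimer_set_periodic() earlier in this file follow the dmtimer write-posting rule: with posted mode enabled, software must wait for the matching write-pending bit (TCRR/TCLR/TLDR in the pend register) to clear before writing the register, never after. As a sketch, with a hypothetical helper:

    #include <linux/io.h>
    #include <linux/processor.h>

    /* Posted-mode timer register write: stall until the previous
     * write to this register has retired, then issue the new one. */
    static void timer_write_posted(void __iomem *pend, u32 wp_bit,
                                   void __iomem *reg, u32 val)
    {
            while (readl_relaxed(pend) & wp_bit)
                    cpu_relax();
            writel_relaxed(val, reg);
    }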
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
index 1a86a4e7e344..911c92146eca 100644
--- a/drivers/clocksource/timer-vf-pit.c
+++ b/drivers/clocksource/timer-vf-pit.c
@@ -136,7 +136,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
/*
* The value for the LDVAL register trigger is calculated as:
* LDVAL trigger = (period / clock period) - 1
- * The pit is a 32-bit down count timer, when the conter value
+ * The pit is a 32-bit down count timer, when the counter value
* reaches 0, it will generate an interrupt, thus the minimal
* LDVAL trigger value is 1. And then the min_delta is
* minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index 856fb2045656..b8e75210a0e3 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
config CRYPTO_DEV_SUN8I_CE_HASH
bool "Enable support for hash on sun8i-ce"
depends on CRYPTO_DEV_SUN8I_CE
- select MD5
- select SHA1
- select SHA256
- select SHA512
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
help
Say y to enable support for hash algorithms.
@@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
config CRYPTO_DEV_SUN8I_SS_HASH
bool "Enable support for hash on sun8i-ss"
depends on CRYPTO_DEV_SUN8I_SS
- select MD5
- select SHA1
- select SHA256
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
help
Say y to enable support for hash algorithms.
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index c2e6f5ed1d79..dec79fa3ebaf 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
sizeof(struct sun4i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0)
goto error_pm;
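The pm_runtime_get_sync() conversions throughout this series exist because get_sync() bumps the usage count even when the resume fails, so every error path needed a matching put. pm_runtime_resume_and_get() drops the count itself on failure; a sketch of the resulting call pattern:

    #include <linux/pm_runtime.h>

    static int example_use_device(struct device *dev)
    {
            int err;

            /* On failure the usage count has already been dropped,
             * so a bare return does not unbalance the counter. */
            err = pm_runtime_resume_and_get(dev);
            if (err < 0)
                    return err;

            /* ... talk to the hardware ... */

            pm_runtime_put(dev);
            return 0;
    }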
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 709905ec4680..44b8fc4b786d 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev)
{
struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
- if (ss->reset)
- reset_control_assert(ss->reset);
+ reset_control_assert(ss->reset);
clk_disable_unprepare(ss->ssclk);
clk_disable_unprepare(ss->busclk);
@@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev)
goto err_enable;
}
- if (ss->reset) {
- err = reset_control_deassert(ss->reset);
- if (err) {
- dev_err(ss->dev, "Cannot deassert reset control\n");
- goto err_enable;
- }
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto err_enable;
}
return err;
@@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
- if (IS_ERR(ss->reset)) {
- if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
- return PTR_ERR(ss->reset);
+ if (IS_ERR(ss->reset))
+ return PTR_ERR(ss->reset);
+ if (!ss->reset)
dev_info(&pdev->dev, "no reset control found\n");
- ss->reset = NULL;
- }
/*
* Check that clock have the correct rates given in the datasheet
@@ -459,7 +454,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
* this info could be useful
*/
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
goto error_pm;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index c1b4585e9bbc..d28292762b32 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
op->ss = algt->ss;
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0)
return err;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 443160a114bb..491fcb7b81b4 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
return err;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 33707a2e55ff..54ae8d16e493 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -240,11 +240,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
} else {
if (nr_sgs > 0)
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
}
theend_iv:
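The sg_nents() changes here and in the sibling drivers fix a classic DMA API pitfall: dma_map_sg() may coalesce entries and return a smaller count, but dma_unmap_sg() must be called with the original number of entries, not the mapped count. Sketch:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int example_dma(struct device *dev, struct scatterlist *sg)
    {
            int nents = sg_nents(sg);       /* entries in the list */
            int mapped;

            mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
            if (mapped <= 0)
                    return -EINVAL;

            /* ... hand 'mapped' descriptors to the engine ... */

            /* Unmap with the original nents; passing the (possibly
             * smaller) mapped count here is the bug being fixed. */
            dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
            return 0;
    }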
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 158422ff5695..00194d1d9ae6 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
if (err)
goto error_alg;
- err = pm_runtime_get_sync(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
if (err < 0)
goto error_alg;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 2f09a37306e2..88194718a806 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -405,7 +405,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index cfde9ee4356b..cd1baee424a1 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, dma_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
goto err_iv;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index ed2a69f82e1c..9ef1c85c4aaa 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -232,10 +232,13 @@ sgd_next:
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
}
theend_iv:
@@ -351,7 +354,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0) {
dev_err(op->ss->dev, "pm error %d\n", err);
goto error_pm;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index e0ddc684798d..80e89066dbd1 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
if (err)
goto error_alg;
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
goto error_alg;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 11cbcbc83a7b..3c073eb3db03 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
bf = (__le32 *)pad;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result)
+ if (!result) {
+ kfree(pad);
return -ENOMEM;
+ }
for (i = 0; i < MAX_SG; i++) {
rctx->t_dst[i].addr = 0;
@@ -432,14 +434,14 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- kfree(pad);
-
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
- kfree(result);
theend:
+ kfree(pad);
+ kfree(result);
crypto_finalize_hash_request(engine, breq, err);
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 08a1473b2145..3191527928e4 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ss->dev, dma_iv)) {
dev_err(ss->dev, "Cannot DMA MAP IV\n");
- return -EFAULT;
+ err = -EFAULT;
+ goto err_free;
}
dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
@@ -167,6 +168,7 @@ err_iv:
memcpy(ctx->seed, d + dlen, ctx->slen);
}
memzero_explicit(d, todo);
+err_free:
kfree(d);
return err;
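The fix above turns a bare return -EFAULT into err = -EFAULT; goto err_free;, so the buffer holding PRNG output and seed material is still wiped and freed on the error path. The general pattern for buffers that carry secrets:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int example_with_secret(size_t len)
    {
            int err = 0;
            u8 *buf = kmalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            /* ... work that may fail sets err and falls through ... */

            /* memset() before kfree() can be elided as a dead store;
             * memzero_explicit() is guaranteed to execute. */
            memzero_explicit(buf, len);
            kfree(buf);
            return err;
    }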
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a3fa849b139a..ded732242732 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -115,7 +115,7 @@ int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}
-/**
+/*
* AES Functions
*/
static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
@@ -374,7 +374,7 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
-/**
+/*
* AES-CCM Functions
*/
@@ -489,7 +489,7 @@ int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
}
-/**
+/*
* AES-GCM Functions
*/
@@ -617,7 +617,7 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
return crypto4xx_crypt_aes_gcm(req, true);
}
-/**
+/*
* HASH SHA1 Functions
*/
static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
@@ -711,7 +711,7 @@ int crypto4xx_hash_digest(struct ahash_request *req)
ctx->sa_len, 0, NULL);
}
-/**
+/*
* SHA1 Algorithm
*/
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 8d1b918a0533..8278d98074e9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -44,7 +44,7 @@
#define PPC4XX_SEC_VERSION_STR "0.5"
-/**
+/*
* PPC4xx Crypto Engine Initialization Routine
*/
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
@@ -159,7 +159,7 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
ctx->sa_len = 0;
}
-/**
+/*
* alloc memory for the gather ring
* no need to alloc buf for the ring
* gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -268,7 +268,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
return tail;
}
-/**
+/*
* alloc memory for the gather ring
* no need to alloc buf for the ring
* gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -346,7 +346,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
return &dev->gdr[idx];
}
-/**
+/*
* alloc memory for the scatter ring
* need to alloc buf for the ring
* sdr_tail, sdr_head and sdr_count are initialized by this function
@@ -930,7 +930,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
return is_busy ? -EBUSY : -EINPROGRESS;
}
-/**
+/*
* Algorithm Registration Functions
*/
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
@@ -1097,7 +1097,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
} while (head != tail);
}
-/**
+/*
* Top Half of isr.
*/
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
@@ -1186,7 +1186,7 @@ static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
return 0;
}
-/**
+/*
* Supported Crypto Algorithms
*/
static struct crypto4xx_alg_common crypto4xx_alg[] = {
@@ -1369,7 +1369,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
} },
};
-/**
+/*
* Module Initialization Routine
*/
static int crypto4xx_probe(struct platform_device *ofdev)
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index a4e25b46cd0a..56c10668c0ab 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -188,7 +188,7 @@ int crypto4xx_hash_final(struct ahash_request *req);
int crypto4xx_hash_update(struct ahash_request *req);
int crypto4xx_hash_init(struct ahash_request *req);
-/**
+/*
 * Note: Only use this function to copy items that are word aligned.
*/
static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index c4c0a1a75941..1038061224da 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -104,7 +104,7 @@
#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
-/**
+/*
* Initialize CRYPTO ENGINE registers, and memory bases.
*/
#define PPC4XX_PDR_POLL 0x3ff
@@ -123,7 +123,7 @@
#define PPC4XX_INT_TIMEOUT_CNT 0
#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
-/**
+/*
 * all following defines are ad hoc
*/
#define PPC4XX_RING_RETRY 100
@@ -131,7 +131,7 @@
#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
-/**
+/*
 * Generic Security Association (SA) with all possible fields. These will
 * likely never be used except for reference purposes. The structure format
 * cannot be changed, as the hardware expects the layout as defined.
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index fe756abfc19f..e98e4e7abbad 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -14,7 +14,7 @@
#define AES_IV_SIZE 16
-/**
+/*
* Contents of Dynamic Security Association (SA) with all possible fields
*/
union dynamic_sa_contents {
@@ -122,7 +122,7 @@ union sa_command_0 {
#define SA_AES_KEY_LEN_256 4
#define SA_REV2 1
-/**
+/*
 * The following defines the bits of sa_command_1.
 * In basic hash mode this bit selects simple hash or HMAC.
 * In IPsec mode, this bit selects muting control.
@@ -172,7 +172,7 @@ struct dynamic_sa_ctl {
union sa_command_1 sa_command_1;
} __attribute__((packed));
-/**
+/*
* State Record for Security Association (SA)
*/
struct sa_state_record {
@@ -184,7 +184,7 @@ struct sa_state_record {
};
} __attribute__((packed));
-/**
+/*
* Security Association (SA) for AES128
*
*/
@@ -213,7 +213,7 @@ struct dynamic_sa_aes192 {
#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
#define SA_AES192_CONTENTS 0x3e000062
-/**
+/*
* Security Association (SA) for AES256
*/
struct dynamic_sa_aes256 {
@@ -228,7 +228,7 @@ struct dynamic_sa_aes256 {
#define SA_AES256_CONTENTS 0x3e000082
#define SA_AES_CONTENTS 0x3e000002
-/**
+/*
* Security Association (SA) for AES128 CCM
*/
struct dynamic_sa_aes128_ccm {
@@ -242,7 +242,7 @@ struct dynamic_sa_aes128_ccm {
#define SA_AES128_CCM_CONTENTS 0x3e000042
#define SA_AES_CCM_CONTENTS 0x3e000002
-/**
+/*
* Security Association (SA) for AES128_GCM
*/
struct dynamic_sa_aes128_gcm {
@@ -258,7 +258,7 @@ struct dynamic_sa_aes128_gcm {
#define SA_AES128_GCM_CONTENTS 0x3e000442
#define SA_AES_GCM_CONTENTS 0x3e000402
-/**
+/*
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
index 3af732f25c1c..7356716274cb 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.h
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 8b5e07316352..c6865cbd334b 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -236,10 +236,10 @@ static int meson_cipher(struct skcipher_request *areq)
dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
if (areq->src == areq->dst) {
- dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
}
if (areq->iv && ivsize > 0) {
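The fix above reflects a general DMA API rule: dma_map_sg() may coalesce entries and return fewer than it was given, but dma_unmap_sg() must be called with the original entry count. A minimal sketch of the pattern, with a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int sketch_dma_roundtrip(struct device *dev, struct scatterlist *sg)
{
        /* May return fewer entries than sg_nents(sg) after coalescing. */
        int nr_mapped = dma_map_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);

        if (!nr_mapped)
                return -EINVAL;

        /* ... hand the nr_mapped entries to the hardware ... */

        /* Unmap with the ORIGINAL count, not the mapped count. */
        dma_unmap_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);
        return 0;
}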
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 5bbeff433c8c..6e7ae896717c 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
struct meson_dev *mc;
int err, i;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 9bd8e5167be3..333fbefbbccb 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -26,7 +26,7 @@
static struct atmel_ecc_driver_data driver_data;
/**
- * atmel_ecdh_ctx - transformation context
+ * struct atmel_ecdh_ctx - transformation context
* @client : pointer to i2c client device
* @fallback : used for unsupported curves or when user wants to use its own
* private key.
@@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data;
* of the user to not call set_secret() while
* generate_public_key() or compute_shared_secret() are in flight.
* @curve_id : elliptic curve id
- * @n_sz : size in bytes of the n prime
* @do_fallback: true when the device doesn't support the curve or when the user
* wants to use its own private key.
*/
@@ -43,7 +42,6 @@ struct atmel_ecdh_ctx {
struct crypto_kpp *fallback;
const u8 *public_key;
unsigned int curve_id;
- size_t n_sz;
bool do_fallback;
};
@@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
int status)
{
struct kpp_request *req = areq;
- struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_i2c_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
@@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
goto free_work_data;
/* might want less than we've got */
- n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+ n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len);
/* copy the shared secret */
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
@@ -73,14 +70,6 @@ free_work_data:
kpp_request_complete(req, status);
}
-static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
-{
- if (curve_id == ECC_CURVE_NIST_P256)
- return ATMEL_ECC_NIST_P256_N_SIZE;
-
- return 0;
-}
-
/*
* A random private key is generated and stored in the device. The device
 * returns the corresponding public key.
@@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
- if (!ctx->n_sz || params.key_size) {
+ if (params.key_size) {
/* fallback to ecdh software implementation */
ctx->do_fallback = true;
return crypto_kpp_set_secret(ctx->fallback, buf, len);
@@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
goto free_cmd;
ctx->do_fallback = false;
- ctx->curve_id = params.curve_id;
atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
@@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
struct crypto_kpp *fallback;
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->client = atmel_ecc_i2c_client_alloc();
if (IS_ERR(ctx->client)) {
pr_err("tfm - i2c_client binding failed\n");
@@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
return ATMEL_ECC_PUBKEY_SIZE;
}
-static struct kpp_alg atmel_ecdh = {
+static struct kpp_alg atmel_ecdh_nist_p256 = {
.set_secret = atmel_ecdh_set_secret,
.generate_public_key = atmel_ecdh_generate_public_key,
.compute_shared_secret = atmel_ecdh_compute_shared_secret,
@@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = {
.max_size = atmel_ecdh_max_size,
.base = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
- .cra_name = "ecdh",
+ .cra_name = "ecdh-nist-p256",
.cra_driver_name = "atmel-ecdh",
.cra_priority = ATMEL_ECC_PRIORITY,
.cra_module = THIS_MODULE,
@@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client,
&driver_data.i2c_client_list);
spin_unlock(&driver_data.i2c_list_lock);
- ret = crypto_register_kpp(&atmel_ecdh);
+ ret = crypto_register_kpp(&atmel_ecdh_nist_p256);
if (ret) {
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
dev_err(&client->dev, "%s alg registration failed\n",
- atmel_ecdh.base.cra_driver_name);
+ atmel_ecdh_nist_p256.base.cra_driver_name);
} else {
dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
}
@@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
return -EBUSY;
}
- crypto_unregister_kpp(&atmel_ecdh);
+ crypto_unregister_kpp(&atmel_ecdh_nist_p256);
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
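With the curve now fixed at tfm init time, the driver still falls back to software for caller-supplied private keys (params.key_size != 0). A sketch of the usual fallback dispatch, with hypothetical sketch_* names; the allocation noted in the comment mirrors how CRYPTO_ALG_NEED_FALLBACK tfms are typically obtained:

#include <crypto/kpp.h>
#include <linux/errno.h>

struct sketch_ctx {
        struct crypto_kpp *fallback;    /* e.g. crypto_alloc_kpp("ecdh-nist-p256",
                                         * 0, CRYPTO_ALG_NEED_FALLBACK) */
        bool do_fallback;
};

static int sketch_generate_public_key(struct kpp_request *req,
                                      struct sketch_ctx *ctx)
{
        if (ctx->do_fallback) {
                kpp_request_set_tfm(req, ctx->fallback);
                return crypto_kpp_generate_public_key(req);
        }
        /* ... hardware path ... */
        return -EOPNOTSUPP;
}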
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index e8e8281e027d..6fd3e969211d 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -339,7 +339,7 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
if (bus_clk_rate > 1000000L) {
- dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+ dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
bus_clk_rate);
return -EINVAL;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 352d80cb5ae9..1b13f601fd95 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -434,7 +434,7 @@ static int atmel_sha_init(struct ahash_request *req)
ctx->flags = 0;
- dev_dbg(dd->dev, "init: digest size: %d\n",
+ dev_dbg(dd->dev, "init: digest size: %u\n",
crypto_ahash_digestsize(tfm));
switch (crypto_ahash_digestsize(tfm)) {
@@ -1102,7 +1102,7 @@ static int atmel_sha_start(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
int err;
- dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
ctx->op, req->nbytes);
err = atmel_sha_hw_init(dd);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 4d63cb13a54f..6f01c51e3c37 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1217,7 +1217,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
- dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
goto err_tasklet_kill;
}
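This removal (and the matching one in ccree's cc_driver.c below) follows from devm_ioremap_resource() printing its own diagnostic on failure; callers only need to propagate the error code. The minimal idiom:

void __iomem *base;

base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
        return PTR_ERR(base);   /* the helper has already logged the failure */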
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 851b149f7170..053315e260c2 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1019,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
* a SPU response message for an AEAD request. Includes buffers to catch SPU
* message headers and the response data.
* @mssg: mailbox message containing the receive sg
+ * @req: Crypto API request
* @rctx: crypto request context
* @rx_frag_num: number of scatterlist elements required to hold the
* SPU response message
@@ -2952,9 +2953,9 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
/**
* rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
- * cipher: AEAD structure
- * key: Key followed by 4 bytes of salt
- * keylen: Length of key plus salt, in bytes
+ * @cipher: AEAD structure
+ * @key: Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
*
* Extracts salt from key and stores it to be prepended to IV on each request.
* Digest is always 16 bytes
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 007abf92cc05..6283e8c6d51d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -457,7 +457,7 @@ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
* @cipher_mode: Algo type
* @data_size: Length of plaintext (bytes)
*
- * @Return: Length of padding, in bytes
+ * Return: Length of padding, in bytes
*/
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
unsigned int data_size)
@@ -510,10 +510,10 @@ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
}
/**
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+ * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
* in a SPU request after the AAD and before the payload.
* @cipher_mode: cipher mode
- * @iv_ctr_len: initialization vector length in bytes
+ * @iv_len: initialization vector length in bytes
*
* In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
* to include the IV as a separate field in the SPU request msg.
@@ -543,9 +543,9 @@ enum hash_type spum_hash_type(u32 src_sent)
/**
* spum_digest_size() - Determine the size of a hash digest to expect the SPU to
* return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
*
* When doing incremental hashing for an algorithm with a truncated hash
* (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
@@ -580,7 +580,7 @@ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
* @aead_parms: Parameters related to AEAD operation
* @data_size: Length of data to be encrypted or authenticated. If AEAD, does
* not include length of AAD.
-
+ *
* Return: the length of the SPU header in bytes. 0 if an error occurs.
*/
u32 spum_create_request(u8 *spu_hdr,
@@ -911,7 +911,7 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
* setkey() time in spu_cipher_req_init().
* @spu_hdr: Start of the request message header (MH field)
* @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound: 0 encrypt, 1 decrypt
+ * @is_inbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
* @data_size: Length of the data in the BD field
*
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 2db35b5ccaa2..07989bb8c220 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -543,7 +543,8 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/**
* spu2_fmd_init() - At setkey time, initialize the fixed meta data for
* subsequent skcipher requests for this context.
- * @spu2_cipher_type: Cipher algorithm
+ * @fmd: Start of FMD field to be written
+ * @spu2_type: Cipher algorithm
* @spu2_mode: Cipher mode
* @cipher_key_len: Length of cipher key, in bytes
* @cipher_iv_len: Length of cipher initialization vector, in bytes
@@ -598,7 +599,7 @@ static int spu2_fmd_init(struct SPU2_FMD *fmd,
* SPU request packet.
* @fmd: Start of FMD field to be written
* @is_inbound: true if decrypting. false if encrypting.
- * @authFirst: true if alg authenticates before encrypting
+ * @auth_first: true if alg authenticates before encrypting
* @protocol: protocol selector
* @cipher_type: cipher algorithm
* @cipher_mode: cipher mode
@@ -640,6 +641,7 @@ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
* spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
* SPU request packet.
* @fmd: Start of FMD field to be written
+ * @is_inbound: true if decrypting. false if encrypting.
* @assoc_size: Length of additional associated data, in bytes
* @auth_key_len: Length of authentication key, in bytes
* @cipher_key_len: Length of cipher key, in bytes
@@ -793,7 +795,7 @@ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
}
/**
- * spu_payload_length() - Given a SPU2 message header, extract the payload
+ * spu2_payload_length() - Given a SPU2 message header, extract the payload
* length.
* @spu_hdr: Start of SPU message header (FMD)
*
@@ -812,10 +814,11 @@ u32 spu2_payload_length(u8 *spu_hdr)
}
/**
- * spu_response_hdr_len() - Determine the expected length of a SPU response
+ * spu2_response_hdr_len() - Determine the expected length of a SPU response
* header.
* @auth_key_len: Length of authentication key, in bytes
* @enc_key_len: Length of encryption key, in bytes
+ * @is_hash: Unused
*
* For SPU2, includes just FMD. OMD is never requested.
*
@@ -827,7 +830,7 @@ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
}
/**
- * spu_hash_pad_len() - Calculate the length of hash padding required to extend
+ * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
* data to a full block size.
* @hash_alg: hash algorithm
* @hash_mode: hash mode
@@ -845,8 +848,10 @@ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
}
/**
- * spu2_gcm_ccm_padlen() - Determine the length of GCM/CCM padding for either
+ * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
* the AAD field or the data.
+ * @cipher_mode: Unused
+ * @data_size: Unused
*
* Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
*/
@@ -857,7 +862,7 @@ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
}
/**
- * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
+ * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
* associated data in a SPU2 output packet.
* @cipher_mode: cipher mode
* @assoc_len: length of additional associated data, in bytes
@@ -878,11 +883,11 @@ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
return resp_len;
}
-/*
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+/**
+ * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
* in a SPU request after the AAD and before the payload.
* @cipher_mode: cipher mode
- * @iv_ctr_len: initialization vector length in bytes
+ * @iv_len: initialization vector length in bytes
*
* For SPU2, AEAD IV is included in OMD and does not need to be repeated
* prior to the payload.
@@ -909,9 +914,9 @@ enum hash_type spu2_hash_type(u32 src_sent)
/**
* spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
* return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
*
*/
u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
@@ -921,7 +926,7 @@ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
}
/**
- * spu_create_request() - Build a SPU2 request message header, includint FMD and
+ * spu2_create_request() - Build a SPU2 request message header, including FMD and
* OMD.
* @spu_hdr: Start of buffer where SPU request header is to be written
* @req_opts: SPU request message options
@@ -1105,7 +1110,7 @@ u32 spu2_create_request(u8 *spu_hdr,
}
/**
- * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
+ * spu2_cipher_req_init() - Build an skcipher SPU2 request message header,
* including FMD and OMD.
* @spu_hdr: Location of start of SPU request (FMD field)
* @cipher_parms: Parameters describing cipher request
@@ -1162,11 +1167,11 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
}
/**
- * spu_cipher_req_finish() - Finish building a SPU request message header for a
+ * spu2_cipher_req_finish() - Finish building a SPU request message header for a
* block cipher request.
* @spu_hdr: Start of the request message header (MH field)
* @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound: 0 encrypt, 1 decrypt
+ * @is_inbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
* @data_size: Length of the data in the BD field
*
@@ -1222,7 +1227,7 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
}
/**
- * spu_request_pad() - Create pad bytes at the end of the data.
+ * spu2_request_pad() - Create pad bytes at the end of the data.
* @pad_start: Start of buffer where pad bytes are to be written
* @gcm_padding: Length of GCM padding, in bytes
* @hash_pad_len: Number of bytes of padding extend data to full block
@@ -1311,7 +1316,7 @@ u8 spu2_rx_status_len(void)
}
/**
- * spu_status_process() - Process the status from a SPU response message.
+ * spu2_status_process() - Process the status from a SPU response message.
* @statp: start of STATUS word
*
* Return: 0 - if status is good and response should be processed
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index c4669a96eaec..d5d9cabea55a 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -119,8 +119,8 @@ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
* @from_skip: number of bytes to skip in from_sg. Non-zero when previous
* request included part of the buffer in entry in from_sg.
* Assumes from_skip < from_sg->length.
- * @from_nents number of entries in from_sg
- * @length number of bytes to copy. may reach this limit before exhausting
+ * @from_nents: number of entries in from_sg
+ * @length: number of bytes to copy. may reach this limit before exhausting
* from_sg.
*
* Copies the entries themselves, not the data in the entries. Assumes to_sg has
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a780e627838a..8b8ed77d8715 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -71,6 +71,9 @@ struct caam_skcipher_alg {
* @adata: authentication algorithm details
* @cdata: encryption algorithm details
* @authsize: authentication tag (a.k.a. ICV / MAC) size
+ * @xts_key_fallback: true if fallback tfm needs to be used due
+ * to unsupported xts key lengths
+ * @fallback: xts fallback tfm
*/
struct caam_ctx {
struct caam_flc flc[NUM_OP];
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index dd5f101e43f8..e313233ec6de 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -187,7 +187,8 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
}
/**
- * Count leading zeros, need it to strip, from a given scatterlist
+ * caam_rsa_count_leading_zeros - Count the leading zeros to be stripped
+ * from a given scatterlist
*
* @sgl : scatterlist to count zeros from
* @nbytes: number of zeros, in bytes, to strip
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 711b1acdd4e0..06ee42e8a245 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -10,7 +10,6 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>
-#include <linux/version.h>
#include "cptpf.h"
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index 99b053094f5a..c288c4b51783 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -10,7 +10,7 @@
#include "nitrox_isr.h"
#include "nitrox_mbx.h"
-/**
+/*
* One vector for each type of ring
* - NPS packet ring, AQMQ ring and ZQMQ ring
*/
@@ -216,7 +216,7 @@ static void nps_core_int_tasklet(unsigned long data)
}
}
-/**
+/*
* nps_core_int_isr - interrupt handler for NITROX errors and
* mailbox communication
*/
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 53ef06792133..df95ba26b414 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -58,14 +58,15 @@ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
struct device *dev = DEV(ndev);
- dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
+ DMA_BIDIRECTIONAL);
dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
DMA_TO_DEVICE);
kfree(sr->in.sgcomp);
sr->in.sg = NULL;
sr->in.sgmap_cnt = 0;
- dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+ dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
DMA_BIDIRECTIONAL);
dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
DMA_TO_DEVICE);
@@ -178,7 +179,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
return 0;
incomp_err:
- dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
sr->in.sgmap_cnt = 0;
return ret;
}
@@ -203,7 +204,7 @@ static int dma_map_outbufs(struct nitrox_softreq *sr,
return 0;
outcomp_map_err:
- dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
sr->out.sgmap_cnt = 0;
sr->out.sg = NULL;
return ret;
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
index 58fb3ed6e644..54f6fb054119 100644
--- a/drivers/crypto/cavium/zip/common.h
+++ b/drivers/crypto/cavium/zip/common.h
@@ -56,7 +56,6 @@
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/version.h>
/* Device specific zlib function definitions */
#include "zip_device.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 88275b4867ea..5976530c00a8 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -59,7 +59,7 @@ struct ccp_crypto_queue {
#define CCP_CRYPTO_MAX_QLEN 100
static struct ccp_crypto_queue req_queue;
-static spinlock_t req_queue_lock;
+static DEFINE_SPINLOCK(req_queue_lock);
struct ccp_crypto_cmd {
struct list_head entry;
@@ -410,7 +410,6 @@ static int ccp_crypto_init(void)
return ret;
}
- spin_lock_init(&req_queue_lock);
INIT_LIST_HEAD(&req_queue.cmds);
req_queue.backlog = &req_queue.cmds;
req_queue.cmd_count = 0;
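Converting a file-scope lock to DEFINE_SPINLOCK() makes it valid from the start and drops the runtime init call (geode-aes.c below gets the same treatment). A sketch of the two equivalent forms:

#include <linux/spinlock.h>

/* Compile-time initialization: usable immediately. */
static DEFINE_SPINLOCK(sketch_static_lock);

/* Runtime equivalent: must call spin_lock_init() before first use. */
static spinlock_t sketch_runtime_lock;

static void sketch_init(void)
{
        spin_lock_init(&sketch_runtime_lock);
}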
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 0971ee60f840..6777582aa1ce 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -548,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-int ccp_dev_suspend(struct sp_device *sp)
+void ccp_dev_suspend(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -556,7 +556,7 @@ int ccp_dev_suspend(struct sp_device *sp)
/* If there's no device there's nothing to do */
if (!ccp)
- return 0;
+ return;
spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -572,11 +572,9 @@ int ccp_dev_suspend(struct sp_device *sp)
while (!ccp_queues_suspended(ccp))
wait_event_interruptible(ccp->suspend_queue,
ccp_queues_suspended(ccp));
-
- return 0;
}
-int ccp_dev_resume(struct sp_device *sp)
+void ccp_dev_resume(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -584,7 +582,7 @@ int ccp_dev_resume(struct sp_device *sp)
/* If there's no device there's nothing to do */
if (!ccp)
- return 0;
+ return;
spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -597,8 +595,6 @@ int ccp_dev_resume(struct sp_device *sp)
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
}
int ccp_dev_init(struct sp_device *sp)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d6a8f4e4b14a..bb88198c874e 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -2418,7 +2418,6 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dst.address += CCP_ECC_OUTPUT_SIZE;
ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
CCP_ECC_MODULUS_BYTES);
- dst.address += CCP_ECC_OUTPUT_SIZE;
/* Restore the workarea address */
dst.address = save;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index cb9b4c4e371e..da3872c48308 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -21,6 +21,7 @@
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
+#include <linux/cpufeature.h>
#include <asm/smp.h>
@@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp)
struct sev_device *sev;
int ret = -ENOMEM;
+ if (!boot_cpu_has(X86_FEATURE_SEV)) {
+ dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
+ return 0;
+ }
+
sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
if (!sev)
goto e_err;
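The new guard skips SEV setup entirely when the BIOS left memory encryption disabled, and deliberately returns 0 so the rest of the PSP keeps probing. A reduced sketch of the pattern, with a hypothetical sketch_subdev_init():

#include <linux/cpufeature.h>
#include <linux/device.h>

static int sketch_subdev_init(struct device *dev)
{
        /* Not an error: the parent device stays functional. */
        if (!boot_cpu_has(X86_FEATURE_SEV)) {
                dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
                return 0;
        }
        /* ... allocate and register the sub-device ... */
        return 0;
}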
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 6284a15e5047..7eb3e4668286 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -213,12 +213,8 @@ void sp_destroy(struct sp_device *sp)
int sp_suspend(struct sp_device *sp)
{
- int ret;
-
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_suspend(sp);
- if (ret)
- return ret;
+ ccp_dev_suspend(sp);
}
return 0;
@@ -226,12 +222,8 @@ int sp_suspend(struct sp_device *sp)
int sp_resume(struct sp_device *sp)
{
- int ret;
-
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_resume(sp);
- if (ret)
- return ret;
+ ccp_dev_resume(sp);
}
return 0;
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 0218d0670eee..20377e67f65d 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -134,8 +134,8 @@ struct sp_device *sp_get_psp_master_device(void);
int ccp_dev_init(struct sp_device *sp);
void ccp_dev_destroy(struct sp_device *sp);
-int ccp_dev_suspend(struct sp_device *sp);
-int ccp_dev_resume(struct sp_device *sp);
+void ccp_dev_suspend(struct sp_device *sp);
+void ccp_dev_resume(struct sp_device *sp);
#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
@@ -144,15 +144,8 @@ static inline int ccp_dev_init(struct sp_device *sp)
return 0;
}
static inline void ccp_dev_destroy(struct sp_device *sp) { }
-
-static inline int ccp_dev_suspend(struct sp_device *sp)
-{
- return 0;
-}
-static inline int ccp_dev_resume(struct sp_device *sp)
-{
- return 0;
-}
+static inline void ccp_dev_suspend(struct sp_device *sp) { }
+static inline void ccp_dev_resume(struct sp_device *sp) { }
#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
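The stubs above are the standard pattern for config-gated interfaces: when CONFIG_CRYPTO_DEV_SP_CCP is off, empty static inlines let call sites compile without #ifdef clutter. A generic sketch with hypothetical names:

#ifdef CONFIG_SKETCH_FEATURE
void sketch_suspend(struct sketch_dev *d);
void sketch_resume(struct sketch_dev *d);
#else
/* Empty stubs: callers compile unchanged when the feature is off. */
static inline void sketch_suspend(struct sketch_dev *d) { }
static inline void sketch_resume(struct sketch_dev *d) { }
#endif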
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f471dbaef1fb..f468594ef8af 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -356,6 +356,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
+ { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
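Enabling the new part is a one-line entry because device 0x1649 reuses dev_vdata[4]. For context, such PCI ID tables end with a zero sentinel and are typically exported for module autoloading; a sketch assuming the surrounding driver's dev_vdata array:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id sketch_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
        { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
        { 0, }  /* sentinel */
};
MODULE_DEVICE_TABLE(pci, sketch_pci_table);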
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
index 5e697a90ea7f..5c9d47f3be37 100644
--- a/drivers/crypto/ccp/tee-dev.c
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -5,7 +5,7 @@
* Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
* Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*/
#include <linux/types.h>
@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
if (!start_addr)
return -ENOMEM;
+ memset(start_addr, 0x0, ring_size);
rb_mgr->ring_start = start_addr;
rb_mgr->ring_size = ring_size;
rb_mgr->ring_pa = __psp_pa(start_addr);
@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
void *buf, size_t len, struct tee_ring_cmd **resp)
{
struct tee_ring_cmd *cmd;
- u32 rptr, wptr;
int nloop = 1000, ret = 0;
+ u32 rptr;
*resp = NULL;
mutex_lock(&tee->rb_mgr.mutex);
- wptr = tee->rb_mgr.wptr;
-
- /* Check if ring buffer is full */
+ /* Loop until empty entry found in ring buffer */
do {
+ /* Get pointer to ring buffer command entry */
+ cmd = (struct tee_ring_cmd *)
+ (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
+
rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
- if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
+ /* Check if ring buffer is full or command entry is waiting
+ * for response from TEE
+ */
+ if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+ cmd->flag == CMD_WAITING_FOR_RESPONSE))
break;
- dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
- rptr, wptr);
+ dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, tee->rb_mgr.wptr);
- /* Wait if ring buffer is full */
+ /* Wait if ring buffer is full or TEE is processing data */
mutex_unlock(&tee->rb_mgr.mutex);
schedule_timeout_interruptible(msecs_to_jiffies(10));
mutex_lock(&tee->rb_mgr.mutex);
} while (--nloop);
- if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
- dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
- rptr, wptr);
+ if (!nloop &&
+ (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+ cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
+ rptr, tee->rb_mgr.wptr, cmd->flag);
ret = -EBUSY;
goto unlock;
}
- /* Pointer to empty data entry in ring buffer */
- cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+ /* Do not submit command if PSP got disabled while processing any
+ * command in another thread
+ */
+ if (psp_dead) {
+ ret = -EBUSY;
+ goto unlock;
+ }
/* Write command data into ring buffer */
cmd->cmd_id = cmd_id;
@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
memset(&cmd->buf[0], 0, sizeof(cmd->buf));
memcpy(&cmd->buf[0], buf, len);
+ /* Indicate driver is waiting for response */
+ cmd->flag = CMD_WAITING_FOR_RESPONSE;
+
/* Update local copy of write pointer */
tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
@@ -309,14 +326,14 @@ static int tee_wait_cmd_completion(struct psp_tee_device *tee,
struct tee_ring_cmd *resp,
unsigned int timeout)
{
- /* ~5ms sleep per loop => nloop = timeout * 200 */
- int nloop = timeout * 200;
+ /* ~1ms sleep per loop => nloop = timeout * 1000 */
+ int nloop = timeout * 1000;
while (--nloop) {
if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
return 0;
- usleep_range(5000, 5100);
+ usleep_range(1000, 1100);
}
dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
return ret;
ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
- if (ret)
+ if (ret) {
+ resp->flag = CMD_RESPONSE_TIMEDOUT;
return ret;
+ }
memcpy(buf, &resp->buf[0], len);
*status = resp->status;
+ resp->flag = CMD_RESPONSE_COPIED;
+
return 0;
}
EXPORT_SYMBOL(psp_tee_process_cmd);
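The completion wait now polls about once per millisecond; nloop = timeout * 1000 keeps the overall timeout (given in seconds) unchanged while cutting response latency. The loop reduced to its essentials, with a hypothetical state pointer and TEE_CMD_STATE_COMPLETED as in the driver:

#include <linux/delay.h>
#include <linux/errno.h>

static int sketch_wait_done(u32 *state, unsigned int timeout)
{
        int nloop = timeout * 1000;     /* ~1 ms sleep per loop */

        while (--nloop) {
                if (READ_ONCE(*state) == TEE_CMD_STATE_COMPLETED)
                        return 0;
                usleep_range(1000, 1100);
        }
        return -ETIMEDOUT;
}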
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
index f09960112115..49d26158b71e 100644
--- a/drivers/crypto/ccp/tee-dev.h
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*
* Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
* Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
#define TEE_DEFAULT_TIMEOUT 10
-#define MAX_BUFFER_SIZE 992
+#define MAX_BUFFER_SIZE 988
/**
* enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
@@ -82,6 +82,20 @@ enum tee_cmd_state {
};
/**
+ * enum cmd_resp_state - TEE command's response status maintained by driver
+ * @CMD_RESPONSE_INVALID: initial state when no command is written to ring
+ * @CMD_WAITING_FOR_RESPONSE: driver waiting for response from TEE
+ * @CMD_RESPONSE_TIMEDOUT: failed to get response from TEE
+ * @CMD_RESPONSE_COPIED: driver has copied response from TEE
+ */
+enum cmd_resp_state {
+ CMD_RESPONSE_INVALID,
+ CMD_WAITING_FOR_RESPONSE,
+ CMD_RESPONSE_TIMEDOUT,
+ CMD_RESPONSE_COPIED,
+};
+
+/**
* struct tee_ring_cmd - Structure of the command buffer in TEE ring
* @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
* interface
@@ -91,6 +105,7 @@ enum tee_cmd_state {
* @pdata: private data (currently unused)
* @res1: reserved region
* @buf: TEE command specific buffer
+ * @flag: refers to &enum cmd_resp_state
*/
struct tee_ring_cmd {
u32 cmd_id;
@@ -100,6 +115,7 @@ struct tee_ring_cmd {
u64 pdata;
u32 res1[2];
u8 buf[MAX_BUFFER_SIZE];
+ u32 flag;
/* Total size: 1024 bytes */
} __packed;
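MAX_BUFFER_SIZE shrinks from 992 to 988 precisely to make room for the new 4-byte @flag while keeping each ring entry at 1024 bytes. A compile-time check of that invariant; the full field layout here is reconstructed from the hunks above and partly an assumption:

#include <linux/build_bug.h>
#include <linux/types.h>

struct sketch_ring_cmd {
        u32 cmd_id;
        u32 cmd_state;
        u32 status;
        u32 res0;               /* assumed reserved word */
        u64 pdata;
        u32 res1[2];
        u8  buf[988];           /* MAX_BUFFER_SIZE */
        u32 flag;
} __packed;                     /* 16 + 8 + 8 + 988 + 4 = 1024 bytes */

static_assert(sizeof(struct sketch_ring_cmd) == 1024);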
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index d0e59e942568..e599ac6dc162 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -352,10 +352,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
- if (IS_ERR(new_drvdata->cc_base)) {
- dev_err(dev, "Failed to ioremap registers");
+ if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
- }
dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
req_mem_cc_regs);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index f5a336634daa..6933546f87b1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -126,11 +126,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
return container_of(ctx->dev, struct uld_ctx, dev);
}
-static inline int is_ofld_imm(const struct sk_buff *skb)
-{
- return (skb->len <= SGE_MAX_WR_LEN);
-}
-
static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
@@ -769,13 +764,14 @@ static inline void create_wreq(struct chcr_context *ctx,
struct uld_ctx *u_ctx = ULD_CTX(ctx);
unsigned int tx_channel_id, rx_channel_id;
unsigned int txqidx = 0, rxqidx = 0;
- unsigned int qid, fid;
+ unsigned int qid, fid, portno;
get_qidxs(req, &txqidx, &rxqidx);
qid = u_ctx->lldi.rxq_ids[rxqidx];
fid = u_ctx->lldi.rxq_ids[0];
+ portno = rxqidx / ctx->rxq_perchan;
tx_channel_id = txqidx / ctx->txq_perchan;
- rx_channel_id = rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -797,15 +793,13 @@ static inline void create_wreq(struct chcr_context *ctx,
/**
* create_cipher_wr - form the WR for cipher operations
- * @req: cipher req.
- * @ctx: crypto driver context of the request.
- * @qid: ingress qid where response of this WR should be received.
- * @op_type: encryption or decryption
+ * @wrparam: Container for create_cipher_wr()'s parameters
*/
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
@@ -822,6 +816,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
dst_size = get_space_for_phys_dsgl(nents);
@@ -1559,7 +1554,8 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash)
/**
* create_hash_wr - Create hash work request
- * @req - Cipher req base
+ * @req: Cipher req base
+ * @param: Container for create_hash_wr()'s parameters
*/
static struct sk_buff *create_hash_wr(struct ahash_request *req,
struct hash_wr_param *param)
@@ -1580,6 +1576,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
int error = 0;
unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
param->sg_len) <= SGE_MAX_WR_LEN;
@@ -2438,6 +2435,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2457,6 +2455,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
if (req->cryptlen == 0)
return NULL;
@@ -2710,9 +2709,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
u32 temp;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
temp = req->assoclen + req->cryptlen +
@@ -2752,9 +2753,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct dsgl_walk dsgl_walk;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
reqctx->dst_ofst);
@@ -2958,6 +2961,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
@@ -2967,6 +2971,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen = req->assoclen - 8;
else
@@ -3127,6 +3133,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
@@ -3143,6 +3150,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
assoclen = req->assoclen - 8;
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f91f9d762a45..39c70e6255f9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -1,4 +1,4 @@
-/**
+/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
@@ -184,7 +184,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
+ pr_info_once("%s\n", DRV_DESC);
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
return ERR_PTR(-EOPNOTSUPP);
@@ -309,4 +309,3 @@ module_exit(chcr_crypto_exit);
MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index b02f981e7c32..f7c8bb95a71b 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -44,7 +44,6 @@
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
-#define DRV_VERSION "1.0.0.0-ko"
#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
#define MAX_PENDING_REQ_TO_HW 20
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 4ee010f39912..fa5a9f207bc9 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -21,7 +21,7 @@
/* Static structures */
static void __iomem *_iobase;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
/* Write a 128 bit field (either a writable key or IV) */
static inline void
@@ -383,8 +383,6 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto erequest;
}
- spin_lock_init(&lock);
-
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 843192666dc3..e572f9982d4e 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -68,6 +68,8 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
+ select CRYPTO_CURVE25519
+ select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
accelerator, which can accelerate RSA and DH algorithms.
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 181c109b19f7..e0b4a1982ee9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -10,6 +10,14 @@
#define HPRE_PF_DEF_Q_NUM 64
#define HPRE_PF_DEF_Q_BASE 0
+/*
+ * type used in qm sqc DW6.
+ * 0 - Algorithm which has been supported in V2, like RSA, DH and so on;
+ * 1 - ECC algorithm in V3.
+ */
+#define HPRE_V2_ALG_TYPE 0
+#define HPRE_V3_ECC_ALG_TYPE 1
+
enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
@@ -18,7 +26,6 @@ enum {
};
enum hpre_ctrl_dbgfs_file {
- HPRE_CURRENT_QM,
HPRE_CLEAR_ENABLE,
HPRE_CLUSTER_CTRL,
HPRE_DEBUG_FILE_NUM,
@@ -75,6 +82,9 @@ enum hpre_alg_type {
HPRE_ALG_KG_CRT = 0x3,
HPRE_ALG_DH_G2 = 0x4,
HPRE_ALG_DH = 0x5,
+ HPRE_ALG_ECC_MUL = 0xD,
+ /* shared by x25519 and x448, but x448 is not supported now */
+ HPRE_ALG_CURVE25519_MUL = 0x10,
};
struct hpre_sqe {
@@ -92,8 +102,8 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hisi_qp *hpre_create_qp(void);
-int hpre_algs_register(void);
-void hpre_algs_unregister(void);
+struct hisi_qp *hpre_create_qp(u8 type);
+int hpre_algs_register(struct hisi_qm *qm);
+void hpre_algs_unregister(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a87f9904087a..a380087c83f7 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
+#include <crypto/curve25519.h>
#include <crypto/dh.h>
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
@@ -36,6 +39,13 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+/* size in bytes of the n prime */
+#define HPRE_ECC_NIST_P192_N_SIZE 24
+#define HPRE_ECC_NIST_P256_N_SIZE 32
+
+/* size in bytes */
+#define HPRE_ECC_HW256_KSZ_B 32
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -61,14 +71,35 @@ struct hpre_dh_ctx {
* else if base if the counterpart public key we
* compute the shared secret
* ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ * low address: d--->n, please refer to Hisilicon HPRE UM
*/
- char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
+ char *xa_p;
dma_addr_t dma_xa_p;
char *g; /* m */
dma_addr_t dma_g;
};
+struct hpre_ecdh_ctx {
+ /* low address: p->a->k->b */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* low address: x->y */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
+struct hpre_curve25519_ctx {
+ /* low address: p->a->k */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* gx coordinate */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
@@ -80,7 +111,11 @@ struct hpre_ctx {
union {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
+ struct hpre_ecdh_ctx ecdh;
+ struct hpre_curve25519_ctx curve25519;
};
+ /* for ecc algorithms */
+ unsigned int curve_id;
};
struct hpre_asym_request {
@@ -91,6 +126,8 @@ struct hpre_asym_request {
union {
struct akcipher_request *rsa;
struct kpp_request *dh;
+ struct kpp_request *ecdh;
+ struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -152,12 +189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
}
}
-static struct hisi_qp *hpre_get_qp_and_start(void)
+static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
struct hisi_qp *qp;
int ret;
- qp = hpre_create_qp();
+ qp = hpre_create_qp(type);
if (!qp) {
pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
@@ -261,8 +298,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (unlikely(!tmp))
- return;
if (src) {
if (req->src)
@@ -272,8 +307,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (unlikely(!tmp))
- return;
if (req->dst) {
if (dst)
@@ -288,13 +321,16 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
+ struct device *dev = HPRE_DEV(ctx);
struct hpre_asym_request *req;
- int err, id, done;
+ unsigned int err, done, alg;
+ int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
#define HREE_HW_ERR_MASK 0x7ff
#define HREE_SQE_DONE_MASK 0x3
+#define HREE_ALG_TYPE_MASK 0x1f
id = (int)le16_to_cpu(sqe->tag);
req = ctx->req_list[id];
hpre_rm_req_from_ctx(req);
@@ -307,7 +343,11 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
HREE_SQE_DONE_MASK;
if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
- return 0;
+ return 0;
+
+ alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
+ dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
+ alg, done, err);
return -EINVAL;
}
@@ -413,7 +453,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_sqe *sqe = resp;
struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
-
if (unlikely(!req)) {
atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
return;
@@ -422,18 +461,29 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
req->cb(ctx, resp);
}
-static int hpre_ctx_init(struct hpre_ctx *ctx)
+static void hpre_stop_qp_and_put(struct hisi_qp *qp)
+{
+ hisi_qm_stop_qp(qp);
+ hisi_qm_free_qps(&qp, 1);
+}
+
+static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
struct hisi_qp *qp;
+ int ret;
- qp = hpre_get_qp_and_start();
+ qp = hpre_get_qp_and_start(type);
if (IS_ERR(qp))
return PTR_ERR(qp);
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ if (ret)
+ hpre_stop_qp_and_put(qp);
+
+ return ret;
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
@@ -510,7 +560,6 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
return ret;
}
-#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -674,7 +723,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return hpre_ctx_init(ctx);
+ return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@@ -683,7 +732,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
hpre_dh_clear_ctx(ctx, true);
}
-#endif
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
@@ -1100,7 +1148,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- ret = hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
if (ret)
crypto_free_akcipher(ctx->rsa.soft_tfm);
@@ -1115,6 +1163,734 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
crypto_free_akcipher(ctx->rsa.soft_tfm);
}
+static void hpre_key_to_big_end(u8 *data, int len)
+{
+ int i, j;
+ u8 tmp;
+
+ for (i = 0; i < len / 2; i++) {
+ j = len - i - 1;
+ tmp = data[j];
+ data[j] = data[i];
+ data[i] = tmp;
+ }
+}
+
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
+ bool is_ecdh)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (is_ecdh && ctx->ecdh.p) {
+ /* ecdh: p->a->k->b */
+ memzero_explicit(ctx->ecdh.p + shift, sz);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ } else if (!is_ecdh && ctx->curve25519.p) {
+ /* curve25519: p->a->k */
+ memzero_explicit(ctx->curve25519.p + shift, sz);
+ dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
+ ctx->curve25519.dma_p);
+ ctx->curve25519.p = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+static unsigned int hpre_ecdh_supported_curve(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_HW256_KSZ_B;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
+{
+ unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
+ u8 i = 0;
+
+ while (i < ndigits - 1) {
+ memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
+ i++;
+ }
+
+ memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
+ hpre_key_to_big_end((u8 *)addr, cur_sz);
+}
+
+static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
+ unsigned int cur_sz)
+{
+ unsigned int shifta = ctx->key_sz << 1;
+ unsigned int shiftb = ctx->key_sz << 2;
+ void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
+ void *a = ctx->ecdh.p + shifta - cur_sz;
+ void *b = ctx->ecdh.p + shiftb - cur_sz;
+ void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
+ void *y = ctx->ecdh.g + shifta - cur_sz;
+ const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
+ char *n;
+
+ if (unlikely(!curve))
+ return -EINVAL;
+
+ n = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
+ fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
+ fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
+ fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
+ fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
+ fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
+
+ if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
+ kfree(n);
+ return -EINVAL;
+ }
+
+ kfree(n);
+ return 0;
+}
+
+static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ return HPRE_ECC_NIST_P192_N_SIZE;
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_NIST_P256_N_SIZE;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, shift, curve_sz;
+ int ret;
+
+ ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
+ if (!ctx->key_sz)
+ return -EINVAL;
+
+ curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ if (!curve_sz || params->key_size > curve_sz)
+ return -EINVAL;
+
+ sz = ctx->key_sz;
+
+ if (!ctx->ecdh.p) {
+ ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
+ GFP_KERNEL);
+ if (!ctx->ecdh.p)
+ return -ENOMEM;
+ }
+
+ shift = sz << 2;
+ ctx->ecdh.g = ctx->ecdh.p + shift;
+ ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
+
+ ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
+ if (ret) {
+ dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool hpre_key_is_zero(char *key, unsigned short key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++)
+ if (key[i])
+ return false;
+
+ return true;
+}
+
+static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, sz_shift;
+ struct ecdh params;
+ int ret;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(dev, "failed to decode ecdh key!\n");
+ return -EINVAL;
+ }
+
+ if (hpre_key_is_zero(params.key, params.key_size)) {
+ dev_err(dev, "Invalid hpre key!\n");
+ return -EINVAL;
+ }
+
+ hpre_ecc_clear_ctx(ctx, false, true);
+
+ ret = hpre_ecdh_set_param(ctx, &params);
+ if (ret < 0) {
+ dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
+ return ret;
+ }
+
+ sz = ctx->key_sz;
+ sz_shift = (sz << 1) + sz - params.key_size;
+ memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
+
+ return 0;
+}
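
The sz_shift arithmetic right-aligns a possibly shorter private key inside the sz-byte k slot that starts at offset 2*sz. A small sketch of the offset computation (hypothetical standalone code; the 32-byte slot size for P-192 assumes HPRE_ECC_HW256_KSZ_B is 32):

#include <assert.h>

/* Right-align a key of key_size bytes in the sz-byte k slot at 2*sz. */
static unsigned int k_offset(unsigned int sz, unsigned int key_size)
{
	return (sz << 1) + sz - key_size;	/* 2*sz + (sz - key_size) */
}

int main(void)
{
	/* NIST P-192 in 32-byte hardware slots: a 24-byte key. */
	assert(k_offset(32, 24) == 72);	/* k slot spans [64, 96) */
	return 0;
}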
+
+static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
+}
+
+static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ char *p;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.ecdh;
+ areq->dst_len = ctx->key_sz << 1;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ p = sg_virt(areq->dst);
+ memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
+ memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
+
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (req->dst_len < ctx->key_sz << 1) {
+ req->dst_len = ctx->key_sz << 1;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_ecdh_cb;
+ h_req->areq.ecdh = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->ecdh.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int tmpshift;
+ dma_addr_t dma = 0;
+ void *ptr;
+ int shift;
+
+ /* Src_data includes gx and gy. */
+ shift = ctx->key_sz - (len >> 1);
+ if (unlikely(shift < 0))
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ tmpshift = ctx->key_sz << 1;
+ scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
+ memcpy(ptr + shift, ptr + tmpshift, len >> 1);
+ memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+}
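
The staging above first copies the raw peer point into scratch space, then right-aligns each coordinate in its own sz-byte slot (an inference from the offsets, summarized here):

/*
 * Inferred staging of the peer public key (gx || gy, len bytes total)
 * in the 4*sz input buffer, with shift = sz - len/2:
 *
 *   [2*sz]        raw copy of gx || gy (scratch)
 *   [shift]       gx, right-aligned in the first sz-byte slot
 *   [sz + shift]  gy, right-aligned in the second sz-byte slot
 */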
+
+static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_ecdh_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->ecdh.dma_g);
+ }
+
+ ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ /* max size is the pub_key_size, including x and y */
+ return ctx->key_sz << 1;
+}
+
+static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P192;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P256;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, true);
+}
+
+static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ u8 secret[CURVE25519_KEY_SIZE] = { 0 };
+ unsigned int sz = ctx->key_sz;
+ const struct ecc_curve *curve;
+ unsigned int shift = sz << 1;
+ void *p;
+
+ /*
+ * The key from 'buf' is in little-endian byte order; preprocess it as
+ * described in RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"),
+ * then convert it to big-endian. Only then does the result match the
+ * software curve25519 implementation already in crypto.
+ */
+ memcpy(secret, buf, len);
+ curve25519_clamp_secret(secret);
+ hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
+
+ p = ctx->curve25519.p + sz - len;
+
+ curve = ecc_get_curve25519();
+
+ /* fill curve parameters */
+ fill_curve_param(p, curve->p, len, curve->g.ndigits);
+ fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
+ memcpy(p + shift, secret, len);
+ fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
+ memzero_explicit(secret, CURVE25519_KEY_SIZE);
+}
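
A minimal userspace sketch of the RFC 7748 clamping quoted in the comment above, mirroring what curve25519_clamp_secret() is expected to do (hypothetical standalone code):

#include <stdint.h>

#define CURVE25519_KEY_SIZE 32

/* RFC 7748 decodeScalar25519: clear low 3 bits, clear top bit, set bit 254. */
static void clamp_secret(uint8_t k[CURVE25519_KEY_SIZE])
{
	k[0] &= 248;
	k[31] &= 127;
	k[31] |= 64;
}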
+
+static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ /* p->a->k->gx */
+ if (!ctx->curve25519.p) {
+ ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
+ &ctx->curve25519.dma_p,
+ GFP_KERNEL);
+ if (!ctx->curve25519.p)
+ return -ENOMEM;
+ }
+
+ ctx->curve25519.g = ctx->curve25519.p + shift + sz;
+ ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
+
+ hpre_curve25519_fill_curve(ctx, buf, len);
+
+ return 0;
+}
+
+static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ int ret = -EINVAL;
+
+ if (len != CURVE25519_KEY_SIZE ||
+ !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "key is null or key len is not 32bytes!\n");
+ return ret;
+ }
+
+ /* Free old secret if any */
+ hpre_ecc_clear_ctx(ctx, false, false);
+
+ ctx->key_sz = CURVE25519_KEY_SIZE;
+ ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
+ if (ret) {
+ dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
+ hpre_ecc_clear_ctx(ctx, false, false);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
+}
+
+static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.curve25519;
+ areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
+
+ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (unlikely(req->dst_len < ctx->key_sz)) {
+ req->dst_len = ctx->key_sz;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_curve25519_cb;
+ h_req->areq.curve25519 = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->curve25519.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static void hpre_curve25519_src_modulo_p(u8 *ptr)
+{
+ int i;
+
+ for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
+ ptr[i] = 0;
+
+ /* The reduced value is ptr's last byte minus 0xed (the last byte of p) */
+ ptr[i] -= 0xed;
+}
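
Why zeroing all but the last byte is a full reduction: after the top bit has been masked, the big-endian value lies below 2^255, so any value >= p = 2^255 - 19 must have the byte pattern 7f ff .. ff XX with XX >= 0xed, and subtracting p leaves only XX - 0xed in the last byte. A tiny check of the arithmetic (hypothetical standalone code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* p = 2^255 - 19; its big-endian bytes are 7f ff ... ff ed. */
	uint8_t x_last = 0xf3;	/* example: x = 2^255 - 13, so x >= p */

	/* x - p fits in the last byte: 0xf3 - 0xed = 6, all others 0. */
	assert(x_last - 0xed == 6);
	return 0;
}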
+
+static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ u8 p[CURVE25519_KEY_SIZE] = { 0 };
+ const struct ecc_curve *curve;
+ dma_addr_t dma = 0;
+ u8 *ptr;
+
+ if (len != CURVE25519_KEY_SIZE) {
+ dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
+ return -EINVAL;
+ }
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(ptr, data, 0, len, 0);
+
+ if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "gx is null!\n");
+ goto err;
+ }
+
+ /*
+ * Src_data (gx) is in little-endian order; the MSB of the final byte
+ * must be masked as described in RFC 7748, then the data is converted
+ * to big-endian form so the HPRE hardware can use it.
+ */
+ ptr[31] &= 0x7f;
+ hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
+
+ curve = ecc_get_curve25519();
+
+ fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
+
+ /*
+ * When src_data falls in the range (2^255 - 19) to (2^255 - 1), it is
+ * not less than p; reduce it modulo p before use.
+ */
+ if (memcmp(ptr, p, ctx->key_sz) >= 0)
+ hpre_curve25519_src_modulo_p(ptr);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+
+err:
+ dma_free_coherent(dev, ctx->key_sz, ptr, dma);
+ return -EINVAL;
+}
+
+static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (!data || !sg_is_last(data) || len != ctx->key_sz) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_curve25519_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_curve25519_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n",
+ ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->curve25519.dma_g);
+ }
+
+ ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, false);
+}
+
static struct akcipher_alg rsa = {
.sign = hpre_rsa_dec,
.verify = hpre_rsa_enc,
@@ -1135,7 +1911,6 @@ static struct akcipher_alg rsa = {
},
};
-#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
.set_secret = hpre_dh_set_secret,
.generate_public_key = hpre_dh_compute_value,
@@ -1152,9 +1927,83 @@ static struct kpp_alg dh = {
.cra_module = THIS_MODULE,
},
};
-#endif
-int hpre_algs_register(void)
+static struct kpp_alg ecdh_nist_p192 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg ecdh_nist_p256 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg curve25519_alg = {
+ .set_secret = hpre_curve25519_set_secret,
+ .generate_public_key = hpre_curve25519_compute_value,
+ .compute_shared_secret = hpre_curve25519_compute_value,
+ .max_size = hpre_curve25519_max_size,
+ .init = hpre_curve25519_init_tfm,
+ .exit = hpre_curve25519_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "curve25519",
+ .cra_driver_name = "hpre-curve25519",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int hpre_register_ecdh(void)
+{
+ int ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p192);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p256);
+ if (ret) {
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_unregister_ecdh(void)
+{
+ crypto_unregister_kpp(&ecdh_nist_p256);
+ crypto_unregister_kpp(&ecdh_nist_p192);
+}
+
+int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
@@ -1162,19 +2011,37 @@ int hpre_algs_register(void)
ret = crypto_register_akcipher(&rsa);
if (ret)
return ret;
-#ifdef CONFIG_CRYPTO_DH
+
ret = crypto_register_kpp(&dh);
if (ret)
- crypto_unregister_akcipher(&rsa);
-#endif
+ goto unreg_rsa;
+
+ if (qm->ver >= QM_HW_V3) {
+ ret = hpre_register_ecdh();
+ if (ret)
+ goto unreg_dh;
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret)
+ goto unreg_ecdh;
+ }
+ return 0;
+unreg_ecdh:
+ hpre_unregister_ecdh();
+unreg_dh:
+ crypto_unregister_kpp(&dh);
+unreg_rsa:
+ crypto_unregister_akcipher(&rsa);
return ret;
}
-void hpre_algs_unregister(void)
+void hpre_algs_unregister(struct hisi_qm *qm)
{
- crypto_unregister_akcipher(&rsa);
-#ifdef CONFIG_CRYPTO_DH
+ if (qm->ver >= QM_HW_V3) {
+ crypto_unregister_kpp(&curve25519_alg);
+ hpre_unregister_ecdh();
+ }
+
crypto_unregister_kpp(&dh);
-#endif
+ crypto_unregister_akcipher(&rsa);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index e7a2c70eb9cf..046bc962c8b2 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,7 +13,6 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_COMM_CNT_CLR_CE 0x0
@@ -119,7 +118,6 @@ static struct hisi_qm_list hpre_devices = {
};
static const char * const hpre_debug_file_name[] = {
- [HPRE_CURRENT_QM] = "current_qm",
[HPRE_CLEAR_ENABLE] = "rdclr_en",
[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
@@ -226,41 +224,44 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");
-struct hisi_qp *hpre_create_qp(void)
+struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
struct hisi_qp *qp = NULL;
int ret;
- ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+ if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
+ return NULL;
+
+ /*
+ * type: 0 - RSA/DH algorithms supported in V2,
+ *       1 - ECC algorithms in V3.
+ */
+ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
if (!ret)
return qp;
return NULL;
}
-static void hpre_pasid_enable(struct hisi_qm *qm)
+static void hpre_config_pasid(struct hisi_qm *qm)
{
- u32 val;
-
- val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
- val |= BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
- val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
- val |= BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
-}
+ u32 val1, val2;
-static void hpre_pasid_disable(struct hisi_qm *qm)
-{
- u32 val;
+ if (qm->ver >= QM_HW_V3)
+ return;
- val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
- val &= ~BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
- val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
- val &= ~BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+ val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ if (qm->use_sva) {
+ val1 |= BIT(HPRE_PASID_EN_BIT);
+ val2 |= BIT(HPRE_PASID_EN_BIT);
+ } else {
+ val1 &= ~BIT(HPRE_PASID_EN_BIT);
+ val2 &= ~BIT(HPRE_PASID_EN_BIT);
+ }
+ writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
+ writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
@@ -320,7 +321,7 @@ static int hpre_set_cluster(struct hisi_qm *qm)
}
/*
- * For Kunpeng 920, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
* Or it may stay in D3 state when we bind and unbind hpre quickly,
* as it does FLR triggered by hardware.
*/
@@ -383,15 +384,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
if (qm->ver == QM_HW_V2) {
ret = hpre_cfg_by_dsm(qm);
if (ret)
- dev_err(dev, "acpi_evaluate_dsm err.\n");
+ return ret;
disable_flr_of_bme(qm);
-
- /* Enable data buffer pasid */
- if (qm->use_sva)
- hpre_pasid_enable(qm);
}
+ /* Configure the data buffer PASID needed by Kunpeng 920 */
+ hpre_config_pasid(qm);
+
return ret;
}
@@ -401,10 +401,6 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
unsigned long offset;
int i;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* clear clusterX/cluster_ctrl */
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
@@ -456,49 +452,6 @@ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
return &hpre->qm;
}
-static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
-{
- struct hisi_qm *qm = hpre_file_to_qm(file);
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
-{
- struct hisi_qm *qm = hpre_file_to_qm(file);
- u32 num_vfs = qm->vfs_num;
- u32 vfq_num, tmp;
-
- if (val > num_vfs)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
- if (val == num_vfs) {
- qm->debug.curr_qm_qp_num =
- qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
- } else {
- qm->debug.curr_qm_qp_num = vfq_num;
- }
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
@@ -519,7 +472,7 @@ static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
- return 0;
+ return 0;
}
static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
@@ -541,7 +494,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
- return 0;
+ return 0;
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -554,9 +507,6 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->type) {
- case HPRE_CURRENT_QM:
- val = hpre_current_qm_read(file);
- break;
case HPRE_CLEAR_ENABLE:
val = hpre_clear_enable_read(file);
break;
@@ -597,11 +547,6 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
spin_lock_irq(&file->lock);
switch (file->type) {
- case HPRE_CURRENT_QM:
- ret = hpre_current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case HPRE_CLEAR_ENABLE:
ret = hpre_clear_enable_write(file, val);
if (ret)
@@ -740,11 +685,6 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
int ret;
- ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
- HPRE_CURRENT_QM);
- if (ret)
- return ret;
-
ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
HPRE_CLEAR_ENABLE);
if (ret)
@@ -812,9 +752,9 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
}
if (pdev->revision >= QM_HW_V3)
- qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+ qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
else
- qm->algs = "rsa\ndh\n";
+ qm->algs = "rsa\ndh";
qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
@@ -867,6 +807,20 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
+static void hpre_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR;
+ err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+ err_info->msi_wr_port = HPRE_WR_MSI_PORT;
+ err_info->acpi_rst = "HRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+}
+
static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_init = hpre_set_user_domain_and_cache,
.hw_err_enable = hpre_hw_error_enable,
@@ -875,16 +829,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR,
- .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE,
- .msi_wr_port = HPRE_WR_MSI_PORT,
- .acpi_rst = "HRST",
- }
+ .err_info_init = hpre_err_info_init,
};
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -892,13 +837,12 @@ static int hpre_pf_probe_init(struct hpre *hpre)
struct hisi_qm *qm = &hpre->qm;
int ret;
- qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
-
ret = hpre_set_user_domain_and_cache(qm);
if (ret)
return ret;
qm->err_ini = &hpre_err_ini;
+ qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
return 0;
@@ -1006,8 +950,6 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) {
- if (qm->use_sva && qm->ver == QM_HW_V2)
- hpre_pasid_disable(qm);
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
hisi_qm_dev_err_uninit(qm);
@@ -1016,7 +958,6 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_uninit(qm);
}
-
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
@@ -1075,4 +1016,5 @@ module_exit(hpre_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 13cb4216561a..ce439a0c66c9 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -38,6 +38,7 @@
#define QM_MB_CMD_SQC_BT 0x4
#define QM_MB_CMD_CQC_BT 0x5
#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
#define QM_MB_CMD_SEND_BASE 0x300
#define QM_MB_EVENT_SHIFT 8
@@ -93,6 +94,12 @@
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QUE_ISO_CFG_V 0x0030
+#define QM_QUE_ISO_EN 0x100154
+#define QM_CAPABILITY 0x100158
+#define QM_QP_NUM_MASK GENMASK(10, 0)
+#define QM_QP_DB_INTERVAL 0x10000
+#define QM_QP_MAX_NUM_SHIFT 11
#define QM_DB_CMD_SHIFT_V2 12
#define QM_DB_RAND_SHIFT_V2 16
#define QM_DB_INDEX_SHIFT_V2 32
@@ -129,9 +136,9 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
+#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
-#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
+#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
@@ -164,6 +171,14 @@
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_DFX_MB_CNT_VF 0x104010
+#define QM_DFX_DB_CNT_VF 0x104020
+#define QM_DFX_SQE_CNT_VF_SQN 0x104030
+#define QM_DFX_CQE_CNT_VF_CQN 0x104040
+#define QM_DFX_QN_SHIFT 16
+#define CURRENT_FUN_MASK GENMASK(5, 0)
+#define CURRENT_Q_MASK GENMASK(31, 16)
+
#define POLL_PERIOD 10
#define POLL_TIMEOUT 1000
#define WAIT_PERIOD_US_MAX 200
@@ -173,6 +188,7 @@
#define QM_CACHE_WB_DONE 0x208
#define PCI_BAR_2 2
+#define PCI_BAR_4 4
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
@@ -334,6 +350,7 @@ struct hisi_qm_hw_ops {
void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
+ int (*stop_qp)(struct hisi_qp *qp);
};
struct qm_dfx_item {
@@ -350,6 +367,7 @@ static struct qm_dfx_item qm_dfx_files[] = {
};
static const char * const qm_debug_file_name[] = {
+ [CURRENT_QM] = "current_qm",
[CURRENT_Q] = "current_q",
[CLEAR_ENABLE] = "clear_enable",
};
@@ -373,6 +391,8 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
+ { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
+ { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
{ /* sentinel */ }
};
@@ -557,21 +577,22 @@ static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
- u64 doorbell;
- u64 dbase;
+ void __iomem *io_base = qm->io_base;
u16 randata = 0;
+ u64 doorbell;
if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
- dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
+ io_base = qm->db_io_base + (u64)qn * qm->db_interval +
+ QM_DOORBELL_SQ_CQ_BASE_V2;
else
- dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
+ io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
((u64)randata << QM_DB_RAND_SHIFT_V2) |
((u64)index << QM_DB_INDEX_SHIFT_V2) |
((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
- writeq(doorbell, qm->io_base + dbase);
+ writeq(doorbell, io_base);
}
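
With the shift constants defined earlier in this file (cmd at bit 12, randata at 16, index at 32; the V2 priority shift is not shown here and is assumed to be 48, as in V1), the composed doorbell is a single 64-bit word. A sketch of the packing (hypothetical standalone code):

#include <stdint.h>

#define DB_CMD_SHIFT	12
#define DB_RAND_SHIFT	16
#define DB_INDEX_SHIFT	32
#define DB_PRIO_SHIFT	48	/* assumed, matching the V1 define */

static uint64_t pack_doorbell(uint16_t qn, uint8_t cmd, uint16_t randata,
			      uint16_t index, uint8_t priority)
{
	return (uint64_t)qn |
	       ((uint64_t)cmd << DB_CMD_SHIFT) |
	       ((uint64_t)randata << DB_RAND_SHIFT) |
	       ((uint64_t)index << DB_INDEX_SHIFT) |
	       ((uint64_t)priority << DB_PRIO_SHIFT);
}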
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
@@ -865,6 +886,26 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
return 0;
}
+static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
+{
+ u32 remain_q_num, vfq_num;
+ u32 num_vfs = qm->vfs_num;
+
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+ if (vfq_num >= qm->max_qp_num)
+ return qm->max_qp_num;
+
+ remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
+ if (vfq_num + remain_q_num <= qm->max_qp_num)
+ return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
+
+ /*
+ * If vfq_num + remain_q_num > max_qp_num, each of the last
+ * remain_q_num VFs gets one extra queue.
+ */
+ return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
+}
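
A worked example of the distribution above, replicated with hypothetical numbers (standalone code, not from this driver):

#include <assert.h>

static unsigned int vf_qp_num(unsigned int ctrl_qp, unsigned int pf_qp,
			      unsigned int num_vfs, unsigned int max_qp,
			      unsigned int fun_num)
{
	unsigned int vfq = (ctrl_qp - pf_qp) / num_vfs;
	unsigned int remain = (ctrl_qp - pf_qp) % num_vfs;

	if (vfq >= max_qp)
		return max_qp;
	if (vfq + remain <= max_qp)
		return fun_num == num_vfs ? vfq + remain : vfq;
	return fun_num + remain > num_vfs ? vfq + 1 : vfq;
}

int main(void)
{
	/* 960 queues over 50 VFs with a 20-queue cap: 19 each, 10 left over. */
	assert(vf_qp_num(1024, 64, 50, 20, 1) == 19);
	assert(vf_qp_num(1024, 64, 50, 20, 41) == 20);	/* last 10 VFs get +1 */
	return 0;
}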
+
static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
struct qm_debug *debug = file->debug;
@@ -918,6 +959,41 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
return 0;
}
+static u32 current_qm_read(struct debugfs_file *file)
+{
+ struct hisi_qm *qm = file_to_qm(file);
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int current_qm_write(struct debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = file_to_qm(file);
+ u32 tmp;
+
+ if (val > qm->vfs_num)
+ return -EINVAL;
+
+ /* Calculate curr_qm_qp_num and store it according to the PF or VF Dev ID */
+ if (!val)
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ else
+ qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
static ssize_t qm_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
@@ -929,6 +1005,9 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
mutex_lock(&file->lock);
switch (index) {
+ case CURRENT_QM:
+ val = current_qm_read(file);
+ break;
case CURRENT_Q:
val = current_q_read(file);
break;
@@ -971,27 +1050,24 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
mutex_lock(&file->lock);
switch (index) {
+ case CURRENT_QM:
+ ret = current_qm_write(file, val);
+ break;
case CURRENT_Q:
ret = current_q_write(file, val);
- if (ret)
- goto err_input;
break;
case CLEAR_ENABLE:
ret = clear_enable_write(file, val);
- if (ret)
- goto err_input;
break;
default:
ret = -EINVAL;
- goto err_input;
}
mutex_unlock(&file->lock);
- return count;
+ if (ret)
+ return ret;
-err_input:
- mutex_unlock(&file->lock);
- return ret;
+ return count;
}
static const struct file_operations qm_debug_fops = {
@@ -1529,12 +1605,12 @@ static const struct file_operations qm_cmd_fops = {
.write = qm_cmd_write,
};
-static void qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
+static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
+ enum qm_debug_file index)
{
- struct dentry *qm_d = qm->debug.qm_d;
struct debugfs_file *file = qm->debug.files + index;
- debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
+ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
&qm_debug_fops);
file->index = index;
@@ -1628,7 +1704,7 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
writel(error_status, qm->io_base +
QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_ini->err_info.nfe,
+ writel(qm->err_info.nfe,
qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_RECOVERED;
}
@@ -1639,6 +1715,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static int qm_stop_qp(struct hisi_qp *qp)
+{
+ return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.get_irq_num = qm_get_irq_num_v1,
@@ -1654,6 +1735,16 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.hw_error_handle = qm_hw_error_handle_v2,
};
+static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .get_irq_num = qm_get_irq_num_v2,
+ .hw_error_init = qm_hw_error_init_v2,
+ .hw_error_uninit = qm_hw_error_uninit_v2,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .stop_qp = qm_stop_qp,
+};
+
static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1933,6 +2024,14 @@ static int qm_drain_qp(struct hisi_qp *qp)
if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
return 0;
+ /* Kunpeng 930 supports draining a qp by device */
+ if (qm->ops->stop_qp) {
+ ret = qm->ops->stop_qp(qp);
+ if (ret)
+ dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
+ return ret;
+ }
+
addr = qm_ctx_alloc(qm, size, &dma_addr);
if (IS_ERR(addr)) {
dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
@@ -2132,6 +2231,8 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
{
struct hisi_qp *qp = q->priv;
struct hisi_qm *qm = qp->qm;
+ resource_size_t phys_base = qm->db_phys_base +
+ qp->qp_id * qm->db_interval;
size_t sz = vma->vm_end - vma->vm_start;
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
@@ -2143,16 +2244,19 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
if (qm->ver == QM_HW_V1) {
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
- } else {
+ } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
+ } else {
+ if (sz > qm->db_interval)
+ return -EINVAL;
}
vma->vm_flags |= VM_IO;
return remap_pfn_range(vma, vma->vm_start,
- qm->phys_base >> PAGE_SHIFT,
+ phys_base >> PAGE_SHIFT,
sz, pgprot_noncached(vma->vm_page_prot));
case UACCE_QFRT_DUS:
if (sz != qp->qdma.size)
@@ -2267,14 +2371,20 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->priv = qm;
uacce->algs = qm->algs;
- if (qm->ver == QM_HW_V1) {
- mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ if (qm->ver == QM_HW_V1)
uacce->api_ver = HISI_QM_API_VER_BASE;
- } else {
+ else if (qm->ver == QM_HW_V2)
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ else
+ uacce->api_ver = HISI_QM_API_VER3_BASE;
+
+ if (qm->ver == QM_HW_V1)
+ mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
mmio_page_nr = QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
- uacce->api_ver = HISI_QM_API_VER2_BASE;
- }
+ else
+ mmio_page_nr = qm->db_interval / PAGE_SIZE;
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
@@ -2482,8 +2592,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1)
qm->ops = &qm_hw_ops_v1;
- else
+ else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
+ else
+ qm->ops = &qm_hw_ops_v3;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
@@ -2492,13 +2604,23 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->misc_ctl = false;
}
-static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
- pci_free_irq_vectors(pdev);
+ if (qm->use_db_isolation)
+ iounmap(qm->db_io_base);
+
iounmap(qm->io_base);
pci_release_mem_regions(pdev);
+}
+
+static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ pci_free_irq_vectors(pdev);
+ qm_put_pci_res(qm);
pci_disable_device(pdev);
}
@@ -2527,7 +2649,6 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
- memset(&qm->qdma, 0, sizeof(qm->qdma));
}
qm_irq_unregister(qm);
@@ -2681,7 +2802,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
{
int ret;
- WARN_ON(!qm->qdma.dma);
+ WARN_ON(!qm->qdma.va);
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_mem_reset(qm);
@@ -2930,9 +3051,11 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
qm->debug.qm_d = qm_d;
/* only show this in PF */
- if (qm->fun_type == QM_HW_PF)
+ if (qm->fun_type == QM_HW_PF) {
+ qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
- qm_create_debugfs_file(qm, i);
+ qm_create_debugfs_file(qm, qm_d, i);
+ }
debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
@@ -2960,6 +3083,10 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
struct qm_dfx_registers *regs;
int i;
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
/* clear current_q */
writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
@@ -2982,7 +3109,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
static void qm_hw_error_init(struct hisi_qm *qm)
{
- const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
+ struct hisi_qm_err_info *err_info = &qm->err_info;
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
@@ -3175,30 +3302,46 @@ EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
- u32 remain_q_num, q_num, i, j;
+ u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
+ u32 max_qp_num = qm->max_qp_num;
u32 q_base = qm->qp_num;
int ret;
if (!num_vfs)
return -EINVAL;
- remain_q_num = qm->ctrl_qp_num - qm->qp_num;
+ vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
- /* If remain queues not enough, return error. */
- if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
+ /* If vfs_q_num is less than num_vfs, return error. */
+ if (vfs_q_num < num_vfs)
return -EINVAL;
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ q_num = vfs_q_num / num_vfs;
+ remain_q_num = vfs_q_num % num_vfs;
+
+ for (i = num_vfs; i > 0; i--) {
+ /*
+ * If the last VF cannot take all remaining queues (q_num +
+ * remain_q_num > max_qp_num), hand the remainder out one
+ * queue per VF instead.
+ */
+ if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
+ act_q_num = q_num + remain_q_num;
+ remain_q_num = 0;
+ } else if (remain_q_num > 0) {
+ act_q_num = q_num + 1;
+ remain_q_num--;
+ } else {
+ act_q_num = q_num;
+ }
+
+ act_q_num = min_t(int, act_q_num, max_qp_num);
+ ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
if (ret) {
- for (j = i; j > 0; j--)
+ for (j = num_vfs; j > i; j--)
hisi_qm_set_vft(qm, j, 0, 0);
return ret;
}
- q_base += q_num;
+ q_base += act_q_num;
}
return 0;
@@ -3318,15 +3461,15 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
/* get device hardware error status */
err_sts = qm->err_ini->get_dev_hw_err_status(qm);
if (err_sts) {
- if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
+ if (err_sts & qm->err_info.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
if (qm->err_ini->log_dev_hw_err)
qm->err_ini->log_dev_hw_err(qm, err_sts);
/* ce error does not need to be reset */
- if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
- qm->err_ini->err_info.dev_ce_mask) {
+ if ((err_sts | qm->err_info.dev_ce_mask) ==
+ qm->err_info.dev_ce_mask) {
if (qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm,
err_sts);
@@ -3639,7 +3782,7 @@ static int qm_soft_reset(struct hisi_qm *qm)
acpi_status s;
s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
- qm->err_ini->err_info.acpi_rst,
+ qm->err_info.acpi_rst,
NULL, &value);
if (ACPI_FAILURE(s)) {
pci_err(pdev, "NO controller reset method!\n");
@@ -3707,12 +3850,11 @@ static void qm_restart_prepare(struct hisi_qm *qm)
/* temporarily close the OOO port used for PEH to write out MSI */
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
- writel(value & ~qm->err_ini->err_info.msi_wr_port,
+ writel(value & ~qm->err_info.msi_wr_port,
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
/* clear dev ecc 2bit error source if any */
- value = qm_get_dev_err_status(qm) &
- qm->err_ini->err_info.ecc_2bits_mask;
+ value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
if (value && qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm, value);
@@ -3736,7 +3878,7 @@ static void qm_restart_done(struct hisi_qm *qm)
/* open the OOO port for PEH to write out MSI */
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
- value |= qm->err_ini->err_info.msi_wr_port;
+ value |= qm->err_info.msi_wr_port;
writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
qm->err_status.is_qm_ecc_mbit = false;
@@ -3875,8 +4017,7 @@ static int qm_check_dev_error(struct hisi_qm *qm)
if (ret)
return ret;
- return (qm_get_dev_err_status(qm) &
- qm->err_ini->err_info.ecc_2bits_mask);
+ return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask);
}
void hisi_qm_reset_prepare(struct pci_dev *pdev)
@@ -4084,7 +4225,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (flag) {
- ret = qm_list->register_to_crypto();
+ ret = qm_list->register_to_crypto(qm);
if (ret) {
mutex_lock(&qm_list->lock);
list_del(&qm->list);
@@ -4115,59 +4256,134 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (list_empty(&qm_list->list))
- qm_list->unregister_from_crypto();
+ qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
-static int hisi_qm_pci_init(struct hisi_qm *qm)
+static int qm_get_qp_num(struct hisi_qm *qm)
+{
+ if (qm->ver == QM_HW_V1)
+ qm->ctrl_qp_num = QM_QNUM_V1;
+ else if (qm->ver == QM_HW_V2)
+ qm->ctrl_qp_num = QM_QNUM_V2;
+ else
+ qm->ctrl_qp_num = readl(qm->io_base + QM_CAPABILITY) &
+ QM_QP_NUM_MASK;
+
+ if (qm->use_db_isolation)
+ qm->max_qp_num = (readl(qm->io_base + QM_CAPABILITY) >>
+ QM_QP_MAX_NUM_SHIFT) & QM_QP_NUM_MASK;
+ else
+ qm->max_qp_num = qm->ctrl_qp_num;
+
+ /* check if qp number is valid */
+ if (qm->qp_num > qm->max_qp_num) {
+ dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
+ qm->qp_num, qm->max_qp_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
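
The capability register is read twice: bits [10:0] give the total queue number and, when doorbell isolation is enabled, bits [21:11] give the per-function maximum. A sketch of the field extraction, with the register layout as implied by the masks above (hypothetical standalone code):

#include <stdint.h>

#define QP_NUM_MASK	0x7ffu	/* GENMASK(10, 0) */
#define QP_MAX_SHIFT	11

static void parse_capability(uint32_t reg, uint32_t *ctrl_qp_num,
			     uint32_t *max_qp_num)
{
	*ctrl_qp_num = reg & QP_NUM_MASK;			/* bits [10:0] */
	*max_qp_num = (reg >> QP_MAX_SHIFT) & QP_NUM_MASK;	/* bits [21:11] */
}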
+
+static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
- unsigned int num_vec;
int ret;
- ret = pci_enable_device_mem(pdev);
- if (ret < 0) {
- dev_err(dev, "Failed to enable device mem!\n");
- return ret;
- }
-
ret = pci_request_mem_regions(pdev, qm->dev_name);
if (ret < 0) {
dev_err(dev, "Failed to request mem regions!\n");
- goto err_disable_pcidev;
+ return ret;
}
qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
- qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
- qm->io_base = ioremap(qm->phys_base, qm->phys_size);
+ qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
if (!qm->io_base) {
ret = -EIO;
- goto err_release_mem_regions;
+ goto err_request_mem_regions;
+ }
+
+ if (qm->ver > QM_HW_V2) {
+ if (qm->fun_type == QM_HW_PF)
+ qm->use_db_isolation = readl(qm->io_base +
+ QM_QUE_ISO_EN) & BIT(0);
+ else
+ qm->use_db_isolation = readl(qm->io_base +
+ QM_QUE_ISO_CFG_V) & BIT(0);
+ }
+
+ if (qm->use_db_isolation) {
+ qm->db_interval = QM_QP_DB_INTERVAL;
+ qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
+ qm->db_io_base = ioremap(qm->db_phys_base,
+ pci_resource_len(pdev, PCI_BAR_4));
+ if (!qm->db_io_base) {
+ ret = -EIO;
+ goto err_ioremap;
+ }
+ } else {
+ qm->db_phys_base = qm->phys_base;
+ qm->db_io_base = qm->io_base;
+ qm->db_interval = 0;
}
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm_get_qp_num(qm);
+ if (ret)
+ goto err_db_ioremap;
+ }
+
+ return 0;
+
+err_db_ioremap:
+ if (qm->use_db_isolation)
+ iounmap(qm->db_io_base);
+err_ioremap:
+ iounmap(qm->io_base);
+err_request_mem_regions:
+ pci_release_mem_regions(pdev);
+ return ret;
+}
+
+static int hisi_qm_pci_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int num_vec;
+ int ret;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable device mem!\n");
+ return ret;
+ }
+
+ ret = qm_get_pci_res(qm);
+ if (ret)
+ goto err_disable_pcidev;
+
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret < 0)
- goto err_iounmap;
+ goto err_get_pci_res;
pci_set_master(pdev);
if (!qm->ops->get_irq_num) {
ret = -EOPNOTSUPP;
- goto err_iounmap;
+ goto err_get_pci_res;
}
num_vec = qm->ops->get_irq_num(qm);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
- goto err_iounmap;
+ goto err_get_pci_res;
}
return 0;
-err_iounmap:
- iounmap(qm->io_base);
-err_release_mem_regions:
- pci_release_mem_regions(pdev);
+err_get_pci_res:
+ qm_put_pci_res(qm);
err_disable_pcidev:
pci_disable_device(pdev);
return ret;
@@ -4187,28 +4403,28 @@ int hisi_qm_init(struct hisi_qm *qm)
hisi_qm_pre_init(qm);
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
-
ret = hisi_qm_pci_init(qm);
if (ret)
- goto err_remove_uacce;
+ return ret;
ret = qm_irq_register(qm);
if (ret)
- goto err_pci_uninit;
+ goto err_pci_init;
if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
/* v2 starts to support get vft by mailbox */
ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
if (ret)
- goto err_irq_unregister;
+ goto err_irq_register;
}
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+
ret = hisi_qm_memory_init(qm);
if (ret)
- goto err_irq_unregister;
+ goto err_alloc_uacce;
INIT_WORK(&qm->work, qm_work_process);
if (qm->fun_type == QM_HW_PF)
@@ -4218,13 +4434,13 @@ int hisi_qm_init(struct hisi_qm *qm)
return 0;
-err_irq_unregister:
- qm_irq_unregister(qm);
-err_pci_uninit:
- hisi_qm_pci_uninit(qm);
-err_remove_uacce:
+err_alloc_uacce:
uacce_remove(qm->uacce);
qm->uacce = NULL;
+err_irq_register:
+ qm_irq_unregister(qm);
+err_pci_init:
+ hisi_qm_pci_uninit(qm);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 54967c6b9c78..acefdf8b3a50 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -51,14 +51,6 @@
#define PEH_AXUSER_CFG 0x401001
#define PEH_AXUSER_CFG_ENABLE 0xffffffff
-#define QM_DFX_MB_CNT_VF 0x104010
-#define QM_DFX_DB_CNT_VF 0x104020
-#define QM_DFX_SQE_CNT_VF_SQN 0x104030
-#define QM_DFX_CQE_CNT_VF_CQN 0x104040
-#define QM_DFX_QN_SHIFT 16
-#define CURRENT_FUN_MASK GENMASK(5, 0)
-#define CURRENT_Q_MASK GENMASK(31, 16)
-
#define QM_AXI_RRESP BIT(0)
#define QM_AXI_BRESP BIT(1)
#define QM_ECC_MBIT BIT(2)
@@ -72,10 +64,13 @@
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
#define QM_DB_RANDOM_INVALID BIT(12)
+#define QM_MAILBOX_TIMEOUT BIT(13)
+#define QM_FLR_TIMEOUT BIT(14)
#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
+ QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
+ QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
@@ -123,6 +118,7 @@ enum qm_fun_type {
};
enum qm_debug_file {
+ CURRENT_QM,
CURRENT_Q,
CLEAR_ENABLE,
DEBUG_FILE_NUM,
@@ -193,14 +189,14 @@ struct hisi_qm_err_ini {
void (*open_axi_master_ooo)(struct hisi_qm *qm);
void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
- struct hisi_qm_err_info err_info;
+ void (*err_info_init)(struct hisi_qm *qm);
};
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
- int (*register_to_crypto)(void);
- void (*unregister_from_crypto)(void);
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
};
struct hisi_qm {
@@ -209,12 +205,15 @@ struct hisi_qm {
const char *dev_name;
struct pci_dev *pdev;
void __iomem *io_base;
+ void __iomem *db_io_base;
u32 sqe_size;
u32 qp_base;
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ u32 max_qp_num;
u32 vfs_num;
+ u32 db_interval;
struct list_head list;
struct hisi_qm_list *qm_list;
@@ -230,6 +229,7 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
struct hisi_qm_err_status err_status;
unsigned long misc_ctl; /* driver removing and reset sched */
@@ -252,8 +252,11 @@ struct hisi_qm {
const char *algs;
bool use_sva;
bool is_frozen;
+
+ /* doorbell isolation enable */
+ bool use_db_isolation;
resource_size_t phys_base;
- resource_size_t phys_size;
+ resource_size_t db_phys_base;
struct uacce_device *uacce;
int mode;
};
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 8ca945ac297e..0a3c8f019b02 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index 91ee2bb575df..c8de1b51c843 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for the Hisilicon SEC units found on Hip06 Hip07
+ * Driver for the HiSilicon SEC units found on Hip06 Hip07
*
- * Copyright (c) 2016-2017 Hisilicon Limited.
+ * Copyright (c) 2016-2017 HiSilicon Limited.
*/
#include <linux/acpi.h>
#include <linux/atomic.h>
@@ -233,7 +233,7 @@ static int sec_queue_map_io(struct sec_queue *queue)
IORESOURCE_MEM,
2 + queue->queue_id);
if (!res) {
- dev_err(dev, "Failed to get queue %d memory resource\n",
+ dev_err(dev, "Failed to get queue %u memory resource\n",
queue->queue_id);
return -ENOMEM;
}
@@ -653,12 +653,12 @@ static int sec_queue_free(struct sec_queue *queue)
struct sec_dev_info *info = queue->dev_info;
if (queue->queue_id >= SEC_Q_NUM) {
- dev_err(info->dev, "No queue %d\n", queue->queue_id);
+ dev_err(info->dev, "No queue %u\n", queue->queue_id);
return -ENODEV;
}
if (!queue->in_use) {
- dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+ dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
return -ENODEV;
}
@@ -834,6 +834,7 @@ int sec_queue_stop_release(struct sec_queue *queue)
/**
* sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to test
*
* We need to know if we have an empty queue for some of the chaining modes
* as if it is not empty we may need to hold the message in a software queue
@@ -1315,6 +1316,6 @@ static struct platform_driver sec_driver = {
module_platform_driver(sec_driver);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_DESCRIPTION("HiSilicon Security Accelerators");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 4d9063a8b10b..179a8250d691 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
#ifndef _SEC_DRV_H_
#define _SEC_DRV_H_
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 08491912afd5..dfdce2f21e65 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -4,8 +4,6 @@
#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H
-#include <linux/list.h>
-
#include "../qm.h"
#include "sec_crypto.h"
@@ -50,7 +48,7 @@ struct sec_req {
int err_type;
int req_id;
- int flag;
+ u32 flag;
/* Status of the SEC request */
bool fake_busy;
@@ -139,6 +137,7 @@ struct sec_ctx {
bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
+ struct device *dev;
};
enum sec_endian {
@@ -148,7 +147,6 @@ enum sec_endian {
};
enum sec_debug_file_index {
- SEC_CURRENT_QM,
SEC_CLEAR_ENABLE,
SEC_DEBUG_FILE_NUM,
};
@@ -183,6 +181,6 @@ struct sec_dev {
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 2eaa516b3231..133aede8bf07 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -7,6 +7,7 @@
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
@@ -43,7 +44,6 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
-#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
@@ -96,7 +96,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
- dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ dev_err(req->ctx->dev, "alloc req id fail!\n");
return req_id;
}
@@ -112,7 +112,7 @@ static void sec_free_req_id(struct sec_req *req)
int req_id = req->req_id;
if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
- dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+ dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
@@ -138,7 +138,7 @@ static int sec_aead_verify(struct sec_req *req)
aead_req->cryptlen + aead_req->assoclen -
authsize);
if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
- dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ dev_err(req->ctx->dev, "aead verify failure!\n");
return -EBADMSG;
}
@@ -177,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
if (unlikely(req->err_type || done != SEC_SQE_DONE ||
(ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
(ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
- dev_err(SEC_CTX_DEV(ctx),
+ dev_err_ratelimited(ctx->dev,
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
@@ -326,8 +326,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
struct sec_alg_res *res = qp_ctx->res;
+ struct device *dev = ctx->dev;
int ret;
ret = sec_alloc_civ_resource(dev, res);
@@ -360,7 +360,7 @@ alloc_fail:
static void sec_alg_resource_free(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
sec_free_civ_resource(dev, qp_ctx->res);
@@ -373,7 +373,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
struct sec_qp_ctx *qp_ctx;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -428,7 +428,7 @@ err_destroy_idr:
static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
@@ -452,6 +452,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->dev = &sec->qm.pdev->dev;
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
@@ -476,11 +477,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
-
kfree(ctx->qp_ctx);
err_destroy_qps:
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
-
return ret;
}
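Caching &sec->qm.pdev->dev in ctx->dev once at init replaces the SEC_CTX_DEV() macro's multi-level pointer chase on every log and DMA call. The same pattern in miniature (all names hypothetical):

    #include <stdio.h>

    struct device { const char *name; };
    struct qm { struct device dev; };
    struct ctx {
        struct qm *qm;
        struct device *dev;     /* cached once at init */
    };

    static void ctx_init(struct ctx *c, struct qm *qm)
    {
        c->qm = qm;
        c->dev = &qm->dev;      /* one dereference at setup... */
    }

    int main(void)
    {
        struct qm qm = { .dev = { .name = "sec" } };
        struct ctx c;

        ctx_init(&c, &qm);
        printf("%s\n", c.dev->name);    /* ...instead of c.qm->dev.name everywhere */
        return 0;
    }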
@@ -499,7 +498,7 @@ static int sec_cipher_init(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&c_ctx->c_key_dma, GFP_KERNEL);
if (!c_ctx->c_key)
return -ENOMEM;
@@ -512,7 +511,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
c_ctx->c_key, c_ctx->c_key_dma);
}
@@ -520,7 +519,7 @@ static int sec_auth_init(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&a_ctx->a_key_dma, GFP_KERNEL);
if (!a_ctx->a_key)
return -ENOMEM;
@@ -533,7 +532,7 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
}
@@ -546,7 +545,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ pr_err("invalid skcipher iv size!\n");
return -EINVAL;
}
@@ -573,10 +572,18 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
const u32 keylen,
const enum sec_cmode c_mode)
{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int ret;
+
+ ret = verify_skcipher_des3_key(tfm, key);
+ if (ret)
+ return ret;
+
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
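Passing the tfm and raw key into the 3DES setkey lets it call verify_skcipher_des3_key(), which screens out keys whose 8-byte thirds repeat (K1 == K2 or K2 == K3) before the hardware sees them. A rough standalone sketch of that distinctness test only (the kernel helper also honours FIPS mode and the forbid-weak-keys flag):

    #include <stdio.h>
    #include <string.h>

    #define DES_KEY_SIZE 8

    /* Illustrative only: flag 3DES keys with repeating thirds. */
    static int des3_key_is_weak(const unsigned char *key)
    {
        return !memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
               !memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE, DES_KEY_SIZE);
    }

    int main(void)
    {
        unsigned char bad[24] = { 0 };  /* K1 == K2 == K3 */

        printf("weak: %d\n", des3_key_is_weak(bad));    /* 1 */
        return 0;
    }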
@@ -633,12 +640,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
int ret;
if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+ dev_err(dev, "xts mode key err!\n");
return ret;
}
}
@@ -648,7 +656,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
switch (c_alg) {
case SEC_CALG_3DES:
- ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
+ ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
break;
case SEC_CALG_AES:
case SEC_CALG_SM4:
@@ -659,7 +667,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+ dev_err(dev, "set sec key err!\n");
return ret;
}
@@ -691,7 +699,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -701,21 +709,14 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
copy_size = c_req->c_len;
pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf,
- copy_size);
-
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
}
c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
-
- if (!c_req->c_in_dma) {
- dev_err(dev, "fail to set pbuffer address!\n");
- return -ENOMEM;
- }
-
c_req->c_out_dma = c_req->c_in_dma;
return 0;
@@ -727,7 +728,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -739,7 +740,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
qp_ctx->res[req_id].pbuf,
copy_size);
-
if (unlikely(pbuf_length != copy_size))
dev_err(dev, "copy pbuf data to dst error!\n");
}
@@ -751,7 +751,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int ret;
if (req->use_pbuf) {
@@ -806,7 +806,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
@@ -891,6 +891,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
int ret;
@@ -904,13 +905,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ dev_err(dev, "set sec cipher key err!\n");
goto bad_key;
}
ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ dev_err(dev, "set sec auth key err!\n");
goto bad_key;
}
@@ -1062,7 +1063,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
cryptlen - iv_size);
if (unlikely(sz != iv_size))
- dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+ dev_err(req->ctx->dev, "copy output iv error!\n");
}
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
@@ -1160,7 +1161,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
ret = sec_skcipher_bd_fill(ctx, req);
if (unlikely(ret)) {
- dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+ dev_err(ctx->dev, "skcipher bd fill is error!\n");
return ret;
}
@@ -1194,7 +1195,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
a_req->assoclen);
if (unlikely(sz != authsize)) {
- dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
}
}
@@ -1259,7 +1260,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
ret = ctx->req_op->bd_send(ctx, req);
if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
- dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1325,7 +1326,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+ dev_err(ctx->dev, "invalid aead iv size!\n");
return -EINVAL;
}
@@ -1374,7 +1375,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(auth_ctx->hash_tfm)) {
- dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
return PTR_ERR(auth_ctx->hash_tfm);
}
@@ -1405,10 +1406,40 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
+
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
+ struct sec_req *sreq)
+{
+ u32 cryptlen = sreq->c_req.sk_req->cryptlen;
+ struct device *dev = ctx->dev;
+ u8 c_mode = ctx->c_ctx.c_mode;
+ int ret = 0;
+
+ switch (c_mode) {
+ case SEC_CMODE_XTS:
+ if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
+ dev_err(dev, "skcipher XTS mode input length error!\n");
+ ret = -EINVAL;
+ }
+ break;
+ case SEC_CMODE_ECB:
+ case SEC_CMODE_CBC:
+ if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher AES input length error!\n");
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!sk_req->src || !sk_req->dst)) {
@@ -1429,12 +1460,9 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "skcipher aes input length error!\n");
- return -EINVAL;
- }
- return 0;
+ return sec_skcipher_cryptlen_check(ctx, sreq);
}
+
dev_err(dev, "skcipher algorithm error!\n");
return -EINVAL;
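The new sec_skcipher_cryptlen_check() centralises the per-mode length rules: XTS needs at least one AES block of input, while ECB/CBC need an exact multiple of the block size. A tiny standalone mirror of that logic:

    #include <stdio.h>
    #include <stdint.h>

    #define AES_BLOCK_SIZE 16

    static int xts_len_ok(uint32_t len) { return len >= AES_BLOCK_SIZE; }
    static int cbc_len_ok(uint32_t len) { return (len & (AES_BLOCK_SIZE - 1)) == 0; }

    int main(void)
    {
        printf("xts 15: %d, xts 16: %d\n", xts_len_ok(15), xts_len_ok(16)); /* 0, 1 */
        printf("cbc 31: %d, cbc 32: %d\n", cbc_len_ok(31), cbc_len_ok(32)); /* 0, 1 */
        return 0;
    }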
@@ -1531,14 +1559,15 @@ static struct skcipher_alg sec_skciphers[] = {
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
- u8 c_alg = ctx->c_ctx.c_alg;
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
+ struct device *dev = ctx->dev;
+ u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!req->src || !req->dst || !req->cryptlen ||
req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ dev_err(dev, "aead input param error!\n");
return -EINVAL;
}
@@ -1550,7 +1579,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
/* Support AES only */
if (unlikely(c_alg != SEC_CALG_AES)) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ dev_err(dev, "aead crypto alg error!\n");
return -EINVAL;
}
if (sreq->c_req.encrypt)
@@ -1559,7 +1588,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
sreq->c_req.c_len = req->cryptlen - authsize;
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ dev_err(dev, "aead crypto length error!\n");
return -EINVAL;
}
@@ -1634,7 +1663,7 @@ static struct aead_alg sec_aeads[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};
-int sec_register_to_crypto(void)
+int sec_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -1651,7 +1680,7 @@ int sec_register_to_crypto(void)
return ret;
}
-void sec_unregister_from_crypto(void)
+void sec_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_skciphers(sec_skciphers,
ARRAY_SIZE(sec_skciphers));
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index b2786e17d8fe..9c78edac56a4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -64,7 +64,6 @@ enum sec_addr_type {
};
struct sec_sqe_type2 {
-
/*
* mac_len: 0~4 bits
* a_key_len: 5~10 bits
@@ -120,7 +119,6 @@ struct sec_sqe_type2 {
/* c_pad_len_field: 0~1 bits */
__le16 c_pad_len_field;
-
__le64 long_a_data_len;
__le64 a_ivin_addr;
__le64 a_key_addr;
@@ -211,6 +209,6 @@ struct sec_sqe {
struct sec_sqe_type2 type2;
};
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index dc68ba76f65e..6f0062d4408c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -19,7 +19,6 @@
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
-#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
@@ -35,18 +34,16 @@
#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define SEC_ENGINE_PF_CFG_OFF 0x300000
-#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
-#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
-#define SEC_ECC_ADDR(err) ((err) >> 0)
+#define SEC_ECC_NUM 16
+#define SEC_ECC_MASK 0xFF
#define SEC_CORE_INT_DISABLE 0x0
-#define SEC_CORE_INT_ENABLE 0x1ff
-#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_CORE_INT_ENABLE 0x7c1ff
+#define SEC_CORE_INT_CLEAR 0x7c1ff
#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
@@ -54,24 +51,24 @@
#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
-#define SEC_RAS_NFE_ENB_MSK 0x177
-#define SEC_RAS_DISABLE 0x0
-#define SEC_MEM_START_INIT_REG 0x0100
-#define SEC_MEM_INIT_DONE_REG 0x0104
+#define SEC_RAS_NFE_ENB_MSK 0x7c177
+#define SEC_RAS_DISABLE 0x0
+#define SEC_MEM_START_INIT_REG 0x301100
+#define SEC_MEM_INIT_DONE_REG 0x301104
-#define SEC_CONTROL_REG 0x0200
+#define SEC_CONTROL_REG 0x301200
#define SEC_TRNG_EN_SHIFT 8
#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
-#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
-#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
-#define SEC_SAA_EN_REG 0x0270
-#define SEC_BD_ERR_CHK_EN_REG0 0x0380
-#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG3 0x038c
+#define SEC_INTERFACE_USER_CTRL0_REG 0x301220
+#define SEC_INTERFACE_USER_CTRL1_REG 0x301224
+#define SEC_SAA_EN_REG 0x301270
+#define SEC_BD_ERR_CHK_EN_REG0 0x301380
+#define SEC_BD_ERR_CHK_EN_REG1 0x301384
+#define SEC_BD_ERR_CHK_EN_REG3 0x30138c
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
@@ -95,9 +92,6 @@
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
-#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
- SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -117,20 +111,66 @@ static struct hisi_qm_list sec_devices = {
};
static const struct sec_hw_error sec_hw_errors[] = {
- {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
- {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
- {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
- {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
- {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
- {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
- {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
- {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
- {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "sec_axi_rresp_err_rint"
+ },
+ {
+ .int_msk = BIT(1),
+ .msg = "sec_axi_bresp_err_rint"
+ },
+ {
+ .int_msk = BIT(2),
+ .msg = "sec_ecc_2bit_err_rint"
+ },
+ {
+ .int_msk = BIT(3),
+ .msg = "sec_ecc_1bit_err_rint"
+ },
+ {
+ .int_msk = BIT(4),
+ .msg = "sec_req_trng_timeout_rint"
+ },
+ {
+ .int_msk = BIT(5),
+ .msg = "sec_fsm_hbeat_rint"
+ },
+ {
+ .int_msk = BIT(6),
+ .msg = "sec_channel_req_rng_timeout_rint"
+ },
+ {
+ .int_msk = BIT(7),
+ .msg = "sec_bd_err_rint"
+ },
+ {
+ .int_msk = BIT(8),
+ .msg = "sec_chain_buff_err_rint"
+ },
+ {
+ .int_msk = BIT(14),
+ .msg = "sec_no_secure_access"
+ },
+ {
+ .int_msk = BIT(15),
+ .msg = "sec_wrapping_key_auth_err"
+ },
+ {
+ .int_msk = BIT(16),
+ .msg = "sec_km_key_crc_fail"
+ },
+ {
+ .int_msk = BIT(17),
+ .msg = "sec_axi_poison_err"
+ },
+ {
+ .int_msk = BIT(18),
+ .msg = "sec_sva_err"
+ },
+ {}
};
static const char * const sec_dbg_file_name[] = {
- [SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
};
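The widened SEC_CORE_INT_ENABLE/SEC_CORE_INT_CLEAR value 0x7c1ff lines up with the grown error table: the original sources at bits 0-8 plus the five new ones at bits 14-18. Checking the arithmetic with a userspace stand-in for the kernel's GENMASK():

    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK_U32(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

    int main(void)
    {
        uint32_t old_bits = GENMASK_U32(8, 0);      /* 0x001ff: bits 0-8   */
        uint32_t new_bits = GENMASK_U32(18, 14);    /* 0x7c000: bits 14-18 */

        printf("enable mask: 0x%x\n", old_bits | new_bits); /* 0x7c1ff */
        return 0;
    }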
@@ -277,9 +317,7 @@ static u8 sec_get_endian(struct hisi_qm *qm)
"cannot access a register in VF!\n");
return SEC_LE;
}
- reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
-
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
/* BD little endian mode */
if (!(reg & BIT(0)))
return SEC_LE;
@@ -299,13 +337,13 @@ static int sec_engine_init(struct hisi_qm *qm)
u32 reg;
/* disable clock gate control */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg &= SEC_CLK_GATE_DISABLE;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+ writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
- ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
reg, reg & 0x1, SEC_DELAY_10_US,
SEC_POLL_TIMEOUT_US);
if (ret) {
@@ -313,40 +351,40 @@ static int sec_engine_init(struct hisi_qm *qm)
return ret;
}
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
reg |= SEC_USER0_SMMU_NORMAL;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
- reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
reg &= SEC_USER1_SMMU_MASK;
if (qm->use_sva && qm->ver == QM_HW_V2)
reg |= SEC_USER1_SMMU_SVA;
else
reg |= SEC_USER1_SMMU_NORMAL;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
+ writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
/* Enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
/* Enable sm4 xts mode multiple iv */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
writel_relaxed(SEC_BD_ERR_CHK_EN3,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
/* config endian */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg |= sec_get_endian(qm);
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
return 0;
}
@@ -381,10 +419,6 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
{
int i;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* clear sec dfx regs */
writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
@@ -406,7 +440,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
return;
}
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
/* clear SEC hw error source if having */
writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
@@ -422,14 +456,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
/* enable SEC block master OOO when m-bit error occur */
val = val | SEC_AXI_SHUTDOWN_ENABLE;
- writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val, qm->io_base + SEC_CONTROL_REG);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
{
u32 val;
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
/* disable RAS int */
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
@@ -442,51 +476,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
/* disable SEC block master OOO when m-bit error occur */
val = val & SEC_AXI_SHUTDOWN_DISABLE;
- writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
-}
-
-static u32 sec_current_qm_read(struct sec_debug_file *file)
-{
- struct hisi_qm *qm = file->qm;
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
-{
- struct hisi_qm *qm = file->qm;
- u32 vfq_num;
- u32 tmp;
-
- if (val > qm->vfs_num)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (!val) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
-
- if (val == qm->vfs_num)
- qm->debug.curr_qm_qp_num =
- qm->ctrl_qp_num - qm->qp_num -
- (qm->vfs_num - 1) * vfq_num;
- else
- qm->debug.curr_qm_qp_num = vfq_num;
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
+ writel(val, qm->io_base + SEC_CONTROL_REG);
}
static u32 sec_clear_enable_read(struct sec_debug_file *file)
@@ -523,9 +513,6 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case SEC_CURRENT_QM:
- val = sec_current_qm_read(file);
- break;
case SEC_CLEAR_ENABLE:
val = sec_clear_enable_read(file);
break;
@@ -566,11 +553,6 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case SEC_CURRENT_QM:
- ret = sec_current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case SEC_CLEAR_ENABLE:
ret = sec_clear_enable_write(file, val);
if (ret)
@@ -655,7 +637,7 @@ static int sec_debug_init(struct hisi_qm *qm)
int i;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
spin_lock_init(&sec->debug.files[i].lock);
sec->debug.files[i].index = i;
sec->debug.files[i].qm = qm;
@@ -712,7 +694,8 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
err_val = readl(qm->io_base +
SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
+ ((err_val) >> SEC_ECC_NUM) &
+ SEC_ECC_MASK);
}
}
errs++;
@@ -733,9 +716,23 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
+ err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+ err_info->msi_wr_port = BIT(0);
+ err_info->acpi_rst = "SRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT;
}
static const struct hisi_qm_err_ini sec_err_ini = {
@@ -746,16 +743,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
- .dev_ce_mask = SEC_RAS_CE_ENB_MSK,
- .msi_wr_port = BIT(0),
- .acpi_rst = "SRST",
- }
+ .err_info_init = sec_err_info_init,
};
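Moving the error thresholds out of the static hisi_qm_err_ini initializer and into an err_info_init() callback means they are written into the qm at probe time, which presumably leaves room for later hardware revisions to fill in different values. The shape of that refactor in miniature (names and values hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    struct err_info { uint32_t ce, nfe; };
    struct qm { struct err_info err_info; };

    struct err_ini {
        /* data moved out of the ops table, filled in at probe time */
        void (*err_info_init)(struct qm *qm);
    };

    static void sec_like_err_info_init(struct qm *qm)
    {
        qm->err_info.ce = 0x1;      /* hypothetical values */
        qm->err_info.nfe = 0xfe;
    }

    static const struct err_ini ini = { .err_info_init = sec_like_err_info_init };

    int main(void)
    {
        struct qm qm;

        ini.err_info_init(&qm);
        printf("ce=0x%x nfe=0x%x\n", qm.err_info.ce, qm.err_info.nfe);
        return 0;
    }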
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -763,12 +751,8 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
- else
- qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
-
qm->err_ini = &sec_err_ini;
+ qm->err_ini->err_info_init(qm);
ret = sec_set_user_domain_and_cache(qm);
if (ret)
@@ -786,7 +770,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "cipher\ndigest\naead\n";
+ qm->algs = "cipher\ndigest\naead";
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -909,10 +893,15 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- ret = hisi_qm_alg_register(qm, &sec_devices);
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_qm_stop;
+ if (qm->qp_num >= ctx_q_num) {
+ ret = hisi_qm_alg_register(qm, &sec_devices);
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_qm_stop;
+ }
+ } else {
+ pci_warn(qm->pdev,
+ "Failed to use kernel mode, not enough queue pairs!\n");
}
if (qm->uacce) {
@@ -948,7 +937,9 @@ static void sec_remove(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
hisi_qm_wait_task_finish(qm, &sec_devices);
- hisi_qm_alg_unregister(qm, &sec_devices);
+ if (qm->qp_num >= ctx_q_num)
+ hisi_qm_alg_unregister(qm, &sec_devices);
+
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, true);
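Probe now registers the kernel-crypto algorithms only when the device has at least ctx_q_num queue pairs, and sec_remove() applies the same test so unregister is never called for a device that never registered. The symmetric-guard idea as a standalone sketch (all names hypothetical):

    #include <stdio.h>
    #include <stdbool.h>

    static int registered;

    static bool enough_qps(int qp_num, int ctx_q_num) { return qp_num >= ctx_q_num; }

    static void probe(int qp_num, int ctx_q_num)
    {
        if (enough_qps(qp_num, ctx_q_num))
            registered = 1;     /* stands in for hisi_qm_alg_register() */
        else
            printf("kernel mode unavailable, not enough queue pairs\n");
    }

    static void removal(int qp_num, int ctx_q_num)
    {
        /* same predicate: never unregister what was never registered */
        if (enough_qps(qp_num, ctx_q_num))
            registered = 0;
    }

    int main(void)
    {
        probe(2, 24);
        removal(2, 24);
        printf("balanced: %d\n", registered == 0);  /* 1 */
        return 0;
    }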
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 3bff6394acaf..057273769f26 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -56,7 +56,7 @@ struct hisi_acc_sgl_pool {
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr)
{
- u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+ u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
struct hisi_acc_sgl_pool *pool;
struct mem_block *block;
u32 i, j;
@@ -66,6 +66,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl);
+
+ /*
+ * The pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1);
+ * on ia64 that can exceed 2^31, so cap the block size at 2^31.
+ */
block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
PAGE_SHIFT + MAX_ORDER - 1 : 31);
sgl_num_per_block = block_size / sgl_size;
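The clamp keeps block_size at PAGE_SIZE * 2^(MAX_ORDER - 1) but never above 2^31, so the u32 arithmetic cannot overflow even with ia64's large page/order combinations. Worked through with common x86-64 values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* typical x86-64 values; ia64 configurations can be far larger */
        unsigned int page_shift = 12, max_order = 11;
        uint32_t block_size = 1u << (page_shift + max_order <= 32 ?
                                     page_shift + max_order - 1 : 31);

        printf("block_size = %u bytes (%u MiB)\n",
               block_size, block_size >> 20);       /* 4194304 bytes, 4 MiB */
        return 0;
    }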
@@ -85,8 +90,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
block[i].sgl = dma_alloc_coherent(dev, block_size,
&block[i].sgl_dma,
GFP_KERNEL);
- if (!block[i].sgl)
+ if (!block[i].sgl) {
+ dev_err(dev, "Fail to allocate hw SG buffer!\n");
goto err_free_mem;
+ }
block[i].size = block_size;
}
@@ -95,8 +102,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
&block[i].sgl_dma,
GFP_KERNEL);
- if (!block[i].sgl)
+ if (!block[i].sgl) {
+ dev_err(dev, "Fail to allocate remained hw SG buffer!\n");
goto err_free_mem;
+ }
block[i].size = remain_sgl * sgl_size;
}
@@ -167,6 +176,7 @@ static void sg_map_to_hw_sg(struct scatterlist *sgl,
{
hw_sge->buf = sg_dma_address(sgl);
hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+ hw_sge->page_ctrl = sg_virt(sgl);
}
static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
@@ -182,6 +192,18 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}
+static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
+{
+ struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
+ int i;
+
+ for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
+ hw_sge[i].page_ctrl = NULL;
+ hw_sge[i].buf = 0;
+ hw_sge[i].len = 0;
+ }
+}
+
/**
* hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl.
* @dev: The device which hw sgl belongs to.
@@ -211,16 +233,19 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
sg_n = sg_nents(sgl);
sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!sg_n_mapped)
+ if (!sg_n_mapped) {
+ dev_err(dev, "DMA mapping for SG error!\n");
return ERR_PTR(-EINVAL);
+ }
if (sg_n_mapped > pool->sge_nr) {
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
return ERR_PTR(-EINVAL);
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
+ dev_err(dev, "Get SGL error!\n");
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-ENOMEM);
@@ -256,7 +281,7 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
return;
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
-
+ clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
hw_sgl->entry_length_in_sgl = 0;
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 29712685498a..829f2caf0f67 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -18,6 +18,8 @@
#define HISI_TRNG_REG 0x00F0
#define HISI_TRNG_BYTES 4
#define HISI_TRNG_QUALITY 512
+#define HISI_TRNG_VERSION 0x01B8
+#define HISI_TRNG_VER_V1 GENMASK(31, 0)
#define SLEEP_US 10
#define TIMEOUT_US 10000
#define SW_DRBG_NUM_SHIFT 2
@@ -50,6 +52,7 @@ struct hisi_trng {
struct hisi_trng_list *trng_list;
struct list_head list;
struct hwrng rng;
+ u32 ver;
bool is_used;
struct mutex mutex;
};
@@ -260,6 +263,7 @@ static int hisi_trng_probe(struct platform_device *pdev)
return PTR_ERR(trng->base);
trng->is_used = false;
+ trng->ver = readl(trng->base + HISI_TRNG_VERSION);
if (!trng_devices.is_init) {
INIT_LIST_HEAD(&trng_devices.list);
mutex_init(&trng_devices.lock);
@@ -267,7 +271,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
}
hisi_trng_add_to_list(trng);
- if (atomic_inc_return(&trng_active_devs) == 1) {
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_inc_return(&trng_active_devs) == 1) {
ret = crypto_register_rng(&hisi_trng_alg);
if (ret) {
dev_err(&pdev->dev,
@@ -289,7 +294,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
return ret;
err_crypto_unregister:
- if (atomic_dec_return(&trng_active_devs) == 0)
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_dec_return(&trng_active_devs) == 0)
crypto_unregister_rng(&hisi_trng_alg);
err_remove_from_list:
@@ -305,7 +311,8 @@ static int hisi_trng_remove(struct platform_device *pdev)
while (hisi_trng_del_from_list(trng))
;
- if (atomic_dec_return(&trng_active_devs) == 0)
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_dec_return(&trng_active_devs) == 0)
crypto_unregister_rng(&hisi_trng_alg);
return 0;
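With the version gate added, v1 hardware never touches the global hisi_trng_alg registration, while the existing atomic_inc_return()/atomic_dec_return() pair still guarantees the shared algorithm is registered exactly once across many probed devices. That register-once idiom, standalone with C11 atomics:

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_int active_devs;

    static void probe_one(void)
    {
        /* first capable device registers the shared algorithm */
        if (atomic_fetch_add(&active_devs, 1) + 1 == 1)
            printf("crypto_register_rng()\n");
    }

    static void remove_one(void)
    {
        /* last capable device going away unregisters it */
        if (atomic_fetch_sub(&active_devs, 1) - 1 == 0)
            printf("crypto_unregister_rng()\n");
    }

    int main(void)
    {
        probe_one();
        probe_one();    /* second device: no double registration */
        remove_one();
        remove_one();   /* last device: single unregistration */
        return 0;
    }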
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 92397f993e23..517fdbdff3ea 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -33,35 +33,55 @@ struct hisi_zip_sqe {
u32 consumed;
u32 produced;
u32 comp_data_length;
+ /*
+ * status: 0~7 bits
+ * rsvd: 8~31 bits
+ */
u32 dw3;
u32 input_data_length;
- u32 lba_l;
- u32 lba_h;
+ u32 dw5;
+ u32 dw6;
+ /*
+ * in_sge_data_offset: 0~23 bits
+ * rsvd: 24~27 bits
+ * sqe_type: 29~31 bits
+ */
u32 dw7;
+ /*
+ * out_sge_data_offset: 0~23 bits
+ * rsvd: 24~31 bits
+ */
u32 dw8;
+ /*
+ * request_type: 0~7 bits
+ * buffer_type: 8~11 bits
+ * rsvd: 13~31 bits
+ */
u32 dw9;
u32 dw10;
- u32 priv_info;
+ u32 dw11;
u32 dw12;
- u32 tag;
+ /* tag: in sqe type 0 */
+ u32 dw13;
u32 dest_avail_out;
- u32 rsvd0;
- u32 comp_head_addr_l;
- u32 comp_head_addr_h;
+ u32 dw15;
+ u32 dw16;
+ u32 dw17;
u32 source_addr_l;
u32 source_addr_h;
u32 dest_addr_l;
u32 dest_addr_h;
- u32 stream_ctx_addr_l;
- u32 stream_ctx_addr_h;
- u32 cipher_key1_addr_l;
- u32 cipher_key1_addr_h;
- u32 cipher_key2_addr_l;
- u32 cipher_key2_addr_h;
+ u32 dw22;
+ u32 dw23;
+ u32 dw24;
+ u32 dw25;
+ /* tag: in sqe type 3 */
+ u32 dw26;
+ u32 dw27;
u32 rsvd1[4];
};
int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
-int hisi_zip_register_to_crypto(void);
-void hisi_zip_unregister_from_crypto(void);
+int hisi_zip_register_to_crypto(struct hisi_qm *qm);
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
#endif
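The SQE rework above renames fixed-purpose fields (lba_l, tag, the stream/cipher addresses) to bare dwN names because the same dword now means different things per SQE type: the completion tag, for instance, sits in dw13 for type-0 SQEs but dw26 for type-3, which the new get_tag callbacks in zip_crypto.c read back. That accessor split in a cut-down standalone form:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for hisi_zip_sqe: same dwords, per-type meaning. */
    struct zip_sqe { uint32_t dw13, dw26; };

    struct sqe_ops { uint32_t (*get_tag)(const struct zip_sqe *); };

    static uint32_t get_tag_v1(const struct zip_sqe *s) { return s->dw13; }
    static uint32_t get_tag_v2(const struct zip_sqe *s) { return s->dw26; }

    static const struct sqe_ops ops_v1 = { .get_tag = get_tag_v1 };
    static const struct sqe_ops ops_v2 = { .get_tag = get_tag_v2 };

    int main(void)
    {
        struct zip_sqe s = { .dw13 = 7, .dw26 = 42 };

        printf("v1 tag %u, v2 tag %u\n", ops_v1.get_tag(&s), ops_v2.get_tag(&s));
        return 0;
    }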
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 08b4660b014c..9520a4113c81 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -10,6 +10,7 @@
#define HZIP_BD_STATUS_M GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+#define HZIP_SQE_TYPE_M GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
@@ -91,8 +92,22 @@ struct hisi_zip_qp_ctx {
struct hisi_zip_ctx *ctx;
};
+struct hisi_zip_sqe_ops {
+ u8 sqe_type;
+ void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
+ void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
+ void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
+ u32 (*get_tag)(struct hisi_zip_sqe *sqe);
+ u32 (*get_status)(struct hisi_zip_sqe *sqe);
+ u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
+};
+
struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
+ const struct hisi_zip_sqe_ops *ops;
};
static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
@@ -119,35 +134,367 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
+static u16 get_extra_field_size(const u8 *start)
+{
+ return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
+}
+
+static u32 get_name_field_size(const u8 *start)
+{
+ return strlen(start) + 1;
+}
+
+static u32 get_comment_field_size(const u8 *start)
+{
+ return strlen(start) + 1;
+}
+
+static u32 __get_gzip_head_size(const u8 *src)
+{
+ u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
+ u32 size = GZIP_HEAD_FEXTRA_SHIFT;
+
+ if (head_flg & GZIP_HEAD_FEXTRA_BIT)
+ size += get_extra_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FNAME_BIT)
+ size += get_name_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
+ size += get_comment_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FHCRC_BIT)
+ size += GZIP_HEAD_FHCRC_SIZE;
+
+ return size;
+}
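/*
 * A quick standalone illustration of the walk above: past the 10-byte
 * fixed gzip header (GZIP_HEAD_FEXTRA_SHIFT), each optional member
 * extends the head in wire order. Flag value per RFC 1952; the sample
 * header contents are hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FNAME_BIT 0x08          /* RFC 1952 FLG.FNAME */
#define FIXED_HEAD 10           /* magic, method, flags, mtime, xfl, os */

int main(void)
{
        uint8_t hdr[] = { 0x1f, 0x8b, 0x08, FNAME_BIT, 0, 0, 0, 0, 0, 3,
                          'a', '.', 't', 'x', 't', 0 };
        uint32_t size = FIXED_HEAD;

        if (hdr[3] & FNAME_BIT)
                size += strlen((char *)&hdr[size]) + 1; /* "a.txt" + NUL */

        printf("head size = %u\n", size);       /* 16 */
        return 0;
}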
+
+static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+{
+ char buf[HZIP_GZIP_HEAD_BUF];
+
+ sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
+
+ return __get_gzip_head_size(buf);
+}
+
+static int add_comp_head(struct scatterlist *dst, u8 req_type)
+{
+ int head_size = TO_HEAD_SIZE(req_type);
+ const u8 *head = TO_HEAD(req_type);
+ int ret;
+
+ ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
+ if (ret != head_size) {
+ pr_err("the head size of buffer is wrong (%d)!\n", ret);
+ return -ENOMEM;
+ }
+
+ return head_size;
+}
+
+static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
+{
+ if (!acomp_req->src || !acomp_req->slen)
+ return -EINVAL;
+
+ if (req_type == HZIP_ALG_TYPE_GZIP &&
+ acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+ return -EINVAL;
+
+ switch (req_type) {
+ case HZIP_ALG_TYPE_ZLIB:
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
+ case HZIP_ALG_TYPE_GZIP:
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
+ default:
+ pr_err("request type is not supported!\n");
+ return -EINVAL;
+ }
+}
+
+static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
+ struct hisi_zip_qp_ctx *qp_ctx,
+ size_t head_size, bool is_comp)
+{
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+ struct hisi_zip_req *q = req_q->q;
+ struct hisi_zip_req *req_cache;
+ int req_id;
+
+ write_lock(&req_q->req_lock);
+
+ req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
+ if (req_id >= req_q->size) {
+ write_unlock(&req_q->req_lock);
+ dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
+ return ERR_PTR(-EAGAIN);
+ }
+ set_bit(req_id, req_q->req_bitmap);
+
+ req_cache = q + req_id;
+ req_cache->req_id = req_id;
+ req_cache->req = req;
+
+ if (is_comp) {
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
+ } else {
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
+ }
+
+ write_unlock(&req_q->req_lock);
+
+ return req_cache;
+}
+
+static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
+ struct hisi_zip_req *req)
+{
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+
+ write_lock(&req_q->req_lock);
+ clear_bit(req->req_id, req_q->req_bitmap);
+ memset(req, 0, sizeof(struct hisi_zip_req));
+ write_unlock(&req_q->req_lock);
+}
+
+static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ sqe->source_addr_l = lower_32_bits(req->dma_src);
+ sqe->source_addr_h = upper_32_bits(req->dma_src);
+ sqe->dest_addr_l = lower_32_bits(req->dma_dst);
+ sqe->dest_addr_h = upper_32_bits(req->dma_dst);
+}
+
+static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ struct acomp_req *a_req = req->req;
+
+ sqe->input_data_length = a_req->slen - req->sskip;
+ sqe->dest_avail_out = a_req->dlen - req->dskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
+}
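/*
 * The skip bookkeeping from hisi_zip_create_req() becomes SQE offsets
 * here: compression writes past the header already copied into dst
 * (dskip), decompression reads past the header in src (sskip). The
 * worked numbers below assume the driver's 10-byte fixed gzip header.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t slen = 4096, dlen = 8192, head_size = 10;
        uint32_t sskip = 0, dskip = head_size;  /* compression direction */

        printf("input_data_length = %u\n", slen - sskip);       /* 4096 */
        printf("dest_avail_out    = %u\n", dlen - dskip);       /* 8182 */
        return 0;
}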
+
+static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
u32 val;
- val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
+ val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
sqe->dw9 = val;
}
-static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
+static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
- sqe->tag = tag;
+ u32 val;
+
+ val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
+ val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
+ sqe->dw9 = val;
}
-static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
- dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen, u32 sskip, u32 dskip)
+static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
+ sqe->dw13 = req->req_id;
+}
+
+static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ sqe->dw26 = req->req_id;
+}
+
+static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
+{
+ u32 val;
+
+ val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
+ val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
+ sqe->dw7 = val;
+}
+
+static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
+ u8 req_type, struct hisi_zip_req *req)
+{
+ const struct hisi_zip_sqe_ops *ops = ctx->ops;
+
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen - sskip;
- sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
- sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
- sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen - dskip;
- sqe->source_addr_l = lower_32_bits(s_addr);
- sqe->source_addr_h = upper_32_bits(s_addr);
- sqe->dest_addr_l = lower_32_bits(d_addr);
- sqe->dest_addr_h = upper_32_bits(d_addr);
+ ops->fill_addr(sqe, req);
+ ops->fill_buf_size(sqe, req);
+ ops->fill_buf_type(sqe, HZIP_SGL);
+ ops->fill_req_type(sqe, req_type);
+ ops->fill_tag(sqe, req);
+ ops->fill_sqe_type(sqe, ops->sqe_type);
+}
+
+static int hisi_zip_do_work(struct hisi_zip_req *req,
+ struct hisi_zip_qp_ctx *qp_ctx)
+{
+ struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct acomp_req *a_req = req->req;
+ struct hisi_qp *qp = qp_ctx->qp;
+ struct device *dev = &qp->qm->pdev->dev;
+ struct hisi_zip_sqe zip_sqe;
+ int ret;
+
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+ return -EINVAL;
+
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
+ req->req_id << 1, &req->dma_src);
+ if (IS_ERR(req->hw_src)) {
+ dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
+ PTR_ERR(req->hw_src));
+ return PTR_ERR(req->hw_src);
+ }
+
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
+ (req->req_id << 1) + 1,
+ &req->dma_dst);
+ if (IS_ERR(req->hw_dst)) {
+ ret = PTR_ERR(req->hw_dst);
+ dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
+ ret);
+ goto err_unmap_input;
+ }
+
+ hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
+
+ /* send command to start a task */
+ atomic64_inc(&dfx->send_cnt);
+ ret = hisi_qp_send(qp, &zip_sqe);
+ if (ret < 0) {
+ atomic64_inc(&dfx->send_busy_cnt);
+ ret = -EAGAIN;
+ dev_dbg_ratelimited(dev, "failed to send request!\n");
+ goto err_unmap_output;
+ }
+
+ return -EINPROGRESS;
+
+err_unmap_output:
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+err_unmap_input:
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+ return ret;
+}
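/*
 * Slot pairing used above: each request owns two adjacent entries in
 * the SGL pool, req_id << 1 for the source mapping and
 * (req_id << 1) + 1 for the destination, so in-flight requests never
 * collide on pool indexes.
 */
#include <stdio.h>

int main(void)
{
        for (int req_id = 0; req_id < 3; req_id++)
                printf("req %d: src slot %d, dst slot %d\n",
                       req_id, req_id << 1, (req_id << 1) + 1);
        return 0;
}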
+
+static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw13;
+}
+
+static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw26;
+}
+
+static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw3 & HZIP_BD_STATUS_M;
+}
+
+static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
+{
+ return sqe->produced;
+}
+
+static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
+{
+ struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+ const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+ struct device *dev = &qp->qm->pdev->dev;
+ struct hisi_zip_sqe *sqe = data;
+ u32 tag = ops->get_tag(sqe);
+ struct hisi_zip_req *req = req_q->q + tag;
+ struct acomp_req *acomp_req = req->req;
+ u32 status, dlen, head_size;
+ int err = 0;
+
+ atomic64_inc(&dfx->recv_cnt);
+ status = ops->get_status(sqe);
+ if (status != 0 && status != HZIP_NC_ERR) {
+ dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
+ (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
+ sqe->produced);
+ atomic64_inc(&dfx->err_bd_cnt);
+ err = -EIO;
+ }
+
+ dlen = ops->get_dstlen(sqe);
+
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+
+ head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
+ acomp_req->dlen = dlen + head_size;
+
+ if (acomp_req->base.complete)
+ acomp_request_complete(acomp_req, err);
+
+ hisi_zip_remove_req(qp_ctx, req);
+}
+
+static int hisi_zip_acompress(struct acomp_req *acomp_req)
+{
+ struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+ struct hisi_zip_req *req;
+ int head_size;
+ int ret;
+
+ /* let's output compression head now */
+ head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
+ head_size);
+ return head_size;
+ }
+
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = hisi_zip_do_work(req, qp_ctx);
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
+ hisi_zip_remove_req(qp_ctx, req);
+ }
+
+ return ret;
+}
+
+static int hisi_zip_adecompress(struct acomp_req *acomp_req)
+{
+ struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+ struct hisi_zip_req *req;
+ int head_size, ret;
+
+ head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
+ head_size);
+ return head_size;
+ }
+
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = hisi_zip_do_work(req, qp_ctx);
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
+ ret);
+ hisi_zip_remove_req(qp_ctx, req);
+ }
+
+ return ret;
}
static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
@@ -177,9 +524,36 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
hisi_qm_release_qp(ctx->qp);
}
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
+ .sqe_type = 0,
+ .fill_addr = hisi_zip_fill_addr,
+ .fill_buf_size = hisi_zip_fill_buf_size,
+ .fill_buf_type = hisi_zip_fill_buf_type,
+ .fill_req_type = hisi_zip_fill_req_type,
+ .fill_tag = hisi_zip_fill_tag_v1,
+ .fill_sqe_type = hisi_zip_fill_sqe_type,
+ .get_tag = hisi_zip_get_tag_v1,
+ .get_status = hisi_zip_get_status,
+ .get_dstlen = hisi_zip_get_dstlen,
+};
+
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = {
+ .sqe_type = 0x3,
+ .fill_addr = hisi_zip_fill_addr,
+ .fill_buf_size = hisi_zip_fill_buf_size,
+ .fill_buf_type = hisi_zip_fill_buf_type,
+ .fill_req_type = hisi_zip_fill_req_type,
+ .fill_tag = hisi_zip_fill_tag_v2,
+ .fill_sqe_type = hisi_zip_fill_sqe_type,
+ .get_tag = hisi_zip_get_tag_v2,
+ .get_status = hisi_zip_get_status,
+ .get_dstlen = hisi_zip_get_dstlen,
+};
+
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
+ struct hisi_zip_qp_ctx *qp_ctx;
struct hisi_zip *hisi_zip;
int ret, i, j;
@@ -193,8 +567,9 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
- req_type);
+ qp_ctx = &hisi_zip_ctx->qp_ctx[i];
+ qp_ctx->ctx = hisi_zip_ctx;
+ ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
if (ret) {
for (j = i - 1; j >= 0; j--)
hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
@@ -203,9 +578,14 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
return ret;
}
- hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
+ qp_ctx->zip_dev = hisi_zip;
}
+ if (hisi_zip->qm.ver < QM_HW_V3)
+ hisi_zip_ctx->ops = &hisi_zip_ops_v1;
+ else
+ hisi_zip_ctx->ops = &hisi_zip_ops_v2;
+
return 0;
}
@@ -217,38 +597,6 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
-static u16 get_extra_field_size(const u8 *start)
-{
- return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
-}
-
-static u32 get_name_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 get_comment_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 __get_gzip_head_size(const u8 *src)
-{
- u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
- u32 size = GZIP_HEAD_FEXTRA_SHIFT;
-
- if (head_flg & GZIP_HEAD_FEXTRA_BIT)
- size += get_extra_field_size(src + size);
- if (head_flg & GZIP_HEAD_FNAME_BIT)
- size += get_name_field_size(src + size);
- if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
- size += get_comment_field_size(src + size);
- if (head_flg & GZIP_HEAD_FHCRC_BIT)
- size += GZIP_HEAD_FHCRC_SIZE;
-
- return size;
-}
-
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
struct hisi_zip_req_q *req_q;
@@ -336,52 +684,6 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
ctx->qp_ctx[i].sgl_pool);
}
-static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
- struct hisi_zip_req *req)
-{
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
-
- write_lock(&req_q->req_lock);
- clear_bit(req->req_id, req_q->req_bitmap);
- memset(req, 0, sizeof(struct hisi_zip_req));
- write_unlock(&req_q->req_lock);
-}
-
-static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
-{
- struct hisi_zip_sqe *sqe = data;
- struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
- struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- struct hisi_zip_req *req = req_q->q + sqe->tag;
- struct acomp_req *acomp_req = req->req;
- struct device *dev = &qp->qm->pdev->dev;
- u32 status, dlen, head_size;
- int err = 0;
-
- atomic64_inc(&dfx->recv_cnt);
- status = sqe->dw3 & HZIP_BD_STATUS_M;
- if (status != 0 && status != HZIP_NC_ERR) {
- dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
- (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
- sqe->produced);
- atomic64_inc(&dfx->err_bd_cnt);
- err = -EIO;
- }
- dlen = sqe->produced;
-
- hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
-
- head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
- acomp_req->dlen = dlen + head_size;
-
- if (acomp_req->base.complete)
- acomp_request_complete(acomp_req, err);
-
- hisi_zip_remove_req(qp_ctx, req);
-}
-
static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
void (*fn)(struct hisi_qp *, void *))
{
@@ -439,204 +741,6 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
hisi_zip_ctx_exit(ctx);
}
-static int add_comp_head(struct scatterlist *dst, u8 req_type)
-{
- int head_size = TO_HEAD_SIZE(req_type);
- const u8 *head = TO_HEAD(req_type);
- int ret;
-
- ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size) {
- pr_err("the head size of buffer is wrong (%d)!\n", ret);
- return -ENOMEM;
- }
-
- return head_size;
-}
-
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
-{
- char buf[HZIP_GZIP_HEAD_BUF];
-
- sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
-
- return __get_gzip_head_size(buf);
-}
-
-static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
-{
- if (!acomp_req->src || !acomp_req->slen)
- return -EINVAL;
-
- if ((req_type == HZIP_ALG_TYPE_GZIP) &&
- (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
- return -EINVAL;
-
- switch (req_type) {
- case HZIP_ALG_TYPE_ZLIB:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
- case HZIP_ALG_TYPE_GZIP:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
- default:
- pr_err("request type does not support!\n");
- return -EINVAL;
- }
-}
-
-static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
- struct hisi_zip_qp_ctx *qp_ctx,
- size_t head_size, bool is_comp)
-{
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- struct hisi_zip_req *q = req_q->q;
- struct hisi_zip_req *req_cache;
- int req_id;
-
- write_lock(&req_q->req_lock);
-
- req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
- if (req_id >= req_q->size) {
- write_unlock(&req_q->req_lock);
- dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- return ERR_PTR(-EAGAIN);
- }
- set_bit(req_id, req_q->req_bitmap);
-
- req_cache = q + req_id;
- req_cache->req_id = req_id;
- req_cache->req = req;
-
- if (is_comp) {
- req_cache->sskip = 0;
- req_cache->dskip = head_size;
- } else {
- req_cache->sskip = head_size;
- req_cache->dskip = 0;
- }
-
- write_unlock(&req_q->req_lock);
-
- return req_cache;
-}
-
-static int hisi_zip_do_work(struct hisi_zip_req *req,
- struct hisi_zip_qp_ctx *qp_ctx)
-{
- struct acomp_req *a_req = req->req;
- struct hisi_qp *qp = qp_ctx->qp;
- struct device *dev = &qp->qm->pdev->dev;
- struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
- struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_sqe zip_sqe;
- dma_addr_t input, output;
- int ret;
-
- if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
- return -EINVAL;
-
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
- req->req_id << 1, &input);
- if (IS_ERR(req->hw_src)) {
- dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
- PTR_ERR(req->hw_src));
- return PTR_ERR(req->hw_src);
- }
- req->dma_src = input;
-
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
- (req->req_id << 1) + 1,
- &output);
- if (IS_ERR(req->hw_dst)) {
- ret = PTR_ERR(req->hw_dst);
- dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
- ret);
- goto err_unmap_input;
- }
- req->dma_dst = output;
-
- hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
- a_req->dlen, req->sskip, req->dskip);
- hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
- hisi_zip_config_tag(&zip_sqe, req->req_id);
-
- /* send command to start a task */
- atomic64_inc(&dfx->send_cnt);
- ret = hisi_qp_send(qp, &zip_sqe);
- if (ret < 0) {
- atomic64_inc(&dfx->send_busy_cnt);
- ret = -EAGAIN;
- dev_dbg_ratelimited(dev, "failed to send request!\n");
- goto err_unmap_output;
- }
-
- return -EINPROGRESS;
-
-err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
-err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
- return ret;
-}
-
-static int hisi_zip_acompress(struct acomp_req *acomp_req)
-{
- struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
- struct hisi_zip_req *req;
- int head_size;
- int ret;
-
- /* let's output compression head now */
- head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0) {
- dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
- head_size);
- return head_size;
- }
-
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
- dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
- hisi_zip_remove_req(qp_ctx, req);
- }
-
- return ret;
-}
-
-static int hisi_zip_adecompress(struct acomp_req *acomp_req)
-{
- struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
- struct hisi_zip_req *req;
- int head_size, ret;
-
- head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (head_size < 0) {
- dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
- head_size);
- return head_size;
- }
-
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
- dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
- ret);
- hisi_zip_remove_req(qp_ctx, req);
- }
-
- return ret;
-}
-
static struct acomp_alg hisi_zip_acomp_zlib = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
@@ -665,7 +769,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(void)
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -684,7 +788,7 @@ int hisi_zip_register_to_crypto(void)
return ret;
}
-void hisi_zip_unregister_from_crypto(void)
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
crypto_unregister_acomp(&hisi_zip_acomp_zlib);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 02c445722445..2178b40e9f82 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -18,7 +18,6 @@
#define PCI_DEVICE_ID_ZIP_VF 0xa251
#define HZIP_QUEUE_NUM_V1 4096
-#define HZIP_QUEUE_NUM_V2 1024
#define HZIP_CLOCK_GATE_CTRL 0x301004
#define COMP0_ENABLE BIT(0)
@@ -69,10 +68,10 @@
#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
-#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
+#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
-#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0)
+#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
#define HZIP_COMP_CORE_NUM 2
#define HZIP_DECOMP_CORE_NUM 6
#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
@@ -134,17 +133,17 @@ static const struct hisi_zip_hw_error zip_hw_error[] = {
{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
+ { .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
+ { .int_msk = BIT(12), .msg = "zip_sva_err" },
{ /* sentinel */ }
};
enum ctrl_debug_file_index {
- HZIP_CURRENT_QM,
HZIP_CLEAR_ENABLE,
HZIP_DEBUG_FILE_NUM,
};
static const char * const ctrl_debug_file_name[] = {
- [HZIP_CURRENT_QM] = "current_qm",
[HZIP_CLEAR_ENABLE] = "clear_enable",
};
@@ -363,48 +362,6 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
return &hisi_zip->qm;
}
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
- struct hisi_qm *qm = file_to_qm(file);
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
-{
- struct hisi_qm *qm = file_to_qm(file);
- u32 vfq_num;
- u32 tmp;
-
- if (val > qm->vfs_num)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
- if (val == qm->vfs_num)
- qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
- qm->qp_num - (qm->vfs_num - 1) * vfq_num;
- else
- qm->debug.curr_qm_qp_num = vfq_num;
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
static u32 clear_enable_read(struct ctrl_debug_file *file)
{
struct hisi_qm *qm = file_to_qm(file);
@@ -438,9 +395,6 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case HZIP_CURRENT_QM:
- val = current_qm_read(file);
- break;
case HZIP_CLEAR_ENABLE:
val = clear_enable_read(file);
break;
@@ -478,11 +432,6 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
spin_lock_irq(&file->lock);
switch (file->index) {
- case HZIP_CURRENT_QM:
- ret = current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case HZIP_CLEAR_ENABLE:
ret = clear_enable_write(file, val);
if (ret)
@@ -580,7 +529,7 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
int i;
- for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
+ for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
spin_lock_init(&zip->ctrl->files[i].lock);
zip->ctrl->files[i].ctrl = zip->ctrl;
zip->ctrl->files[i].index = i;
@@ -627,10 +576,6 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
int i, j;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
@@ -714,6 +659,22 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HZIP_CORE_INT_SET);
}
+static void hisi_zip_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+ err_info->msi_wr_port = HZIP_WR_PORT;
+ err_info->acpi_rst = "ZRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
+
+ if (qm->ver >= QM_HW_V3)
+ err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
+}
+
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_init = hisi_zip_set_user_domain_and_cache,
.hw_err_enable = hisi_zip_hw_error_enable,
@@ -723,16 +684,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE |
- QM_ACC_WB_NOT_READY_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
- .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE,
- .msi_wr_port = HZIP_WR_PORT,
- .acpi_rst = "ZRST",
- }
+ .err_info_init = hisi_zip_err_info_init,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -746,13 +698,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
-
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
- else
- qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
-
qm->err_ini = &hisi_zip_err_ini;
+ qm->err_ini->err_info_init(qm);
hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index e813115d5432..aa4c7b2af3e2 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -963,8 +963,6 @@ static int img_hash_probe(struct platform_device *pdev)
hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdev->io_base)) {
err = PTR_ERR(hdev->io_base);
- dev_err(dev, "can't ioremap, returned %d\n", err);
-
goto res_err;
}
@@ -972,7 +970,6 @@ static int img_hash_probe(struct platform_device *pdev)
hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
if (IS_ERR(hdev->cpu_addr)) {
- dev_err(dev, "can't ioremap write port\n");
err = PTR_ERR(hdev->cpu_addr);
goto res_err;
}
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 6364583b88b2..9ff885d50edf 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -688,7 +688,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Leave the DSE threads reset state */
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
- /* Configure the procesing engine thresholds */
+ /* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8b0f17fc09fb..0616e369522e 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -265,7 +265,7 @@ static int setup_crypt_desc(void)
return 0;
}
-static spinlock_t desc_lock;
+static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
@@ -293,7 +293,7 @@ static struct crypt_ctl *get_crypt_desc(void)
}
}
-static spinlock_t emerg_lock;
+static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
int i;
@@ -1379,9 +1379,6 @@ static int __init ixp_module_init(void)
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- spin_lock_init(&desc_lock);
- spin_lock_init(&emerg_lock);
-
err = init_ixp_crypto(&pdev->dev);
if (err) {
platform_device_unregister(pdev);
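The two hunks above replace run-time spin_lock_init() calls with static DEFINE_SPINLOCK() initializers, so both locks are valid from module load and the explicit init step in ixp_module_init() can go away. A minimal sketch of the idiom (the function and the guarded data are illustrative, not from this driver):

#include <linux/spinlock.h>

/* Statically initialized: usable from load time, no init call needed. */
static DEFINE_SPINLOCK(example_lock);

static void touch_guarded_data(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... access data guarded by example_lock ... */
	spin_unlock_irqrestore(&example_lock, flags);
}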
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
index b6b25d994af3..e2a39fdaf623 100644
--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
@@ -1623,10 +1623,8 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
}
aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem);
- if (IS_ERR(aes_dev->base_reg)) {
- dev_err(dev, "Failed to get base address\n");
+ if (IS_ERR(aes_dev->base_reg))
return PTR_ERR(aes_dev->base_reg);
- }
/* Get and request IRQ */
aes_dev->irq = platform_get_irq(pdev, 0);
@@ -1649,8 +1647,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
/* Initialize crypto engine */
aes_dev->engine = crypto_engine_alloc_init(dev, true);
- if (!aes_dev->engine)
+ if (!aes_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(aes_dev->engine);
if (rc) {
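This fix matters because crypto_engine_alloc_init() signals failure by returning NULL rather than an ERR_PTR, so without an explicit rc = -ENOMEM the probe would jump to its unwind label carrying a stale return code of zero. A hedged sketch of the checking pattern (the wrapper name is illustrative):

#include <linux/errno.h>
#include <crypto/engine.h>

static int example_engine_setup(struct device *dev,
				struct crypto_engine **engine)
{
	*engine = crypto_engine_alloc_init(dev, true);
	if (!*engine)
		return -ENOMEM;	/* NULL, not ERR_PTR, on failure */

	return crypto_engine_start(*engine);
}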
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
index c4b97b4160e9..0379dbf32a4c 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -1192,10 +1192,8 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
}
hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
- if (IS_ERR(hcu_dev->io_base)) {
- dev_err(dev, "Could not io-remap mem resource.\n");
+ if (IS_ERR(hcu_dev->io_base))
return PTR_ERR(hcu_dev->io_base);
- }
init_completion(&hcu_dev->irq_done);
@@ -1220,8 +1218,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
/* Initialize crypto engine */
hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
- if (!hcu_dev->engine)
+ if (!hcu_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(hcu_dev->engine);
if (rc) {
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c
index 81eecacf603a..deb9bd460ee6 100644
--- a/drivers/crypto/keembay/ocs-hcu.c
+++ b/drivers/crypto/keembay/ocs-hcu.c
@@ -93,7 +93,7 @@
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
/**
- * struct ocs_hcu_dma_list - An entry in an OCS DMA linked list.
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
* @src_addr: Source address of the data.
* @src_len: Length of data to be fetched.
* @nxt_desc: Next descriptor to fetch.
@@ -107,7 +107,7 @@ struct ocs_hcu_dma_entry {
};
/**
- * struct ocs_dma_list - OCS-specific DMA linked list.
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
* @head: The head of the list (points to the array backing the list).
* @tail: The current tail of the list; NULL if the list is empty.
* @dma_addr: The DMA address of @head (i.e., the DMA address of the backing
@@ -597,7 +597,7 @@ int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
}
/**
- * ocs_hcu_digest() - Perform a hashing iteration.
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
@@ -632,7 +632,7 @@ int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
}
/**
- * ocs_hcu_hash_final() - Update and finalize hash computation.
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 3518fac29834..ecedd91a8d85 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -121,14 +121,14 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
struct pci_dev *pdev);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
- struct pci_dev *pdev, u64 reg, u64 *val);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val, int blkaddr);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val);
+ u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val);
+ u64 reg, u64 *val, int blkaddr);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val);
+ u64 reg, u64 val, int blkaddr);
struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 51cb6404ded7..9074876d38e5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -43,7 +43,7 @@ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val)
+ u64 reg, u64 *val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -62,12 +62,13 @@ int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 0;
reg_msg->reg_offset = reg;
reg_msg->ret_val = val;
+ reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val)
+ u64 reg, u64 val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -86,16 +87,17 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 1;
reg_msg->reg_offset = reg;
reg_msg->val = val;
+ reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val)
+ u64 reg, u64 *val, int blkaddr)
{
int ret;
- ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
+ ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
@@ -103,11 +105,11 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
}
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val)
+ u64 reg, u64 val, int blkaddr)
{
int ret;
- ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
+ ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 823a4571fd67..34aba1532761 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -56,7 +56,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- &lf_ctrl.u);
+ &lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@@ -64,7 +64,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- lf_ctrl.u);
+ lf_ctrl.u, lfs->blkaddr);
return ret;
}
@@ -77,7 +77,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- &lf_ctrl.u);
+ &lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@@ -85,7 +85,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- lf_ctrl.u);
+ lf_ctrl.u, lfs->blkaddr);
return ret;
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 314e97354100..ab1678fc564d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -95,6 +95,7 @@ struct otx2_cptlfs_info {
u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
+ int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
};
static inline void otx2_cpt_free_instruction_queues(
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 8c899ad531a5..e19af1356f12 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -51,6 +51,7 @@ struct otx2_cptpf_dev {
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
u8 kvf_limits; /* Kernel crypto limits */
+ bool has_cpt1;
};
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 5277e04badd9..58f47e3ab62e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -451,19 +451,19 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
return 0;
}
-static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
int timeout = 10, ret;
u64 reg = 0;
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, 0x1);
+ CPT_AF_BLK_RST, 0x1, blkaddr);
if (ret)
return ret;
do {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, &reg);
+ CPT_AF_BLK_RST, &reg, blkaddr);
if (ret)
return ret;
@@ -478,11 +478,35 @@ static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
return ret;
}
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+ int ret = 0;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_device_reset(cptpf, BLKADDR_CPT0);
+}
+
+static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
+{
+ u64 cfg;
+
+ cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
+ if (cfg & BIT_ULL(11))
+ cptpf->has_cpt1 = true;
+}
+
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
union otx2_cptx_af_constants1 af_cnsts1 = {0};
int ret = 0;
+ /* Check whether the 'implemented' bit is set for block BLKADDR_CPT1 */
+ cptpf_check_block_implemented(cptpf);
/* Reset the CPT PF device */
ret = cptpf_device_reset(cptpf);
if (ret)
@@ -490,7 +514,8 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_CONSTANTS1, &af_cnsts1.u);
+ CPT_AF_CONSTANTS1, &af_cnsts1.u,
+ BLKADDR_CPT0);
if (ret)
return ret;
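Every per-block helper introduced in this series (device reset, ucode base programming, core attach/detach/disable) is driven by the same dispatch shape: run the operation on CPT1 first when the discovery bit says the block exists, then on CPT0. A generic sketch of that shape (the cptx_op callback is hypothetical; the real driver open-codes one wrapper per helper):

static int cpt_for_each_block(struct otx2_cptpf_dev *cptpf,
			      int (*cptx_op)(struct otx2_cptpf_dev *, int))
{
	int ret;

	if (cptpf->has_cpt1) {
		ret = cptx_op(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_op(cptpf, BLKADDR_CPT0);
}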
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1dc3ba298139..a531f4c8b441 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -153,16 +153,16 @@ static int get_ucode_type(struct device *dev,
}
static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, int blkaddr)
{
return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_UCODE_BASE(eng),
- (u64)dma_addr);
+ (u64)dma_addr, blkaddr);
}
-static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf, int blkaddr)
{
- struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_engs_rsvd *engs;
dma_addr_t dma_addr;
int i, bit, ret;
@@ -170,7 +170,7 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT);
+ cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
if (ret)
return ret;
@@ -187,7 +187,8 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
*/
for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
if (!eng_grp->g->eng_ref_cnt[bit]) {
- ret = __write_ucode_base(cptpf, bit, dma_addr);
+ ret = __write_ucode_base(cptpf, bit, dma_addr,
+ blkaddr);
if (ret)
return ret;
}
@@ -195,23 +196,32 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
return 0;
}
-static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
- void *obj)
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
- struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
+}
+
+static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
int i, timeout = 10;
int busy, ret;
u64 reg = 0;
- bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
- if (!bmap.size)
- return -EINVAL;
-
/* Detach the cores from group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), &reg);
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@@ -221,7 +231,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), reg);
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
if (ret)
return ret;
}
@@ -237,7 +248,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_STS(i), &reg);
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
if (ret)
return ret;
@@ -253,7 +265,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
if (!eng_grp->g->eng_ref_cnt[i]) {
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x0);
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
if (ret)
return ret;
}
@@ -262,22 +275,39 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
return 0;
}
-static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
- void *obj)
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
- u64 reg = 0;
- int i, ret;
+ int ret;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
+ if (cptpf->has_cpt1) {
+ ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT0);
+}
+
+static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
+ u64 reg = 0;
+ int i, ret;
+
/* Attach the cores to the group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), &reg);
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@@ -287,7 +317,8 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), reg);
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
if (ret)
return ret;
}
@@ -295,15 +326,33 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
/* Enable the cores */
for_each_set_bit(i, bmap.bits, bmap.size) {
- ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
- cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x1);
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x1,
+ blkaddr);
if (ret)
return ret;
}
- ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
- return ret;
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
@@ -1140,20 +1189,18 @@ release_fw:
return ret;
}
-int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
+ int blkaddr)
{
- int i, ret, busy, total_cores;
- int timeout = 10;
- u64 reg = 0;
-
- total_cores = cptpf->eng_grps.avail.max_se_cnt +
- cptpf->eng_grps.avail.max_ie_cnt +
- cptpf->eng_grps.avail.max_ae_cnt;
+ int timeout = 10, ret;
+ int i, busy;
+ u64 reg;
/* Disengage the cores from groups */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), 0x0);
+ CPT_AF_EXEX_CTL2(i), 0x0,
+ blkaddr);
if (ret)
return ret;
@@ -1173,7 +1220,8 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_STS(i), &reg);
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
if (ret)
return ret;
@@ -1187,13 +1235,30 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
/* Disable the cores */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x0);
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
if (ret)
return ret;
}
return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+ int total_cores, ret;
+
+ total_cores = cptpf->eng_grps.avail.max_se_cnt +
+ cptpf->eng_grps.avail.max_ie_cnt +
+ cptpf->eng_grps.avail.max_ae_cnt;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
+}
+
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
@@ -1354,6 +1419,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
lfs->pdev = pdev;
lfs->reg_base = cptpf->reg_base;
lfs->mbox = &cptpf->afpf_mbox;
+ lfs->blkaddr = BLKADDR_CPT0;
ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 92e921eceed7..d6314ea9ae89 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 4c9362eebefd..e7384d107573 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 6d5ce1a66f1e..13f518802343 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CTR routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 77e338dc33f1..7a729dc2bc17 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES ECB routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 19c6ed5baea4..fc9baca13920 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES GCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 48dc1c98ca52..eb5c8f689360 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES XCBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 13c65deda8e9..446f611726df 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
ret = find_nx_device_tree(dn, chip_id, vasid,
NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
- if (ret)
+ if (ret) {
+ of_node_put(dn);
return ret;
+ }
}
if (!ct_842 || !ct_gzip) {
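The added of_node_put(dn) plugs a device-node reference leak: the for_each_*_node() iterator macros take a reference on each node they hand out and drop it only when the loop advances, so any early return from the loop body must drop it by hand. A minimal sketch (process_child() is a hypothetical per-node operation):

#include <linux/of.h>

int process_child(struct device_node *dn);	/* hypothetical */

static int scan_children_example(struct device_node *parent)
{
	struct device_node *dn;
	int ret;

	for_each_child_of_node(parent, dn) {
		ret = process_child(dn);
		if (ret) {
			of_node_put(dn);	/* balance the iterator's ref */
			return ret;
		}
	}
	return 0;
}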
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 90d9a37a57f6..b0ad665e4bda 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SHA-256 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index eb8627a0f317..c29103a1a0b6 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SHA-512 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 1d0e8a1ba160..010e87d9da36 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
@@ -200,7 +200,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* @sg: sg list head
* @end: sg list end
* @delta: is the amount we need to crop in order to bound the list.
- *
+ * @nbytes: length of data in the scatterlists or the data length,
+ * whichever is greater.
*/
static long int trim_sg_list(struct nx_sg *sg,
struct nx_sg *end,
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index 1975bcbee997..ee7cd88bb10a 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* debugfs routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a45bdcf3026d..0dd4c6b157de 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
dd->err = 0;
}
- err = pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
- pm_runtime_put_noidle(dd->dev);
dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
}
@@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "%s: failed to get_sync(%d)\n",
__func__, err);
@@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
static int omap_aes_resume(struct device *dev)
{
- pm_runtime_get_sync(dev);
+ pm_runtime_resume_and_get(dev);
return 0;
}
#endif
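pm_runtime_resume_and_get(), used throughout this hunk, folds the old pm_runtime_get_sync() + pm_runtime_put_noidle() error dance into one call: on failure it drops the usage count it just took, so callers no longer leak a reference when resume fails. A hedged usage sketch:

#include <linux/pm_runtime.h>

static int example_use_device(struct device *dev)
{
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;	/* usage count already dropped for us */

	/* ... talk to the hardware ... */

	pm_runtime_put(dev);
	return 0;
}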
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 6a9be01fdf33..3524ddd48930 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -224,6 +224,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_name = uof_get_name;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index f5990d042c9a..1dd64af22bea 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -212,6 +212,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1d1532e8fb6d..067ca5e17d38 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index cadcf12884c8..30337390513c 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -214,6 +214,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 04742a6d91ca..51ea88c0b17d 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5527344546e5..ac435b44f1d2 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -173,6 +173,7 @@ struct adf_hw_device_data {
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
+ void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 1aa17303838d..9e560c7d4163 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -179,3 +179,28 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ unsigned long accel_mask = hw_data->accel_mask;
+ void __iomem *pmisc_addr;
+ struct adf_bar *pmisc;
+ int pmisc_id;
+ u32 i = 0;
+
+ pmisc_id = hw_data->get_misc_bar_id(hw_data);
+ pmisc = &GET_BARS(accel_dev)[pmisc_id];
+ pmisc_addr = pmisc->virt_addr;
+
+ /* Configure WDT timers */
+ for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 3816e6500352..756b0ddfac5e 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -113,11 +113,24 @@ do { \
/* Power gating */
#define ADF_POWERGATE_PKE BIT(24)
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x2000000
+#define ADF_SSMWDT_OFFSET 0x54
+#define ADF_SSMWDTPKE_OFFSET 0x58
+#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
index b72ff58e0bc7..000528327b29 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
@@ -99,3 +99,43 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+ u32 *lower)
+{
+ *lower = lower_32_bits(value);
+ *upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ u32 ssm_wdt_pke_high = 0;
+ u32 ssm_wdt_pke_low = 0;
+ u32 ssm_wdt_high = 0;
+ u32 ssm_wdt_low = 0;
+ void __iomem *pmisc_addr;
+ struct adf_bar *pmisc;
+ int pmisc_id;
+
+ pmisc_id = hw_data->get_misc_bar_id(hw_data);
+ pmisc = &GET_BARS(accel_dev)[pmisc_id];
+ pmisc_addr = pmisc->virt_addr;
+
+ /* Convert the 64-bit WDT timer value into two 32-bit values for
+ * MMIO writes to the 32-bit CSRs.
+ */
+ adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+ adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+ &ssm_wdt_pke_low);
+
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
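The gen4 WDT CSRs are 32 bits wide, so the 64-bit timeout has to be split across a low/high register pair; the unpack helper above leans on the standard lower_32_bits()/upper_32_bits() accessors. A minimal sketch of the resulting write pattern (the helper is hypothetical and assumes a directly mapped CSR base):

#include <linux/io.h>
#include <linux/kernel.h>

static void write_csr_pair(void __iomem *base, u32 off_lo, u32 off_hi,
			   u64 val)
{
	writel(lower_32_bits(val), base + off_lo);
	writel(upper_32_bits(val), base + off_hi);
}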
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 8ab62b2ac311..b8fca1ff7aab 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -94,6 +94,18 @@ do { \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
+#define ADF_SSMWDTL_OFFSET 0x54
+#define ADF_SSMWDTH_OFFSET 0x5C
+#define ADF_SSMWDTPKEL_OFFSET 0x58
+#define ADF_SSMWDTPKEH_OFFSET 0x60
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 42029153408e..744c40351428 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -162,6 +162,10 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ /* Set SSM watchdog timer */
+ if (hw_data->set_ssm_wdtimer)
+ hw_data->set_ssm_wdtimer(accel_dev);
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index c45853463530..e3ad5587be49 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
ret = adf_isr_alloc_msix_entry_table(accel_dev);
if (ret)
- return ret;
- if (adf_enable_msix(accel_dev))
goto err_out;
- if (adf_setup_bh(accel_dev))
- goto err_out;
+ ret = adf_enable_msix(accel_dev);
+ if (ret)
+ goto err_free_msix_table;
- if (adf_request_irqs(accel_dev))
- goto err_out;
+ ret = adf_setup_bh(accel_dev);
+ if (ret)
+ goto err_disable_msix;
+
+ ret = adf_request_irqs(accel_dev);
+ if (ret)
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+ adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+ adf_isr_free_msix_entry_table(accel_dev);
+
err_out:
- adf_isr_resource_free(accel_dev);
- return -EFAULT;
+ return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
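The rework above turns a single catch-all error path (which freed resources that were never acquired) into the canonical kernel unwind ladder: every successful acquisition gets a label that releases exactly that acquisition, taken in reverse order. It also propagates the real errno instead of flattening every failure to -EFAULT. A generic sketch with hypothetical step names:

struct foo;				/* hypothetical context */
int step_a(struct foo *f); void undo_a(struct foo *f);
int step_b(struct foo *f); void undo_b(struct foo *f);
int step_c(struct foo *f);

static int example_alloc(struct foo *f)
{
	int ret;

	ret = step_a(f);
	if (ret)
		return ret;

	ret = step_b(f);
	if (ret)
		goto err_undo_a;

	ret = step_c(f);
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b(f);
err_undo_a:
	undo_a(f);
	return ret;
}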
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 8b090b7ae8c6..a1b77bd7a894 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -169,7 +169,7 @@ out:
* @msg: Message to send
* @vf_nr: VF number to which the message will be sent
*
- * Function sends a messge from the PF to a VF
+ * Function sends a message from the PF to a VF
*
* Return: 0 on success, error code otherwise.
*/
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 888c1e047295..8ba28409fb74 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
ring->base_addr, ring->dma_addr);
+ ring->base_addr = NULL;
return -EFAULT;
}
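Clearing ring->base_addr right after this dma_free_coherent() prevents a double free: the alignment-failure path releases the buffer, and without the NULL a later ring-cleanup pass would free the same address again. A minimal sketch of the idempotent-cleanup idiom (struct ring here is hypothetical):

#include <linux/dma-mapping.h>

struct ring {			/* hypothetical */
	void *base_addr;
	dma_addr_t dma_addr;
	size_t size;
};

static void ring_free(struct device *dev, struct ring *r)
{
	if (!r->base_addr)
		return;		/* already freed, nothing to do */
	dma_free_coherent(dev, r->size, r->base_addr, r->dma_addr);
	r->base_addr = NULL;	/* make a second call harmless */
}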
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index 2c98fb63f7b7..e85bd62d134a 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -8,7 +8,7 @@
* adf_vf2pf_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends an init messge from the VF to a PF
+ * Function sends an init message from the VF to a PF
*
* Return: 0 on success, error code otherwise.
*/
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init);
* adf_vf2pf_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends a shutdown messge from the VF to a PF
+ * Function sends a shutdown message from the VF to a PF
*
* Return: void
*/
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 38d316a42ba6..888388acb6bd 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
goto err_out;
if (adf_setup_pf2vf_bh(accel_dev))
- goto err_out;
+ goto err_disable_msi;
if (adf_setup_bh(accel_dev))
- goto err_out;
+ goto err_cleanup_pf2vf_bh;
if (adf_request_msi_irq(accel_dev))
- goto err_out;
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+ adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+ adf_disable_msi(accel_dev);
+
err_out:
- adf_vf_isr_resource_free(accel_dev);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index ff78c73c47e3..f998ed58457c 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
- dma_addr_t blp;
- dma_addr_t bloutp = 0;
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
@@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!bufl))
return -ENOMEM;
- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, blp)))
- goto err_in;
+ for_each_sg(sgl, sg, n, i)
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err_in;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err_in;
- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err_out;
+
bufers = buflout->bufers;
+ for_each_sg(sglout, sg, n, i)
+ bufers[i].addr = DMA_MAPPING_ERROR;
+
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
}
buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err_out;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
qat_req->buf.sz_out = sz_out;
@@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
return 0;
err_out:
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
n = sg_nents(sglout);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
kfree(buflout);
err_in:
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -813,8 +823,6 @@ err_in:
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, blp))
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
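The reordering above closes two holes at once: the buffer-list tables are now DMA-mapped only after they are fully populated (they are mapped TO_DEVICE, so the device could otherwise observe a half-built table), and every address slot is pre-seeded with DMA_MAPPING_ERROR so the unwind path can distinguish mapped entries from never-touched ones. A hedged, generic sketch of both ideas (struct buf_desc and the helper are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct buf_desc {
	dma_addr_t addr;
	size_t len;
};

static int map_desc_table_example(struct device *dev, struct buf_desc *d,
				  int n, size_t sz, dma_addr_t *table_dma)
{
	int i;

	for (i = 0; i < n; i++)
		d[i].addr = DMA_MAPPING_ERROR;	/* sentinel for unwind */

	/* ... map each buffer here, filling d[i].addr and d[i].len ... */

	/* Map the table itself only once its contents are final. */
	*table_dma = dma_map_single(dev, d, sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *table_dma))
		return -ENOMEM;
	return 0;
}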
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index c972554a755e..29999da716cc 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
- struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req; // keep at the end
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
{
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+ /* Set the data unit size to cryptlen. Anything else causes the
+ * crypto engine to return incorrect results.
+ */
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
- qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+ qce_write(qce, REG_ENCR_SEG_START, 0);
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_CNTR_MASK2, ~0);
}
- qce_write(qce, REG_SEG_SIZE, totallen);
+ qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
/* get little endianness */
config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
}
#endif
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
{
switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
- return qce_setup_regs_skcipher(async_req, totallen, offset);
+ return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
- return qce_setup_regs_ahash(async_req, totallen, offset);
+ return qce_setup_regs_ahash(async_req);
#endif
default:
return -EINVAL;
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 85ba16418a04..3bc244bcca2d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -94,7 +94,6 @@ struct qce_alg_template {
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
int qce_check_status(struct qce_device *qce, u32 *status);
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 61c418c12345..8e6fcf2c21cc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -12,9 +12,15 @@
#include "core.h"
#include "sha.h"
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING 64
-#define SHA_PADDING_MASK (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+ u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+ u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+ __be32 byte_count[2];
+ unsigned int pending_buflen;
+ unsigned int flags;
+ u64 count;
+ bool first_blk;
+};
static LIST_HEAD(ahash_algs);
@@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
static int qce_ahash_export(struct ahash_request *req, void *out)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned long flags = rctx->flags;
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- struct sha1_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buffer, rctx->buf, blocksize);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- struct sha256_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buf, rctx->buf, blocksize);
- } else {
- return -EINVAL;
- }
+ struct qce_sha_saved_state *export_state = out;
- return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
- const u32 *state, const u8 *buffer, bool hmac)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize;
- u64 count = in_count;
-
- blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
- rctx->count = in_count;
- memcpy(rctx->buf, buffer, blocksize);
-
- if (in_count <= blocksize) {
- rctx->first_blk = 1;
- } else {
- rctx->first_blk = 0;
- /*
- * For HMAC, there is a hardware padding done when first block
- * is set. Therefore the byte_count must be incremened by 64
- * after the first block operation.
- */
- if (hmac)
- count += SHA_PADDING;
- }
-
- rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
- rctx->byte_count[1] = (__force __be32)(count >> 32);
- qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
- digestsize);
- rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+ memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+ memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
+ export_state->byte_count[0] = rctx->byte_count[0];
+ export_state->byte_count[1] = rctx->byte_count[1];
+ export_state->pending_buflen = rctx->buflen;
+ export_state->count = rctx->count;
+ export_state->first_blk = rctx->first_blk;
+ export_state->flags = rctx->flags;
return 0;
}
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
- struct qce_sha_reqctx *rctx;
- unsigned long flags;
- bool hmac;
- int ret;
-
- ret = qce_ahash_init(req);
- if (ret)
- return ret;
-
- rctx = ahash_request_ctx(req);
- flags = rctx->flags;
- hmac = IS_SHA_HMAC(flags);
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- const struct sha1_state *state = in;
-
- ret = qce_import_common(req, state->count, state->state,
- state->buffer, hmac);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- const struct sha256_state *state = in;
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ const struct qce_sha_saved_state *import_state = in;
- ret = qce_import_common(req, state->count, state->state,
- state->buf, hmac);
- }
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->count = import_state->count;
+ rctx->buflen = import_state->pending_buflen;
+ rctx->first_blk = import_state->first_blk;
+ rctx->flags = import_state->flags;
+ rctx->byte_count[0] = import_state->byte_count[0];
+ rctx->byte_count[1] = import_state->byte_count[1];
+ memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
+ memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));
- return ret;
+ return 0;
}
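
The export/import pair above now snapshots and restores the driver's private request context as one flat blob, so import no longer needs qce_ahash_init() or per-algorithm unpacking. A minimal standalone C sketch of the idea (the structs here are simplified stand-ins, not the driver's):

#include <string.h>

/* Simplified stand-ins for qce_sha_reqctx / qce_sha_saved_state. */
struct reqctx { unsigned long long count; unsigned int buflen; unsigned char buf[64]; };
struct saved_state { unsigned long long count; unsigned int buflen; unsigned char buf[64]; };

/* Export is a verbatim snapshot of the live context... */
static void state_export(const struct reqctx *rctx, struct saved_state *out)
{
	out->count = rctx->count;
	out->buflen = rctx->buflen;
	memcpy(out->buf, rctx->buf, rctx->buflen);
}

/* ...and import is a plain field-by-field restore. */
static void state_import(struct reqctx *rctx, const struct saved_state *in)
{
	memset(rctx, 0, sizeof(*rctx));
	rctx->count = in->count;
	rctx->buflen = in->buflen;
	memcpy(rctx->buf, in->buf, in->buflen);
}
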
static int qce_ahash_update(struct ahash_request *req)
@@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
/* calculate how many bytes will be hashed later */
hash_later = total % blocksize;
+
+	/*
+	 * At this point, there is more than one block's worth of data. If
+	 * the data available to transfer is an exact multiple of the block
+	 * size, save the last block to be transferred in qce_ahash_final
+	 * (with the last block bit set) in case this is indeed the end of
+	 * the data stream. If it is not, the saved block will be transferred
+	 * as part of the next update. If this block were not held back and
+	 * this were indeed the end of the data stream, the digest obtained
+	 * would be wrong, since qce_ahash_final would see that rctx->buflen
+	 * is 0 and return without doing anything, which in turn means that
+	 * no digest would be copied to the destination result buffer.
+	 * qce_ahash_final cannot be altered to proceed when rctx->buflen is
+	 * 0, because the crypto engine BAM does not allow zero-length
+	 * transfers.
+	 */
+ if (!hash_later)
+ hash_later = blocksize;
+
if (hash_later) {
unsigned int src_offset = req->nbytes - hash_later;
scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
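
A standalone sketch of the hold-back rule the comment above describes; the helper name is illustrative, not a driver function:

#include <stdio.h>

/*
 * Given the total bytes available and the algorithm block size, return
 * how many trailing bytes must be kept back for the final operation.
 * A full block is retained when the total is an exact multiple of the
 * block size, because the engine cannot issue a zero-length final
 * transfer.
 */
static unsigned int bytes_to_hold_back(unsigned int total, unsigned int blocksize)
{
	unsigned int hash_later = total % blocksize;

	if (!hash_later)
		hash_later = blocksize;
	return hash_later;
}

int main(void)
{
	printf("%u\n", bytes_to_hold_back(128, 64));	/* 64: hold back the whole last block */
	printf("%u\n", bytes_to_hold_back(130, 64));	/* 2: hold back only the tail */
	return 0;
}
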
@@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
{
@@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
};
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..c0a0d8c4fce1 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
@@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+ unsigned int __keylen;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+	/*
+	 * AES-XTS with key1 == key2 is not supported by the crypto engine.
+	 * Revisit this to request a fallback cipher in that case.
+	 */
+ if (IS_XTS(flags)) {
+ __keylen = keylen >> 1;
+ if (!memcmp(key, key + __keylen, __keylen))
+ return -ENOKEY;
+ } else {
+ __keylen = keylen;
+ }
+
+ switch (__keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
memcpy(ctx->enc_key, key, keylen);
break;
+ case AES_KEYSIZE_192:
+ break;
+ default:
+ return -EINVAL;
}
ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
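
For reference, the XTS half-key comparison in isolation; a minimal sketch with an illustrative helper name, not a driver function:

#include <stdbool.h>
#include <string.h>

/*
 * An XTS key is two equal-length keys concatenated; the engine rejects
 * the degenerate case where both halves are identical.
 */
static bool xts_key_halves_equal(const unsigned char *key, unsigned int keylen)
{
	unsigned int half = keylen >> 1;

	return memcmp(key, key + half, half) == 0;
}
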
@@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ u32 _key[6];
int err;
err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
+	/*
+	 * The crypto engine does not support any two of the three keys
+	 * being the same for triple DES algorithms, and
+	 * verify_skcipher_des3_key() does not check for all of the
+	 * conditions below. Return -ENOKEY when any two keys are the
+	 * same. Revisit this to see if a fallback cipher is needed to
+	 * handle the condition.
+	 */
+ memcpy(_key, key, DES3_EDE_KEY_SIZE);
+ if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+ !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+ !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+ return -ENOKEY;
+
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
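
The XOR trick above treats each 64-bit key as a pair of 32-bit words: two keys are equal exactly when the OR of the per-word XORs is zero. A standalone restatement (the helper name is illustrative):

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define DES3_EDE_KEY_SIZE 24	/* three 64-bit DES keys */

/* True when any two of the three DES keys are identical. */
static bool des3_any_two_keys_equal(const uint8_t *key)
{
	uint32_t k[6];

	memcpy(k, key, DES3_EDE_KEY_SIZE);
	return !((k[0] ^ k[2]) | (k[1] ^ k[3])) ||
	       !((k[2] ^ k[4]) | (k[3] ^ k[5])) ||
	       !((k[0] ^ k[4]) | (k[1] ^ k[5]));
}
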
@@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ unsigned int blocksize = crypto_skcipher_blocksize(tfm);
int keylen;
int ret;
@@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
- * is not a multiple of it; pass such requests to the fallback
+	/* The CE does not handle zero-length messages */
+ if (!req->cryptlen)
+ return 0;
+
+ /*
+ * ECB and CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+ if (!IS_ALIGNED(req->cryptlen, blocksize))
+ return -EINVAL;
+
+	/*
+	 * Conditions for requesting a fallback cipher:
+	 * - AES-192 (not supported by the crypto engine (CE))
+	 * - AES-XTS request with len <= 512 bytes (using the CE is not
+	 *   recommended)
+	 * - AES-XTS request with len > QCE_SECTOR_SIZE that is not a
+	 *   multiple of it (revisit this condition to check whether it is
+	 *   needed in all versions of the CE)
*/
if (IS_AES(rctx->flags) &&
- (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
- req->cryptlen <= aes_sw_max_len) ||
- (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
- req->cryptlen % QCE_SECTOR_SIZE))) {
+ ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+ (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+ (req->cryptlen > QCE_SECTOR_SIZE &&
+ req->cryptlen % QCE_SECTOR_SIZE))))) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
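
A sketch of the fallback decision above as a standalone predicate. Here aes_sw_max_len is fixed to a constant for the sketch; in the driver it is a module parameter:

#include <stdbool.h>

#define QCE_SECTOR_SIZE 512
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_256 32

static const unsigned int aes_sw_max_len = 512;

static bool needs_fallback(bool is_xts, unsigned int keylen, unsigned int cryptlen)
{
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
		return true;	/* e.g. AES-192 */
	if (is_xts && cryptlen <= aes_sw_max_len)
		return true;	/* short XTS requests */
	if (is_xts && cryptlen > QCE_SECTOR_SIZE && cryptlen % QCE_SECTOR_SIZE)
		return true;	/* non-sector-multiple XTS */
	return false;
}
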
@@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
.name = "ecb(aes)",
.drv_name = "ecb-aes-qce",
.blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 81befe7febaa..ed03058497bc 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -48,7 +48,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
struct ahash_request *req = ahash_request_cast(dev->async_req);
struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
- int reg_status = 0;
+ int reg_status;
reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 682c8a450a57..55aa3a71169b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
@@ -401,7 +402,7 @@ static const struct samsung_aes_variant exynos_aes_data = {
static const struct samsung_aes_variant exynos5433_slim_aes_data = {
.aes_offset = 0x400,
.hash_offset = 0x800,
- .clk_names = { "pclk", "aclk", },
+ .clk_names = { "aclk", "pclk", },
};
static const struct of_device_id s5p_sss_dt_match[] = {
@@ -424,13 +425,9 @@ MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
static inline const struct samsung_aes_variant *find_s5p_sss_version
(const struct platform_device *pdev)
{
- if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
- const struct of_device_id *match;
+ if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
+ return of_device_get_match_data(&pdev->dev);
- match = of_match_node(s5p_sss_dt_match,
- pdev->dev.of_node);
- return (const struct samsung_aes_variant *)match->data;
- }
return (const struct samsung_aes_variant *)
platform_get_device_id(pdev)->driver_data;
}
@@ -2159,7 +2156,7 @@ static struct skcipher_alg algs[] = {
static int s5p_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int i, j, err = -ENODEV;
+ int i, j, err;
const struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
@@ -2189,14 +2186,14 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
pdata->res = res;
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ioaddr)) {
if (!pdata->use_hash)
return PTR_ERR(pdata->ioaddr);
/* try AES without HASH */
res->end -= 0x300;
pdata->use_hash = false;
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
}
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index f300b0a5958a..1c6929fb3a13 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -69,8 +69,24 @@
/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64
-#define PRIV_ID 0x1
-#define PRIV 0x1
+enum sa_algo_id {
+ SA_ALG_CBC_AES = 0,
+ SA_ALG_EBC_AES,
+ SA_ALG_CBC_DES3,
+ SA_ALG_ECB_DES3,
+ SA_ALG_SHA1,
+ SA_ALG_SHA256,
+ SA_ALG_SHA512,
+ SA_ALG_AUTHENC_SHA1_AES,
+ SA_ALG_AUTHENC_SHA256_AES,
+};
+
+struct sa_match_data {
+ u8 priv;
+ u8 priv_id;
+ u32 supported_algos;
+ bool skip_engine_control;
+};
static struct device *sa_k3_dev;
@@ -696,8 +712,9 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
}
static
-int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
- u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
+ const u8 *enc_key, u16 enc_key_sz,
+ const u8 *auth_key, u16 auth_key_sz,
struct algo_data *ad, u8 enc, u32 *swinfo)
{
int enc_sc_offset = 0;
@@ -732,8 +749,8 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
memcpy(&sc_buf[2], &sc_id, 2);
sc_buf[4] = 0x0;
- sc_buf[5] = PRIV_ID;
- sc_buf[6] = PRIV;
+ sc_buf[5] = match_data->priv_id;
+ sc_buf[6] = match_data->priv;
sc_buf[7] = 0x0;
/* Prepare context for encryption engine */
@@ -892,8 +909,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
return ret;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 1, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -905,8 +922,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
- &ctx->dec.epib[1]))
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 0, &ctx->dec.epib[1]))
goto badkey;
cfg.enc_eng_id = ad->enc_eng.eng_id;
@@ -1106,7 +1123,7 @@ static int sa_run(struct sa_req *req)
else
dma_rx = pdata->dma_rx1;
- ddev = dma_rx->device->dev;
+ ddev = dmaengine_get_dma_device(pdata->dma_tx);
rxd->ddev = ddev;
memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
@@ -1146,8 +1163,10 @@ static int sa_run(struct sa_req *req)
mapped_sg->sgt.sgl = src;
mapped_sg->sgt.orig_nents = src_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
- if (ret)
+ if (ret) {
+ kfree(rxd);
return ret;
+ }
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
@@ -1155,8 +1174,10 @@ static int sa_run(struct sa_req *req)
mapped_sg->sgt.sgl = req->src;
mapped_sg->sgt.orig_nents = sg_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
- if (ret)
+ if (ret) {
+ kfree(rxd);
return ret;
+ }
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
@@ -1446,9 +1467,10 @@ static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
cfg.akey = NULL;
cfg.akey_len = 0;
+ ctx->dev_data = dev_get_drvdata(sa_k3_dev);
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
+ ad, 0, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -1716,6 +1738,7 @@ static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
int ret;
memzero_explicit(ctx, sizeof(*ctx));
+ ctx->dev_data = data;
ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->shash)) {
@@ -1817,8 +1840,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
cfg.akey_len = keys.authkeylen;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 1, &ctx->enc.epib[1]))
return -EINVAL;
@@ -1831,8 +1854,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 0, &ctx->dec.epib[1]))
return -EINVAL;
@@ -1950,7 +1973,7 @@ static int sa_aead_decrypt(struct aead_request *req)
}
static struct sa_alg_tmpl sa_algs[] = {
- {
+ [SA_ALG_CBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(aes)",
@@ -1973,7 +1996,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_EBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(aes)",
@@ -1995,7 +2018,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_CBC_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
@@ -2018,7 +2041,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_ECB_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des3_ede)",
@@ -2040,7 +2063,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_SHA1] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2069,7 +2092,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA256] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2098,7 +2121,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA512] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2127,7 +2150,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_AUTHENC_SHA1_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2154,7 +2177,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_aead_decrypt,
},
},
- {
+ [SA_ALG_AUTHENC_SHA256_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2185,13 +2208,19 @@ static struct sa_alg_tmpl sa_algs[] = {
};
/* Register the algorithms in crypto framework */
-static void sa_register_algos(const struct device *dev)
+static void sa_register_algos(struct sa_crypto_data *dev_data)
{
+ const struct sa_match_data *match_data = dev_data->match_data;
+ struct device *dev = dev_data->dev;
char *alg_name;
u32 type;
int i, err;
for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ /* Skip unsupported algos */
+ if (!(match_data->supported_algos & BIT(i)))
+ continue;
+
type = sa_algs[i].type;
if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
alg_name = sa_algs[i].alg.skcipher.base.cra_name;
@@ -2329,14 +2358,39 @@ static int sa_link_child(struct device *dev, void *data)
return 0;
}
+static struct sa_match_data am654_match_data = {
+ .priv = 1,
+ .priv_id = 1,
+ .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0),
+};
+
+static struct sa_match_data am64_match_data = {
+ .priv = 0,
+ .priv_id = 0,
+ .supported_algos = BIT(SA_ALG_CBC_AES) |
+ BIT(SA_ALG_EBC_AES) |
+ BIT(SA_ALG_SHA256) |
+ BIT(SA_ALG_SHA512) |
+ BIT(SA_ALG_AUTHENC_SHA256_AES),
+ .skip_engine_control = true,
+};
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
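
A minimal standalone model of the supported_algos gating that sa_register_algos() now performs, with illustrative enum values:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative bit positions mirroring enum sa_algo_id above. */
enum { ALG_CBC_AES, ALG_ECB_AES, ALG_SHA256, ALG_MAX };

int main(void)
{
	/* e.g. a part that supports only CBC-AES and SHA256 */
	unsigned int supported = BIT(ALG_CBC_AES) | BIT(ALG_SHA256);
	int i;

	for (i = 0; i < ALG_MAX; i++) {
		if (!(supported & BIT(i)))
			continue;	/* skip unsupported algos */
		printf("registering algo %d\n", i);
	}
	return 0;
}
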
+
static int sa_ul_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct resource *res;
static void __iomem *saul_base;
struct sa_crypto_data *dev_data;
- u32 val;
int ret;
dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
@@ -2350,7 +2404,7 @@ static int sa_ul_probe(struct platform_device *pdev)
dev_set_drvdata(sa_k3_dev, dev_data);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
ret);
@@ -2362,18 +2416,28 @@ static int sa_ul_probe(struct platform_device *pdev)
if (ret)
goto disable_pm_runtime;
+ match = of_match_node(of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ dev_data->match_data = match->data;
+
spin_lock_init(&dev_data->scid_lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
saul_base = devm_ioremap_resource(dev, res);
dev_data->base = saul_base;
- val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
- SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
- SA_EEC_TRNG_EN;
- writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+ if (!dev_data->match_data->skip_engine_control) {
+ u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
- sa_register_algos(dev);
+ writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+ }
+
+ sa_register_algos(dev_data);
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (ret)
@@ -2419,13 +2483,6 @@ static int sa_ul_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id of_match[] = {
- {.compatible = "ti,j721e-sa2ul",},
- {.compatible = "ti,am654-sa2ul",},
- {},
-};
-MODULE_DEVICE_TABLE(of, of_match);
-
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
.remove = sa_ul_remove,
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index f597ddecde34..ed66d1f111db 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -171,9 +171,12 @@ struct sa_tfm_ctx;
#define SA_UNSAFE_DATA_SZ_MIN 240
#define SA_UNSAFE_DATA_SZ_MAX 256
+struct sa_match_data;
+
/**
* struct sa_crypto_data - Crypto driver instance data
* @base: Base address of the register space
+ * @match_data: Pointer to SoC-specific match data
* @pdev: Platform device pointer
* @sc_pool: security context pool
* @dev: Device pointer
@@ -189,6 +192,7 @@ struct sa_tfm_ctx;
*/
struct sa_crypto_data {
void __iomem *base;
+ const struct sa_match_data *match_data;
struct platform_device *pdev;
struct dma_pool *sc_pool;
struct device *dev;
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 2a4793176c71..7389a0536ff0 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
- pm_runtime_get_sync(cryp->dev);
+ pm_runtime_resume_and_get(cryp->dev);
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
if (!cryp)
return -ENODEV;
- ret = pm_runtime_get_sync(cryp->dev);
+ ret = pm_runtime_resume_and_get(cryp->dev);
if (ret < 0)
return ret;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 7ac0573ef663..389de9e3302d 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
@@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (!hdev)
return -ENODEV;
- ret = pm_runtime_get_sync(hdev->dev);
+ ret = pm_runtime_resume_and_get(hdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 9866c2a5e9a7..759d0d9786fd 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -15,7 +15,7 @@
#include "cryp_p.h"
#include "cryp.h"
-/**
+/*
* cryp_wait_until_done - wait until the device logic is not busy
*/
void cryp_wait_until_done(struct cryp_device_data *device_data)
@@ -285,6 +285,7 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data,
* other device context parameter
* @device_data: Pointer to the device data struct for base address.
* @ctx: Crypto device context
+ * @cryp_mode: Mode of operation (polling, interrupt or DMA)
*/
void cryp_save_device_context(struct cryp_device_data *device_data,
struct cryp_device_context *ctx,
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index 8da7f87b339b..db5713d7c940 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index c3adeb2e5823..30cdd5253929 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
@@ -62,7 +62,7 @@ struct cryp_driver_data {
/**
* struct cryp_ctx - Crypto context
* @config: Crypto mode.
- * @key[CRYP_MAX_KEY_SIZE]: Key.
+ * @key: Key array.
* @keylen: Length of key.
* @iv: Pointer to initialization vector.
* @indata: Pointer to indata.
@@ -73,6 +73,7 @@ struct cryp_driver_data {
* @updated: Updated flag.
* @dev_ctx: Device dependent context.
* @device: Pointer to the device.
+ * @session_id: Atomic session ID.
*/
struct cryp_ctx {
struct cryp_config config;
@@ -608,12 +609,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
chan = ctx->device->dma.chan_mem2cryp;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
- ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+ ctx->device->dma.nents_src, DMA_TO_DEVICE);
chan = ctx->device->dma.chan_cryp2mem;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
- ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+ ctx->device->dma.nents_dst, DMA_FROM_DEVICE);
}
static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
@@ -1290,7 +1291,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(device_data->base)) {
- dev_err(dev, "[%s]: ioremap failed!", __func__);
ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
index 7ebde69e8c76..6d2f07bec98a 100644
--- a/drivers/crypto/ux500/cryp/cryp_irq.c
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
index 1984f30100ff..da90029ea141 100644
--- a/drivers/crypto/ux500/cryp/cryp_irq.h
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -19,7 +19,7 @@ enum cryp_irq_src_id {
CRYP_IRQ_SRC_ALL = 0x3
};
-/**
+/*
 * M0 Functions
*/
void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
index 879ed68a12d7..4981a3f461e5 100644
--- a/drivers/crypto/ux500/cryp/cryp_irqp.h
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -13,7 +13,7 @@
#include "cryp_irq.h"
-/**
+/*
*
* CRYP Registers - Offset mapping
* +-----------------+
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
index 0df84eaa8531..60b47fe4de35 100644
--- a/drivers/crypto/ux500/cryp/cryp_p.h
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -17,7 +17,7 @@
#include "cryp.h"
#include "cryp_irqp.h"
-/**
+/*
* Generic Macros
*/
#define CRYP_SET_BITS(reg_name, mask) \
@@ -34,7 +34,7 @@
writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
(((u32)val << shift) & (mask))), reg)
-/**
+/*
* CRYP specific Macros
*/
#define CRYP_PERIPHERAL_ID0 0xE3
@@ -48,7 +48,7 @@
#define CRYP_PCELL_ID2 0x05
#define CRYP_PCELL_ID3 0xB1
-/**
+/*
* CRYP register default values
*/
#define MAX_DEVICE_SUPPORT 2
@@ -62,7 +62,7 @@
#define CRYP_KEY_DEFAULT 0x0
#define CRYP_INIT_VECT_DEFAULT 0x0
-/**
+/*
* CRYP Control register specific mask
*/
#define CRYP_CR_SECURE_MASK BIT(0)
@@ -81,7 +81,6 @@
CRYP_CR_PRLG_MASK |\
CRYP_CR_ALGODIR_MASK |\
CRYP_CR_ALGOMODE_MASK |\
- CRYP_CR_DATATYPE_MASK |\
CRYP_CR_KEYSIZE_MASK |\
CRYP_CR_KEYRDEN_MASK |\
CRYP_CR_DATATYPE_MASK)
@@ -91,7 +90,7 @@
#define CRYP_SR_IFEM_MASK BIT(0)
#define CRYP_SR_BUSY_MASK BIT(4)
-/**
+/*
* Bit position used while setting bits in register
*/
#define CRYP_CR_PRLG_POS 1
@@ -107,7 +106,7 @@
#define CRYP_SR_BUSY_POS 4
-/**
+/*
* CRYP PCRs------PC_NAND control register
* BIT_MASK
*/
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index da284b0ea1b2..ecb7412e84e3 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -190,7 +190,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
chan = ctx->device->dma.chan_mem2hash;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
- ctx->device->dma.sg_len, DMA_TO_DEVICE);
+ ctx->device->dma.nents, DMA_TO_DEVICE);
}
static int hash_dma_write(struct hash_ctx *ctx,
@@ -356,7 +356,7 @@ out:
/**
* hash_get_device_data - Checks for an available hash device and return it.
- * @hash_ctx: Structure for the hash context.
+ * @ctx: Structure for the hash context.
* @device_data: Structure for the hash device.
*
 * This function checks for an available hash device and returns it to
@@ -542,7 +542,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
}
/**
- * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*
* Initialize structures.
@@ -585,6 +585,7 @@ static int ux500_hash_init(struct ahash_request *req)
* @device_data: Structure for the hash device.
* @message: Block (512 bits) of message to be written to
* the HASH hardware.
+ * @length: Message length
*
*/
static void hash_processblock(struct hash_device_data *device_data,
@@ -1295,7 +1296,7 @@ void hash_get_digest(struct hash_device_data *device_data,
}
/**
- * hash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_update(struct ahash_request *req)
@@ -1315,7 +1316,7 @@ static int ahash_update(struct ahash_request *req)
}
/**
- * hash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_final(struct ahash_request *req)
@@ -1615,9 +1616,6 @@ static struct hash_algo_template hash_algs[] = {
}
};
-/**
- * hash_algs_register_all -
- */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
int ret;
@@ -1640,9 +1638,6 @@ unreg:
return ret;
}
-/**
- * hash_algs_unregister_all -
- */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
int i;
@@ -1681,7 +1676,6 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(device_data->base)) {
- dev_err(dev, "%s: ioremap() failed!\n", __func__);
ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d05c02baebcf..ec06189fbf99 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index d88084447f1c..ed0debc7acb5 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CBC routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 79ba062ee1c1..9a3da8cd62f3 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CTR routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 9fee1b1532a4..dabbccb41550 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES XTS routines supporting VMX In-core instructions on Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 14807ac2e3b9..5bc5710a6de0 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* GHASH routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015, 2019 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index a40d08e75fc0..7eb713cc87c8 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 244cb7d89678..2acc6173da36 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -4,6 +4,7 @@
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/idr.h>
@@ -96,21 +97,18 @@ struct mbox_cmd {
* @dev: driver core device object
* @cdev: char dev core object for ioctl operations
* @cxlm: pointer to the parent device driver data
- * @ops_active: active user of @cxlm in ops handlers
- * @ops_dead: completion when all @cxlm ops users have exited
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
struct device dev;
struct cdev cdev;
struct cxl_mem *cxlm;
- struct percpu_ref ops_active;
- struct completion ops_dead;
int id;
};
static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
+static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;
@@ -169,7 +167,7 @@ struct cxl_mem_command {
* table will be validated against the user's input. For example, if size_in is
* 0, and the user passed in 1, it is an error.
*/
-static struct cxl_mem_command mem_commands[] = {
+static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
CXL_CMD(RAW, ~0, ~0, 0),
@@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cxl_memdev *cxlmd;
- struct inode *inode;
- int rc = -ENOTTY;
+ struct cxl_memdev *cxlmd = file->private_data;
+ int rc = -ENXIO;
- inode = file_inode(file);
- cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+ down_read(&cxl_memdev_rwsem);
+ if (cxlmd->cxlm)
+ rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+ up_read(&cxl_memdev_rwsem);
- if (!percpu_ref_tryget_live(&cxlmd->ops_active))
- return -ENXIO;
+ return rc;
+}
- rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
- percpu_ref_put(&cxlmd->ops_active);
+ get_device(&cxlmd->dev);
+ file->private_data = cxlmd;
- return rc;
+ return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+ put_device(&cxlmd->dev);
+
+ return 0;
}
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cxl_memdev_ioctl,
+ .open = cxl_memdev_open,
+ .release = cxl_memdev_release_file,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
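
A userspace model of the new synchronization scheme, using a pthread rwlock in place of cxl_memdev_rwsem (the names are stand-ins, not the driver's):

#include <pthread.h>
#include <errno.h>

/*
 * Teardown takes the lock for writing and clears the backing pointer;
 * ioctls take it for reading and bail out with -ENXIO once the pointer
 * is gone.
 */
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static void *backing;		/* stands in for cxlmd->cxlm */

static int do_ioctl(void)
{
	int rc = -ENXIO;

	pthread_rwlock_rdlock(&lock);
	if (backing)
		rc = 0;		/* would dispatch the real command here */
	pthread_rwlock_unlock(&lock);
	return rc;
}

static void shutdown_dev(void)
{
	pthread_rwlock_wrlock(&lock);
	backing = (void *)0;	/* later ioctls observe the dead device */
	pthread_rwlock_unlock(&lock);
}
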
@@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
return NULL;
}
- offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
+ offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
/* Basic sanity check that BAR is big enough */
@@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- percpu_ref_exit(&cxlmd->ops_active);
ida_free(&cxl_memdev_ida, cxlmd->id);
kfree(cxlmd);
}
@@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
- return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+ return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
@@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
- return sprintf(buf, "%zu\n", cxlm->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->ram_range);
- return sprintf(buf, "%#llx\n", len);
+ return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_ram_size =
@@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->pmem_range);
- return sprintf(buf, "%#llx\n", len);
+ return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_pmem_size =
@@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};
-static void cxlmdev_unregister(void *_cxlmd)
+static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
-
- percpu_ref_kill(&cxlmd->ops_active);
- cdev_device_del(&cxlmd->cdev, dev);
- wait_for_completion(&cxlmd->ops_dead);
+ down_write(&cxl_memdev_rwsem);
cxlmd->cxlm = NULL;
- put_device(dev);
+ up_write(&cxl_memdev_rwsem);
}
-static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+static void cxl_memdev_unregister(void *_cxlmd)
{
- struct cxl_memdev *cxlmd =
- container_of(ref, typeof(*cxlmd), ops_active);
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
- complete(&cxlmd->ops_dead);
+ cdev_device_del(&cxlmd->cdev, dev);
+ cxl_memdev_shutdown(cxlmd);
+ put_device(dev);
}
-static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
{
struct pci_dev *pdev = cxlm->pdev;
struct cxl_memdev *cxlmd;
@@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
if (!cxlmd)
- return -ENOMEM;
- init_completion(&cxlmd->ops_dead);
-
- /*
- * @cxlm is deallocated when the driver unbinds so operations
- * that are using it need to hold a live reference.
- */
- cxlmd->cxlm = cxlm;
- rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
- GFP_KERNEL);
- if (rc)
- goto err_ref;
+ return ERR_PTR(-ENOMEM);
rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
if (rc < 0)
- goto err_id;
+ goto err;
cxlmd->id = rc;
dev = &cxlmd->dev;
@@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
- dev_set_name(dev, "mem%d", cxlmd->id);
+ device_set_pm_not_required(dev);
cdev = &cxlmd->cdev;
cdev_init(cdev, &cxl_memdev_fops);
+ return cxlmd;
+
+err:
+ kfree(cxlmd);
+ return ERR_PTR(rc);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = cxl_memdev_alloc(cxlm);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+ dev = &cxlmd->dev;
+ rc = dev_set_name(dev, "mem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ /*
+	 * Activate ioctl operations; no cxl_memdev_rwsem manipulation is
+	 * needed, as this is ordered with cdev_add() publishing the device.
+ */
+ cxlmd->cxlm = cxlm;
+ cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
- goto err_add;
+ goto err;
- return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+ return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
+ cxlmd);
-err_add:
- ida_free(&cxl_memdev_ida, cxlmd->id);
-err_id:
+err:
/*
- * Theoretically userspace could have already entered the fops,
- * so flush ops_active.
+	 * The cdev was briefly live; shut down any ioctl operations that
+	 * saw that state.
*/
- percpu_ref_kill(&cxlmd->ops_active);
- wait_for_completion(&cxlmd->ops_dead);
- percpu_ref_exit(&cxlmd->ops_active);
-err_ref:
- kfree(cxlmd);
-
+ cxl_memdev_shutdown(cxlmd);
+ put_device(dev);
return rc;
}
@@ -1396,6 +1420,7 @@ out:
*/
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
+ /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify {
char fw_revision[0x10];
__le64 total_capacity;
@@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
* For now, only the capacity is exported in sysfs
*/
cxlm->ram_range.start = 0;
- cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+ cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
cxlm->pmem_range.start = 0;
- cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+ cxlm->pmem_range.end =
+ le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
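
The Identify payload reports capacities in multiples of 256 MB, hence the SZ_256M scaling above. A toy calculation:

#include <stdio.h>
#include <stdint.h>

#define SZ_256M (256ULL * 1024 * 1024)

int main(void)
{
	/* a raw field value of 4 means 4 * 256 MB = 1 GB */
	uint64_t volatile_capacity = 4;
	uint64_t end = volatile_capacity * SZ_256M - 1;

	printf("ram range: [0, %#llx]\n", (unsigned long long)end);
	return 0;
}
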
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 452e85ae87a8..5aee26e1bbd6 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf,
list_add(&dax_id->list, &dax_drv->ids);
} else
rc = -ENOMEM;
- } else
- /* nothing to remove */;
+ }
} else if (action == ID_REMOVE) {
list_del(&dax_id->list);
kfree(dax_id);
- } else
- /* dax_id already added */;
+ }
mutex_unlock(&dax_bus_lock);
if (rc < 0)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index fe6a460c4373..af3ee288bc11 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
kfree(chan->dev);
err_free_local:
free_percpu(chan->local);
+ chan->local = NULL;
return rc;
}
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e5162690de8f..db25f9b7778c 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -10,6 +10,7 @@ config DW_DMAC_CORE
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller. This
@@ -18,6 +19,7 @@ config DW_DMAC
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 84a6ea60ecf0..31c819544a22 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}
+void idxd_wq_reset(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand;
+
+ if (wq->state != IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+ return;
+ }
+
+ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+ idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+ wq->state = IDXD_WQ_DISABLED;
+}
+
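
The operand packs a one-hot WQ bit within a 16-wide bank in the low half and the bank index in the high half. A standalone check of the encoding:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Same packing as the operand computed in idxd_wq_reset() above. */
static unsigned int wq_operand(int id)
{
	return BIT(id % 16) | ((id / 16) << 16);
}

int main(void)
{
	printf("%#x\n", wq_operand(3));		/* 0x8: bit 3, bank 0 */
	printf("%#x\n", wq_operand(17));	/* 0x10002: bit 1, bank 1 */
	return 0;
}
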
int idxd_wq_map_portal(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
- int i, wq_offset;
lockdep_assert_held(&idxd->dev_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
-
- for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
- wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
- iowrite32(0, idxd->reg_base + wq_offset);
- dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
- wq->id, i, wq_offset,
- ioread32(idxd->reg_base + wq_offset));
- }
}
/* Device control bits */
@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
}
/* Device configuration bits */
+void idxd_msix_perm_setup(struct idxd_device *idxd)
+{
+ union msix_perm mperm;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(idxd->pdev);
+ if (msixcnt < 0)
+ return;
+
+ mperm.bits = 0;
+ mperm.pasid = idxd->pasid;
+ mperm.pasid_en = device_pasid_enabled(idxd);
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
+void idxd_msix_perm_clear(struct idxd_device *idxd)
+{
+ union msix_perm mperm;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(idxd->pdev);
+ if (msixcnt < 0)
+ return;
+
+ mperm.bits = 0;
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
static void idxd_group_config_write(struct idxd_group *group)
{
struct idxd_device *idxd = group->idxd;
@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (!wq->group)
return 0;
- memset(wq->wqcfg, 0, idxd->wqcfg_size);
+ /*
+	 * Instead of memsetting the entire shadow copy of the WQCFG, copy it back
+	 * from the hardware after WQ reset. This restores the sticky values that
+	 * are present on some devices.
+ */
+ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+ wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+ wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+ }
/* byte 0-3 */
wq->wqcfg->wq_size = wq->size;
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 81a0e65fd316..76014c14f473 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
/* device interrupt control */
+void idxd_msix_perm_setup(struct idxd_device *idxd);
+void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
+void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 085a0c3b62c6..6584b0ec07d5 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -65,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
- union msix_perm mperm;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
@@ -144,14 +143,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
idxd_unmask_error_interrupts(idxd);
-
- /* Setup MSIX permission table */
- mperm.bits = 0;
- mperm.pasid = idxd->pasid;
- mperm.pasid_en = device_pasid_enabled(idxd);
- for (i = 1; i < msixcnt; i++)
- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-
+ idxd_msix_perm_setup(idxd);
return 0;
err_no_irq:
@@ -510,6 +502,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
idxd_flush_work_list(irq_entry);
}
+ idxd_msix_perm_clear(idxd);
destroy_workqueue(idxd->wq);
}
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index a60ca11a5784..f1463fc58112 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
- iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
+ idxd->reg_base + IDXD_SWERR_OFFSET);
if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;
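
A sketch of the write-1-to-clear pattern applied above, with an illustrative ACK mask: snapshot the register first, then acknowledge only the bits that were actually seen, so errors raised after the read are not silently cleared:

#include <stdint.h>

static uint64_t hw_swerr;		/* stands in for the MMIO register */
#define SWERR_ACK_MASK 0x3ULL		/* illustrative W1C bits */

static uint64_t swerr_read_and_ack(void)
{
	uint64_t seen = hw_swerr;		/* snapshot first */

	hw_swerr &= ~(seen & SWERR_ACK_MASK);	/* W1C: clear only seen bits */
	return seen;
}
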
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 4dbb03c545e4..18bf4d148989 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
- int rc;
mutex_lock(&wq->wq_lock);
dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
- rc = idxd_wq_disable(wq);
+ idxd_wq_reset(wq);
idxd_wq_free_resources(wq);
wq->client_count = 0;
mutex_unlock(&wq->wq_lock);
- if (rc < 0)
- dev_warn(dev, "Failed to disable %s: %d\n",
- dev_name(&wq->conf_dev), rc);
- else
- dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
static int idxd_config_bus_remove(struct device *dev)
@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
- if (wq->state != IDXD_WQ_DISABLED)
+ if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
+ int i, rc = 0;
+
+ for (i = 0; i < 4; i++)
+ rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
- return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+ rc--;
+ rc += sysfs_emit_at(buf, rc, "\n");
+ return rc;
}
static DEVICE_ATTR_RO(op_cap);
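
A userspace analogue of the sysfs_emit_at() loop above, using snprintf: emit the four words space-separated, then overwrite the trailing space with a newline, which is what the rc-- achieves:

#include <stdio.h>

int main(void)
{
	unsigned long long bits[4] = { 0x1, 0x0, 0xff, 0x0 };
	char buf[128];
	int rc = 0;
	int i;

	for (i = 0; i < 4; i++)
		rc += snprintf(buf + rc, sizeof(buf) - rc, "%#llx ", bits[i]);
	rc--;					/* drop the trailing space */
	rc += snprintf(buf + rc, sizeof(buf) - rc, "\n");
	printf("%s", buf);
	return 0;
}
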
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
index f387c5bbc170..166934544161 100644
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
KBUILD_MODNAME, plxdev);
- if (rc) {
- kfree(plxdev);
- return rc;
- }
+ if (rc)
+ goto free_plx;
spin_lock_init(&plxdev->ring_lock);
tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = dma_async_device_register(dma);
if (rc) {
pci_err(pdev, "Failed to register dma device: %d\n", rc);
- free_irq(pci_irq_vector(pdev, 0), plxdev);
- kfree(plxdev);
- return rc;
+ goto put_device;
}
pci_set_drvdata(pdev, plxdev);
return 0;
+
+put_device:
+ put_device(&pdev->dev);
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+free_plx:
+ kfree(plxdev);
+
+ return rc;
}
static int plx_dma_probe(struct pci_dev *pdev,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 71827d9b0aa1..b7260749e8ee 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end;
}
if (!tdc->busy) {
- err = pm_runtime_get_sync(tdc->tdma->dev);
+ err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
goto end;
@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
int err;
- err = pm_runtime_get_sync(tdc->tdma->dev);
+ err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
return;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 55df63dead8d..70b29bd079c9 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
struct xilinx_dpdma_tx_desc *desc;
struct virt_dma_desc *vdesc;
u32 reg, channels;
+ bool first_frame;
lockdep_assert_held(&chan->lock);
@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
chan->running = true;
}
- if (chan->video_group)
- channels = xilinx_dpdma_chan_video_group_ready(chan);
- else
- channels = BIT(chan->id);
-
- if (!channels)
- return;
-
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return;
@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
upper_32_bits(sw_desc->dma_addr)));
- if (chan->first_frame)
+ first_frame = chan->first_frame;
+ chan->first_frame = false;
+
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ /*
+ * Trigger the transfer only when all channels in the group are
+ * ready.
+ */
+ if (!channels)
+ return;
+ } else {
+ channels = BIT(chan->id);
+ }
+
+ if (first_frame)
reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
else
reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
- chan->first_frame = false;
-
dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}
@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
*/
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
- struct xilinx_dpdma_tx_desc *active = chan->desc.active;
+ struct xilinx_dpdma_tx_desc *active;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
xilinx_dpdma_debugfs_desc_done_irq(chan);
+ active = chan->desc.active;
if (active)
vchan_cyclic_callback(&active->vdesc);
else
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 337b0eea4e62..e1408075ef7d 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -44,6 +44,8 @@ static struct max8997_muic_irq muic_irqs[] = {
{ MAX8997_MUICIRQ_ChgDetRun, "muic-CHGDETRUN" },
{ MAX8997_MUICIRQ_ChgTyp, "muic-CHGTYP" },
{ MAX8997_MUICIRQ_OVP, "muic-OVP" },
+ { MAX8997_PMICIRQ_CHGINS, "pmic-CHGINS" },
+ { MAX8997_PMICIRQ_CHGRM, "pmic-CHGRM" },
};
/* Define supported cable type */
@@ -538,6 +540,8 @@ static void max8997_muic_irq_work(struct work_struct *work)
case MAX8997_MUICIRQ_DCDTmr:
case MAX8997_MUICIRQ_ChgDetRun:
case MAX8997_MUICIRQ_ChgTyp:
+ case MAX8997_PMICIRQ_CHGINS:
+ case MAX8997_PMICIRQ_CHGRM:
/* Handle charger cable */
ret = max8997_muic_chg_handler(info);
break;
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 74d57d951b68..eb02cb962b5e 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
* extcon-qcom-spmi-misc.c - Qualcomm USB extcon driver to support USB ID
- * detection based on extcon-usb-gpio.c.
+ * and VBUS detection based on extcon-usb-gpio.c.
*
* Copyright (C) 2016 Linaro, Ltd.
* Stephen Boyd <stephen.boyd@linaro.org>
@@ -22,30 +22,56 @@
struct qcom_usb_extcon_info {
struct extcon_dev *edev;
- int irq;
+ int id_irq;
+ int vbus_irq;
struct delayed_work wq_detcable;
unsigned long debounce_jiffies;
};
static const unsigned int qcom_usb_extcon_cable[] = {
+ EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static void qcom_usb_extcon_detect_cable(struct work_struct *work)
{
- bool id;
+ bool state = false;
int ret;
+ union extcon_property_value val;
struct qcom_usb_extcon_info *info = container_of(to_delayed_work(work),
struct qcom_usb_extcon_info,
wq_detcable);
- /* check ID and update cable state */
- ret = irq_get_irqchip_state(info->irq, IRQCHIP_STATE_LINE_LEVEL, &id);
- if (ret)
- return;
+ if (info->id_irq > 0) {
+ /* check ID and update cable state */
+ ret = irq_get_irqchip_state(info->id_irq,
+ IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (ret)
+ return;
+
+ if (!state) {
+ val.intval = true;
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS, val);
+ }
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !state);
+ }
- extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
+ if (info->vbus_irq > 0) {
+ /* check VBUS and update cable state */
+ ret = irq_get_irqchip_state(info->vbus_irq,
+ IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (ret)
+ return;
+
+ if (state) {
+ val.intval = true;
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS, val);
+ }
+ extcon_set_state_sync(info->edev, EXTCON_USB, state);
+ }
}
static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
@@ -80,6 +106,16 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
return ret;
}
+ ret = extcon_set_property_capability(info->edev,
+ EXTCON_USB, EXTCON_PROP_USB_SS);
+ ret |= extcon_set_property_capability(info->edev,
+ EXTCON_USB_HOST, EXTCON_PROP_USB_SS);
+ if (ret) {
+ dev_err(dev, "failed to register extcon props rc=%d\n",
+ ret);
+ return ret;
+ }
+
info->debounce_jiffies = msecs_to_jiffies(USB_ID_DEBOUNCE_MS);
ret = devm_delayed_work_autocancel(dev, &info->wq_detcable,
@@ -87,18 +123,35 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
if (ret)
return ret;
- info->irq = platform_get_irq_byname(pdev, "usb_id");
- if (info->irq < 0)
- return info->irq;
+ info->id_irq = platform_get_irq_byname(pdev, "usb_id");
+ if (info->id_irq > 0) {
+ ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+ qcom_usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for ID IRQ\n");
+ return ret;
+ }
+ }
- ret = devm_request_threaded_irq(dev, info->irq, NULL,
+ info->vbus_irq = platform_get_irq_byname(pdev, "usb_vbus");
+ if (info->vbus_irq > 0) {
+ ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
qcom_usb_irq_handler,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
pdev->name, info);
- if (ret < 0) {
- dev_err(dev, "failed to request handler for ID IRQ\n");
- return ret;
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for VBUS IRQ\n");
+ return ret;
+ }
+ }
+
+ if (info->id_irq < 0 && info->vbus_irq < 0) {
+ dev_err(dev, "ID and VBUS IRQ not found\n");
+ return -EINVAL;
}
platform_set_drvdata(pdev, info);
@@ -116,8 +169,12 @@ static int qcom_usb_extcon_suspend(struct device *dev)
struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- if (device_may_wakeup(dev))
- ret = enable_irq_wake(info->irq);
+ if (device_may_wakeup(dev)) {
+ if (info->id_irq > 0)
+ ret = enable_irq_wake(info->id_irq);
+ if (info->vbus_irq > 0)
+ ret = enable_irq_wake(info->vbus_irq);
+ }
return ret;
}
@@ -127,8 +184,12 @@ static int qcom_usb_extcon_resume(struct device *dev)
struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- if (device_may_wakeup(dev))
- ret = disable_irq_wake(info->irq);
+ if (device_may_wakeup(dev)) {
+ if (info->id_irq > 0)
+ ret = disable_irq_wake(info->id_irq);
+ if (info->vbus_irq > 0)
+ ret = disable_irq_wake(info->vbus_irq);
+ }
return ret;
}
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 106d4da647bd..db41d1c58efd 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -144,6 +144,7 @@ enum sm5502_muic_acc_type {
SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* | 001|11110| */
SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END = 0x5e, /* | 010|11110| */
/* |Dev Type1|--ADC| */
+ SM5502_MUIC_ADC_GROUND_USB_OTG = 0x80, /* | 100|00000| */
SM5502_MUIC_ADC_OPEN_USB = 0x5f, /* | 010|11111| */
SM5502_MUIC_ADC_OPEN_TA = 0xdf, /* | 110|11111| */
SM5502_MUIC_ADC_OPEN_USB_OTG = 0xff, /* | 111|11111| */
@@ -291,11 +292,27 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
* connected to the MUIC device.
*/
cable_type = adc & SM5502_REG_ADC_MASK;
- if (cable_type == SM5502_MUIC_ADC_GROUND)
- return SM5502_MUIC_ADC_GROUND;
switch (cable_type) {
case SM5502_MUIC_ADC_GROUND:
+ ret = regmap_read(info->regmap, SM5502_REG_DEV_TYPE1,
+ &dev_type1);
+ if (ret) {
+ dev_err(info->dev, "failed to read DEV_TYPE1 reg\n");
+ return ret;
+ }
+
+ switch (dev_type1) {
+ case SM5502_REG_DEV_TYPE1_USB_OTG_MASK:
+ cable_type = SM5502_MUIC_ADC_GROUND_USB_OTG;
+ break;
+ default:
+ dev_dbg(info->dev,
+ "cannot identify the cable type: adc(0x%x), dev_type1(0x%x)\n",
+ adc, dev_type1);
+ return -EINVAL;
+ }
+ break;
case SM5502_MUIC_ADC_SEND_END_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S1_BUTTON:
case SM5502_MUIC_ADC_REMOTE_S2_BUTTON:
@@ -396,6 +413,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
con_sw = DM_DP_SWITCH_OPEN;
vbus_sw = VBUSIN_SWITCH_VBUSOUT;
break;
+ case SM5502_MUIC_ADC_GROUND_USB_OTG:
case SM5502_MUIC_ADC_OPEN_USB_OTG:
id = EXTCON_USB_HOST;
con_sw = DM_DP_SWITCH_USB;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9811c40956e5..17c9d825188b 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;
- tasklet_disable(&ctx->tasklet);
+ tasklet_disable_in_atomic(&ctx->tasklet);
if (packet->ack != 0)
goto out;
@@ -3465,7 +3465,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;
- tasklet_disable(&ctx->context.tasklet);
+ tasklet_disable_in_atomic(&ctx->context.tasklet);
if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
context_tasklet((unsigned long)&ctx->context);
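Context for the two substitutions above (my reading of the tasklet rework that introduced the new helper): tasklet_disable() may now sleep while waiting for a running tasklet to finish, which is not allowed in the atomic contexts these OHCI paths can be reached from, whereas tasklet_disable_in_atomic() keeps the old busy-wait behaviour. A sketch of choosing the variant by context, assuming <linux/interrupt.h>:

static void quiesce_may_sleep(struct tasklet_struct *t)
{
        tasklet_disable(t);             /* may sleep while t runs */
        /* ... touch state the tasklet also touches ... */
        tasklet_enable(t);
}

static void quiesce_atomic(struct tasklet_struct *t)
{
        tasklet_disable_in_atomic(t);   /* spin-waits, never sleeps */
        /* ... */
        tasklet_enable(t);
}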
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 3f14dffb9669..5dd19dbd67a3 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
config QCOM_SCM
bool
depends on ARM || ARM64
+ depends on HAVE_ARM_SMCCC
select RESET_CONTROLLER
config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c23466e05e60..d0537573501e 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
-Wno-pointer-sign \
$(call cc-disable-warning, address-of-packed-member) \
$(call cc-disable-warning, gnu) \
- -fno-asynchronous-unwind-tables
+ -fno-asynchronous-unwind-tables \
+ $(CLANG_FLAGS)
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 3d77f26c1e8c..bb6e77ee3898 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -136,12 +136,16 @@ MODULE_PARM_DESC(spincount,
"The number of loop iterations to use when using the spin handshake.");
/*
- * Platforms might not support S0ix logging in their GSMI handlers. In order to
- * avoid any side-effects of generating an SMI for S0ix logging, use the S0ix
- * related GSMI commands only for those platforms that explicitly enable this
- * option.
+ * Some older platforms with Apollo Lake chipsets do not support S0ix logging
+ * in their GSMI handlers, and behaved poorly when resuming via power button
+ * press if the logging was attempted. Updated firmware with proper behavior
+ * has long since shipped, removing the need for this opt-in parameter. It
+ * now exists as an opt-out parameter for folks defiantly running old
+ * firmware, or for unforeseen circumstances. After the change from opt-in to
+ * opt-out has baked sufficiently, this parameter should probably be removed
+ * entirely.
*/
-static bool s0ix_logging_enable;
+static bool s0ix_logging_enable = true;
module_param(s0ix_logging_enable, bool, 0600);
static struct gsmi_buf *gsmi_buf_alloc(void)
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 5ff9438b7b46..d591dd9b7c60 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -118,10 +118,17 @@ config XILINX_PR_DECOUPLER
depends on FPGA_BRIDGE
depends on HAS_IOMEM
help
- Say Y to enable drivers for Xilinx LogiCORE PR Decoupler.
+ Say Y to enable drivers for Xilinx LogiCORE PR Decoupler
+ or Xilinx Dynamic Function eXchange AXI Shutdown Manager.
The PR Decoupler exists in the FPGA fabric to isolate one
region of the FPGA from the busses while that region is
being reprogrammed during partial reconfig.
+ The Dynamic Function eXchange AXI shutdown manager prevents
+ AXI traffic from passing through the bridge. The controller
+ safely handles AXI4MM and AXI4-Lite interfaces on a
+ Reconfigurable Partition when it is undergoing dynamic
+ reconfiguration, preventing the system deadlock that can
+ occur if AXI transactions are interrupted by DFX.
config FPGA_REGION
tristate "FPGA Region"
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index c4691187cca9..ab7be6217368 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -52,7 +52,7 @@ static int afu_port_err_clear(struct device *dev, u64 err)
struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
void __iomem *base_err, *base_hdr;
- int ret = -EBUSY;
+ int enable_ret = 0, ret = -EBUSY;
u64 v;
base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
@@ -96,18 +96,20 @@ static int afu_port_err_clear(struct device *dev, u64 err)
v = readq(base_err + PORT_FIRST_ERROR);
writeq(v, base_err + PORT_FIRST_ERROR);
} else {
+ dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
+ __func__, v, err);
ret = -EINVAL;
}
/* Clear mask */
__afu_port_err_mask(dev, false);
- /* Enable the Port by clear the reset */
- __afu_port_enable(pdev);
+ /* Enable the Port by clearing the reset */
+ enable_ret = __afu_port_enable(pdev);
done:
mutex_unlock(&pdata->lock);
- return ret;
+ return enable_ret ? enable_ret : ret;
}
static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 753cda4b2568..7f621e96d3b8 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -21,6 +21,9 @@
#include "dfl-afu.h"
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
/**
* __afu_port_enable - enable a port by clearing reset
* @pdev: port platform device.
@@ -32,7 +35,7 @@
*
* The caller needs to hold the lock for protection.
*/
-void __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
@@ -41,7 +44,7 @@ void __afu_port_enable(struct platform_device *pdev)
WARN_ON(!pdata->disable_count);
if (--pdata->disable_count != 0)
- return;
+ return 0;
base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
@@ -49,10 +52,20 @@ void __afu_port_enable(struct platform_device *pdev)
v = readq(base + PORT_HDR_CTRL);
v &= ~PORT_CTRL_SFTRST;
writeq(v, base + PORT_HDR_CTRL);
-}
-#define RST_POLL_INVL 10 /* us */
-#define RST_POLL_TIMEOUT 1000 /* us */
+ /*
+ * HW clears the ack bit to indicate that the port is fully out
+ * of reset.
+ */
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+ !(v & PORT_CTRL_SFTRST_ACK),
+ RST_POLL_INVL, RST_POLL_TIMEOUT)) {
+ dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
/**
* __afu_port_disable - disable a port by holding reset
@@ -86,7 +99,7 @@ int __afu_port_disable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, fail to reset device\n");
+ dev_err(&pdev->dev, "timeout, failure to disable device\n");
return -ETIMEDOUT;
}
@@ -110,10 +123,10 @@ static int __port_reset(struct platform_device *pdev)
int ret;
ret = __afu_port_disable(pdev);
- if (!ret)
- __afu_port_enable(pdev);
+ if (ret)
+ return ret;
- return ret;
+ return __afu_port_enable(pdev);
}
static int port_reset(struct platform_device *pdev)
@@ -872,11 +885,11 @@ static int afu_dev_destroy(struct platform_device *pdev)
static int port_enable_set(struct platform_device *pdev, bool enable)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
+ int ret;
mutex_lock(&pdata->lock);
if (enable)
- __afu_port_enable(pdev);
+ ret = __afu_port_enable(pdev);
else
ret = __afu_port_disable(pdev);
mutex_unlock(&pdata->lock);
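For readers unfamiliar with the polling helper these hunks lean on: readq_poll_timeout(addr, val, cond, delay_us, timeout_us) from <linux/iopoll.h> re-reads *addr into val until cond becomes true or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise (val still holds the last value read). Roughly what the call in __afu_port_enable() expands to (a sketch; the real macro tracks time with ktime and may sleep in usleep_range(), and this fragment assumes the function context above with base already mapped):

u64 v;
unsigned long expire = jiffies + usecs_to_jiffies(RST_POLL_TIMEOUT);

for (;;) {
        v = readq(base + PORT_HDR_CTRL);
        if (!(v & PORT_CTRL_SFTRST_ACK))
                break;                  /* ack cleared: out of reset */
        if (time_after(jiffies, expire))
                return -ETIMEDOUT;
        usleep_range(RST_POLL_INVL, RST_POLL_INVL * 2);
}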
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 576e94960086..e5020e2b1f3d 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -80,7 +80,7 @@ struct dfl_afu {
};
/* hold pdata->lock when calling __afu_port_enable/disable */
-void __afu_port_enable(struct platform_device *pdev);
+int __afu_port_enable(struct platform_device *pdev);
int __afu_port_disable(struct platform_device *pdev);
void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 04e47e266f26..b44523ea8c91 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -69,14 +69,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
}
/* PCI Device ID */
-#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
-#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
-#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
-#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B
/* VF Device */
-#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
-#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
-#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C
static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
@@ -86,6 +88,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 7d69af230567..ea2bde6e5bc4 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, National Instruments Corp.
- * Copyright (c) 2017, Xilix Inc
+ * Copyright (c) 2017, Xilinx Inc
*
* FPGA Bridge Driver for the Xilinx LogiCORE Partial Reconfiguration
* Decoupler IP Core.
@@ -18,7 +18,12 @@
#define CTRL_CMD_COUPLE 0
#define CTRL_OFFSET 0
+struct xlnx_config_data {
+ const char *name;
+};
+
struct xlnx_pr_decoupler_data {
+ const struct xlnx_config_data *ipconfig;
void __iomem *io_base;
struct clk *clk;
};
@@ -76,15 +81,28 @@ static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
.enable_show = xlnx_pr_decoupler_enable_show,
};
+static const struct xlnx_config_data decoupler_config = {
+ .name = "Xilinx PR Decoupler",
+};
+
+static const struct xlnx_config_data shutdown_config = {
+ .name = "Xilinx DFX AXI Shutdown Manager",
+};
+
static const struct of_device_id xlnx_pr_decoupler_of_match[] = {
- { .compatible = "xlnx,pr-decoupler-1.00", },
- { .compatible = "xlnx,pr-decoupler", },
+ { .compatible = "xlnx,pr-decoupler-1.00", .data = &decoupler_config },
+ { .compatible = "xlnx,pr-decoupler", .data = &decoupler_config },
+ { .compatible = "xlnx,dfx-axi-shutdown-manager-1.00",
+ .data = &shutdown_config },
+ { .compatible = "xlnx,dfx-axi-shutdown-manager",
+ .data = &shutdown_config },
{},
};
MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match);
static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct xlnx_pr_decoupler_data *priv;
struct fpga_bridge *br;
int err;
@@ -94,17 +112,23 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_node(xlnx_pr_decoupler_of_match, np);
+ if (match && match->data)
+ priv->ipconfig = match->data;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
priv->clk = devm_clk_get(&pdev->dev, "aclk");
- if (IS_ERR(priv->clk)) {
- if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "input clock not found\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "input clock not found\n");
err = clk_prepare_enable(priv->clk);
if (err) {
@@ -114,7 +138,7 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- br = devm_fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
+ br = devm_fpga_bridge_create(&pdev->dev, priv->ipconfig->name,
&xlnx_pr_decoupler_br_ops, priv);
if (!br) {
err = -ENOMEM;
@@ -125,7 +149,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
err = fpga_bridge_register(br);
if (err) {
- dev_err(&pdev->dev, "unable to register Xilinx PR Decoupler");
+ dev_err(&pdev->dev, "unable to register %s",
+ priv->ipconfig->name);
goto err_clk;
}
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 27defa98092d..fee4d0abf6bf 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
/* PROGRAM_B is active low */
conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
- if (IS_ERR(conf->prog_b)) {
- dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
- PTR_ERR(conf->prog_b));
- return PTR_ERR(conf->prog_b);
- }
+ if (IS_ERR(conf->prog_b))
+ return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
+ "Failed to get PROGRAM_B gpio\n");
conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
- if (IS_ERR(conf->init_b)) {
- dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
- PTR_ERR(conf->init_b));
- return PTR_ERR(conf->init_b);
- }
+ if (IS_ERR(conf->init_b))
+ return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
+ "Failed to get INIT_B gpio\n");
conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
- if (IS_ERR(conf->done)) {
- dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
- PTR_ERR(conf->done));
- return PTR_ERR(conf->done);
- }
+ if (IS_ERR(conf->done))
+ return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
+ "Failed to get DONE gpio\n");
mgr = devm_fpga_mgr_create(&spi->dev,
"Xilinx Slave Serial FPGA Manager",
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 41952bb818ad..56152263ab38 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -29,6 +29,7 @@
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
struct gpio_regs {
+ u32 sysconfig;
u32 irqenable1;
u32 irqenable2;
u32 wake_en;
@@ -1069,6 +1070,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
const struct omap_gpio_reg_offs *regs = p->regs;
void __iomem *base = p->base;
+ p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
p->context.ctrl = readl_relaxed(base + regs->ctrl);
p->context.oe = readl_relaxed(base + regs->direction);
p->context.wake_en = readl_relaxed(base + regs->wkup_en);
@@ -1088,6 +1090,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
const struct omap_gpio_reg_offs *regs = bank->regs;
void __iomem *base = bank->base;
+ writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
writel_relaxed(bank->context.ctrl, base + regs->ctrl);
writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
@@ -1115,6 +1118,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
+ /* Save sysconfig; its runtime value can differ from the init value */
+ if (bank->loses_context)
+ bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
+
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
@@ -1279,6 +1286,7 @@ out_unlock:
static const struct omap_gpio_reg_offs omap2_gpio_regs = {
.revision = OMAP24XX_GPIO_REVISION,
+ .sysconfig = OMAP24XX_GPIO_SYSCONFIG,
.direction = OMAP24XX_GPIO_OE,
.datain = OMAP24XX_GPIO_DATAIN,
.dataout = OMAP24XX_GPIO_DATAOUT,
@@ -1302,6 +1310,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
static const struct omap_gpio_reg_offs omap4_gpio_regs = {
.revision = OMAP4_GPIO_REVISION,
+ .sysconfig = OMAP4_GPIO_SYSCONFIG,
.direction = OMAP4_GPIO_OE,
.datain = OMAP4_GPIO_DATAIN,
.dataout = OMAP4_GPIO_DATAOUT,
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 26c5466b8179..ae49bb23c6ed 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
long gpio;
struct gpio_desc *desc;
int status;
+ struct gpio_chip *gc;
+ int offset;
status = kstrtol(buf, 0, &gpio);
if (status < 0)
@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
+ gc = desc->gdev->chip;
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(gc, offset)) {
+ pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+ return -EINVAL;
+ }
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7d2c8b169827..326dae31b675 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3300,7 +3300,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_bo *root;
uint64_t value, flags;
struct amdgpu_vm *vm;
- long r;
+ int r;
spin_lock(&adev->vm_manager.pasid_lock);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
@@ -3349,6 +3349,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
value = 0;
}
+ r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+ if (r) {
+ pr_debug("failed %d to reserve fence slot\n", r);
+ goto error_unlock;
+ }
+
r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
addr, flags, value, NULL, NULL,
NULL);
@@ -3360,7 +3366,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%ld)\n", r);
+ DRM_ERROR("Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 45d1172b7bff..63691deb7df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3280,7 +3280,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 573cf17262da..d699a5cf6c11 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4072,13 +4072,6 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
return true;
/*
- * The arbitrary tiling support for multiplane formats has not been hooked
- * up.
- */
- if (info->num_planes > 1)
- return false;
-
- /*
* For D swizzle the canonical modifier depends on the bpp, so check
* it here.
*/
@@ -4096,6 +4089,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
/* Per radeonsi comments 16/64 bpp are more complicated. */
if (info->cpp[0] != 4)
return false;
+ /* We support multi-planar formats, but not when combined with
+ * additional DCC metadata planes. */
+ if (info->num_planes > 1)
+ return false;
}
return true;
@@ -4296,7 +4293,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
- AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
@@ -4308,7 +4305,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
- AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 651884390137..4f8337c7fd2e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
break;
case INTEL_BACKLIGHT_DISPLAY_DDI:
try_intel_interface = true;
- try_vesa_interface = true;
break;
default:
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index be6ac0dd846e..2ed309534e97 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -848,7 +848,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
- return;
+ /* Still continue with enabling the port and link training. */
+ lttpr_count = 0;
if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f94025ec603a..a9a8ba1d3aba 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static void intel_dsi_shutdown(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index fef1e857cefc..01c1d1b36acd 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -916,19 +916,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (!strncmp(cmd, "srm", 3) ||
!strncmp(cmd, "lrm", 3)) {
- if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
- offset != 0x21f0) {
+ if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
+ offset == 0x21f0 ||
+ (IS_BROADWELL(gvt->gt->i915) &&
+ offset == i915_mmio_reg_offset(INSTPM)))
+ return 0;
+ else {
gvt_vgpu_err("%s access to register (%x)\n",
cmd, offset);
return -EPERM;
- } else
- return 0;
+ }
}
if (!strncmp(cmd, "lrr-src", 7) ||
!strncmp(cmd, "lrr-dst", 7)) {
- gvt_vgpu_err("not allowed cmd %s\n", cmd);
- return -EPERM;
+ if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
+ return 0;
+ else {
+ gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset);
+ return -EPERM;
+ }
}
if (!strncmp(cmd, "pipe_ctrl", 9)) {
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index e622aee6e4be..440c35f1abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -105,7 +105,7 @@ static inline bool tasklet_is_locked(const struct tasklet_struct *t)
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
- tasklet_unlock_wait(t);
+ tasklet_unlock_spin_wait(t);
}
static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 97b57acc02e2..4b4d8d034782 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5471,12 +5471,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
- memset(wm, 0, sizeof(*wm));
-
/* Watermarks calculated in master */
if (plane_state->planar_slave)
return 0;
+ memset(wm, 0, sizeof(*wm));
+
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ba658fa9cf6c..183571c387b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -481,11 +481,15 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
+ /* Release the pin acquired in vmw_bo_init */
+ ttm_bo_unpin(bo);
+
return 0;
out_map_new:
ttm_bo_kunmap(&old_map);
out_wait:
+ ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
vmw_bo_unreference(&buf);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dd69b51c40e4..6fa24645fbbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -712,17 +712,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->last_read_seqno = (uint32_t) -100;
dev_priv->drm.dev_private = dev_priv;
- ret = vmw_setup_pci_resources(dev_priv, pci_id);
- if (ret)
- return ret;
- ret = vmw_detect_version(dev_priv);
- if (ret)
- goto out_no_pci_or_version;
-
mutex_init(&dev_priv->cmdbuf_mutex);
- mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
- mutex_init(&dev_priv->global_kms_state_mutex);
ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
@@ -730,6 +721,14 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
spin_lock_init(&dev_priv->cap_lock);
spin_lock_init(&dev_priv->cursor_lock);
+ ret = vmw_setup_pci_resources(dev_priv, pci_id);
+ if (ret)
+ return ret;
+ ret = vmw_detect_version(dev_priv);
+ if (ret)
+ goto out_no_pci_or_version;
+
+
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fa5bcd20cc5..eb76a6b9ebca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -529,7 +529,6 @@ struct vmw_private {
struct vmw_overlay *overlay_priv;
struct drm_property *hotplug_mode_update_property;
struct drm_property *implicit_placement_property;
- struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
struct drm_atomic_state *suspend_state;
@@ -592,7 +591,6 @@ struct vmw_private {
bool refuse_hibernation;
bool suspend_locked;
- struct mutex release_mutex;
atomic_t num_fifo_resources;
/*
@@ -1524,9 +1522,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
struct vmw_buffer_object *tmp_buf = *buf;
*buf = NULL;
- if (tmp_buf != NULL) {
+ if (tmp_buf != NULL)
ttm_bo_put(&tmp_buf->base);
- }
}
static inline struct vmw_buffer_object *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index a372980fe6a5..f2d625415458 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -94,6 +94,16 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
struct vmw_piter data_iter,
unsigned long num_data_pages);
+
+static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
+{
+ int ret = ttm_bo_reserve(bo, false, true, NULL);
+ BUG_ON(ret != 0);
+ ttm_bo_unpin(bo);
+ ttm_bo_unreserve(bo);
+}
+
+
/*
* vmw_setup_otable_base - Issue an object table base setup command to
* the device
@@ -277,6 +287,7 @@ out_no_setup:
&batch->otables[i]);
}
+ vmw_bo_unpin_unlocked(batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
return ret;
@@ -340,6 +351,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
vmw_bo_fence_single(bo, NULL);
+ ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
ttm_bo_put(batch->otable_bo);
@@ -528,6 +540,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
void vmw_mob_destroy(struct vmw_mob *mob)
{
if (mob->pt_bo) {
+ vmw_bo_unpin_unlocked(mob->pt_bo);
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
@@ -643,6 +656,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
if (pt_set_up) {
+ vmw_bo_unpin_unlocked(mob->pt_bo);
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 48ad154df3a7..15661c7f3633 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -72,11 +72,11 @@ struct es2_cport_in {
};
/**
- * es2_ap_dev - ES2 USB Bridge to AP structure
+ * struct es2_ap_dev - ES2 USB Bridge to AP structure
* @usb_dev: pointer to the USB device we are.
* @usb_intf: pointer to the USB interface we are bound to.
* @hd: pointer to our gb_host_device structure
-
+ *
* @cport_in: endpoint, urbs and buffer for cport in messages
* @cport_out_endpoint: endpoint for cport out messages
* @cport_out_urb: array of urbs for the CPort out messages
@@ -85,7 +85,7 @@ struct es2_cport_in {
* @cport_out_urb_cancelled: array of flags indicating whether the
* corresponding @cport_out_urb is being cancelled
* @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
- *
+ * @cdsi1_in_use: true if cport CDSI1 is in use
* @apb_log_task: task pointer for logging thread
* @apb_log_dentry: file system entry for the log file interface
* @apb_log_enable_dentry: file system entry for enabling logging
@@ -1171,7 +1171,7 @@ static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
char tmp_buf[3];
sprintf(tmp_buf, "%d\n", enable);
- return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2);
}
static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index dbac16641662..ddecc84fd6f0 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
@@ -22,9 +23,13 @@
#define ACEL_EN BIT(0)
#define GYRO_EN BIT(1)
-#define MAGNO_EN BIT(2)
+#define MAGNO_EN BIT(2)
#define ALS_EN BIT(19)
+static int sensor_mask_override = -1;
+module_param_named(sensor_mask, sensor_mask_override, int, 0444);
+MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
+ },
+ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
+ },
+ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+ },
+ { }
+};
+
int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
{
int activestatus, num_of_sensors = 0;
+ const struct dmi_system_id *dmi_id;
+ u32 activecontrolstatus;
+
+ if (sensor_mask_override == -1) {
+ dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
+ if (dmi_id)
+ sensor_mask_override = (long)dmi_id->driver_data;
+ }
+
+ if (sensor_mask_override >= 0) {
+ activestatus = sensor_mask_override;
+ } else {
+ activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
+ activestatus = activecontrolstatus >> 4;
+ }
- privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
- activestatus = privdata->activecontrolstatus >> 4;
if (ACEL_EN & activestatus)
sensor_id[num_of_sensors++] = accel_idx;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 8f8d19b2cfe5..489415f7c22c 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -61,7 +61,6 @@ struct amd_mp2_dev {
struct pci_dev *pdev;
struct amdtp_cl_data *cl_data;
void __iomem *mmio;
- u32 activecontrolstatus;
};
struct amd_mp2_sensor_info {
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 3feaece13ade..6b665931147d 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (input_register_device(data->input2)) {
input_free_device(input2);
+ ret = -ENOENT;
goto exit;
}
}
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 1dfe184ebf5a..2ab22b925941 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1222,6 +1222,9 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 21e15627a461..477baa30889c 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -161,6 +161,7 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
+ struct irq_chip irq;
u8 *in_out_buffer;
struct mutex lock;
@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip cp2112_gpio_irqchip = {
- .name = "cp2112-gpio",
- .irq_startup = cp2112_gpio_irq_startup,
- .irq_shutdown = cp2112_gpio_irq_shutdown,
- .irq_ack = cp2112_gpio_irq_ack,
- .irq_mask = cp2112_gpio_irq_mask,
- .irq_unmask = cp2112_gpio_irq_unmask,
- .irq_set_type = cp2112_gpio_irq_type,
-};
-
static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
int pin)
{
@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
+ dev->irq.name = "cp2112-gpio";
+ dev->irq.irq_startup = cp2112_gpio_irq_startup;
+ dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+ dev->irq.irq_ack = cp2112_gpio_irq_ack;
+ dev->irq.irq_mask = cp2112_gpio_irq_mask;
+ dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+ dev->irq.irq_set_type = cp2112_gpio_irq_type;
+ dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+
girq = &dev->gc.irq;
- girq->chip = &cp2112_gpio_irqchip;
+ girq->chip = &dev->irq;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
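The cp2112 change above follows a pattern worth noting (hedged, since the commit rationale is not shown here): gpiolib treats the irq_chip attached to a gpio_chip as per-instance data and complains when a single static struct irq_chip is shared across several gpiochips, so the chip is embedded in the device structure and filled in at probe time; IRQCHIP_MASK_ON_SUSPEND additionally keeps non-wakeup IRQs masked across suspend. The generic shape, with hypothetical names:

struct demo_device {                    /* hypothetical */
        struct gpio_chip gc;
        struct irq_chip irq;            /* one irq_chip per instance */
};

static void demo_init_irqchip(struct demo_device *dev)
{
        dev->irq.name = "demo-gpio";
        dev->irq.irq_mask = demo_irq_mask;      /* hypothetical callbacks */
        dev->irq.irq_unmask = demo_irq_unmask;
        dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
        dev->gc.irq.chip = &dev->irq;   /* not a shared static object */
}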
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index d9319622da44..e60c31dd05ff 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -574,6 +574,8 @@ static void hammer_remove(struct hid_device *hdev)
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e42aaae3138f..67fd8a2f5aba 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -194,6 +194,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
#define USB_VENDOR_ID_ATEN 0x0557
@@ -493,6 +494,7 @@
#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 44d715c12f6a..2d70dc4bea65 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
!wacom_wac->shared->is_touch_on) {
if (!wacom_wac->shared->touch_down)
return;
- prox = 0;
+ prox = false;
}
wacom_wac->hid_data.num_received++;
@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_PEN))
return -ENODEV;
@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
return 0;
}
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
return -ENODEV;
@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
/* setup has already been done */
return 0;
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
if (features->touch_max == 1) {
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 79e5356a737a..66c794d92391 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -23,6 +23,7 @@ config HYPERV_UTILS
config HYPERV_BALLOON
tristate "Microsoft Hyper-V Balloon driver"
depends on HYPERV
+ select PAGE_REPORTING
help
Select this option to enable Hyper-V Balloon driver.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 0bd202de7960..c2635e913a92 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -209,31 +209,96 @@ int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ msg.child_relid = channel->offermsg.child_relid;
+ msg.target_vp = target_vp;
+
+ ret = vmbus_post_msg(&msg, sizeof(msg), true);
+ trace_vmbus_send_modifychannel(&msg, ret);
+
+ return ret;
+}
+
+static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+ int ret;
+
+ info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_modifychannel),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ init_completion(&info->waitevent);
+ info->waiting_channel = channel;
+
+ msg = (struct vmbus_channel_modifychannel *)info->msg;
+ msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ msg->child_relid = channel->offermsg.child_relid;
+ msg->target_vp = target_vp;
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg, sizeof(*msg), true);
+ trace_vmbus_send_modifychannel(msg, ret);
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ goto free_info;
+ }
+
+ /*
+ * Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
+ * the mutex and be unable to signal the completion.
+ *
+ * See the caller target_cpu_store() for information about the usage of the
+ * mutex.
+ */
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ wait_for_completion(&info->waitevent);
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (info->response.modify_response.status)
+ ret = -EAGAIN;
+
+free_info:
+ kfree(info);
+ return ret;
+}
+
/*
* Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
*
- * CHANNELMSG_MODIFYCHANNEL messages are aynchronous. Also, Hyper-V does not
- * ACK such messages. IOW we can't know when the host will stop interrupting
- * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
+ * CHANNELMSG_MODIFYCHANNEL messages are asynchronous. When VMbus version 5.3
+ * or later is negotiated, Hyper-V always sends an ACK in response to such a
+ * message. For VMbus version 5.2 and earlier, it never sends an ACK. Without
+ * an ACK, we cannot know when the host will stop interrupting the "old"
+ * vCPU and start interrupting the "new" vCPU for the given channel.
*
* The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
* VERSION_WIN10_V4_1.
*/
-int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
{
- struct vmbus_channel_modifychannel conn_msg;
- int ret;
-
- memset(&conn_msg, 0, sizeof(conn_msg));
- conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
- conn_msg.child_relid = child_relid;
- conn_msg.target_vp = target_vp;
-
- ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
-
- trace_vmbus_send_modifychannel(&conn_msg, ret);
-
- return ret;
+ if (vmbus_proto_version >= VERSION_WIN10_V5_3)
+ return send_modifychannel_with_ack(channel, target_vp);
+ return send_modifychannel_without_ack(channel, target_vp);
}
EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
@@ -385,7 +450,7 @@ nomem:
* @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
* @send_offset: the offset (in bytes) where the send ring buffer starts,
- * should be 0 for BUFFER type gpadl
+ * should be 0 for BUFFER type gpadl
* @gpadl_handle: some funky thing
*/
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
@@ -653,7 +718,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (newchannel->rescind) {
err = -ENODEV;
- goto error_free_info;
+ goto error_clean_msglist;
}
err = vmbus_post_msg(open_msg,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index f0ed730e2e4e..caf6d0c4bc1b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -333,7 +333,6 @@ fw_error:
negop->icversion_data[1].minor = icmsg_minor;
return found_match;
}
-
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
/*
@@ -593,10 +592,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
* CPUS_READ_UNLOCK CPUS_WRITE_UNLOCK
*
* Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
- * CPU2's SEARCH from *not* seeing CPU1's INSERT
+ * CPU2's SEARCH from *not* seeing CPU1's INSERT
*
* Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
- * CPU2's LOAD from *not* seing CPU1's STORE
+ * CPU2's LOAD from *not* seeing CPU1's STORE
*/
cpus_read_lock();
@@ -756,6 +755,12 @@ static void init_vp_index(struct vmbus_channel *channel)
free_cpumask_var(available_mask);
}
+#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */
+#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */
+#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
+#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */
+#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
+
static void vmbus_wait_for_unload(void)
{
int cpu;
@@ -773,12 +778,17 @@ static void vmbus_wait_for_unload(void)
* vmbus_connection.unload_event. If not, the last thing we can do is
* read message pages for all CPUs directly.
*
- * Wait no more than 10 seconds so that the panic path can't get
- * hung forever in case the response message isn't seen.
+ * Wait up to 100 seconds since an Azure host must writeback any dirty
+ * data in its disk cache before the VMbus UNLOAD request will
+ * complete. This flushing has been empirically observed to take up
+ * to 50 seconds in cases with a lot of dirty data, so allow additional
+ * leeway and allow for inaccuracies in mdelay(). But eventually time out so
+ * that the panic path can't get hung forever in case the response
+ * message isn't seen.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
if (completion_done(&vmbus_connection.unload_event))
- break;
+ goto completed;
for_each_online_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
@@ -801,9 +811,18 @@ static void vmbus_wait_for_unload(void)
vmbus_signal_eom(msg, message_type);
}
- mdelay(10);
+ /*
+ * Give a notice periodically so someone watching the
+ * serial output won't think it is completely hung.
+ */
+ if (!(i % UNLOAD_MSG_LOOPS))
+ pr_notice("Waiting for VMBus UNLOAD to complete\n");
+
+ mdelay(UNLOAD_DELAY_UNIT_MS);
}
+ pr_err("Continuing even though VMBus UNLOAD did not complete\n");
+completed:
/*
* We're crashing and already got the UNLOAD_RESPONSE, cleanup all
* maybe-pending messages on all CPUs to be able to receive new
@@ -827,6 +846,11 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
/*
* This is a global event; just wakeup the waiting thread.
* Once we successfully unload, we can cleanup the monitor state.
+ *
+ * NB. A malicious or compromised Hyper-V could send a spurious
+ * message of type CHANNELMSG_UNLOAD_RESPONSE, and trigger a call
+ * of the complete() below. Make sure that unload_event has been
+ * initialized by the time this complete() is executed.
*/
complete(&vmbus_connection.unload_event);
}
@@ -842,7 +866,7 @@ void vmbus_initiate_unload(bool crash)
if (vmbus_proto_version < VERSION_WIN8_1)
return;
- init_completion(&vmbus_connection.unload_event);
+ reinit_completion(&vmbus_connection.unload_event);
memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
@@ -980,7 +1004,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
* UNLOCK channel_mutex
*
* Forbids: r1 == valid_relid &&
- * channels[valid_relid] == channel
+ * channels[valid_relid] == channel
*
* Note. r1 can be INVALID_RELID only for an hv_sock channel.
* None of the hv_sock channels which were present before the
@@ -1313,6 +1337,46 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
}
/*
+ * vmbus_onmodifychannel_response - Modify Channel response handler.
+ *
+ * This is invoked when we received a response to our channel modify request.
+ * Find the matching request, copy the response and signal the requesting thread.
+ */
+static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_modifychannel_response *response;
+ struct vmbus_channel_msginfo *msginfo;
+ unsigned long flags;
+
+ response = (struct vmbus_channel_modifychannel_response *)hdr;
+
+ trace_vmbus_onmodifychannel_response(response);
+
+ /*
+ * Find the modify msg, copy the response and signal/unblock the wait event.
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
+ struct vmbus_channel_message_header *responseheader =
+ (struct vmbus_channel_message_header *)msginfo->msg;
+
+ if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
+ struct vmbus_channel_modifychannel *modifymsg;
+
+ modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
+ if (modifymsg->child_relid == response->child_relid) {
+ memcpy(&msginfo->response.modify_response, response,
+ sizeof(*response));
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
* vmbus_ongpadl_torndown - GPADL torndown handler.
*
* This is invoked when we received a response to our gpadl teardown request.
@@ -1429,6 +1493,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
{ CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
{ CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
+ { CHANNELMSG_MODIFYCHANNEL_RESPONSE, 1, vmbus_onmodifychannel_response,
+ sizeof(struct vmbus_channel_modifychannel_response)},
};
/*
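The reinit_completion() above pairs with the connection.c hunk that follows: unload_event is now initialized at build time, so even a spurious CHANNELMSG_UNLOAD_RESPONSE arriving before vmbus_initiate_unload() runs can only complete() an already-valid completion. The generic shape of that hardening, with hypothetical names:

static struct demo_conn {
        struct completion unload_event;
} demo = {
        .unload_event = COMPLETION_INITIALIZER(demo.unload_event),
};

static void demo_initiate_unload(void)
{
        /* Re-arm; safe even if a stray complete() already fired. */
        reinit_completion(&demo.unload_event);
        /* ... post the UNLOAD request ... */
        wait_for_completion(&demo.unload_event);
}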
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index c83612cddb99..311cd005b3be 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -26,9 +26,11 @@
struct vmbus_connection vmbus_connection = {
.conn_state = DISCONNECTED,
+ .unload_event = COMPLETION_INITIALIZER(
+ vmbus_connection.unload_event),
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
- .ready_for_suspend_event= COMPLETION_INITIALIZER(
+ .ready_for_suspend_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_suspend_event),
.ready_for_resume_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_resume_event),
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(vmbus_proto_version);
* Table of VMBus versions listed from newest to oldest.
*/
static __u32 vmbus_versions[] = {
+ VERSION_WIN10_V5_3,
VERSION_WIN10_V5_2,
VERSION_WIN10_V5_1,
VERSION_WIN10_V5,
@@ -60,7 +63,7 @@ static __u32 vmbus_versions[] = {
* Maximal VMBus protocol version guests can negotiate. Useful to cap the
* VMBus version for testing and debugging purpose.
*/
-static uint max_version = VERSION_WIN10_V5_2;
+static uint max_version = VERSION_WIN10_V5_3;
module_param(max_version, uint, S_IRUGO);
MODULE_PARM_DESC(max_version,
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index f202ac7f4b3d..e83507f49676 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -13,9 +13,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
-#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -37,6 +38,42 @@ int hv_init(void)
}
/*
+ * Functions for allocating and freeing memory with size and
+ * alignment HV_HYP_PAGE_SIZE. These functions are needed because
+ * the guest page size may not be the same as the Hyper-V page
+ * size. We depend upon kmalloc() aligning power-of-two size
+ * allocations to the allocation size boundary, so that the
+ * allocated memory appears to Hyper-V as a page of the size
+ * it expects.
+ */
+
+void *hv_alloc_hyperv_page(void)
+{
+ BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
+
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ return (void *)__get_free_page(GFP_KERNEL);
+ else
+ return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void *hv_alloc_hyperv_zeroed_page(void)
+{
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ else
+ return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void hv_free_hyperv_page(unsigned long addr)
+{
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ free_page(addr);
+ else
+ kfree((void *)addr);
+}
+
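+/*
+ * Illustrative usage (editorial sketch, not part of this patch): a caller
+ * needing a Hyper-V-page-sized buffer pairs the allocator with the
+ * matching free, casting the pointer back to an unsigned long for
+ * hv_free_hyperv_page():
+ *
+ *	void *buf = hv_alloc_hyperv_zeroed_page();
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	... hand buf to the hypervisor ...
+ *	hv_free_hyperv_page((unsigned long)buf);
+ */
+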
+/*
* hv_post_message - Post a message using the hypervisor message IPC.
*
* This involves a hypercall.
@@ -68,7 +105,7 @@ int hv_post_message(union hv_connection_id connection_id,
*/
put_cpu_ptr(hv_cpu);
- return status & 0xFFFF;
+ return hv_result(status);
}
int hv_synic_alloc(void)
@@ -162,34 +199,48 @@ void hv_synic_enable_regs(unsigned int cpu)
union hv_synic_scontrol sctrl;
/* Setup the Synic's message page */
- hv_get_simp(simp.as_uint64);
+ simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
>> HV_HYP_PAGE_SHIFT;
- hv_set_simp(simp.as_uint64);
+ hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
- hv_get_siefp(siefp.as_uint64);
+ siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
>> HV_HYP_PAGE_SHIFT;
- hv_set_siefp(siefp.as_uint64);
+ hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
/* Setup the shared SINT. */
- hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ if (vmbus_irq != -1)
+ enable_percpu_irq(vmbus_irq, 0);
+ shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+ VMBUS_MESSAGE_SINT);
- shared_sint.vector = hv_get_vector();
+ shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
- shared_sint.auto_eoi = hv_recommend_using_aeoi();
- hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+
+ /*
+ * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
+ * it doesn't provide a recommendation flag and AEOI must be disabled.
+ */
+#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
+ shared_sint.auto_eoi =
+ !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
+#else
+ shared_sint.auto_eoi = 0;
+#endif
+ hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
/* Enable the global synic bit */
- hv_get_synic_state(sctrl.as_uint64);
+ sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.enable = 1;
- hv_set_synic_state(sctrl.as_uint64);
+ hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
@@ -211,30 +262,71 @@ void hv_synic_disable_regs(unsigned int cpu)
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
- hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+ VMBUS_MESSAGE_SINT);
shared_sint.masked = 1;
/* Need to clean up correctly in the case of SMP! */
/* Disable the interrupt */
- hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
- hv_get_simp(simp.as_uint64);
+ simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 0;
simp.base_simp_gpa = 0;
- hv_set_simp(simp.as_uint64);
+ hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
- hv_get_siefp(siefp.as_uint64);
+ siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
siefp.base_siefp_gpa = 0;
- hv_set_siefp(siefp.as_uint64);
+ hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
/* Disable the global synic bit */
- hv_get_synic_state(sctrl.as_uint64);
+ sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.enable = 0;
- hv_set_synic_state(sctrl.as_uint64);
+ hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+
+ if (vmbus_irq != -1)
+ disable_percpu_irq(vmbus_irq);
+}
+
+#define HV_MAX_TRIES 3
+/*
+ * Scan the event flags page of 'this' CPU looking for any bit that is set. If we find one
+ * bit set, then wait a few milliseconds. Repeat these steps up to HV_MAX_TRIES times.
+ * Return 'true' if any bit is still set after this operation; 'false' otherwise.
+ *
+ * If a bit is set, that means there is a pending channel interrupt. The expectation is
+ * that the normal interrupt handling mechanism will find and process the channel interrupt
+ * "very soon", and in the process clear the bit.
+ */
+static bool hv_synic_event_pending(void)
+{
+ struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
+ union hv_synic_event_flags *event =
+ (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
+ unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
+ bool pending;
+ u32 relid;
+ int tries = 0;
+
+retry:
+ pending = false;
+ for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
+ /* Special case - VMBus channel protocol messages */
+ if (relid == 0)
+ continue;
+ pending = true;
+ break;
+ }
+ if (pending && tries++ < HV_MAX_TRIES) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ return pending;
}
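+
+/*
+ * Worst case, the scan above sleeps HV_MAX_TRIES times for 10-20 ms each,
+ * so a CPU-offline attempt waits at most roughly 60 ms for pending channel
+ * interrupts to drain before giving up.
+ */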
int hv_synic_cleanup(unsigned int cpu)
@@ -242,6 +334,9 @@ int hv_synic_cleanup(unsigned int cpu)
struct vmbus_channel *channel, *sc;
bool channel_found = false;
+ if (vmbus_connection.conn_state != CONNECTED)
+ goto always_cleanup;
+
/*
* Hyper-V does not provide a way to change the connect CPU once
* it is set; we must prevent the connect CPU from going offline
@@ -249,8 +344,7 @@ int hv_synic_cleanup(unsigned int cpu)
* path where the vmbus is already disconnected, the CPU must be
* allowed to shut down.
*/
- if (cpu == VMBUS_CONNECT_CPU &&
- vmbus_connection.conn_state == CONNECTED)
+ if (cpu == VMBUS_CONNECT_CPU)
return -EBUSY;
/*
@@ -277,9 +371,21 @@ int hv_synic_cleanup(unsigned int cpu)
}
mutex_unlock(&vmbus_connection.channel_mutex);
- if (channel_found && vmbus_connection.conn_state == CONNECTED)
+ if (channel_found)
+ return -EBUSY;
+
+ /*
+ * channel_found == false means that any channels that were previously
+ * assigned to the CPU have been reassigned elsewhere with a call of
+ * vmbus_send_modifychannel(). Scan the event flags page looking for
+ * bits that are set, and wait with a timeout for vmbus_chan_sched()
+ * to process such bits. If bits are still set after this operation
+ * and VMBus is connected, fail the CPU offlining operation.
+ */
+ if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
return -EBUSY;
+always_cleanup:
hv_stimer_legacy_cleanup(cpu);
hv_synic_disable_regs(cpu);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 2f776d78e3c1..58af84e30144 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -21,6 +21,7 @@
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
+#include <linux/page_reporting.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
@@ -563,6 +564,8 @@ struct hv_dynmem_device {
* The negotiated version agreed by host.
*/
__u32 version;
+
+ struct page_reporting_dev_info pr_dev_info;
};
static struct hv_dynmem_device dm_device;
@@ -1568,6 +1571,89 @@ static void balloon_onchannelcallback(void *context)
}
+/* Hyper-V only supports reporting 2MB pages or higher */
+#define HV_MIN_PAGE_REPORTING_ORDER 9
+#define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER)
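+/*
+ * With the 4 KiB Hyper-V page size, order 9 gives
+ * HV_MIN_PAGE_REPORTING_LEN = 4 KiB << 9 = 2 MiB, matching the 2MB
+ * minimum noted above.
+ */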
+static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
+ struct scatterlist *sgl, unsigned int nents)
+{
+ unsigned long flags;
+ struct hv_memory_hint *hint;
+ int i;
+ u64 status;
+ struct scatterlist *sg;
+
+ WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+ WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN);
+ local_irq_save(flags);
+ hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ if (!hint) {
+ local_irq_restore(flags);
+ return -ENOSPC;
+ }
+
+ hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
+ hint->reserved = 0;
+ for_each_sg(sgl, sg, nents, i) {
+ union hv_gpa_page_range *range;
+
+ range = &hint->ranges[i];
+ range->address_space = 0;
+ /* page reporting only reports 2MB pages or higher */
+ range->page.largepage = 1;
+ range->page.additional_pages =
+ (sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1;
+ range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
+ range->base_large_pfn =
+ page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER;
+ }
+
+ status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
+ hint, NULL);
+ local_irq_restore(flags);
+ if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+ pr_err("Cold memory discard hypercall failed with status %llx\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void enable_page_reporting(void)
+{
+ int ret;
+
+ /* Essentially, validate that 'PAGE_REPORTING_MIN_ORDER' is big enough. */
+ if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) {
+ pr_debug("Cold memory discard is only supported on 2MB pages and above\n");
+ return;
+ }
+
+ if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
+ pr_debug("Cold memory discard hint not supported by Hyper-V\n");
+ return;
+ }
+
+ BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+ dm_device.pr_dev_info.report = hv_free_page_report;
+ ret = page_reporting_register(&dm_device.pr_dev_info);
+ if (ret < 0) {
+ dm_device.pr_dev_info.report = NULL;
+ pr_err("Failed to enable cold memory discard: %d\n", ret);
+ } else {
+ pr_info("Cold memory discard hint enabled\n");
+ }
+}
+
+static void disable_page_reporting(void)
+{
+ if (dm_device.pr_dev_info.report) {
+ page_reporting_unregister(&dm_device.pr_dev_info);
+ dm_device.pr_dev_info.report = NULL;
+ }
+}
+
static int balloon_connect_vsp(struct hv_device *dev)
{
struct dm_version_request version_req;
@@ -1713,6 +1799,7 @@ static int balloon_probe(struct hv_device *dev,
if (ret != 0)
return ret;
+ enable_page_reporting();
dm_device.state = DM_INITIALIZED;
dm_device.thread =
@@ -1727,6 +1814,7 @@ static int balloon_probe(struct hv_device *dev,
probe_error:
dm_device.state = DM_INIT_ERROR;
dm_device.thread = NULL;
+ disable_page_reporting();
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
@@ -1749,6 +1837,7 @@ static int balloon_remove(struct hv_device *dev)
cancel_work_sync(&dm->ha_wrk.wrk);
kthread_stop(dm->thread);
+ disable_page_reporting();
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index 6063bb21bb13..c02a1719e92f 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -103,6 +103,21 @@ TRACE_EVENT(vmbus_ongpadl_created,
)
);
+TRACE_EVENT(vmbus_onmodifychannel_response,
+ TP_PROTO(const struct vmbus_channel_modifychannel_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = response->child_relid;
+ __entry->status = response->status;
+ ),
+ TP_printk("child_relid 0x%x, status %d",
+ __entry->child_relid, __entry->status
+ )
+ );
+
TRACE_EVENT(vmbus_ongpadl_torndown,
TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
TP_ARGS(gpadltorndown),
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 35833d4d1a1d..374f8afbf8a5 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -84,15 +84,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->write_index = next_write_location;
}
-/* Set the next read location for the specified ring buffer. */
-static inline void
-hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
- u32 next_read_location)
-{
- ring_info->ring_buffer->read_index = next_read_location;
- ring_info->priv_read_index = next_read_location;
-}
-
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
@@ -313,7 +304,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
if (rqst_id == VMBUS_RQST_ERROR) {
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- pr_err("No request id available\n");
return -EAGAIN;
}
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 10dce9f91216..b12d6827b222 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -48,8 +48,10 @@ static int hyperv_cpuhp_online;
static void *hv_panic_page;
+static long __percpu *vmbus_evt;
+
/* Values parsed from ACPI DSDT */
-static int vmbus_irq;
+int vmbus_irq;
int vmbus_interrupt;
/*
@@ -1381,7 +1383,13 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(hv_get_vector(), 0);
+ add_interrupt_randomness(vmbus_interrupt, 0);
+}
+
+static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
+{
+ vmbus_isr();
+ return IRQ_HANDLED;
}
/*
@@ -1392,22 +1400,36 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
size_t bytes_written;
- phys_addr_t panic_pa;
/* We are only interested in panics. */
if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
return;
- panic_pa = virt_to_phys(hv_panic_page);
-
/*
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
- if (bytes_written)
- hyperv_report_panic_msg(panic_pa, bytes_written);
+ if (!bytes_written)
+ return;
+ /*
+ * P3 contains the physical address of the panic page and P4 the size
+ * of the panic data in that page. The remaining registers are no-ops
+ * when the NOTIFY_MSG flag is set.
+ */
+ hv_set_register(HV_REGISTER_CRASH_P0, 0);
+ hv_set_register(HV_REGISTER_CRASH_P1, 0);
+ hv_set_register(HV_REGISTER_CRASH_P2, 0);
+ hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
+ hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
+
+ /*
+ * Let Hyper-V know there is crash data available along with
+ * the panic message.
+ */
+ hv_set_register(HV_REGISTER_CRASH_CTL,
+ (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
static struct kmsg_dumper hv_kmsg_dumper = {
@@ -1482,9 +1504,28 @@ static int vmbus_bus_init(void)
if (ret)
return ret;
- ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
- if (ret)
- goto err_setup;
+ /*
+ * VMbus interrupts are best modeled as per-cpu interrupts. If
+ * on an architecture with support for per-cpu IRQs (e.g. ARM64),
+ * allocate a per-cpu IRQ using standard Linux kernel functionality.
+ * If not on such an architecture (e.g., x86/x64), then rely on
+ * code in the arch-specific portion of the code tree to connect
+ * the VMbus interrupt handler.
+ */
+
+ if (vmbus_irq == -1) {
+ hv_setup_vmbus_handler(vmbus_isr);
+ } else {
+ vmbus_evt = alloc_percpu(long);
+ ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
+ "Hyper-V VMbus", vmbus_evt);
+ if (ret) {
+ pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
+ vmbus_irq, ret);
+ free_percpu(vmbus_evt);
+ goto err_setup;
+ }
+ }
ret = hv_synic_alloc();
if (ret)
@@ -1521,7 +1562,7 @@ static int vmbus_bus_init(void)
* Register for panic kmsg callback only if the right
* capability is supported by the hypervisor.
*/
- hv_get_crash_ctl(hyperv_crash_ctl);
+ hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
hv_kmsg_dump_register();
@@ -1545,7 +1586,12 @@ err_connect:
err_cpuhp:
hv_synic_free();
err_alloc:
- hv_remove_vmbus_irq();
+ if (vmbus_irq == -1) {
+ hv_remove_vmbus_handler();
+ } else {
+ free_percpu_irq(vmbus_irq, vmbus_evt);
+ free_percpu(vmbus_evt);
+ }
err_setup:
bus_unregister(&hv_bus);
unregister_sysctl_table(hv_ctl_table_hdr);
@@ -1802,13 +1848,15 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
if (target_cpu == origin_cpu)
goto cpu_store_unlock;
- if (vmbus_send_modifychannel(channel->offermsg.child_relid,
+ if (vmbus_send_modifychannel(channel,
hv_cpu_number_to_vp_number(target_cpu))) {
ret = -EIO;
goto cpu_store_unlock;
}
/*
+ * For versions before VERSION_WIN10_V5_3, the following warning holds:
+ *
* Warning. At this point, there is *no* guarantee that the host will
* have successfully processed the vmbus_send_modifychannel() request.
* See the header comment of vmbus_send_modifychannel() for more info.
@@ -2663,6 +2711,18 @@ static int __init hv_acpi_init(void)
ret = -ETIMEDOUT;
goto cleanup;
}
+
+ /*
+ * If we're on an architecture with a hardcoded hypervisor
+ * vector (i.e. x86/x64), override the VMbus interrupt found
+ * in the ACPI tables. Ensure vmbus_irq is not set since the
+ * normal Linux IRQ mechanism is not used in this case.
+ */
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
+ vmbus_irq = -1;
+#endif
+
hv_debug_init();
ret = vmbus_bus_init();
@@ -2693,7 +2753,12 @@ static void __exit vmbus_exit(void)
vmbus_connection.conn_state = DISCONNECTED;
hv_stimer_global_cleanup();
vmbus_disconnect();
- hv_remove_vmbus_irq();
+ if (vmbus_irq == -1) {
+ hv_remove_vmbus_handler();
+ } else {
+ free_percpu_irq(vmbus_irq, vmbus_evt);
+ free_percpu(vmbus_evt);
+ }
for_each_online_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 0062c8935653..b57bea167102 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -86,7 +86,7 @@ static int coresight_id_match(struct device *dev, void *data)
i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
return 0;
- /* Get the source ID for both compoment */
+ /* Get the source ID for both components */
trace_id = source_ops(csdev)->trace_id(csdev);
i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 0f603b4094f2..c1bec2ad3911 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -52,13 +52,13 @@ static ssize_t format_attr_contextid_show(struct device *dev,
{
int pid_fmt = ETM_OPT_CTXTID;
-#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
+#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
return sprintf(page, "config:%d\n", pid_fmt);
}
-struct device_attribute format_attr_contextid =
+static struct device_attribute format_attr_contextid =
__ATTR(contextid, 0444, format_attr_contextid_show, NULL);
static struct attribute *etm_config_formats_attr[] = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 15016f757828..a5b13a7779c3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1951,6 +1951,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
+ CS_AMBA_UCI_ID(0x000bbd41, uci_id_etm4),/* Cortex-A78 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index c9ac3dc65113..24d0c974bfd5 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -844,7 +844,7 @@ static irqreturn_t intel_th_irq(int irq, void *data)
* @irq: irq number
*/
struct intel_th *
-intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
struct resource *devres, unsigned int ndevres)
{
int err, r, nr_mmios = 0;
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index f72803a02391..28509b02a0b5 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
output->active = false;
for_each_set_bit(master, gth->output[output->port].master,
- TH_CONFIGURABLE_MASTERS) {
+ TH_CONFIGURABLE_MASTERS + 1) {
gth_master_set(gth, master, -1);
}
spin_unlock(&gth->gth_lock);
@@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
- for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+ for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 5fe694708b7a..89c67e0e1d34 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -74,7 +74,7 @@ struct intel_th_drvdata {
*/
struct intel_th_device {
struct device dev;
- struct intel_th_drvdata *drvdata;
+ const struct intel_th_drvdata *drvdata;
struct resource *resource;
unsigned int num_resources;
unsigned int type;
@@ -178,7 +178,7 @@ struct intel_th_driver {
/* file_operations for those who want a device node */
const struct file_operations *fops;
/* optional attributes */
- struct attribute_group *attr_group;
+ const struct attribute_group *attr_group;
/* source ops */
int (*set_output)(struct intel_th_device *thdev,
@@ -224,7 +224,7 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
}
struct intel_th *
-intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
struct resource *devres, unsigned int ndevres);
void intel_th_free(struct intel_th *th);
@@ -272,7 +272,7 @@ struct intel_th {
struct intel_th_device *thdev[TH_SUBDEVICE_MAX];
struct intel_th_device *hub;
- struct intel_th_drvdata *drvdata;
+ const struct intel_th_drvdata *drvdata;
struct resource resource[TH_MMIO_END];
int (*activate)(struct intel_th *);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 7d95242db900..2edc4666633d 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -2095,7 +2095,7 @@ static struct attribute *msc_output_attrs[] = {
NULL,
};
-static struct attribute_group msc_output_group = {
+static const struct attribute_group msc_output_group = {
.attrs = msc_output_attrs,
};
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 251e75c9ba9d..7da4f298ed01 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -71,7 +71,7 @@ static void intel_th_pci_deactivate(struct intel_th *th)
static int intel_th_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct intel_th_drvdata *drvdata = (void *)id->driver_data;
+ const struct intel_th_drvdata *drvdata = (void *)id->driver_data;
struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = {
[TH_MMIO_CONFIG] = pdev->resource[TH_PCI_CONFIG_BAR],
[TH_MMIO_SW] = pdev->resource[TH_PCI_STH_SW_BAR],
@@ -274,10 +274,20 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Alder Lake-M */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Rocket Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 0da6b787f553..09132ab8bc23 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -142,7 +142,7 @@ static struct attribute *pti_output_attrs[] = {
NULL,
};
-static struct attribute_group pti_output_group = {
+static const struct attribute_group pti_output_group = {
.attrs = pti_output_attrs,
};
@@ -295,7 +295,7 @@ static struct attribute *lpp_output_attrs[] = {
NULL,
};
-static struct attribute_group lpp_output_group = {
+static const struct attribute_group lpp_output_group = {
.attrs = lpp_output_attrs,
};
diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c
index 360b5c03df95..8254971c02e7 100644
--- a/drivers/hwtracing/stm/p_sys-t.c
+++ b/drivers/hwtracing/stm/p_sys-t.c
@@ -92,7 +92,7 @@ static void sys_t_policy_node_init(void *priv)
{
struct sys_t_policy_node *pn = priv;
- generate_random_uuid(pn->uuid.b);
+ uuid_gen(&pn->uuid);
}
static int sys_t_output_open(void *priv, struct stm_output *output)
@@ -292,6 +292,7 @@ static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
unsigned int m = output->master;
const unsigned char nil = 0;
u32 header = DATA_HEADER;
+ u8 uuid[UUID_SIZE];
ssize_t sz;
/* We require an existing policy node to proceed */
@@ -322,7 +323,8 @@ static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
return sz;
/* GUID */
- sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE);
+ export_uuid(uuid, &op->node.uuid);
+ sz = stm_data_write(data, m, c, false, uuid, sizeof(op->node.uuid));
if (sz <= 0)
return sz;
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 603b4a9969d3..42103c3a177f 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -57,11 +57,6 @@ void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
*cend = policy_node->last_channel;
}
-static inline char *stp_policy_node_name(struct stp_policy_node *policy_node)
-{
- return policy_node->group.cg_item.ci_name ? : "<none>";
-}
-
static inline struct stp_policy *to_stp_policy(struct config_item *item)
{
return item ?
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index c590d36b5fd1..5c8e94b6cdb5 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -221,6 +221,10 @@ mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data)
writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr);
writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + drv_data->reg_offsets.control);
+
+ if (drv_data->errata_delay)
+ udelay(5);
+
drv_data->state = MV64XXX_I2C_STATE_IDLE;
}
diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c
index 8bcc529942bc..9dbca366613e 100644
--- a/drivers/input/joystick/n64joy.c
+++ b/drivers/input/joystick/n64joy.c
@@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev)
mutex_init(&priv->n64joy_mutex);
priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!priv->reg_base) {
- err = -EINVAL;
+ if (IS_ERR(priv->reg_base)) {
+ err = PTR_ERR(priv->reg_base);
goto fail;
}
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index 63d5e488137d..e9fa1423f136 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+static int nspire_keypad_open(struct input_dev *input)
{
+ struct nspire_keypad *keypad = input_get_drvdata(input);
unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
+ int error;
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error)
+ return error;
cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
if (cycles_per_us == 0)
@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
keypad->int_mask = 1 << 1;
writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
- /* Disable GPIO interrupts to prevent hanging on touchpad */
- /* Possibly used to detect touchpad events */
- writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
- /* Acknowledge existing interrupts */
- writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
-
- return 0;
-}
-
-static int nspire_keypad_open(struct input_dev *input)
-{
- struct nspire_keypad *keypad = input_get_drvdata(input);
- int error;
-
- error = clk_prepare_enable(keypad->clk);
- if (error)
- return error;
-
- error = nspire_keypad_chip_init(keypad);
- if (error) {
- clk_disable_unprepare(keypad->clk);
- return error;
- }
-
return 0;
}
@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
clk_disable_unprepare(keypad->clk);
}
@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return error;
+ }
+
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
+ /* Disable GPIO interrupts to prevent hanging on touchpad */
+ /* Possibly used to detect touchpad events */
+ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+ /* Acknowledge existing GPIO interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+
+ clk_disable_unprepare(keypad->clk);
+
input_set_drvdata(input, keypad);
input->id.bustype = BUS_HOST;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 9119e12a5778..a5a003553646 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
+ }, {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 4c2b579f6c8b..5f7706febcb0 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1441,7 +1441,7 @@ static int elants_i2c_probe(struct i2c_client *client,
touchscreen_parse_properties(ts->input, true, &ts->prop);
- if (ts->chip_id == EKTF3624) {
+ if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) {
/* calculate resolution from size */
ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);
@@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client,
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
- if (ts->major_res > 0)
- input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
+ input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index b63d7fdf0cd2..85a1f465c097 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
u8 major = event[4];
u8 minor = event[5];
u8 z = event[6] & S6SY761_MASK_Z;
- u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
- u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+ u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+ u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
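+	/*
+	 * Coordinates are 12 bits wide: the 8 MSBs sit in event[1]/event[2]
+	 * and the two 4-bit LSB nibbles are packed into event[3], so the
+	 * high byte must be shifted left by 4, not 3.
+	 */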
input_mt_slot(sdata->input, tid);
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index ca52647f8955..cdb3e11462c6 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -74,6 +74,15 @@ config INTERCONNECT_QCOM_SC7180
This is a driver for the Qualcomm Network-on-Chip on sc7180-based
platforms.
+config INTERCONNECT_QCOM_SDM660
+ tristate "Qualcomm SDM660 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdm660-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
@@ -110,5 +119,14 @@ config INTERCONNECT_QCOM_SM8250
This is a driver for the Qualcomm Network-on-Chip on sm8250-based
platforms.
+config INTERCONNECT_QCOM_SM8350
+ tristate "Qualcomm SM8350 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SM8350-based
+ platforms.
+
config INTERCONNECT_QCOM_SMD_RPM
tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index c6a735df067e..46fc62447156 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -8,10 +8,12 @@ icc-osm-l3-objs := osm-l3.o
qnoc-qcs404-objs := qcs404.o
icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
+qnoc-sdm660-objs := sdm660.o
qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
+qnoc-sm8350-objs := sm8350.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
@@ -22,8 +24,10 @@ obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index cc6095492cbe..54de49ca7808 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -59,8 +59,8 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
qn->slv_rpm_id,
sum_bw);
if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv error %d\n",
- ret);
+ pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
+ qn->slv_rpm_id, ret);
return ret;
}
}
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
new file mode 100644
index 000000000000..632dbdd21915
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm SDM630/SDM636/SDM660 Network-on-Chip (NoC) QoS driver
+ * Copyright (C) 2020, AngeloGioacchino Del Regno <kholk11@gmail.com>
+ */
+
+#include <dt-bindings/interconnect/qcom,sdm660.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+/* BIMC QoS */
+#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n))
+#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
+#define M_BKE_HEALTH_CFG_ADDR(i, n) (M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))
+
+#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
+#define M_BKE_HEALTH_CFG_AREQPRIO_MASK 0x300
+#define M_BKE_HEALTH_CFG_PRIOLVL_MASK 0x3
+#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
+#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f
+
+#define M_BKE_EN_EN_BMASK 0x1
+
+/* Valid for both NoC and BIMC */
+#define NOC_QOS_MODE_FIXED 0x0
+#define NOC_QOS_MODE_LIMITER 0x1
+#define NOC_QOS_MODE_BYPASS 0x2
+
+/* NoC QoS */
+#define NOC_PERM_MODE_FIXED 1
+#define NOC_PERM_MODE_BYPASS (1 << NOC_QOS_MODE_BYPASS)
+
+#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
+#define NOC_QOS_PRIORITY_MASK 0xf
+#define NOC_QOS_PRIORITY_P1_SHIFT 0x2
+#define NOC_QOS_PRIORITY_P0_SHIFT 0x3
+
+#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
+#define NOC_QOS_MODEn_MASK 0x3
+
+enum {
+ SDM660_MASTER_IPA = 1,
+ SDM660_MASTER_CNOC_A2NOC,
+ SDM660_MASTER_SDCC_1,
+ SDM660_MASTER_SDCC_2,
+ SDM660_MASTER_BLSP_1,
+ SDM660_MASTER_BLSP_2,
+ SDM660_MASTER_UFS,
+ SDM660_MASTER_USB_HS,
+ SDM660_MASTER_USB3,
+ SDM660_MASTER_CRYPTO_C0,
+ SDM660_MASTER_GNOC_BIMC,
+ SDM660_MASTER_OXILI,
+ SDM660_MASTER_MNOC_BIMC,
+ SDM660_MASTER_SNOC_BIMC,
+ SDM660_MASTER_PIMEM,
+ SDM660_MASTER_SNOC_CNOC,
+ SDM660_MASTER_QDSS_DAP,
+ SDM660_MASTER_APPS_PROC,
+ SDM660_MASTER_CNOC_MNOC_MMSS_CFG,
+ SDM660_MASTER_CNOC_MNOC_CFG,
+ SDM660_MASTER_CPP,
+ SDM660_MASTER_JPEG,
+ SDM660_MASTER_MDP_P0,
+ SDM660_MASTER_MDP_P1,
+ SDM660_MASTER_VENUS,
+ SDM660_MASTER_VFE,
+ SDM660_MASTER_QDSS_ETR,
+ SDM660_MASTER_QDSS_BAM,
+ SDM660_MASTER_SNOC_CFG,
+ SDM660_MASTER_BIMC_SNOC,
+ SDM660_MASTER_A2NOC_SNOC,
+ SDM660_MASTER_GNOC_SNOC,
+
+ SDM660_SLAVE_A2NOC_SNOC,
+ SDM660_SLAVE_EBI,
+ SDM660_SLAVE_HMSS_L3,
+ SDM660_SLAVE_BIMC_SNOC,
+ SDM660_SLAVE_CNOC_A2NOC,
+ SDM660_SLAVE_MPM,
+ SDM660_SLAVE_PMIC_ARB,
+ SDM660_SLAVE_TLMM_NORTH,
+ SDM660_SLAVE_TCSR,
+ SDM660_SLAVE_PIMEM_CFG,
+ SDM660_SLAVE_IMEM_CFG,
+ SDM660_SLAVE_MESSAGE_RAM,
+ SDM660_SLAVE_GLM,
+ SDM660_SLAVE_BIMC_CFG,
+ SDM660_SLAVE_PRNG,
+ SDM660_SLAVE_SPDM,
+ SDM660_SLAVE_QDSS_CFG,
+ SDM660_SLAVE_CNOC_MNOC_CFG,
+ SDM660_SLAVE_SNOC_CFG,
+ SDM660_SLAVE_QM_CFG,
+ SDM660_SLAVE_CLK_CTL,
+ SDM660_SLAVE_MSS_CFG,
+ SDM660_SLAVE_TLMM_SOUTH,
+ SDM660_SLAVE_UFS_CFG,
+ SDM660_SLAVE_A2NOC_CFG,
+ SDM660_SLAVE_A2NOC_SMMU_CFG,
+ SDM660_SLAVE_GPUSS_CFG,
+ SDM660_SLAVE_AHB2PHY,
+ SDM660_SLAVE_BLSP_1,
+ SDM660_SLAVE_SDCC_1,
+ SDM660_SLAVE_SDCC_2,
+ SDM660_SLAVE_TLMM_CENTER,
+ SDM660_SLAVE_BLSP_2,
+ SDM660_SLAVE_PDM,
+ SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
+ SDM660_SLAVE_USB_HS,
+ SDM660_SLAVE_USB3_0,
+ SDM660_SLAVE_SRVC_CNOC,
+ SDM660_SLAVE_GNOC_BIMC,
+ SDM660_SLAVE_GNOC_SNOC,
+ SDM660_SLAVE_CAMERA_CFG,
+ SDM660_SLAVE_CAMERA_THROTTLE_CFG,
+ SDM660_SLAVE_MISC_CFG,
+ SDM660_SLAVE_VENUS_THROTTLE_CFG,
+ SDM660_SLAVE_VENUS_CFG,
+ SDM660_SLAVE_MMSS_CLK_XPU_CFG,
+ SDM660_SLAVE_MMSS_CLK_CFG,
+ SDM660_SLAVE_MNOC_MPU_CFG,
+ SDM660_SLAVE_DISPLAY_CFG,
+ SDM660_SLAVE_CSI_PHY_CFG,
+ SDM660_SLAVE_DISPLAY_THROTTLE_CFG,
+ SDM660_SLAVE_SMMU_CFG,
+ SDM660_SLAVE_MNOC_BIMC,
+ SDM660_SLAVE_SRVC_MNOC,
+ SDM660_SLAVE_HMSS,
+ SDM660_SLAVE_LPASS,
+ SDM660_SLAVE_WLAN,
+ SDM660_SLAVE_CDSP,
+ SDM660_SLAVE_IPA,
+ SDM660_SLAVE_SNOC_BIMC,
+ SDM660_SLAVE_SNOC_CNOC,
+ SDM660_SLAVE_IMEM,
+ SDM660_SLAVE_PIMEM,
+ SDM660_SLAVE_QDSS_STM,
+ SDM660_SLAVE_SRVC_SNOC,
+
+ SDM660_A2NOC,
+ SDM660_BIMC,
+ SDM660_CNOC,
+ SDM660_GNOC,
+ SDM660_MNOC,
+ SDM660_SNOC,
+};
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+static const struct clk_bulk_data bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+static const struct clk_bulk_data bus_mm_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+ { .id = "iface" },
+};
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ * @is_bimc_node: indicates whether to use BIMC-specific settings
+ * @regmap: regmap for QoS registers read/write access
+ * @mmio: NoC base iospace
+ */
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+ bool is_bimc_node;
+ struct regmap *regmap;
+ void __iomem *mmio;
+};
+
+#define SDM660_MAX_LINKS 34
+
+/**
+ * struct qcom_icc_qos - Qualcomm specific interconnect QoS parameters
+ * @areq_prio: node requests priority
+ * @prio_level: priority level for bus communication
+ * @limit_commands: activate/deactivate limiter mode during runtime
+ * @ap_owned: indicates if the node is owned by the AP or by the RPM
+ * @qos_mode: default qos mode for this node
+ * @qos_port: qos port number for finding qos registers of this node
+ */
+struct qcom_icc_qos {
+ u32 areq_prio;
+ u32 prio_level;
+ bool limit_commands;
+ bool ap_owned;
+ int qos_mode;
+ int qos_port;
+};
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM id for devices that are bus masters
+ * @slv_rpm_id: RPM id for devices that are bus slaves
+ * @qos: NoC QoS setting parameters
+ * @rate: current bus clock rate in Hz
+ */
+struct qcom_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[SDM660_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ struct qcom_icc_qos qos;
+ u64 rate;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+ const struct regmap_config *regmap_cfg;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ _ap_owned, _qos_mode, _qos_prio, _qos_port, ...) \
+ static struct qcom_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .qos.ap_owned = _ap_owned, \
+ .qos.qos_mode = _qos_mode, \
+ .qos.areq_prio = _qos_prio, \
+ .qos.prio_level = _qos_prio, \
+ .qos.qos_port = _qos_port, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
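+/*
+ * For illustration (editorial note, not part of the driver): the first
+ * invocation below, DEFINE_QNODE(mas_ipa, SDM660_MASTER_IPA, 8, 59, -1,
+ * true, NOC_QOS_MODE_FIXED, 1, 3, SDM660_SLAVE_A2NOC_SNOC), defines a
+ * node named "mas_ipa" with an 8-byte buswidth, mas_rpm_id 59, no
+ * slv_rpm_id, AP-owned fixed-mode QoS at priority 1 on QoS port 3, and
+ * one link (num_links = 1) to SDM660_SLAVE_A2NOC_SNOC.
+ */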
+DEFINE_QNODE(mas_ipa, SDM660_MASTER_IPA, 8, 59, -1, true, NOC_QOS_MODE_FIXED, 1, 3, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_cnoc_a2noc, SDM660_MASTER_CNOC_A2NOC, 8, 146, -1, true, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_sdcc_1, SDM660_MASTER_SDCC_1, 8, 33, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_sdcc_2, SDM660_MASTER_SDCC_2, 8, 35, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_blsp_1, SDM660_MASTER_BLSP_1, 4, 41, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_blsp_2, SDM660_MASTER_BLSP_2, 4, 39, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_ufs, SDM660_MASTER_UFS, 8, 68, -1, true, NOC_QOS_MODE_FIXED, 1, 4, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_usb_hs, SDM660_MASTER_USB_HS, 8, 42, -1, true, NOC_QOS_MODE_FIXED, 1, 1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_usb3, SDM660_MASTER_USB3, 8, 32, -1, true, NOC_QOS_MODE_FIXED, 1, 2, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_crypto, SDM660_MASTER_CRYPTO_C0, 8, 23, -1, true, NOC_QOS_MODE_FIXED, 1, 11, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_gnoc_bimc, SDM660_MASTER_GNOC_BIMC, 4, 144, -1, true, NOC_QOS_MODE_FIXED, 0, 0, SDM660_SLAVE_EBI);
+DEFINE_QNODE(mas_oxili, SDM660_MASTER_OXILI, 4, 6, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI, SDM660_SLAVE_BIMC_SNOC);
+DEFINE_QNODE(mas_mnoc_bimc, SDM660_MASTER_MNOC_BIMC, 4, 2, -1, true, NOC_QOS_MODE_BYPASS, 0, 2, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI, SDM660_SLAVE_BIMC_SNOC);
+DEFINE_QNODE(mas_snoc_bimc, SDM660_MASTER_SNOC_BIMC, 4, 3, -1, false, -1, 0, -1, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI);
+DEFINE_QNODE(mas_pimem, SDM660_MASTER_PIMEM, 4, 113, -1, true, NOC_QOS_MODE_FIXED, 1, 4, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI);
+DEFINE_QNODE(mas_snoc_cnoc, SDM660_MASTER_SNOC_CNOC, 8, 52, -1, true, -1, 0, -1, SDM660_SLAVE_CLK_CTL, SDM660_SLAVE_QDSS_CFG, SDM660_SLAVE_QM_CFG, SDM660_SLAVE_SRVC_CNOC, SDM660_SLAVE_UFS_CFG, SDM660_SLAVE_TCSR, SDM660_SLAVE_A2NOC_SMMU_CFG, SDM660_SLAVE_SNOC_CFG, SDM660_SLAVE_TLMM_SOUTH, SDM660_SLAVE_MPM, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, SDM660_SLAVE_SDCC_2, SDM660_SLAVE_SDCC_1, SDM660_SLAVE_SPDM, SDM660_SLAVE_PMIC_ARB, SDM660_SLAVE_PRNG, SDM660_SLAVE_MSS_CFG, SDM660_SLAVE_GPUSS_CFG, SDM660_SLAVE_IMEM_CFG, SDM660_SLAVE_USB3_0, SDM660_SLAVE_A2NOC_CFG, SDM660_SLAVE_TLMM_NORTH, SDM660_SLAVE_USB_HS, SDM660_SLAVE_PDM, SDM660_SLAVE_TLMM_CENTER, SDM660_SLAVE_AHB2PHY, SDM660_SLAVE_BLSP_2, SDM660_SLAVE_BLSP_1, SDM660_SLAVE_PIMEM_CFG, SDM660_SLAVE_GLM, SDM660_SLAVE_MESSAGE_RAM, SDM660_SLAVE_BIMC_CFG, SDM660_SLAVE_CNOC_MNOC_CFG);
+DEFINE_QNODE(mas_qdss_dap, SDM660_MASTER_QDSS_DAP, 8, 49, -1, true, -1, 0, -1, SDM660_SLAVE_CLK_CTL, SDM660_SLAVE_QDSS_CFG, SDM660_SLAVE_QM_CFG, SDM660_SLAVE_SRVC_CNOC, SDM660_SLAVE_UFS_CFG, SDM660_SLAVE_TCSR, SDM660_SLAVE_A2NOC_SMMU_CFG, SDM660_SLAVE_SNOC_CFG, SDM660_SLAVE_TLMM_SOUTH, SDM660_SLAVE_MPM, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, SDM660_SLAVE_SDCC_2, SDM660_SLAVE_SDCC_1, SDM660_SLAVE_SPDM, SDM660_SLAVE_PMIC_ARB, SDM660_SLAVE_PRNG, SDM660_SLAVE_MSS_CFG, SDM660_SLAVE_GPUSS_CFG, SDM660_SLAVE_IMEM_CFG, SDM660_SLAVE_USB3_0, SDM660_SLAVE_A2NOC_CFG, SDM660_SLAVE_TLMM_NORTH, SDM660_SLAVE_USB_HS, SDM660_SLAVE_PDM, SDM660_SLAVE_TLMM_CENTER, SDM660_SLAVE_AHB2PHY, SDM660_SLAVE_BLSP_2, SDM660_SLAVE_BLSP_1, SDM660_SLAVE_PIMEM_CFG, SDM660_SLAVE_GLM, SDM660_SLAVE_MESSAGE_RAM, SDM660_SLAVE_CNOC_A2NOC, SDM660_SLAVE_BIMC_CFG, SDM660_SLAVE_CNOC_MNOC_CFG);
+DEFINE_QNODE(mas_apss_proc, SDM660_MASTER_APPS_PROC, 16, 0, -1, true, -1, 0, -1, SDM660_SLAVE_GNOC_SNOC, SDM660_SLAVE_GNOC_BIMC);
+DEFINE_QNODE(mas_cnoc_mnoc_mmss_cfg, SDM660_MASTER_CNOC_MNOC_MMSS_CFG, 8, 4, -1, true, -1, 0, -1, SDM660_SLAVE_VENUS_THROTTLE_CFG, SDM660_SLAVE_VENUS_CFG, SDM660_SLAVE_CAMERA_THROTTLE_CFG, SDM660_SLAVE_SMMU_CFG, SDM660_SLAVE_CAMERA_CFG, SDM660_SLAVE_CSI_PHY_CFG, SDM660_SLAVE_DISPLAY_THROTTLE_CFG, SDM660_SLAVE_DISPLAY_CFG, SDM660_SLAVE_MMSS_CLK_CFG, SDM660_SLAVE_MNOC_MPU_CFG, SDM660_SLAVE_MISC_CFG, SDM660_SLAVE_MMSS_CLK_XPU_CFG);
+DEFINE_QNODE(mas_cnoc_mnoc_cfg, SDM660_MASTER_CNOC_MNOC_CFG, 4, 5, -1, true, -1, 0, -1, SDM660_SLAVE_SRVC_MNOC);
+DEFINE_QNODE(mas_cpp, SDM660_MASTER_CPP, 16, 115, -1, true, NOC_QOS_MODE_BYPASS, 0, 4, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_jpeg, SDM660_MASTER_JPEG, 16, 7, -1, true, NOC_QOS_MODE_BYPASS, 0, 6, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_mdp_p0, SDM660_MASTER_MDP_P0, 16, 8, -1, true, NOC_QOS_MODE_BYPASS, 0, 0, SDM660_SLAVE_MNOC_BIMC); /* vrail-comp???? */
+DEFINE_QNODE(mas_mdp_p1, SDM660_MASTER_MDP_P1, 16, 61, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_MNOC_BIMC); /* vrail-comp??? */
+DEFINE_QNODE(mas_venus, SDM660_MASTER_VENUS, 16, 9, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_vfe, SDM660_MASTER_VFE, 16, 11, -1, true, NOC_QOS_MODE_BYPASS, 0, 5, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_qdss_etr, SDM660_MASTER_QDSS_ETR, 8, 31, -1, true, NOC_QOS_MODE_FIXED, 1, 1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IMEM, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_SNOC_BIMC);
+DEFINE_QNODE(mas_qdss_bam, SDM660_MASTER_QDSS_BAM, 4, 19, -1, true, NOC_QOS_MODE_FIXED, 1, 0, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IMEM, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_SNOC_BIMC);
+DEFINE_QNODE(mas_snoc_cfg, SDM660_MASTER_SNOC_CFG, 4, 20, -1, false, -1, 0, -1, SDM660_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_bimc_snoc, SDM660_MASTER_BIMC_SNOC, 8, 21, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
+DEFINE_QNODE(mas_gnoc_snoc, SDM660_MASTER_GNOC_SNOC, 8, 150, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
+DEFINE_QNODE(mas_a2noc_snoc, SDM660_MASTER_A2NOC_SNOC, 16, 112, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_SNOC_BIMC, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
+DEFINE_QNODE(slv_a2noc_snoc, SDM660_SLAVE_A2NOC_SNOC, 16, -1, 143, false, -1, 0, -1, SDM660_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(slv_ebi, SDM660_SLAVE_EBI, 4, -1, 0, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_hmss_l3, SDM660_SLAVE_HMSS_L3, 4, -1, 160, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_bimc_snoc, SDM660_SLAVE_BIMC_SNOC, 4, -1, 2, false, -1, 0, -1, SDM660_MASTER_BIMC_SNOC);
+DEFINE_QNODE(slv_cnoc_a2noc, SDM660_SLAVE_CNOC_A2NOC, 8, -1, 208, true, -1, 0, -1, SDM660_MASTER_CNOC_A2NOC);
+DEFINE_QNODE(slv_mpm, SDM660_SLAVE_MPM, 4, -1, 62, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, SDM660_SLAVE_PMIC_ARB, 4, -1, 59, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tlmm_north, SDM660_SLAVE_TLMM_NORTH, 8, -1, 214, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tcsr, SDM660_SLAVE_TCSR, 4, -1, 50, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pimem_cfg, SDM660_SLAVE_PIMEM_CFG, 4, -1, 167, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_imem_cfg, SDM660_SLAVE_IMEM_CFG, 4, -1, 54, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_message_ram, SDM660_SLAVE_MESSAGE_RAM, 4, -1, 55, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_glm, SDM660_SLAVE_GLM, 4, -1, 209, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mss_cfg, SDM660_SLAVE_MSS_CFG, 4, -1, 48, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tlmm_south, SDM660_SLAVE_TLMM_SOUTH, 4, -1, 217, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_ufs_cfg, SDM660_SLAVE_UFS_CFG, 4, -1, 92, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_a2noc_cfg, SDM660_SLAVE_A2NOC_CFG, 4, -1, 150, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_a2noc_smmu_cfg, SDM660_SLAVE_A2NOC_SMMU_CFG, 8, -1, 152, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_gpuss_cfg, SDM660_SLAVE_GPUSS_CFG, 8, -1, 11, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_ahb2phy, SDM660_SLAVE_AHB2PHY, 4, -1, 163, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_blsp_1, SDM660_SLAVE_BLSP_1, 4, -1, 39, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_sdcc_1, SDM660_SLAVE_SDCC_1, 4, -1, 31, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_sdcc_2, SDM660_SLAVE_SDCC_2, 4, -1, 33, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tlmm_center, SDM660_SLAVE_TLMM_CENTER, 4, -1, 218, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_blsp_2, SDM660_SLAVE_BLSP_2, 4, -1, 37, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pdm, SDM660_SLAVE_PDM, 4, -1, 41, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, 8, -1, 58, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_MMSS_CFG);
+DEFINE_QNODE(slv_usb_hs, SDM660_SLAVE_USB_HS, 4, -1, 40, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_usb3_0, SDM660_SLAVE_USB3_0, 4, -1, 22, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_srvc_cnoc, SDM660_SLAVE_SRVC_CNOC, 4, -1, 76, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_gnoc_bimc, SDM660_SLAVE_GNOC_BIMC, 16, -1, 210, true, -1, 0, -1, SDM660_MASTER_GNOC_BIMC);
+DEFINE_QNODE(slv_gnoc_snoc, SDM660_SLAVE_GNOC_SNOC, 8, -1, 211, true, -1, 0, -1, SDM660_MASTER_GNOC_SNOC);
+DEFINE_QNODE(slv_camera_cfg, SDM660_SLAVE_CAMERA_CFG, 4, -1, 3, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_camera_throttle_cfg, SDM660_SLAVE_CAMERA_THROTTLE_CFG, 4, -1, 154, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_misc_cfg, SDM660_SLAVE_MISC_CFG, 4, -1, 8, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_venus_throttle_cfg, SDM660_SLAVE_VENUS_THROTTLE_CFG, 4, -1, 178, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_venus_cfg, SDM660_SLAVE_VENUS_CFG, 4, -1, 10, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mmss_clk_xpu_cfg, SDM660_SLAVE_MMSS_CLK_XPU_CFG, 4, -1, 13, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mmss_clk_cfg, SDM660_SLAVE_MMSS_CLK_CFG, 4, -1, 12, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mnoc_mpu_cfg, SDM660_SLAVE_MNOC_MPU_CFG, 4, -1, 14, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_display_cfg, SDM660_SLAVE_DISPLAY_CFG, 4, -1, 4, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_csi_phy_cfg, SDM660_SLAVE_CSI_PHY_CFG, 4, -1, 224, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_display_throttle_cfg, SDM660_SLAVE_DISPLAY_THROTTLE_CFG, 4, -1, 156, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_smmu_cfg, SDM660_SLAVE_SMMU_CFG, 8, -1, 205, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mnoc_bimc, SDM660_SLAVE_MNOC_BIMC, 16, -1, 16, true, -1, 0, -1, SDM660_MASTER_MNOC_BIMC);
+DEFINE_QNODE(slv_srvc_mnoc, SDM660_SLAVE_SRVC_MNOC, 8, -1, 17, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_hmss, SDM660_SLAVE_HMSS, 8, -1, 20, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_lpass, SDM660_SLAVE_LPASS, 4, -1, 21, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_wlan, SDM660_SLAVE_WLAN, 4, -1, 206, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_cdsp, SDM660_SLAVE_CDSP, 4, -1, 221, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_ipa, SDM660_SLAVE_IPA, 4, -1, 183, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_snoc_bimc, SDM660_SLAVE_SNOC_BIMC, 16, -1, 24, false, -1, 0, -1, SDM660_MASTER_SNOC_BIMC);
+DEFINE_QNODE(slv_snoc_cnoc, SDM660_SLAVE_SNOC_CNOC, 8, -1, 25, false, -1, 0, -1, SDM660_MASTER_SNOC_CNOC);
+DEFINE_QNODE(slv_imem, SDM660_SLAVE_IMEM, 8, -1, 26, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pimem, SDM660_SLAVE_PIMEM, 8, -1, 166, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_qdss_stm, SDM660_SLAVE_QDSS_STM, 4, -1, 30, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_srvc_snoc, SDM660_SLAVE_SRVC_SNOC, 16, -1, 29, false, -1, 0, -1, 0);
+
+static struct qcom_icc_node *sdm660_a2noc_nodes[] = {
+ [MASTER_IPA] = &mas_ipa,
+ [MASTER_CNOC_A2NOC] = &mas_cnoc_a2noc,
+ [MASTER_SDCC_1] = &mas_sdcc_1,
+ [MASTER_SDCC_2] = &mas_sdcc_2,
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_BLSP_2] = &mas_blsp_2,
+ [MASTER_UFS] = &mas_ufs,
+ [MASTER_USB_HS] = &mas_usb_hs,
+ [MASTER_USB3] = &mas_usb3,
+ [MASTER_CRYPTO_C0] = &mas_crypto,
+ [SLAVE_A2NOC_SNOC] = &slv_a2noc_snoc,
+};
+
+static const struct regmap_config sdm660_a2noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc sdm660_a2noc = {
+ .nodes = sdm660_a2noc_nodes,
+ .num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
+ .regmap_cfg = &sdm660_a2noc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_bimc_nodes[] = {
+ [MASTER_GNOC_BIMC] = &mas_gnoc_bimc,
+ [MASTER_OXILI] = &mas_oxili,
+ [MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
+ [MASTER_SNOC_BIMC] = &mas_snoc_bimc,
+ [MASTER_PIMEM] = &mas_pimem,
+ [SLAVE_EBI] = &slv_ebi,
+ [SLAVE_HMSS_L3] = &slv_hmss_l3,
+ [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config sdm660_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc sdm660_bimc = {
+ .nodes = sdm660_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
+ .regmap_cfg = &sdm660_bimc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_cnoc_nodes[] = {
+ [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
+ [MASTER_QDSS_DAP] = &mas_qdss_dap,
+ [SLAVE_CNOC_A2NOC] = &slv_cnoc_a2noc,
+ [SLAVE_MPM] = &slv_mpm,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_TLMM_NORTH] = &slv_tlmm_north,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+ [SLAVE_GLM] = &slv_glm,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_SPDM] = &slv_spdm,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_QM_CFG] = &slv_qm_cfg,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_MSS_CFG] = &slv_mss_cfg,
+ [SLAVE_TLMM_SOUTH] = &slv_tlmm_south,
+ [SLAVE_UFS_CFG] = &slv_ufs_cfg,
+ [SLAVE_A2NOC_CFG] = &slv_a2noc_cfg,
+ [SLAVE_A2NOC_SMMU_CFG] = &slv_a2noc_smmu_cfg,
+ [SLAVE_GPUSS_CFG] = &slv_gpuss_cfg,
+ [SLAVE_AHB2PHY] = &slv_ahb2phy,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_TLMM_CENTER] = &slv_tlmm_center,
+ [SLAVE_BLSP_2] = &slv_blsp_2,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
+ [SLAVE_USB_HS] = &slv_usb_hs,
+ [SLAVE_USB3_0] = &slv_usb3_0,
+ [SLAVE_SRVC_CNOC] = &slv_srvc_cnoc,
+};
+
+static const struct regmap_config sdm660_cnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc sdm660_cnoc = {
+ .nodes = sdm660_cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
+ .regmap_cfg = &sdm660_cnoc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_gnoc_nodes[] = {
+ [MASTER_APSS_PROC] = &mas_apss_proc,
+ [SLAVE_GNOC_BIMC] = &slv_gnoc_bimc,
+ [SLAVE_GNOC_SNOC] = &slv_gnoc_snoc,
+};
+
+static const struct regmap_config sdm660_gnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc sdm660_gnoc = {
+ .nodes = sdm660_gnoc_nodes,
+ .num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
+ .regmap_cfg = &sdm660_gnoc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_mnoc_nodes[] = {
+ [MASTER_CPP] = &mas_cpp,
+ [MASTER_JPEG] = &mas_jpeg,
+ [MASTER_MDP_P0] = &mas_mdp_p0,
+ [MASTER_MDP_P1] = &mas_mdp_p1,
+ [MASTER_VENUS] = &mas_venus,
+ [MASTER_VFE] = &mas_vfe,
+ [MASTER_CNOC_MNOC_MMSS_CFG] = &mas_cnoc_mnoc_mmss_cfg,
+ [MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CAMERA_THROTTLE_CFG] = &slv_camera_throttle_cfg,
+ [SLAVE_MISC_CFG] = &slv_misc_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SLAVE_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
+ [SLAVE_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
+ [SLAVE_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_CSI_PHY_CFG] = &slv_csi_phy_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
+ [SLAVE_SMMU_CFG] = &slv_smmu_cfg,
+ [SLAVE_SRVC_MNOC] = &slv_srvc_mnoc,
+ [SLAVE_MNOC_BIMC] = &slv_mnoc_bimc,
+};
+
+static const struct regmap_config sdm660_mnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc sdm660_mnoc = {
+ .nodes = sdm660_mnoc_nodes,
+ .num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
+ .regmap_cfg = &sdm660_mnoc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_snoc_nodes[] = {
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
+ [MASTER_A2NOC_SNOC] = &mas_a2noc_snoc,
+ [MASTER_GNOC_SNOC] = &mas_gnoc_snoc,
+ [SLAVE_HMSS] = &slv_hmss,
+ [SLAVE_LPASS] = &slv_lpass,
+ [SLAVE_WLAN] = &slv_wlan,
+ [SLAVE_CDSP] = &slv_cdsp,
+ [SLAVE_IPA] = &slv_ipa,
+ [SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
+ [SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_PIMEM] = &slv_pimem,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
+};
+
+static const struct regmap_config sdm660_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc sdm660_snoc = {
+ .nodes = sdm660_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
+ .regmap_cfg = &sdm660_snoc_regmap_config,
+};
+
+static int qcom_icc_bimc_set_qos_health(struct regmap *rmap,
+ struct qcom_icc_qos *qos,
+ int regnum)
+{
+ u32 val;
+ u32 mask;
+
+ val = qos->prio_level;
+ mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
+
+ val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
+ mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
+
+ /* LIMITCMDS is not present on M_BKE_HEALTH_3 */
+ if (regnum != 3) {
+ val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
+ mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
+ }
+
+ return regmap_update_bits(rmap,
+ M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
+ mask, val);
+}
+
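For reference, regmap_update_bits() is a read-modify-write helper, so the
health-register update above only touches the bits covered by the mask. A
rough equivalent sketch (error handling elided, illustrative only):

    unsigned int cur;

    /* read the current register value */
    regmap_read(rmap, M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port), &cur);
    /* replace only the bits selected by 'mask' */
    cur = (cur & ~mask) | (val & mask);
    regmap_write(rmap, M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port), cur);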
+static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw,
+ bool bypass_mode)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ u32 mode = NOC_QOS_MODE_BYPASS;
+ u32 val = 0;
+ int i, rc = 0;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ if (qn->qos.qos_mode != -1)
+ mode = qn->qos.qos_mode;
+
+ /*
+ * QoS Priority: the QoS health parameters are only taken into
+ * account when we are NOT in bypass mode.
+ */
+ if (mode != NOC_QOS_MODE_BYPASS) {
+ for (i = 3; i >= 0; i--) {
+ rc = qcom_icc_bimc_set_qos_health(qp->regmap,
+ &qn->qos, i);
+ if (rc)
+ return rc;
+ }
+
+ /* Set BKE_EN to 1 when in Fixed, Regulator or Limiter mode */
+ val = 1;
+ }
+
+ return regmap_update_bits(qp->regmap, M_BKE_EN_ADDR(qn->qos.qos_port),
+ M_BKE_EN_EN_BMASK, val);
+}
+
+static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
+ struct qcom_icc_qos *qos)
+{
+ u32 val;
+ int rc;
+
+ /* Must be updated one at a time, P1 first, P0 last */
+ val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
+ rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
+ NOC_QOS_PRIORITY_MASK, val);
+ if (rc)
+ return rc;
+
+ val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
+ return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
+ NOC_QOS_PRIORITY_MASK, val);
+}
+
+static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ u32 mode = NOC_QOS_MODE_BYPASS;
+ int rc = 0;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ if (qn->qos.qos_port < 0) {
+ dev_dbg(src->provider->dev,
+ "NoC QoS: Skipping %s: vote aggregated on parent.\n",
+ qn->name);
+ return 0;
+ }
+
+ if (qn->qos.qos_mode != -1)
+ mode = qn->qos.qos_mode;
+
+ if (mode == NOC_QOS_MODE_FIXED) {
+ dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
+ qn->name);
+ rc = qcom_icc_noc_set_qos_priority(qp->regmap, &qn->qos);
+ if (rc)
+ return rc;
+ } else if (mode == NOC_QOS_MODE_BYPASS) {
+ dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
+ qn->name);
+ }
+
+ return regmap_update_bits(qp->regmap,
+ NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
+ NOC_QOS_MODEn_MASK, mode);
+}
+
+static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
+{
+ struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
+ struct qcom_icc_node *qn = node->data;
+
+ dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
+
+ if (qp->is_bimc_node)
+ return qcom_icc_set_bimc_qos(node, sum_bw,
+ (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS));
+
+ return qcom_icc_set_noc_qos(node, sum_bw);
+}
+
+static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
+{
+ int ret = 0;
+
+ if (mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_MASTER_REQ,
+ mas_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ mas_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ if (slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_SLAVE_REQ,
+ slv_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
+ slv_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
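Each node carries independent master- and slave-side RPM ids, where -1
means "no vote on that side". Illustrative cases (comment sketch, the id
values are made up, not taken from this patch):

    /*
     * mas_rpm_id = 1,  slv_rpm_id = -1: a single RPM_BUS_MASTER_REQ
     *     vote is sent for the master port, no slave vote;
     * mas_rpm_id = -1, slv_rpm_id = 24: only an RPM_BUS_SLAVE_REQ
     *     vote is sent.
     */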
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ u64 sum_bw;
+ u64 max_peak_bw;
+ u64 rate;
+ u32 agg_avg = 0;
+ u32 agg_peak = 0;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ if (!qn->qos.ap_owned) {
+ /* send bandwidth request message to the RPM processor */
+ ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
+ if (ret)
+ return ret;
+ } else if (qn->qos.qos_mode != -1) {
+ /* set bandwidth directly from the AP */
+ ret = qcom_icc_qos_set(src, sum_bw);
+ if (ret)
+ return ret;
+ }
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, qn->buswidth);
+
+ if (qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ pr_err("%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ return ret;
+ }
+ }
+
+ qn->rate = rate;
+
+ return 0;
+}
+
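A worked example of the rate computation above, assuming the usual icc-rpm
definition of icc_units_to_bps() (multiply the kB/s vote by 1000):

    /*
     * aggregate vote: 1,000,000 (kB/s) -> 1,000,000,000 bytes/s
     * 16-byte-wide port: 1,000,000,000 / 16 = 62,500,000
     * -> clk_set_rate() is asked for 62.5 MHz on the bus clocks
     */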
+static int qnoc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ struct resource *res;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,sdm660-mnoc")) {
+ qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks,
+ sizeof(bus_mm_clocks), GFP_KERNEL);
+ qp->num_clks = ARRAY_SIZE(bus_mm_clocks);
+ } else {
+ if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc"))
+ qp->is_bimc_node = true;
+
+ qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
+ GFP_KERNEL);
+ qp->num_clks = ARRAY_SIZE(bus_clocks);
+ }
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ qp->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qp->mmio)) {
+ dev_err(dev, "Cannot ioremap interconnect bus resource\n");
+ return PTR_ERR(qp->mmio);
+ }
+
+ qp->regmap = devm_regmap_init_mmio(dev, qp->mmio, desc->regmap_cfg);
+ if (IS_ERR(qp->regmap)) {
+ dev_err(dev, "Cannot regmap interconnect bus resource\n");
+ return PTR_ERR(qp->regmap);
+ }
+
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ icc_provider_del(provider);
+
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id sdm660_noc_of_match[] = {
+ { .compatible = "qcom,sdm660-a2noc", .data = &sdm660_a2noc },
+ { .compatible = "qcom,sdm660-bimc", .data = &sdm660_bimc },
+ { .compatible = "qcom,sdm660-cnoc", .data = &sdm660_cnoc },
+ { .compatible = "qcom,sdm660-gnoc", .data = &sdm660_gnoc },
+ { .compatible = "qcom,sdm660-mnoc", .data = &sdm660_mnoc },
+ { .compatible = "qcom,sdm660-snoc", .data = &sdm660_snoc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sdm660_noc_of_match);
+
+static struct platform_driver sdm660_noc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sdm660",
+ .of_match_table = sdm660_noc_of_match,
+ },
+};
+module_platform_driver(sdm660_noc_driver);
+MODULE_DESCRIPTION("Qualcomm sdm660 NoC driver");
+MODULE_LICENSE("GPL v2");
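A consumer driver votes bandwidth across providers like this one through
the generic interconnect API; a hypothetical sketch follows (the path name
and bandwidth figures are illustrative, not taken from this patch):

    #include <linux/interconnect.h>

    static int example_request_bw(struct device *dev)
    {
            struct icc_path *path;
            int ret;

            /* resolved via the consumer's "interconnects" DT property */
            path = of_icc_get(dev, "sdhc-mem");
            if (IS_ERR(path))
                    return PTR_ERR(path);

            /* average 100 MB/s, peak 200 MB/s (arguments in kB/s) */
            ret = icc_set_bw(path, kBps_to_icc(100000),
                             kBps_to_icc(200000));

            icc_put(path);  /* this sketch drops the path again */
            return ret;
    }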
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
new file mode 100644
index 000000000000..579b6ce8e046
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ *
+ */
+
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <dt-bindings/interconnect/qcom,sm8350.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm8350.h"
+
+DEFINE_QNODE(qhm_qspi, SM8350_MASTER_QSPI_0, 1, 4, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qup0, SM8350_MASTER_QUP_0, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup1, SM8350_MASTER_QUP_1, 1, 4, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, SM8350_MASTER_QUP_2, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_a1noc_cfg, SM8350_MASTER_A1NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(xm_sdc4, SM8350_MASTER_SDCC_4, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, SM8350_MASTER_UFS_MEM, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, SM8350_MASTER_USB3_0, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, SM8350_MASTER_USB3_1, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qdss_bam, SM8350_MASTER_QDSS_BAM, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_a2noc_cfg, SM8350_MASTER_A2NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qxm_crypto, SM8350_MASTER_CRYPTO, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, SM8350_MASTER_IPA, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_0, SM8350_MASTER_PCIE_0, 1, 8, SM8350_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_pcie3_1, SM8350_MASTER_PCIE_1, 1, 8, SM8350_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_qdss_etr, SM8350_MASTER_QDSS_ETR, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, SM8350_MASTER_SDCC_2, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, SM8350_MASTER_UFS_CARD, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_gemnoc_cnoc, SM8350_MASTER_GEM_NOC_CNOC, 1, 16, SM8350_SLAVE_AHB2PHY_SOUTH, SM8350_SLAVE_AHB2PHY_NORTH, SM8350_SLAVE_AOSS, SM8350_SLAVE_APPSS, SM8350_SLAVE_CAMERA_CFG, SM8350_SLAVE_CLK_CTL, SM8350_SLAVE_CDSP_CFG, SM8350_SLAVE_RBCPR_CX_CFG, SM8350_SLAVE_RBCPR_MMCX_CFG, SM8350_SLAVE_RBCPR_MX_CFG, SM8350_SLAVE_CRYPTO_0_CFG, SM8350_SLAVE_CX_RDPM, SM8350_SLAVE_DCC_CFG, SM8350_SLAVE_DISPLAY_CFG, SM8350_SLAVE_GFX3D_CFG, SM8350_SLAVE_HWKM, SM8350_SLAVE_IMEM_CFG, SM8350_SLAVE_IPA_CFG, SM8350_SLAVE_IPC_ROUTER_CFG, SM8350_SLAVE_LPASS, SM8350_SLAVE_CNOC_MSS, SM8350_SLAVE_MX_RDPM, SM8350_SLAVE_PCIE_0_CFG, SM8350_SLAVE_PCIE_1_CFG, SM8350_SLAVE_PDM, SM8350_SLAVE_PIMEM_CFG, SM8350_SLAVE_PKA_WRAPPER_CFG, SM8350_SLAVE_PMU_WRAPPER_CFG, SM8350_SLAVE_QDSS_CFG, SM8350_SLAVE_QSPI_0, SM8350_SLAVE_QUP_0, SM8350_SLAVE_QUP_1, SM8350_SLAVE_QUP_2, SM8350_SLAVE_SDCC_2, SM8350_SLAVE_SDCC_4, SM8350_SLAVE_SECURITY, SM8350_SLAVE_SPSS_CFG, SM8350_SLAVE_TCSR, SM8350_SLAVE_TLMM, SM8350_SLAVE_UFS_CARD_CFG, SM8350_SLAVE_UFS_MEM_CFG, SM8350_SLAVE_USB3_0, SM8350_SLAVE_USB3_1, SM8350_SLAVE_VENUS_CFG, SM8350_SLAVE_VSENSE_CTRL_CFG, SM8350_SLAVE_A1NOC_CFG, SM8350_SLAVE_A2NOC_CFG, SM8350_SLAVE_DDRSS_CFG, SM8350_SLAVE_CNOC_MNOC_CFG, SM8350_SLAVE_SNOC_CFG, SM8350_SLAVE_BOOT_IMEM, SM8350_SLAVE_IMEM, SM8350_SLAVE_PIMEM, SM8350_SLAVE_SERVICE_CNOC, SM8350_SLAVE_QDSS_STM, SM8350_SLAVE_TCU);
+DEFINE_QNODE(qnm_gemnoc_pcie, SM8350_MASTER_GEM_NOC_PCIE_SNOC, 1, 8, SM8350_SLAVE_PCIE_0, SM8350_SLAVE_PCIE_1);
+DEFINE_QNODE(xm_qdss_dap, SM8350_MASTER_QDSS_DAP, 1, 8, SM8350_SLAVE_AHB2PHY_SOUTH, SM8350_SLAVE_AHB2PHY_NORTH, SM8350_SLAVE_AOSS, SM8350_SLAVE_APPSS, SM8350_SLAVE_CAMERA_CFG, SM8350_SLAVE_CLK_CTL, SM8350_SLAVE_CDSP_CFG, SM8350_SLAVE_RBCPR_CX_CFG, SM8350_SLAVE_RBCPR_MMCX_CFG, SM8350_SLAVE_RBCPR_MX_CFG, SM8350_SLAVE_CRYPTO_0_CFG, SM8350_SLAVE_CX_RDPM, SM8350_SLAVE_DCC_CFG, SM8350_SLAVE_DISPLAY_CFG, SM8350_SLAVE_GFX3D_CFG, SM8350_SLAVE_HWKM, SM8350_SLAVE_IMEM_CFG, SM8350_SLAVE_IPA_CFG, SM8350_SLAVE_IPC_ROUTER_CFG, SM8350_SLAVE_LPASS, SM8350_SLAVE_CNOC_MSS, SM8350_SLAVE_MX_RDPM, SM8350_SLAVE_PCIE_0_CFG, SM8350_SLAVE_PCIE_1_CFG, SM8350_SLAVE_PDM, SM8350_SLAVE_PIMEM_CFG, SM8350_SLAVE_PKA_WRAPPER_CFG, SM8350_SLAVE_PMU_WRAPPER_CFG, SM8350_SLAVE_QDSS_CFG, SM8350_SLAVE_QSPI_0, SM8350_SLAVE_QUP_0, SM8350_SLAVE_QUP_1, SM8350_SLAVE_QUP_2, SM8350_SLAVE_SDCC_2, SM8350_SLAVE_SDCC_4, SM8350_SLAVE_SECURITY, SM8350_SLAVE_SPSS_CFG, SM8350_SLAVE_TCSR, SM8350_SLAVE_TLMM, SM8350_SLAVE_UFS_CARD_CFG, SM8350_SLAVE_UFS_MEM_CFG, SM8350_SLAVE_USB3_0, SM8350_SLAVE_USB3_1, SM8350_SLAVE_VENUS_CFG, SM8350_SLAVE_VSENSE_CTRL_CFG, SM8350_SLAVE_A1NOC_CFG, SM8350_SLAVE_A2NOC_CFG, SM8350_SLAVE_DDRSS_CFG, SM8350_SLAVE_CNOC_MNOC_CFG, SM8350_SLAVE_SNOC_CFG, SM8350_SLAVE_BOOT_IMEM, SM8350_SLAVE_IMEM, SM8350_SLAVE_PIMEM, SM8350_SLAVE_SERVICE_CNOC, SM8350_SLAVE_QDSS_STM, SM8350_SLAVE_TCU);
+DEFINE_QNODE(qnm_cnoc_dc_noc, SM8350_MASTER_CNOC_DC_NOC, 1, 4, SM8350_SLAVE_LLCC_CFG, SM8350_SLAVE_GEM_NOC_CFG);
+DEFINE_QNODE(alm_gpu_tcu, SM8350_MASTER_GPU_TCU, 1, 8, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(alm_sys_tcu, SM8350_MASTER_SYS_TCU, 1, 8, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(chm_apps, SM8350_MASTER_APPSS_PROC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC, SM8350_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qnm_cmpnoc, SM8350_MASTER_COMPUTE_NOC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_gemnoc_cfg, SM8350_MASTER_GEM_NOC_CFG, 1, 4, SM8350_SLAVE_MSS_PROC_MS_MPU_CFG, SM8350_SLAVE_MCDMA_MS_MPU_CFG, SM8350_SLAVE_SERVICE_GEM_NOC_1, SM8350_SLAVE_SERVICE_GEM_NOC_2, SM8350_SLAVE_SERVICE_GEM_NOC);
+DEFINE_QNODE(qnm_gpu, SM8350_MASTER_GFX3D, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, SM8350_MASTER_MNOC_HF_MEM_NOC, 2, 32, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SM8350_MASTER_MNOC_SF_MEM_NOC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_pcie, SM8350_MASTER_ANOC_PCIE_GEM_NOC, 1, 16, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_gc, SM8350_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SM8350_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC, SM8350_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_config_noc, SM8350_MASTER_CNOC_LPASS_AG_NOC, 1, 4, SM8350_SLAVE_LPASS_CORE_CFG, SM8350_SLAVE_LPASS_LPI_CFG, SM8350_SLAVE_LPASS_MPU_CFG, SM8350_SLAVE_LPASS_TOP_CFG, SM8350_SLAVE_SERVICES_LPASS_AML_NOC, SM8350_SLAVE_SERVICE_LPASS_AG_NOC);
+DEFINE_QNODE(llcc_mc, SM8350_MASTER_LLCC, 4, 4, SM8350_SLAVE_EBI1);
+DEFINE_QNODE(qnm_camnoc_hf, SM8350_MASTER_CAMNOC_HF, 2, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qnm_camnoc_icp, SM8350_MASTER_CAMNOC_ICP, 1, 8, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_camnoc_sf, SM8350_MASTER_CAMNOC_SF, 2, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_mnoc_cfg, SM8350_MASTER_CNOC_MNOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qnm_video0, SM8350_MASTER_VIDEO_P0, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video1, SM8350_MASTER_VIDEO_P1, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video_cvp, SM8350_MASTER_VIDEO_PROC, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SM8350_MASTER_MDP0, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, SM8350_MASTER_MDP1, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SM8350_MASTER_ROTATOR, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_nsp_noc_config, SM8350_MASTER_CDSP_NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_NSP_NOC);
+DEFINE_QNODE(qxm_nsp, SM8350_MASTER_CDSP_PROC, 2, 32, SM8350_SLAVE_CDSP_MEM_NOC);
+DEFINE_QNODE(qnm_aggre1_noc, SM8350_MASTER_A1NOC_SNOC, 1, 16, SM8350_SLAVE_SNOC_GEM_NOC_SF);
+DEFINE_QNODE(qnm_aggre2_noc, SM8350_MASTER_A2NOC_SNOC, 1, 16, SM8350_SLAVE_SNOC_GEM_NOC_SF);
+DEFINE_QNODE(qnm_snoc_cfg, SM8350_MASTER_SNOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qxm_pimem, SM8350_MASTER_PIMEM, 1, 8, SM8350_SLAVE_SNOC_GEM_NOC_GC);
+DEFINE_QNODE(xm_gic, SM8350_MASTER_GIC, 1, 8, SM8350_SLAVE_SNOC_GEM_NOC_GC);
+DEFINE_QNODE(qnm_mnoc_hf_disp, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_SLAVE_LLCC_DISP);
+DEFINE_QNODE(qnm_mnoc_sf_disp, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_SLAVE_LLCC_DISP);
+DEFINE_QNODE(llcc_mc_disp, SM8350_MASTER_LLCC_DISP, 4, 4, SM8350_SLAVE_EBI1_DISP);
+DEFINE_QNODE(qxm_mdp0_disp, SM8350_MASTER_MDP0_DISP, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP);
+DEFINE_QNODE(qxm_mdp1_disp, SM8350_MASTER_MDP1_DISP, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP);
+DEFINE_QNODE(qxm_rot_disp, SM8350_MASTER_ROTATOR_DISP, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP);
+DEFINE_QNODE(qns_a1noc_snoc, SM8350_SLAVE_A1NOC_SNOC, 1, 16, SM8350_MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SM8350_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SM8350_SLAVE_A2NOC_SNOC, 1, 16, SM8350_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_mem_noc, SM8350_SLAVE_ANOC_PCIE_GEM_NOC, 1, 16, SM8350_MASTER_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(srvc_aggre2_noc, SM8350_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy0, SM8350_SLAVE_AHB2PHY_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy1, SM8350_SLAVE_AHB2PHY_NORTH, 1, 4);
+DEFINE_QNODE(qhs_aoss, SM8350_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_apss, SM8350_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qhs_camera_cfg, SM8350_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SM8350_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_compute_cfg, SM8350_SLAVE_CDSP_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SM8350_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mmcx, SM8350_SLAVE_RBCPR_MMCX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mx, SM8350_SLAVE_RBCPR_MX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SM8350_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_cx_rdpm, SM8350_SLAVE_CX_RDPM, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SM8350_SLAVE_DCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_display_cfg, SM8350_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SM8350_SLAVE_GFX3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_hwkm, SM8350_SLAVE_HWKM, 1, 4);
+DEFINE_QNODE(qhs_imem_cfg, SM8350_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SM8350_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipc_router, SM8350_SLAVE_IPC_ROUTER_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_cfg, SM8350_SLAVE_LPASS, 1, 4, SM8350_MASTER_CNOC_LPASS_AG_NOC);
+DEFINE_QNODE(qhs_mss_cfg, SM8350_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_mx_rdpm, SM8350_SLAVE_MX_RDPM, 1, 4);
+DEFINE_QNODE(qhs_pcie0_cfg, SM8350_SLAVE_PCIE_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie1_cfg, SM8350_SLAVE_PCIE_1_CFG, 1, 4);
+DEFINE_QNODE(qhs_pdm, SM8350_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SM8350_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_pka_wrapper_cfg, SM8350_SLAVE_PKA_WRAPPER_CFG, 1, 4);
+DEFINE_QNODE(qhs_pmu_wrapper_cfg, SM8350_SLAVE_PMU_WRAPPER_CFG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SM8350_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qspi, SM8350_SLAVE_QSPI_0, 1, 4);
+DEFINE_QNODE(qhs_qup0, SM8350_SLAVE_QUP_0, 1, 4);
+DEFINE_QNODE(qhs_qup1, SM8350_SLAVE_QUP_1, 1, 4);
+DEFINE_QNODE(qhs_qup2, SM8350_SLAVE_QUP_2, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SM8350_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_sdc4, SM8350_SLAVE_SDCC_4, 1, 4);
+DEFINE_QNODE(qhs_security, SM8350_SLAVE_SECURITY, 1, 4);
+DEFINE_QNODE(qhs_spss_cfg, SM8350_SLAVE_SPSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SM8350_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm, SM8350_SLAVE_TLMM, 1, 4);
+DEFINE_QNODE(qhs_ufs_card_cfg, SM8350_SLAVE_UFS_CARD_CFG, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SM8350_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SM8350_SLAVE_USB3_0, 1, 4);
+DEFINE_QNODE(qhs_usb3_1, SM8350_SLAVE_USB3_1, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SM8350_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM8350_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(qns_a1_noc_cfg, SM8350_SLAVE_A1NOC_CFG, 1, 4);
+DEFINE_QNODE(qns_a2_noc_cfg, SM8350_SLAVE_A2NOC_CFG, 1, 4);
+DEFINE_QNODE(qns_ddrss_cfg, SM8350_SLAVE_DDRSS_CFG, 1, 4);
+DEFINE_QNODE(qns_mnoc_cfg, SM8350_SLAVE_CNOC_MNOC_CFG, 1, 4);
+DEFINE_QNODE(qns_snoc_cfg, SM8350_SLAVE_SNOC_CFG, 1, 4);
+DEFINE_QNODE(qxs_boot_imem, SM8350_SLAVE_BOOT_IMEM, 1, 8);
+DEFINE_QNODE(qxs_imem, SM8350_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SM8350_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_cnoc, SM8350_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(xs_pcie_0, SM8350_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_pcie_1, SM8350_SLAVE_PCIE_1, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SM8350_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SM8350_SLAVE_TCU, 1, 8);
+DEFINE_QNODE(qhs_llcc, SM8350_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qns_gemnoc, SM8350_SLAVE_GEM_NOC_CFG, 1, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM8350_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_modem_ms_mpu_cfg, SM8350_SLAVE_MCDMA_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_gem_noc_cnoc, SM8350_SLAVE_GEM_NOC_CNOC, 1, 16, SM8350_MASTER_GEM_NOC_CNOC);
+DEFINE_QNODE(qns_llcc, SM8350_SLAVE_LLCC, 4, 16, SM8350_MASTER_LLCC);
+DEFINE_QNODE(qns_pcie, SM8350_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8);
+DEFINE_QNODE(srvc_even_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
+DEFINE_QNODE(srvc_odd_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC_2, 1, 4);
+DEFINE_QNODE(srvc_sys_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC, 1, 4);
+DEFINE_QNODE(qhs_lpass_core, SM8350_SLAVE_LPASS_CORE_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_lpi, SM8350_SLAVE_LPASS_LPI_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_mpu, SM8350_SLAVE_LPASS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_top, SM8350_SLAVE_LPASS_TOP_CFG, 1, 4);
+DEFINE_QNODE(srvc_niu_aml_noc, SM8350_SLAVE_SERVICES_LPASS_AML_NOC, 1, 4);
+DEFINE_QNODE(srvc_niu_lpass_agnoc, SM8350_SLAVE_SERVICE_LPASS_AG_NOC, 1, 4);
+DEFINE_QNODE(ebi, SM8350_SLAVE_EBI1, 4, 4);
+DEFINE_QNODE(qns_mem_noc_hf, SM8350_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_sf, SM8350_SLAVE_MNOC_SF_MEM_NOC, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SM8350_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qns_nsp_gemnoc, SM8350_SLAVE_CDSP_MEM_NOC, 2, 32, SM8350_MASTER_COMPUTE_NOC);
+DEFINE_QNODE(service_nsp_noc, SM8350_SLAVE_SERVICE_NSP_NOC, 1, 4);
+DEFINE_QNODE(qns_gemnoc_gc, SM8350_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM8350_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_gemnoc_sf, SM8350_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM8350_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_snoc, SM8350_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(qns_llcc_disp, SM8350_SLAVE_LLCC_DISP, 4, 16, SM8350_MASTER_LLCC_DISP);
+DEFINE_QNODE(ebi_disp, SM8350_SLAVE_EBI1_DISP, 4, 4);
+DEFINE_QNODE(qns_mem_noc_hf_disp, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP);
+DEFINE_QNODE(qns_mem_noc_sf_disp, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP);
+
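The DEFINE_QNODE() instances above come from icc-rpmh.h; the macro expands
roughly as follows (an approximation of that header, quoted from memory
rather than from this patch):

    #define DEFINE_QNODE(_name, _id, _channels, _buswidth, ...)        \
            static struct qcom_icc_node _name = {                      \
                    .id = _id,                                         \
                    .name = #_name,                                    \
                    .channels = _channels,                             \
                    .buswidth = _buswidth,                             \
                    .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
                    .links = { __VA_ARGS__ },                          \
            }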
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie);
+DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_qdss_dap, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_apss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_cfg, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_hwkm, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_mss_cfg, &qhs_mx_rdpm, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pimem_cfg, &qhs_pka_wrapper_cfg, &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_security, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg, &qns_a2_noc_cfg, &qns_ddrss_cfg, &qns_mnoc_cfg, &qns_snoc_cfg, &srvc_cnoc);
+DEFINE_QBCM(bcm_cn2, "CN2", false, &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4);
+DEFINE_QBCM(bcm_co0, "CO0", false, &qns_nsp_gemnoc);
+DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_nsp);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_mm4, "MM4", false, &qns_mem_noc_sf);
+DEFINE_QBCM(bcm_mm5, "MM5", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp, &qxm_rot);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xm_pcie3_0);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &xm_pcie3_1);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+DEFINE_QBCM(bcm_acv_disp, "ACV", false, &ebi_disp);
+DEFINE_QBCM(bcm_mc0_disp, "MC0", false, &ebi_disp);
+DEFINE_QBCM(bcm_mm0_disp, "MM0", false, &qns_mem_noc_hf_disp);
+DEFINE_QBCM(bcm_mm1_disp, "MM1", false, &qxm_mdp0_disp, &qxm_mdp1_disp);
+DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
+DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
+DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
+
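Likewise, DEFINE_QBCM() ties a list of nodes to one Bus Clock Manager (BCM)
vote; a rough expansion, under the same caveat as the node macro above:

    #define DEFINE_QBCM(_name, _bcmname, _keepalive, ...)              \
            static struct qcom_icc_bcm _name = {                       \
                    .name = _bcmname,                                  \
                    .keepalive = _keepalive,                           \
                    .num_nodes = ARRAY_SIZE(((struct qcom_icc_node *[]){ \
                                            __VA_ARGS__ })),           \
                    .nodes = { __VA_ARGS__ },                          \
            }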
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sm8350_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn14,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sm8350_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+ &bcm_cn2,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_HWKM] = &qhs_hwkm,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
+ [SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+ [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sm8350_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+};
+
+static struct qcom_icc_desc sm8350_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh4,
+ &bcm_sh0_disp,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MCDMA_MS_MPU_CFG] = &qhs_modem_ms_mpu_cfg,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+ [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+ [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
+ [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
+ [SLAVE_LLCC_DISP] = &qns_llcc_disp,
+};
+
+static struct qcom_icc_desc sm8350_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+ [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+ [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+ [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+ [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+ [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+ [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+ [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static struct qcom_icc_desc sm8350_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_acv_disp,
+ &bcm_mc0_disp,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+ [MASTER_LLCC_DISP] = &llcc_mc_disp,
+ [SLAVE_EBI1_DISP] = &ebi_disp,
+};
+
+static struct qcom_icc_desc sm8350_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm4,
+ &bcm_mm5,
+ &bcm_mm0_disp,
+ &bcm_mm1_disp,
+ &bcm_mm4_disp,
+ &bcm_mm5_disp,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+ [MASTER_VIDEO_P0] = &qnm_video0,
+ [MASTER_VIDEO_P1] = &qnm_video1,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_MDP1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [MASTER_MDP0_DISP] = &qxm_mdp0_disp,
+ [MASTER_MDP1_DISP] = &qxm_mdp1_disp,
+ [MASTER_ROTATOR_DISP] = &qxm_rot_disp,
+ [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
+ [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
+};
+
+static struct qcom_icc_desc sm8350_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+ &bcm_co0,
+ &bcm_co3,
+};
+
+static struct qcom_icc_node *nsp_noc_nodes[] = {
+ [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+ [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static struct qcom_icc_desc sm8350_compute_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn2,
+ &bcm_sn7,
+ &bcm_sn8,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static struct qcom_icc_desc sm8350_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return ret;
+
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8350-aggre1-noc", .data = &sm8350_aggre1_noc},
+ { .compatible = "qcom,sm8350-aggre2-noc", .data = &sm8350_aggre2_noc},
+ { .compatible = "qcom,sm8350-config-noc", .data = &sm8350_config_noc},
+ { .compatible = "qcom,sm8350-dc-noc", .data = &sm8350_dc_noc},
+ { .compatible = "qcom,sm8350-gem-noc", .data = &sm8350_gem_noc},
+ { .compatible = "qcom,sm8350-lpass-ag-noc", .data = &sm8350_lpass_ag_noc},
+ { .compatible = "qcom,sm8350-mc-virt", .data = &sm8350_mc_virt},
+ { .compatible = "qcom,sm8350-mmss-noc", .data = &sm8350_mmss_noc},
+ { .compatible = "qcom,sm8350-compute-noc", .data = &sm8350_compute_noc},
+ { .compatible = "qcom,sm8350-system-noc", .data = &sm8350_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sm8350",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("SM8350 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sm8350.h b/drivers/interconnect/qcom/sm8350.h
new file mode 100644
index 000000000000..328d15238a0d
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8350.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8350 interconnect IDs
+ *
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8350_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8350_H
+
+#define SM8350_MASTER_GPU_TCU 0
+#define SM8350_MASTER_SYS_TCU 1
+#define SM8350_MASTER_APPSS_PROC 2
+#define SM8350_MASTER_LLCC 3
+#define SM8350_MASTER_CNOC_LPASS_AG_NOC 4
+#define SM8350_MASTER_CDSP_NOC_CFG 5
+#define SM8350_MASTER_QDSS_BAM 6
+#define SM8350_MASTER_QSPI_0 7
+#define SM8350_MASTER_QUP_0 8
+#define SM8350_MASTER_QUP_1 9
+#define SM8350_MASTER_QUP_2 10
+#define SM8350_MASTER_A1NOC_CFG 11
+#define SM8350_MASTER_A2NOC_CFG 12
+#define SM8350_MASTER_A1NOC_SNOC 13
+#define SM8350_MASTER_A2NOC_SNOC 14
+#define SM8350_MASTER_CAMNOC_HF 15
+#define SM8350_MASTER_CAMNOC_ICP 16
+#define SM8350_MASTER_CAMNOC_SF 17
+#define SM8350_MASTER_COMPUTE_NOC 18
+#define SM8350_MASTER_CNOC_DC_NOC 19
+#define SM8350_MASTER_GEM_NOC_CFG 20
+#define SM8350_MASTER_GEM_NOC_CNOC 21
+#define SM8350_MASTER_GEM_NOC_PCIE_SNOC 22
+#define SM8350_MASTER_GFX3D 23
+#define SM8350_MASTER_CNOC_MNOC_CFG 24
+#define SM8350_MASTER_MNOC_HF_MEM_NOC 25
+#define SM8350_MASTER_MNOC_SF_MEM_NOC 26
+#define SM8350_MASTER_ANOC_PCIE_GEM_NOC 27
+#define SM8350_MASTER_SNOC_CFG 28
+#define SM8350_MASTER_SNOC_GC_MEM_NOC 29
+#define SM8350_MASTER_SNOC_SF_MEM_NOC 30
+#define SM8350_MASTER_VIDEO_P0 31
+#define SM8350_MASTER_VIDEO_P1 32
+#define SM8350_MASTER_VIDEO_PROC 33
+#define SM8350_MASTER_QUP_CORE_0 34
+#define SM8350_MASTER_QUP_CORE_1 35
+#define SM8350_MASTER_QUP_CORE_2 36
+#define SM8350_MASTER_CRYPTO 37
+#define SM8350_MASTER_IPA 38
+#define SM8350_MASTER_MDP0 39
+#define SM8350_MASTER_MDP1 40
+#define SM8350_MASTER_CDSP_PROC 41
+#define SM8350_MASTER_PIMEM 42
+#define SM8350_MASTER_ROTATOR 43
+#define SM8350_MASTER_GIC 44
+#define SM8350_MASTER_PCIE_0 45
+#define SM8350_MASTER_PCIE_1 46
+#define SM8350_MASTER_QDSS_DAP 47
+#define SM8350_MASTER_QDSS_ETR 48
+#define SM8350_MASTER_SDCC_2 49
+#define SM8350_MASTER_SDCC_4 50
+#define SM8350_MASTER_UFS_CARD 51
+#define SM8350_MASTER_UFS_MEM 52
+#define SM8350_MASTER_USB3_0 53
+#define SM8350_MASTER_USB3_1 54
+#define SM8350_SLAVE_EBI1 55
+#define SM8350_SLAVE_AHB2PHY_SOUTH 56
+#define SM8350_SLAVE_AHB2PHY_NORTH 57
+#define SM8350_SLAVE_AOSS 58
+#define SM8350_SLAVE_APPSS 59
+#define SM8350_SLAVE_CAMERA_CFG 60
+#define SM8350_SLAVE_CLK_CTL 61
+#define SM8350_SLAVE_CDSP_CFG 62
+#define SM8350_SLAVE_RBCPR_CX_CFG 63
+#define SM8350_SLAVE_RBCPR_MMCX_CFG 64
+#define SM8350_SLAVE_RBCPR_MX_CFG 65
+#define SM8350_SLAVE_CRYPTO_0_CFG 66
+#define SM8350_SLAVE_CX_RDPM 67
+#define SM8350_SLAVE_DCC_CFG 68
+#define SM8350_SLAVE_DISPLAY_CFG 69
+#define SM8350_SLAVE_GFX3D_CFG 70
+#define SM8350_SLAVE_HWKM 71
+#define SM8350_SLAVE_IMEM_CFG 72
+#define SM8350_SLAVE_IPA_CFG 73
+#define SM8350_SLAVE_IPC_ROUTER_CFG 74
+#define SM8350_SLAVE_LLCC_CFG 75
+#define SM8350_SLAVE_LPASS 76
+#define SM8350_SLAVE_LPASS_CORE_CFG 77
+#define SM8350_SLAVE_LPASS_LPI_CFG 78
+#define SM8350_SLAVE_LPASS_MPU_CFG 79
+#define SM8350_SLAVE_LPASS_TOP_CFG 80
+#define SM8350_SLAVE_MSS_PROC_MS_MPU_CFG 81
+#define SM8350_SLAVE_MCDMA_MS_MPU_CFG 82
+#define SM8350_SLAVE_CNOC_MSS 83
+#define SM8350_SLAVE_MX_RDPM 84
+#define SM8350_SLAVE_PCIE_0_CFG 85
+#define SM8350_SLAVE_PCIE_1_CFG 86
+#define SM8350_SLAVE_PDM 87
+#define SM8350_SLAVE_PIMEM_CFG 88
+#define SM8350_SLAVE_PKA_WRAPPER_CFG 89
+#define SM8350_SLAVE_PMU_WRAPPER_CFG 90
+#define SM8350_SLAVE_QDSS_CFG 91
+#define SM8350_SLAVE_QSPI_0 92
+#define SM8350_SLAVE_QUP_0 93
+#define SM8350_SLAVE_QUP_1 94
+#define SM8350_SLAVE_QUP_2 95
+#define SM8350_SLAVE_SDCC_2 96
+#define SM8350_SLAVE_SDCC_4 97
+#define SM8350_SLAVE_SECURITY 98
+#define SM8350_SLAVE_SPSS_CFG 99
+#define SM8350_SLAVE_TCSR 100
+#define SM8350_SLAVE_TLMM 101
+#define SM8350_SLAVE_UFS_CARD_CFG 102
+#define SM8350_SLAVE_UFS_MEM_CFG 103
+#define SM8350_SLAVE_USB3_0 104
+#define SM8350_SLAVE_USB3_1 105
+#define SM8350_SLAVE_VENUS_CFG 106
+#define SM8350_SLAVE_VSENSE_CTRL_CFG 107
+#define SM8350_SLAVE_A1NOC_CFG 108
+#define SM8350_SLAVE_A1NOC_SNOC 109
+#define SM8350_SLAVE_A2NOC_CFG 110
+#define SM8350_SLAVE_A2NOC_SNOC 111
+#define SM8350_SLAVE_DDRSS_CFG 112
+#define SM8350_SLAVE_GEM_NOC_CNOC 113
+#define SM8350_SLAVE_GEM_NOC_CFG 114
+#define SM8350_SLAVE_SNOC_GEM_NOC_GC 115
+#define SM8350_SLAVE_SNOC_GEM_NOC_SF 116
+#define SM8350_SLAVE_LLCC 117
+#define SM8350_SLAVE_MNOC_HF_MEM_NOC 118
+#define SM8350_SLAVE_MNOC_SF_MEM_NOC 119
+#define SM8350_SLAVE_CNOC_MNOC_CFG 120
+#define SM8350_SLAVE_CDSP_MEM_NOC 121
+#define SM8350_SLAVE_MEM_NOC_PCIE_SNOC 122
+#define SM8350_SLAVE_ANOC_PCIE_GEM_NOC 123
+#define SM8350_SLAVE_SNOC_CFG 124
+#define SM8350_SLAVE_QUP_CORE_0 125
+#define SM8350_SLAVE_QUP_CORE_1 126
+#define SM8350_SLAVE_QUP_CORE_2 127
+#define SM8350_SLAVE_BOOT_IMEM 128
+#define SM8350_SLAVE_IMEM 129
+#define SM8350_SLAVE_PIMEM 130
+#define SM8350_SLAVE_SERVICE_NSP_NOC 131
+#define SM8350_SLAVE_SERVICE_A1NOC 132
+#define SM8350_SLAVE_SERVICE_A2NOC 133
+#define SM8350_SLAVE_SERVICE_CNOC 134
+#define SM8350_SLAVE_SERVICE_GEM_NOC_1 135
+#define SM8350_SLAVE_SERVICE_MNOC 136
+#define SM8350_SLAVE_SERVICES_LPASS_AML_NOC 137
+#define SM8350_SLAVE_SERVICE_LPASS_AG_NOC 138
+#define SM8350_SLAVE_SERVICE_GEM_NOC_2 139
+#define SM8350_SLAVE_SERVICE_SNOC 140
+#define SM8350_SLAVE_SERVICE_GEM_NOC 141
+#define SM8350_SLAVE_PCIE_0 142
+#define SM8350_SLAVE_PCIE_1 143
+#define SM8350_SLAVE_QDSS_STM 144
+#define SM8350_SLAVE_TCU 145
+#define SM8350_MASTER_LLCC_DISP 146
+#define SM8350_MASTER_MNOC_HF_MEM_NOC_DISP 147
+#define SM8350_MASTER_MNOC_SF_MEM_NOC_DISP 148
+#define SM8350_MASTER_MDP0_DISP 149
+#define SM8350_MASTER_MDP1_DISP 150
+#define SM8350_MASTER_ROTATOR_DISP 151
+#define SM8350_SLAVE_EBI1_DISP 152
+#define SM8350_SLAVE_LLCC_DISP 153
+#define SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP 154
+#define SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP 155
+
+#endif
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 15536e321df5..c8f57e3e058d 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -279,8 +279,13 @@ config XTENSA_MX
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config XILINX_INTC
- bool
+ bool "Xilinx Interrupt Controller IP"
+ depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
select IRQ_DOMAIN
+ help
+ Support for the Xilinx Interrupt Controller IP core.
+ This is used as a primary controller with MicroBlaze and can also
+ be used as a secondary chained controller on other platforms.
config IRQ_CROSSBAR
bool
@@ -577,4 +582,15 @@ config MST_IRQ
help
Support MStar Interrupt Controller.
+config WPCM450_AIC
+ bool "Nuvoton WPCM450 Advanced Interrupt Controller"
+ depends on ARCH_WPCM450
+ help
+ Support for the interrupt controller in the Nuvoton WPCM450 BMC SoC.
+
+config IRQ_IDT3243X
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c59b95a0532c..18573602a939 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -113,3 +113,5 @@ obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
+obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
+obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index 6567ed782f82..58717cd44f99 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -71,7 +71,7 @@ static void vic_init_hw(struct aspeed_vic *vic)
writel(0, vic->base + AVIC_INT_SELECT);
writel(0, vic->base + AVIC_INT_SELECT + 4);
- /* Some interrupts have a programable high/low level trigger
+ /* Some interrupts have a programmable high/low level trigger
* (4 GPIO direct inputs), for now we assume this was configured
* by firmware. We read which ones are edge now.
*/
@@ -203,7 +203,7 @@ static int __init avic_of_init(struct device_node *node,
}
vic->base = regs;
- /* Initialize soures, all masked */
+ /* Initialize sources, all masked */
vic_init_hw(vic);
/* Ready to receive interrupts */
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index c7c9e976acbb..ad59656ccc28 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -309,7 +309,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (data->can_wake) {
/* This IRQ chip can wake the system, set all
- * relevant child interupts in wake_enabled mask
+ * relevant child interrupts in wake_enabled mask
*/
gc->wake_enabled = 0xffffffff;
gc->wake_enabled &= ~gc->unused;
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 5a2ec43b7ddd..ab91afa86755 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -176,7 +176,7 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
writel(0x0, reg_base + GX_INTC_NEN63_32);
/*
- * Initial mask reg with all unmasked, because we only use enalbe reg
+ * Initial mask reg with all unmasked, because we only use enable reg
*/
writel(0x0, reg_base + GX_INTC_NMASK31_00);
writel(0x0, reg_base + GX_INTC_NMASK63_32);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index fbec07d634ad..4116b48e60af 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -371,7 +371,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
* the MSI data is the absolute value within the range from
* spi_start to (spi_start + num_spis).
*
- * Broadom NS2 GICv2m implementation has an erratum where the MSI data
+ * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
* is 'spi_number - 32'
*
* Reading that register fails on the Graviton implementation
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ed46e6057e33..c3485b230d70 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1492,7 +1492,7 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
*
* Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
* value or to 1023, depending on the enable bit. But that
- * would be issueing a mapping for an /existing/ DevID+EventID
+ * would be issuing a mapping for an /existing/ DevID+EventID
* pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
* to the /same/ vPE, using this opportunity to adjust the
* doorbell. Mouahahahaha. We loves it, Precious.
@@ -3122,7 +3122,7 @@ static void its_cpu_init_lpis(void)
/*
* It's possible for CPU to receive VLPIs before it is
- * sheduled as a vPE, especially for the first CPU, and the
+ * scheduled as a vPE, especially for the first CPU, and the
* VLPI with INTID larger than 2^(IDbits+1) will be considered
* as out of range and dropped by GIC.
* So we initialize IDbits to known value to avoid VLPI drop.
@@ -3616,7 +3616,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/*
* If all interrupts have been freed, start mopping the
- * floor. This is conditionned on the device not being shared.
+ * floor. This is conditioned on the device not being shared.
*/
if (!its_dev->shared &&
bitmap_empty(its_dev->event_map.lpi_map,
@@ -4194,7 +4194,7 @@ static int its_sgi_set_affinity(struct irq_data *d,
{
/*
* There is no notion of affinity for virtual SGIs, at least
- * not on the host (since they can only be targetting a vPE).
+ * not on the host (since they can only be targeting a vPE).
* Tell the kernel we've done whatever it asked for.
*/
irq_data_update_effective_affinity(d, mask_val);
@@ -4239,7 +4239,7 @@ static int its_sgi_get_irqchip_state(struct irq_data *d,
/*
* Locking galore! We can race against two different events:
*
- * - Concurent vPE affinity change: we must make sure it cannot
+ * - Concurrent vPE affinity change: we must make sure it cannot
* happen, or we'll talk to the wrong redistributor. This is
* identical to what happens with vLPIs.
*
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 563a9b366294..e81e89a81cb5 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
reg = of_get_property(np, "mbi-alias", NULL);
if (reg) {
mbi_phys_base = of_translate_address(np, reg);
- if (mbi_phys_base == OF_BAD_ADDR) {
+ if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
ret = -ENXIO;
goto err_free_mbi;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index eb0ee356a629..37a23aa6de37 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
irqnr = gic_read_iar();
+ /* Check for special IDs first */
+ if ((irqnr >= 1020 && irqnr <= 1023))
+ return;
+
if (gic_supports_nmi() &&
unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
gic_handle_nmi(irqnr, regs);
@@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
gic_arch_enable_irqs();
}
- /* Check for special IDs first */
- if ((irqnr >= 1020 && irqnr <= 1023))
- return;
-
if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
else
@@ -1379,7 +1379,7 @@ static int gic_irq_domain_translate(struct irq_domain *d,
/*
* Make it clear that broken DTs are... broken.
- * Partitionned PPIs are an unfortunate exception.
+ * Partitioned PPIs are an unfortunate exception.
*/
WARN_ON(*type == IRQ_TYPE_NONE &&
fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 5d1dc9915272..4ea71b28f9f5 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -87,17 +87,40 @@ static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
static const struct irq_domain_ops *sgi_domain_ops;
+#ifdef CONFIG_ARM64
+#include <asm/cpufeature.h>
+
+bool gic_cpuif_has_vsgi(void)
+{
+ unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+
+ return fld >= 0x3;
+}
+#else
+bool gic_cpuif_has_vsgi(void)
+{
+ return false;
+}
+#endif
+
static bool has_v4_1(void)
{
return !!sgi_domain_ops;
}
+static bool has_v4_1_sgi(void)
+{
+ return has_v4_1() && gic_cpuif_has_vsgi();
+}
+
static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
{
char *name;
int sgi_base;
- if (!has_v4_1())
+ if (!has_v4_1_sgi())
return 0;
name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
@@ -182,7 +205,7 @@ static void its_free_sgi_irqs(struct its_vm *vm)
{
int i;
- if (!has_v4_1())
+ if (!has_v4_1_sgi())
return;
for (i = 0; i < vm->nr_vpes; i++) {
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index a6ed877d9dd3..058ebaebe2c4 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Hisilicon HiP04 INTC
+ * HiSilicon HiP04 INTC
*
* Copyright (C) 2002-2014 ARM Limited.
- * Copyright (c) 2013-2014 Hisilicon Ltd.
+ * Copyright (c) 2013-2014 HiSilicon Ltd.
* Copyright (c) 2013-2014 Linaro Ltd.
*
* Interrupt architecture for the HIP04 INTC:
diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c
new file mode 100644
index 000000000000..f0996820077a
--- /dev/null
+++ b/drivers/irqchip/irq-idt3243x.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for IDT/Renesas 79RC3243x Interrupt Controller.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define IDT_PIC_NR_IRQS 32
+
+#define IDT_PIC_IRQ_PEND 0x00
+#define IDT_PIC_IRQ_MASK 0x08
+
+struct idt_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct irq_chip_generic *gc;
+};
+
+static void idt_irq_dispatch(struct irq_desc *desc)
+{
+ struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = irq_reg_readl(idtpic->gc, IDT_PIC_IRQ_PEND);
+ pending &= ~idtpic->gc->mask_cache;
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(idtpic->irq_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static int idt_pic_init(struct device_node *of_node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ struct idt_pic_data *idtpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ idtpic = kzalloc(sizeof(*idtpic), GFP_KERNEL);
+ if (!idtpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ idtpic->base = of_iomap(of_node, 0);
+ if (!idtpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+ idtpic->irq_domain = domain;
+
+ ret = irq_alloc_domain_generic_chips(domain, 32, 1, "IDTPIC",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (ret)
+ goto out_domain_remove;
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = idtpic->base;
+ gc->private = idtpic;
+
+ ct = gc->chip_types;
+ ct->regs.mask = IDT_PIC_IRQ_MASK;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ idtpic->gc = gc;
+
+ /* Mask interrupts. */
+ writel(0xffffffff, idtpic->base + IDT_PIC_IRQ_MASK);
+ gc->mask_cache = 0xffffffff;
+
+ irq_set_chained_handler_and_data(parent_irq,
+ idt_irq_dispatch, idtpic);
+
+ return 0;
+
+out_domain_remove:
+ irq_domain_remove(domain);
+out_iounmap:
+ iounmap(idtpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(idtpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(idt_pic, "idt,32434-pic", idt_pic_init);
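
One non-obvious detail in idt_irq_dispatch() above: with irq_gc_mask_set_bit()/irq_gc_mask_clr_bit(), the generic chip's mask_cache keeps a 1 for every disabled source, so pending &= ~mask_cache drops sources that are masked yet still latched in the pending register. A minimal sketch of the same walk under that bit convention (dispatch_sketch and its callback are illustrative names, not part of the driver):

/* Walk pending sources highest-first, skipping masked ones;
 * 'masked' uses the 1-means-disabled convention described above.
 */
static void dispatch_sketch(u32 pending, u32 masked,
			    void (*handle)(unsigned int hwirq))
{
	pending &= ~masked;
	while (pending) {
		unsigned int hwirq = __fls(pending); /* highest set bit */

		handle(hwirq);
		pending &= ~(1U << hwirq);
	}
}
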
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 033bccb41455..5f47d8ee4ae3 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -100,11 +100,11 @@ static int __init aic_irq_of_init(struct device_node *node,
jcore_aic.irq_unmask = noop;
jcore_aic.name = "AIC";
- domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops,
+ domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
+ &jcore_aic_irqdomain_ops,
&jcore_aic);
if (!domain)
return -ENOMEM;
- irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq);
return 0;
}
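
The jcore-aic change above folds the old two-step setup into one call: irq_domain_add_legacy() pre-establishes a contiguous hwirq-to-virq window at creation time, which is what the removed irq_create_strict_mappings() call used to do after the fact. A rough equivalence sketch (argument roles follow the irqdomain API):

/* Before: empty linear domain, then force an identity window */
domain = irq_domain_add_linear(node, dom_sz, ops, data);
irq_create_strict_mappings(domain, min_irq /* first virq */,
			   min_irq /* first hwirq */,
			   dom_sz - min_irq /* count */);

/* After: one call sets up the same window up front */
domain = irq_domain_add_legacy(node, dom_sz - min_irq /* size */,
			       min_irq /* first virq */,
			       min_irq /* first hwirq */, ops, data);
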
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 9bf6b9a5f734..f790ca6d78aa 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -163,7 +163,7 @@ static void pch_pic_reset(struct pch_pic *priv)
int i;
for (i = 0; i < PIC_COUNT; i++) {
- /* Write vectore ID */
+		/* Write vector ID */
writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
/* Hardcode route to HT0 Lo */
writeb(1, priv->base + PCH_INT_ROUTE(i));
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index ff7627b57772..2cb45c6b8501 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2015 Hisilicon Limited, All Rights Reserved.
+ * Copyright (C) 2015 HiSilicon Limited, All Rights Reserved.
* Author: Jun Ma <majun258@huawei.com>
* Author: Yun Wu <wuyun.wu@huawei.com>
*/
@@ -390,4 +390,4 @@ module_platform_driver(mbigen_platform_driver);
MODULE_AUTHOR("Jun Ma <majun258@huawei.com>");
MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Hisilicon MBI Generator driver");
+MODULE_DESCRIPTION("HiSilicon MBI Generator driver");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index bc7aebcc96e9..e50676ce2ec8 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -227,7 +227,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
/*
* Get the hwirq number assigned to this channel through
- * a pointer the channel_irq table. The added benifit of this
+	 * a pointer to the channel_irq table. The added benefit of this
* method is that we can also retrieve the channel index with
* it, using the table base.
*/
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
index 143657b0cf28..f6133ae28155 100644
--- a/drivers/irqchip/irq-mst-intc.c
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -13,15 +13,27 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
-#define INTC_MASK 0x0
-#define INTC_EOI 0x20
+#define MST_INTC_MAX_IRQS 64
+
+#define INTC_MASK 0x0
+#define INTC_REV_POLARITY 0x10
+#define INTC_EOI 0x20
+
+#ifdef CONFIG_PM_SLEEP
+static LIST_HEAD(mst_intc_list);
+#endif
struct mst_intc_chip_data {
raw_spinlock_t lock;
unsigned int irq_start, nr_irqs;
void __iomem *base;
bool no_eoi;
+#ifdef CONFIG_PM_SLEEP
+ struct list_head entry;
+ u16 saved_polarity_conf[DIV_ROUND_UP(MST_INTC_MAX_IRQS, 16)];
+#endif
};
static void mst_set_irq(struct irq_data *d, u32 offset)
@@ -78,6 +90,24 @@ static void mst_intc_eoi_irq(struct irq_data *d)
irq_chip_eoi_parent(d);
}
+static int mst_irq_chip_set_type(struct irq_data *data, unsigned int type)
+{
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_EDGE_FALLING:
+ mst_set_irq(data, INTC_REV_POLARITY);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_EDGE_RISING:
+ mst_clear_irq(data, INTC_REV_POLARITY);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH);
+}
+
static struct irq_chip mst_intc_chip = {
.name = "mst-intc",
.irq_mask = mst_intc_mask_irq,
@@ -87,13 +117,62 @@ static struct irq_chip mst_intc_chip = {
.irq_set_irqchip_state = irq_chip_set_parent_state,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_type = mst_irq_chip_set_type,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
};
+#ifdef CONFIG_PM_SLEEP
+static void mst_intc_polarity_save(struct mst_intc_chip_data *cd)
+{
+ int i;
+ void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+ for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+ cd->saved_polarity_conf[i] = readw_relaxed(addr + i * 4);
+}
+
+static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd)
+{
+ int i;
+ void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+ for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+ writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4);
+}
+
+static void mst_irq_resume(void)
+{
+ struct mst_intc_chip_data *cd;
+
+ list_for_each_entry(cd, &mst_intc_list, entry)
+ mst_intc_polarity_restore(cd);
+}
+
+static int mst_irq_suspend(void)
+{
+ struct mst_intc_chip_data *cd;
+
+ list_for_each_entry(cd, &mst_intc_list, entry)
+ mst_intc_polarity_save(cd);
+ return 0;
+}
+
+static struct syscore_ops mst_irq_syscore_ops = {
+ .suspend = mst_irq_suspend,
+ .resume = mst_irq_resume,
+};
+
+static int __init mst_irq_pm_init(void)
+{
+ register_syscore_ops(&mst_irq_syscore_ops);
+ return 0;
+}
+late_initcall(mst_irq_pm_init);
+#endif
+
static int mst_intc_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
@@ -145,6 +224,15 @@ static int mst_intc_domain_alloc(struct irq_domain *domain, unsigned int virq,
parent_fwspec = *fwspec;
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param[1] = cd->irq_start + hwirq;
+
+ /*
+	 * mst-intc latches the interrupt request if it is edge triggered,
+	 * so the output signal to the parent GIC is always level sensitive.
+	 * If the irq signal is active low, mst_irq_chip_set_type configures
+	 * it to active high via the REV_POLARITY bit to meet the GIC SPI spec.
+ */
+ parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec);
}
@@ -193,6 +281,10 @@ static int __init mst_intc_of_init(struct device_node *dn,
return -ENOMEM;
}
+#ifdef CONFIG_PM_SLEEP
+ INIT_LIST_HEAD(&cd->entry);
+ list_add_tail(&cd->entry, &mst_intc_list);
+#endif
return 0;
}
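
To make the polarity handling above concrete, this is the child-to-parent trigger mapping that mst_irq_chip_set_type() implements; REV_POLARITY inverts the input before it is latched, so the parent GIC always sees an active-high level:

/*
 * child trigger type         REV_POLARITY   type passed to parent GIC
 * LEVEL_HIGH / EDGE_RISING   cleared        IRQ_TYPE_LEVEL_HIGH
 * LEVEL_LOW  / EDGE_FALLING  set            IRQ_TYPE_LEVEL_HIGH
 */
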
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index 69ba8ce3c178..9bca0918078e 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -217,7 +217,7 @@ static void mtk_cirq_resume(void)
{
u32 value;
- /* flush recored interrupts, will send signals to parent controller */
+ /* flush recorded interrupts, will send signals to parent controller */
value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index a671938fd97f..d1f5740cd575 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -58,7 +58,7 @@ struct icoll_priv {
static struct icoll_priv icoll_priv;
static struct irq_domain *icoll_domain;
-/* calculate bit offset depending on number of intterupt per register */
+/* calculate bit offset depending on number of interrupts per register */
static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
{
/*
@@ -68,7 +68,7 @@ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
return bit << ((d->hwirq & 3) << 3);
}
-/* calculate mem offset depending on number of intterupt per register */
+/* calculate mem offset depending on number of interrupts per register */
static void __iomem *icoll_intr_reg(struct irq_data *d)
{
/* offset = hwirq / intr_per_reg * 0x10 */
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 6f432d2a5ceb..97d4d04b0a80 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -77,8 +77,8 @@ struct plic_handler {
void __iomem *enable_base;
struct plic_priv *priv;
};
-static int plic_parent_irq;
-static bool plic_cpuhp_setup_done;
+static int plic_parent_irq __ro_after_init;
+static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static inline void plic_toggle(struct plic_handler *handler,
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 8662d7b7b262..b9db90c4aa56 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -193,7 +193,14 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
{ .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
{ .exti = 24, .irq_parent = 95, .chip = &stm32_exti_h_chip_direct },
{ .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 26, .irq_parent = 37, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 27, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 28, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 29, .irq_parent = 71, .chip = &stm32_exti_h_chip_direct },
{ .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 31, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 32, .irq_parent = 82, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 33, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
{ .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
{ .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct },
{ .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct },
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index fb78d6623556..9ea94456b178 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -189,7 +189,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
* 3) spurious irq
* So if we immediately get a reading of 0, check the irq-pending reg
* to differentiate between 2 and 3. We only do this once to avoid
- * the extra check in the common case of 1 hapening after having
+ * the extra check in the common case of 1 happening after having
* read the vector-reg once.
*/
hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 9e456497c1c4..9a63b02b8176 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -60,6 +60,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
break;
case IRQ_TYPE_NONE:
flow_type = IRQ_TYPE_LEVEL_LOW;
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
mod ^= im;
pol ^= im;
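
The added fallthrough; marks the case drop as intentional, which is what the compiler's implicit-fallthrough warning (via the kernel's fallthrough pseudo-keyword) checks for. In reduced form, mirroring the hunk above:

switch (flow_type) {
case IRQ_TYPE_NONE:			/* no trigger specified */
	flow_type = IRQ_TYPE_LEVEL_LOW;	/* pick the default... */
	fallthrough;			/* ...and configure it below */
case IRQ_TYPE_LEVEL_LOW:
	mod ^= im;			/* same level-low setup either way */
	pol ^= im;
	break;
}
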
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 532d0ae172d9..ca1f593f4d13 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -78,7 +78,7 @@ struct ti_sci_inta_vint_desc {
* struct ti_sci_inta_irq_domain - Structure representing a TISCI based
* Interrupt Aggregator IRQ domain.
* @sci: Pointer to TISCI handle
- * @vint: TISCI resource pointer representing IA inerrupts.
+ * @vint: TISCI resource pointer representing IA interrupts.
* @global_event: TISCI resource pointer representing global events.
* @vint_list: List of the vints active in the system
* @vint_mutex: Mutex to protect vint_list
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index e46036374227..62f3d29f9042 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -163,7 +163,7 @@ static struct syscore_ops vic_syscore_ops = {
};
/**
- * vic_pm_init - initicall to register VIC pm
+ * vic_pm_init - initcall to register VIC pm
*
* This is called via late_initcall() to register
* the resources for the VICs due to the early
@@ -397,7 +397,7 @@ static void __init vic_clear_interrupts(void __iomem *base)
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
- * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
* the probe function is called twice, with base set to offset 000
* and 020 within the page. We call this "second block".
*/
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
new file mode 100644
index 000000000000..f3ac392d5bc8
--- /dev/null
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2021 Jonathan Neuschäfer
+
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/printk.h>
+
+#include <asm/exception.h>
+
+#define AIC_SCR(x) ((x)*4) /* Source control registers */
+#define AIC_GEN 0x84 /* Interrupt group enable control register */
+#define AIC_GRSR 0x88 /* Interrupt group raw status register */
+#define AIC_IRSR 0x100 /* Interrupt raw status register */
+#define AIC_IASR 0x104 /* Interrupt active status register */
+#define AIC_ISR 0x108 /* Interrupt status register */
+#define AIC_IPER 0x10c /* Interrupt priority encoding register */
+#define AIC_ISNR 0x110 /* Interrupt source number register */
+#define AIC_IMR 0x114 /* Interrupt mask register */
+#define AIC_OISR 0x118 /* Output interrupt status register */
+#define AIC_MECR 0x120 /* Mask enable command register */
+#define AIC_MDCR 0x124 /* Mask disable command register */
+#define AIC_SSCR 0x128 /* Source set command register */
+#define AIC_SCCR 0x12c /* Source clear command register */
+#define AIC_EOSCR 0x130 /* End of service command register */
+
+#define AIC_SCR_SRCTYPE_LOW_LEVEL (0 << 6)
+#define AIC_SCR_SRCTYPE_HIGH_LEVEL (1 << 6)
+#define AIC_SCR_SRCTYPE_NEG_EDGE (2 << 6)
+#define AIC_SCR_SRCTYPE_POS_EDGE (3 << 6)
+#define AIC_SCR_PRIORITY(x) (x)
+#define AIC_SCR_PRIORITY_MASK 0x7
+
+#define AIC_NUM_IRQS 32
+
+struct wpcm450_aic {
+ void __iomem *regs;
+ struct irq_domain *domain;
+};
+
+static struct wpcm450_aic *aic;
+
+static void wpcm450_aic_init_hw(void)
+{
+ int i;
+
+ /* Disable (mask) all interrupts */
+ writel(0xffffffff, aic->regs + AIC_MDCR);
+
+ /*
+ * Make sure the interrupt controller is ready to serve new interrupts.
+ * Reading from IPER indicates that the nIRQ signal may be deasserted,
+ * and writing to EOSCR indicates that interrupt handling has finished.
+ */
+ readl(aic->regs + AIC_IPER);
+ writel(0, aic->regs + AIC_EOSCR);
+
+ /* Initialize trigger mode and priority of each interrupt source */
+ for (i = 0; i < AIC_NUM_IRQS; i++)
+ writel(AIC_SCR_SRCTYPE_HIGH_LEVEL | AIC_SCR_PRIORITY(7),
+ aic->regs + AIC_SCR(i));
+}
+
+static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs)
+{
+ int hwirq;
+
+ /* Determine the interrupt source */
+ /* Read IPER to signal that nIRQ can be de-asserted */
+ hwirq = readl(aic->regs + AIC_IPER) / 4;
+
+ handle_domain_irq(aic->domain, hwirq, regs);
+}
+
+static void wpcm450_aic_eoi(struct irq_data *d)
+{
+ /* Signal end-of-service */
+ writel(0, aic->regs + AIC_EOSCR);
+}
+
+static void wpcm450_aic_mask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ /* Disable (mask) the interrupt */
+ writel(mask, aic->regs + AIC_MDCR);
+}
+
+static void wpcm450_aic_unmask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ /* Enable (unmask) the interrupt */
+ writel(mask, aic->regs + AIC_MECR);
+}
+
+static int wpcm450_aic_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ /*
+ * The hardware supports high/low level, as well as rising/falling edge
+	 * modes, and the DT binding accommodates that, but as long as
+	 * modes other than high level are unused and cannot be tested,
+	 * they are rejected in this driver.
+ */
+ if ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip wpcm450_aic_chip = {
+ .name = "wpcm450-aic",
+ .irq_eoi = wpcm450_aic_eoi,
+ .irq_mask = wpcm450_aic_mask,
+ .irq_unmask = wpcm450_aic_unmask,
+ .irq_set_type = wpcm450_aic_set_type,
+};
+
+static int wpcm450_aic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)
+{
+ if (hwirq >= AIC_NUM_IRQS)
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &wpcm450_aic_chip, handle_fasteoi_irq);
+ irq_set_chip_data(irq, aic);
+ irq_set_probe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops wpcm450_aic_ops = {
+ .map = wpcm450_aic_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __init wpcm450_aic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ if (parent)
+ return -EINVAL;
+
+ aic = kzalloc(sizeof(*aic), GFP_KERNEL);
+ if (!aic)
+ return -ENOMEM;
+
+ aic->regs = of_iomap(node, 0);
+ if (!aic->regs) {
+ pr_err("Failed to map WPCM450 AIC registers\n");
+ return -ENOMEM;
+ }
+
+ wpcm450_aic_init_hw();
+
+ set_handle_irq(wpcm450_aic_handle_irq);
+
+ aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(wpcm450_aic, "nuvoton,wpcm450-aic", wpcm450_aic_of_init);
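
The ack/EOI handshake in the new wpcm450-aic driver is worth spelling out; this sketch assumes, as the division in wpcm450_aic_handle_irq() implies, that AIC_IPER reports the source number multiplied by four:

hwirq = readl(regs + AIC_IPER) / 4;	/* ack: nIRQ may now deassert */
handle_domain_irq(domain, hwirq, regs);	/* runs the fasteoi flow... */
writel(0, regs + AIC_EOSCR);		/* ...which ends with this EOI */
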
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 1d3d273309bd..8cd1bfc73057 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -210,7 +210,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
/*
* Disable all external interrupts until they are
- * explicity requested.
+ * explicitly requested.
*/
xintc_write(irqc, IER, 0);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 66f4c6398f67..cea2b3789736 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
u8 *res;
position = (index + rsb) * v->fec->roots;
- block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+ block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned)rem;
res = dm_bufio_read(v->fec->bufio, block, buf);
@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
/* read the next block when we run out of parity bytes */
offset += v->fec->roots;
- if (offset >= v->fec->roots << SECTOR_SHIFT) {
+ if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
return -E2BIG;
}
+ if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+ f->io_size = 1 << v->data_dev_block_bits;
+ else
+ f->io_size = v->fec->roots << SECTOR_SHIFT;
+
f->bufio = dm_bufio_client_create(f->dev->bdev,
- f->roots << SECTOR_SHIFT,
+ f->io_size,
1, 0, NULL, NULL);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
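
A worked example of the io_size selection above, assuming (for illustration) 4 KiB data blocks, i.e. data_dev_block_bits == 12, and roots == 2:

/*
 * roots << SECTOR_SHIFT = 2 * 512 = 1024 bytes of parity
 * 1024 & (4096 - 1)     = 1024 -> not a multiple of the block size
 * io_size               = 4096 (one full data block)
 *
 * With roots == 8 the parity span is exactly 4096 bytes, the mask
 * test yields 0, and io_size stays at roots << SECTOR_SHIFT.
 */

Sizing the bufio client this way keeps parity reads block-aligned whenever the parity stride does not divide evenly into device blocks.
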
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 42fbd3a7fc9f..3c46c8d61883 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -36,6 +36,7 @@ struct dm_verity_fec {
struct dm_dev *dev; /* parity data device */
struct dm_bufio_client *data_bufio; /* for data dev access */
struct dm_bufio_client *bufio; /* for parity data access */
+ size_t io_size; /* IO size for roots */
sector_t start; /* parity data start in blocks */
sector_t blocks; /* number of blocks covered */
sector_t rounds; /* number of interleaving rounds */
diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
index 744b230cdcca..dd7eb614c28e 100644
--- a/drivers/mfd/intel_pmt.c
+++ b/drivers/mfd/intel_pmt.c
@@ -49,10 +49,14 @@ enum pmt_quirks {
/* Use shift instead of mask to read discovery table offset */
PMT_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ PMT_QUIRK_NO_DVSEC = BIT(3),
};
struct pmt_platform_info {
unsigned long quirks;
+ struct intel_dvsec_header **capabilities;
};
static const struct pmt_platform_info tgl_info = {
@@ -60,6 +64,26 @@ static const struct pmt_platform_info tgl_info = {
PMT_QUIRK_TABLE_SHIFT,
};
+/* DG1 platform with DVSEC quirk */
+static struct intel_dvsec_header dg1_telemetry = {
+ .length = 0x10,
+ .id = 2,
+ .num_entries = 1,
+ .entry_size = 3,
+ .tbir = 0,
+ .offset = 0x466000,
+};
+
+static struct intel_dvsec_header *dg1_capabilities[] = {
+ &dg1_telemetry,
+ NULL
+};
+
+static const struct pmt_platform_info dg1_info = {
+ .quirks = PMT_QUIRK_NO_DVSEC,
+ .capabilities = dg1_capabilities,
+};
+
static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
unsigned long quirks)
{
@@ -79,19 +103,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
case DVSEC_INTEL_ID_WATCHER:
if (quirks & PMT_QUIRK_NO_WATCHER) {
dev_info(dev, "Watcher not supported\n");
- return 0;
+ return -EINVAL;
}
name = "pmt_watcher";
break;
case DVSEC_INTEL_ID_CRASHLOG:
if (quirks & PMT_QUIRK_NO_CRASHLOG) {
dev_info(dev, "Crashlog not supported\n");
- return 0;
+ return -EINVAL;
}
name = "pmt_crashlog";
break;
default:
- dev_err(dev, "Unrecognized PMT capability: %d\n", id);
return -EINVAL;
}
@@ -148,41 +171,54 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (info)
quirks = info->quirks;
- do {
- struct intel_dvsec_header header;
- u32 table;
- u16 vid;
-
- pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
- if (!pos)
- break;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
- if (vid != PCI_VENDOR_ID_INTEL)
- continue;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
- &header.id);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
- &header.num_entries);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
- &header.entry_size);
- pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
- &table);
-
- header.tbir = INTEL_DVSEC_TABLE_BAR(table);
- header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
-
- ret = pmt_add_dev(pdev, &header, quirks);
- if (ret) {
- dev_warn(&pdev->dev,
- "Failed to add device for DVSEC id %d\n",
- header.id);
- continue;
- }
+ if (info && (info->quirks & PMT_QUIRK_NO_DVSEC)) {
+ struct intel_dvsec_header **header;
+
+ header = info->capabilities;
+ while (*header) {
+ ret = pmt_add_dev(pdev, *header, quirks);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "Failed to add device for DVSEC id %d\n",
+ (*header)->id);
+ else
+ found_devices = true;
- found_devices = true;
- } while (true);
+ ++header;
+ }
+ } else {
+ do {
+ struct intel_dvsec_header header;
+ u32 table;
+ u16 vid;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ break;
+
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
+ if (vid != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
+ &header.id);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
+ &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
+ &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
+ &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ ret = pmt_add_dev(pdev, &header, quirks);
+ if (ret)
+ continue;
+
+ found_devices = true;
+ } while (true);
+ }
if (!found_devices)
return -ENODEV;
@@ -200,10 +236,12 @@ static void pmt_pci_remove(struct pci_dev *pdev)
}
#define PCI_DEVICE_ID_INTEL_PMT_ADL 0x467d
+#define PCI_DEVICE_ID_INTEL_PMT_DG1 0x490e
#define PCI_DEVICE_ID_INTEL_PMT_OOBMSM 0x09a7
#define PCI_DEVICE_ID_INTEL_PMT_TGL 0x9a0d
static const struct pci_device_id pmt_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, PMT_ADL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, PMT_DG1, &dg1_info) },
{ PCI_DEVICE_DATA(INTEL, PMT_OOBMSM, NULL) },
{ PCI_DEVICE_DATA(INTEL, PMT_TGL, &tgl_info) },
{ }
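
With PMT_QUIRK_NO_DVSEC, pmt_pci_probe() skips config-space discovery entirely and walks the NULL-terminated capability array from the driver data. Describing another DVSEC-less platform would follow the same pattern as dg1_info; the values below are made up for illustration:

static struct intel_dvsec_header example_telemetry = {
	.length = 0x10,
	.id = 2,		/* telemetry, as in dg1_telemetry */
	.num_entries = 1,
	.entry_size = 3,
	.tbir = 0,		/* BAR index */
	.offset = 0x123000,	/* hypothetical BAR offset */
};

static struct intel_dvsec_header *example_capabilities[] = {
	&example_telemetry,
	NULL			/* the walk in pmt_pci_probe() stops here */
};
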
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f532c59bb59b..f4fb5c52b863 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -402,6 +402,16 @@ config SRAM
config SRAM_EXEC
bool
+config DW_XDATA_PCIE
+ depends on PCI
+ tristate "Synopsys DesignWare xData PCIe driver"
+ help
+	  This driver allows controlling the Synopsys DesignWare PCIe traffic
+	  generator IP, also known as xData, present in the Synopsys DesignWare
+	  PCIe Endpoint prototype.
+
+ If unsure, say N.
+
config PCI_ENDPOINT_TEST
depends on PCI
select CRC32
@@ -427,14 +437,6 @@ config MISC_RTSX
tristate
default MISC_RTSX_PCI || MISC_RTSX_USB
-config PVPANIC
- tristate "pvpanic device support"
- depends on HAS_IOMEM && (ACPI || OF)
- help
- This driver provides support for the pvpanic device. pvpanic is
- a paravirtualized device provided by QEMU; it lets a virtual machine
- (guest) communicate panic events to the host.
-
config HISI_HIKEY_USB
tristate "USB GPIO Hub on HiSilicon Hikey 960/970 Platform"
depends on (OF && GPIOLIB) || COMPILE_TEST
@@ -461,4 +463,5 @@ source "drivers/misc/bcm-vk/Kconfig"
source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
+source "drivers/misc/pvpanic/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 99b6f15a3c70..e92a56d4442f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -47,11 +47,12 @@ obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_CXL_BASE) += cxl/
+obj-$(CONFIG_DW_XDATA_PCIE) += dw-xdata-pcie.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
obj-$(CONFIG_BCM_VK) += bcm-vk/
obj-y += cardreader/
-obj-$(CONFIG_PVPANIC) += pvpanic.o
+obj-$(CONFIG_PVPANIC) += pvpanic/
obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 6f164522b028..5d8f3f6a95f2 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -139,6 +139,9 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
value = dpot_read_r8d8(dpot,
DPOT_AD5291_READ_RDAC << 2);
+ if (value < 0)
+ return value;
+
if (dpot->uid == DPOT_UID(AD5291_ID))
value = value >> 2;
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index fb2eff69e449..e627b4056623 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -52,7 +52,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
* can always access it when dereferenced from IDR. For the same
* reason, the segment table is only destroyed after the context is
* removed from the IDR. Access to this in the IOCTL is protected by
- * Linux filesytem symantics (can't IOCTL until open is complete).
+ * Linux filesystem semantics (can't IOCTL until open is complete).
*/
i = cxl_alloc_sst(ctx);
if (i)
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 01153b74334a..60c829113299 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -200,7 +200,7 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx)
if (ctx->mm == NULL)
return NULL;
- if (!atomic_inc_not_zero(&ctx->mm->mm_users))
+ if (!mmget_not_zero(ctx->mm))
return NULL;
return ctx->mm;
diff --git a/drivers/misc/dw-xdata-pcie.c b/drivers/misc/dw-xdata-pcie.c
new file mode 100644
index 000000000000..257c25da5199
--- /dev/null
+++ b/drivers/misc/dw-xdata-pcie.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare xData driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/bitfield.h>
+#include <linux/pci-epf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#define DW_XDATA_DRIVER_NAME "dw-xdata-pcie"
+
+#define DW_XDATA_EP_MEM_OFFSET 0x8000000
+
+static DEFINE_IDA(xdata_ida);
+
+#define STATUS_DONE BIT(0)
+
+#define CONTROL_DOORBELL BIT(0)
+#define CONTROL_IS_WRITE BIT(1)
+#define CONTROL_LENGTH(a) FIELD_PREP(GENMASK(13, 2), a)
+#define CONTROL_PATTERN_INC BIT(16)
+#define CONTROL_NO_ADDR_INC BIT(18)
+
+#define XPERF_CONTROL_ENABLE BIT(5)
+
+#define BURST_REPEAT BIT(31)
+#define BURST_VALUE 0x1001
+
+#define PATTERN_VALUE 0x0
+
+struct dw_xdata_regs {
+ u32 addr_lsb; /* 0x000 */
+ u32 addr_msb; /* 0x004 */
+ u32 burst_cnt; /* 0x008 */
+ u32 control; /* 0x00c */
+ u32 pattern; /* 0x010 */
+ u32 status; /* 0x014 */
+ u32 RAM_addr; /* 0x018 */
+ u32 RAM_port; /* 0x01c */
+ u32 _reserved0[14]; /* 0x020..0x054 */
+ u32 perf_control; /* 0x058 */
+ u32 _reserved1[41]; /* 0x05c..0x0fc */
+ u32 wr_cnt_lsb; /* 0x100 */
+ u32 wr_cnt_msb; /* 0x104 */
+ u32 rd_cnt_lsb; /* 0x108 */
+ u32 rd_cnt_msb; /* 0x10c */
+} __packed;
+
+struct dw_xdata_region {
+ phys_addr_t paddr; /* physical address */
+ void __iomem *vaddr; /* virtual address */
+};
+
+struct dw_xdata {
+ struct dw_xdata_region rg_region; /* registers */
+ size_t max_wr_len; /* max wr xfer len */
+ size_t max_rd_len; /* max rd xfer len */
+ struct mutex mutex;
+ struct pci_dev *pdev;
+ struct miscdevice misc_dev;
+};
+
+static inline struct dw_xdata_regs __iomem *__dw_regs(struct dw_xdata *dw)
+{
+ return dw->rg_region.vaddr;
+}
+
+static void dw_xdata_stop(struct dw_xdata *dw)
+{
+ u32 burst;
+
+ mutex_lock(&dw->mutex);
+
+ burst = readl(&(__dw_regs(dw)->burst_cnt));
+
+ if (burst & BURST_REPEAT) {
+ burst &= ~(u32)BURST_REPEAT;
+ writel(burst, &(__dw_regs(dw)->burst_cnt));
+ }
+
+ mutex_unlock(&dw->mutex);
+}
+
+static void dw_xdata_start(struct dw_xdata *dw, bool write)
+{
+ struct device *dev = &dw->pdev->dev;
+ u32 control, status;
+
+ /* Stop first if xfer in progress */
+ dw_xdata_stop(dw);
+
+ mutex_lock(&dw->mutex);
+
+ /* Clear status register */
+ writel(0x0, &(__dw_regs(dw)->status));
+
+ /* Burst count register set for continuous until stopped */
+ writel(BURST_REPEAT | BURST_VALUE, &(__dw_regs(dw)->burst_cnt));
+
+ /* Pattern register */
+ writel(PATTERN_VALUE, &(__dw_regs(dw)->pattern));
+
+ /* Control register */
+ control = CONTROL_DOORBELL | CONTROL_PATTERN_INC | CONTROL_NO_ADDR_INC;
+ if (write) {
+ control |= CONTROL_IS_WRITE;
+ control |= CONTROL_LENGTH(dw->max_wr_len);
+ } else {
+ control |= CONTROL_LENGTH(dw->max_rd_len);
+ }
+ writel(control, &(__dw_regs(dw)->control));
+
+ /*
+	 * The xData HW block needs about 100 ms to initiate the traffic
+	 * generation, according to this HW block's datasheet.
+ */
+ usleep_range(100, 150);
+
+ status = readl(&(__dw_regs(dw)->status));
+
+ mutex_unlock(&dw->mutex);
+
+ if (!(status & STATUS_DONE))
+ dev_dbg(dev, "xData: started %s direction\n",
+ write ? "write" : "read");
+}
+
+static void dw_xdata_perf_meas(struct dw_xdata *dw, u64 *data, bool write)
+{
+ if (write) {
+ *data = readl(&(__dw_regs(dw)->wr_cnt_msb));
+ *data <<= 32;
+ *data |= readl(&(__dw_regs(dw)->wr_cnt_lsb));
+ } else {
+ *data = readl(&(__dw_regs(dw)->rd_cnt_msb));
+ *data <<= 32;
+ *data |= readl(&(__dw_regs(dw)->rd_cnt_lsb));
+ }
+}
+
+static u64 dw_xdata_perf_diff(u64 *m1, u64 *m2, u64 time)
+{
+ u64 rate = (*m1 - *m2);
+
+ rate *= (1000 * 1000 * 1000);
+ rate >>= 20;
+ rate = DIV_ROUND_CLOSEST_ULL(rate, time);
+
+ return rate;
+}
+
+static void dw_xdata_perf(struct dw_xdata *dw, u64 *rate, bool write)
+{
+ struct device *dev = &dw->pdev->dev;
+ u64 data[2], time[2], diff;
+
+ mutex_lock(&dw->mutex);
+
+ /* First acquisition of current count frames */
+ writel(0x0, &(__dw_regs(dw)->perf_control));
+ dw_xdata_perf_meas(dw, &data[0], write);
+ time[0] = jiffies;
+ writel((u32)XPERF_CONTROL_ENABLE, &(__dw_regs(dw)->perf_control));
+
+ /*
+ * Wait 100ms between the 1st count frame acquisition and the 2nd
+ * count frame acquisition, in order to calculate the speed later
+ */
+ mdelay(100);
+
+ /* Second acquisition of current count frames */
+ writel(0x0, &(__dw_regs(dw)->perf_control));
+ dw_xdata_perf_meas(dw, &data[1], write);
+ time[1] = jiffies;
+ writel((u32)XPERF_CONTROL_ENABLE, &(__dw_regs(dw)->perf_control));
+
+ /*
+ * Speed calculation
+ *
+ * rate = (2nd count frames - 1st count frames) / (time elapsed)
+ */
+ diff = jiffies_to_nsecs(time[1] - time[0]);
+ *rate = dw_xdata_perf_diff(&data[1], &data[0], diff);
+
+ mutex_unlock(&dw->mutex);
+
+ dev_dbg(dev, "xData: time=%llu us, %s=%llu MB/s\n",
+ diff, write ? "write" : "read", *rate);
+}
+
+static struct dw_xdata *misc_dev_to_dw(struct miscdevice *misc_dev)
+{
+ return container_of(misc_dev, struct dw_xdata, misc_dev);
+}
+
+static ssize_t write_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct miscdevice *misc_dev = dev_get_drvdata(dev);
+ struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+ u64 rate;
+
+ dw_xdata_perf(dw, &rate, true);
+
+ return sysfs_emit(buf, "%llu\n", rate);
+}
+
+static ssize_t write_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct miscdevice *misc_dev = dev_get_drvdata(dev);
+ struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+ bool enabled;
+ int ret;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret < 0)
+ return ret;
+
+ if (enabled) {
+ dev_dbg(dev, "xData: requested write transfer\n");
+ dw_xdata_start(dw, true);
+ } else {
+ dev_dbg(dev, "xData: requested stop transfer\n");
+ dw_xdata_stop(dw);
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(write);
+
+static ssize_t read_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct miscdevice *misc_dev = dev_get_drvdata(dev);
+ struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+ u64 rate;
+
+ dw_xdata_perf(dw, &rate, false);
+
+ return sysfs_emit(buf, "%llu\n", rate);
+}
+
+static ssize_t read_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct miscdevice *misc_dev = dev_get_drvdata(dev);
+ struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+ bool enabled;
+ int ret;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret < 0)
+ return ret;
+
+ if (enabled) {
+ dev_dbg(dev, "xData: requested read transfer\n");
+ dw_xdata_start(dw, false);
+ } else {
+ dev_dbg(dev, "xData: requested stop transfer\n");
+ dw_xdata_stop(dw);
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(read);
+
+static struct attribute *xdata_attrs[] = {
+ &dev_attr_write.attr,
+ &dev_attr_read.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(xdata);
+
+static int dw_xdata_pcie_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pid)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_xdata *dw;
+ char name[24];
+ u64 addr;
+ int err;
+ int id;
+
+ /* Enable PCI device */
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "enabling device failed\n");
+ return err;
+ }
+
+ /* Mapping PCI BAR regions */
+ err = pcim_iomap_regions(pdev, BIT(BAR_0), pci_name(pdev));
+ if (err) {
+ dev_err(dev, "xData BAR I/O remapping failed\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ /* Allocate memory */
+ dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ /* Data structure initialization */
+ mutex_init(&dw->mutex);
+
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[BAR_0];
+ if (!dw->rg_region.vaddr)
+ return -ENOMEM;
+
+ dw->rg_region.paddr = pdev->resource[BAR_0].start;
+
+ dw->max_wr_len = pcie_get_mps(pdev);
+ dw->max_wr_len >>= 2;
+
+ dw->max_rd_len = pcie_get_readrq(pdev);
+ dw->max_rd_len >>= 2;
+
+ dw->pdev = pdev;
+
+ id = ida_simple_get(&xdata_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(dev, "xData: unable to get id\n");
+ return id;
+ }
+
+ snprintf(name, sizeof(name), DW_XDATA_DRIVER_NAME ".%d", id);
+ dw->misc_dev.name = kstrdup(name, GFP_KERNEL);
+ if (!dw->misc_dev.name) {
+ err = -ENOMEM;
+ goto err_ida_remove;
+ }
+
+ dw->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ dw->misc_dev.parent = dev;
+ dw->misc_dev.groups = xdata_groups;
+
+ writel(0x0, &(__dw_regs(dw)->RAM_addr));
+ writel(0x0, &(__dw_regs(dw)->RAM_port));
+
+ addr = dw->rg_region.paddr + DW_XDATA_EP_MEM_OFFSET;
+ writel(lower_32_bits(addr), &(__dw_regs(dw)->addr_lsb));
+ writel(upper_32_bits(addr), &(__dw_regs(dw)->addr_msb));
+ dev_dbg(dev, "xData: target address = 0x%.16llx\n", addr);
+
+ dev_dbg(dev, "xData: wr_len = %zu, rd_len = %zu\n",
+ dw->max_wr_len * 4, dw->max_rd_len * 4);
+
+ /* Saving data structure reference */
+ pci_set_drvdata(pdev, dw);
+
+ /* Register misc device */
+ err = misc_register(&dw->misc_dev);
+ if (err) {
+ dev_err(dev, "xData: failed to register device\n");
+ goto err_kfree_name;
+ }
+
+ return 0;
+
+err_kfree_name:
+ kfree(dw->misc_dev.name);
+
+err_ida_remove:
+ ida_simple_remove(&xdata_ida, id);
+
+ return err;
+}
+
+static void dw_xdata_pcie_remove(struct pci_dev *pdev)
+{
+ struct dw_xdata *dw = pci_get_drvdata(pdev);
+ int id;
+
+ if (sscanf(dw->misc_dev.name, DW_XDATA_DRIVER_NAME ".%d", &id) != 1)
+ return;
+
+ if (id < 0)
+ return;
+
+ dw_xdata_stop(dw);
+ misc_deregister(&dw->misc_dev);
+ kfree(dw->misc_dev.name);
+ ida_simple_remove(&xdata_ida, id);
+}
+
+static const struct pci_device_id dw_xdata_pcie_id_table[] = {
+ { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, dw_xdata_pcie_id_table);
+
+static struct pci_driver dw_xdata_pcie_driver = {
+ .name = DW_XDATA_DRIVER_NAME,
+ .id_table = dw_xdata_pcie_id_table,
+ .probe = dw_xdata_pcie_probe,
+ .remove = dw_xdata_pcie_remove,
+};
+
+module_pci_driver(dw_xdata_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare xData PCIe driver");
+MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
+
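
A worked pass through dw_xdata_perf_diff() above, assuming (as the MB/s debug message suggests) that the hardware counters report bytes moved; note that the elapsed time fed into the final divide is in nanoseconds from jiffies_to_nsecs(), even though the debug print labels it "us":

/*
 * m1 - m2 = 209715200		bytes in the window (200 MiB)
 * time    = 100000000		ns (~ the 100 ms between samples)
 *
 * rate  = 209715200 * 1e9	= 2.097152e17
 * rate >>= 20			= 2e11 (bytes -> MiB)
 * rate /= time			= 2000 -> reported as 2000 MB/s
 */
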
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 0db4000dedf2..500b1feaf1f6 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -316,7 +316,7 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
/**
* copy_ddcb_results() - Copy output state from real DDCB to request
- * @req: pointer to requsted DDCB parameters
+ * @req: pointer to requested DDCB parameters
* @ddcb_no: pointer to ddcb number being tapped
*
* Copy DDCB ASV to request struct. There is no endian
@@ -356,7 +356,7 @@ static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
}
/**
- * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work equests.
+ * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
* @cd: pointer to genwqe device descriptor
* @queue: queue to be checked
*
@@ -498,7 +498,7 @@ int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
/*
* We need to distinguish 3 cases here:
- * 1. rc == 0 timeout occured
+ * 1. rc == 0 timeout occurred
* 2. rc == -ERESTARTSYS signal received
* 3. rc > 0 remaining jiffies condition is true
*/
@@ -982,7 +982,7 @@ static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
spin_lock_irqsave(&queue->ddcb_lock, flags);
- if (queue_empty(queue)) { /* emtpy queue */
+ if (queue_empty(queue)) { /* empty queue */
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
@@ -1002,7 +1002,7 @@ static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
* @cd: pointer to genwqe device descriptor
*
* Keep track on the number of DDCBs which ware currently in the
- * queue. This is needed for statistics as well as conditon if we want
+ * queue. This is needed for statistics as well as to decide if we want
* to wait or better do polling in case of no interrupts available.
*/
int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index d9adb9a5e4d8..719168c980a4 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -181,7 +181,7 @@ static void cb_release(struct kref *ref)
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
int ctx_id, bool internal_cb)
{
- struct hl_cb *cb;
+ struct hl_cb *cb = NULL;
u32 cb_offset;
void *p;
@@ -193,9 +193,10 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
* the kernel's copy. Hence, we must never sleep in this code section
* and must use GFP_ATOMIC for all memory allocations.
*/
- if (ctx_id == HL_KERNEL_ASID_ID)
+ if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
- else
+
+ if (!cb)
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
@@ -214,6 +215,9 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
} else if (ctx_id == HL_KERNEL_ASID_ID) {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address, GFP_ATOMIC);
+ if (!p)
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ cb_size, &cb->bus_address, GFP_KERNEL);
} else {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address,
@@ -310,6 +314,8 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
spin_lock(&mgr->cb_lock);
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
+ if (rc < 0)
+ rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
spin_unlock(&mgr->cb_lock);
if (rc < 0) {
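
The pattern introduced above, and repeated throughout this habanalabs series, is to try the atomic pool first and fall back to a sleeping allocation only when it fails; this evidently relies on the fallback being reached only on paths that are allowed to sleep. A minimal sketch:

ptr = kzalloc(size, GFP_ATOMIC);	/* safe on the no-sleep path */
if (!ptr)
	ptr = kzalloc(size, GFP_KERNEL);	/* may block and reclaim */
if (!ptr)
	return -ENOMEM;
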
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 7bd4a03b3429..ff8791a651fd 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -84,6 +84,38 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
return 0;
}
+static void sob_reset_work(struct work_struct *work)
+{
+ struct hl_cs_compl *hl_cs_cmpl =
+ container_of(work, struct hl_cs_compl, sob_reset_work);
+ struct hl_device *hdev = hl_cs_cmpl->hdev;
+
+ /*
+ * A signal CS can get completion while the corresponding wait
+ * for signal CS is on its way to the PQ. The wait for signal CS
+ * will get stuck if the signal CS incremented the SOB to its
+ * max value and there are no pending (submitted) waits on this
+ * SOB.
+	 * We do the following to avoid this situation:
+ * 1. The wait for signal CS must get a ref for the signal CS as
+ * soon as possible in cs_ioctl_signal_wait() and put it
+ * before being submitted to the PQ but after it incremented
+ * the SOB refcnt in init_signal_wait_cs().
+ * 2. Signal/Wait for signal CS will decrement the SOB refcnt
+ * here.
+ * These two measures guarantee that the wait for signal CS will
+ * reset the SOB upon completion rather than the signal CS and
+ * hence the above scenario is avoided.
+ */
+ kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+
+ if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
+ hdev->asic_funcs->reset_sob_group(hdev,
+ hl_cs_cmpl->sob_group);
+
+ kfree(hl_cs_cmpl);
+}
+
static void hl_fence_release(struct kref *kref)
{
struct hl_fence *fence =
@@ -109,28 +141,9 @@ static void hl_fence_release(struct kref *kref)
hl_cs_cmpl->hw_sob->sob_id,
hl_cs_cmpl->sob_val);
- /*
- * A signal CS can get completion while the corresponding wait
- * for signal CS is on its way to the PQ. The wait for signal CS
- * will get stuck if the signal CS incremented the SOB to its
- * max value and there are no pending (submitted) waits on this
- * SOB.
- * We do the following to void this situation:
- * 1. The wait for signal CS must get a ref for the signal CS as
- * soon as possible in cs_ioctl_signal_wait() and put it
- * before being submitted to the PQ but after it incremented
- * the SOB refcnt in init_signal_wait_cs().
- * 2. Signal/Wait for signal CS will decrement the SOB refcnt
- * here.
- * These two measures guarantee that the wait for signal CS will
- * reset the SOB upon completion rather than the signal CS and
- * hence the above scenario is avoided.
- */
- kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+ queue_work(hdev->sob_reset_wq, &hl_cs_cmpl->sob_reset_work);
- if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
- hdev->asic_funcs->reset_sob_group(hdev,
- hl_cs_cmpl->sob_group);
+ return;
}
free:
@@ -454,8 +467,7 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
if (next_entry_found && !next->tdr_active) {
next->tdr_active = true;
- schedule_delayed_work(&next->work_tdr,
- hdev->timeout_jiffies);
+ schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
}
spin_unlock(&hdev->cs_mirror_lock);
@@ -492,24 +504,6 @@ static void cs_do_release(struct kref *ref)
goto out;
}
- hdev->asic_funcs->hw_queues_lock(hdev);
-
- hdev->cs_active_cnt--;
- if (!hdev->cs_active_cnt) {
- struct hl_device_idle_busy_ts *ts;
-
- ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
- ts->busy_to_idle_ts = ktime_get();
-
- if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
- hdev->idle_busy_ts_idx = 0;
- } else if (hdev->cs_active_cnt < 0) {
- dev_crit(hdev->dev, "CS active cnt %d is negative\n",
- hdev->cs_active_cnt);
- }
-
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
/* Need to update CI for all queue jobs that does not get completion */
hl_hw_queue_update_ci(cs);
@@ -620,14 +614,14 @@ static void cs_timedout(struct work_struct *work)
cs_put(cs);
if (hdev->reset_on_lockup)
- hl_device_reset(hdev, false, false);
+ hl_device_reset(hdev, 0);
else
hdev->needs_reset = true;
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_cs_type cs_type, u64 user_sequence,
- struct hl_cs **cs_new)
+ struct hl_cs **cs_new, u32 flags, u32 timeout)
{
struct hl_cs_counters_atomic *cntr;
struct hl_fence *other = NULL;
@@ -638,6 +632,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
+ if (!cs)
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+
if (!cs) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
@@ -651,12 +648,17 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->submitted = false;
cs->completed = false;
cs->type = cs_type;
+ cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
+ cs->timeout_jiffies = timeout;
INIT_LIST_HEAD(&cs->job_list);
INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
kref_init(&cs->refcount);
spin_lock_init(&cs->job_lock);
cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
+ if (!cs_cmpl)
+ cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_KERNEL);
+
if (!cs_cmpl) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
@@ -664,9 +666,23 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
goto free_cs;
}
+ cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
+ sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
+ if (!cs->jobs_in_queue_cnt)
+ cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
+ sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
+
+ if (!cs->jobs_in_queue_cnt) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
+ rc = -ENOMEM;
+ goto free_cs_cmpl;
+ }
+
cs_cmpl->hdev = hdev;
cs_cmpl->type = cs->type;
spin_lock_init(&cs_cmpl->lock);
+ INIT_WORK(&cs_cmpl->sob_reset_work, sob_reset_work);
cs->fence = &cs_cmpl->base_fence;
spin_lock(&ctx->cs_lock);
@@ -696,15 +712,6 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
goto free_fence;
}
- cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
- sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
- if (!cs->jobs_in_queue_cnt) {
- atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
- atomic64_inc(&cntr->out_of_mem_drop_cnt);
- rc = -ENOMEM;
- goto free_fence;
- }
-
/* init hl_fence */
hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
@@ -727,6 +734,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
free_fence:
spin_unlock(&ctx->cs_lock);
+ kfree(cs->jobs_in_queue_cnt);
+free_cs_cmpl:
kfree(cs_cmpl);
free_cs:
kfree(cs);
@@ -749,6 +758,8 @@ void hl_cs_rollback_all(struct hl_device *hdev)
int i;
struct hl_cs *cs, *tmp;
+ flush_workqueue(hdev->sob_reset_wq);
+
/* flush all completions before iterating over the CS mirror list in
* order to avoid a race with the release functions
*/
@@ -778,6 +789,44 @@ void hl_pending_cb_list_flush(struct hl_ctx *ctx)
}
}
+static void
+wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
+{
+ struct hl_user_pending_interrupt *pend;
+
+ spin_lock(&interrupt->wait_list_lock);
+ list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
+ pend->fence.error = -EIO;
+ complete_all(&pend->fence.completion);
+ }
+ spin_unlock(&interrupt->wait_list_lock);
+}
+
+void hl_release_pending_user_interrupts(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_user_interrupt *interrupt;
+ int i;
+
+ if (!prop->user_interrupt_count)
+ return;
+
+	/* We iterate through the user interrupt requests and wake up all
+	 * user threads waiting for interrupt completion. We iterate the
+	 * list under a lock; this is why all user threads, once awake,
+ * will wait on the same lock and will release the waiting object upon
+ * unlock.
+ */
+
+ for (i = 0 ; i < prop->user_interrupt_count ; i++) {
+ interrupt = &hdev->user_interrupt[i];
+ wake_pending_user_interrupt_threads(interrupt);
+ }
+
+ interrupt = &hdev->common_user_interrupt;
+ wake_pending_user_interrupt_threads(interrupt);
+}
+
static void job_wq_completion(struct work_struct *work)
{
struct hl_cs_job *job = container_of(work, struct hl_cs_job,
@@ -890,6 +939,9 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
job = kzalloc(sizeof(*job), GFP_ATOMIC);
if (!job)
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+
+ if (!job)
return NULL;
kref_init(&job->refcount);
@@ -991,6 +1043,9 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
+ if (!*cs_chunk_array)
+ *cs_chunk_array = kmalloc_array(num_chunks,
+ sizeof(**cs_chunk_array), GFP_KERNEL);
if (!*cs_chunk_array) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
@@ -1038,7 +1093,8 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
}
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
- u32 num_chunks, u64 *cs_seq, u32 flags)
+ u32 num_chunks, u64 *cs_seq, u32 flags,
+ u32 timeout)
{
bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
@@ -1067,11 +1123,11 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
staged_mid = false;
rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
- staged_mid ? user_sequence : ULLONG_MAX, &cs);
+ staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
+ timeout);
if (rc)
goto free_cs_chunk_array;
- cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
*cs_seq = cs->sequence;
hl_debugfs_add_cs(cs);
@@ -1269,7 +1325,8 @@ static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
list_move_tail(&pending_cb->cb_node, &local_cb_list);
spin_unlock(&ctx->pending_cb_lock);
- rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs);
+ rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs, 0,
+ hdev->timeout_jiffies);
if (rc)
goto add_list_elements;
@@ -1370,7 +1427,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, 0);
+ cs_seq, 0, hdev->timeout_jiffies);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1419,7 +1476,7 @@ wait_again:
out:
if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
- hl_device_reset(hdev, false, false);
+ hl_device_reset(hdev, 0);
return rc;
}
@@ -1445,6 +1502,10 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
+ if (!signal_seq_arr)
+ signal_seq_arr = kmalloc_array(signal_seq_arr_len,
+ sizeof(*signal_seq_arr),
+ GFP_KERNEL);
if (!signal_seq_arr) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
@@ -1536,7 +1597,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
void __user *chunks, u32 num_chunks,
- u64 *cs_seq, bool timestamp)
+ u64 *cs_seq, u32 flags, u32 timeout)
{
struct hl_cs_chunk *cs_chunk_array, *chunk;
struct hw_queue_properties *hw_queue_prop;
@@ -1642,7 +1703,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
}
- rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs);
+ rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
if (rc) {
if (cs_type == CS_TYPE_WAIT ||
cs_type == CS_TYPE_COLLECTIVE_WAIT)
@@ -1650,8 +1711,6 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
goto free_cs_chunk_array;
}
- cs->timestamp = !!timestamp;
-
/*
* Save the signal CS fence for later initialization right before
* hanging the wait CS on the queue.
@@ -1709,7 +1768,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
enum hl_cs_type cs_type;
u64 cs_seq = ULONG_MAX;
void __user *chunks;
- u32 num_chunks, flags;
+ u32 num_chunks, flags, timeout;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -1735,16 +1794,20 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
cs_seq = args->in.seq;
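+ /* args->in.timeout is given in seconds; convert it to jiffies, or
+ * fall back to the driver-wide default when no custom timeout was
+ * requested
+ */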
+ timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
+ ? msecs_to_jiffies(args->in.timeout * 1000)
+ : hpriv->hdev->timeout_jiffies;
+
switch (cs_type) {
case CS_TYPE_SIGNAL:
case CS_TYPE_WAIT:
case CS_TYPE_COLLECTIVE_WAIT:
rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
- &cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+ &cs_seq, args->in.cs_flags, timeout);
break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
- args->in.cs_flags);
+ args->in.cs_flags, timeout);
break;
}
@@ -1818,7 +1881,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
return rc;
}
-int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_device *hdev = hpriv->hdev;
union hl_wait_cs_args *args = data;
@@ -1873,3 +1936,176 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return 0;
}
+
+static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+ u32 timeout_us, u64 user_address,
+ u32 target_value, u16 interrupt_offset,
+ enum hl_cs_wait_status *status)
+{
+ struct hl_user_pending_interrupt *pend;
+ struct hl_user_interrupt *interrupt;
+ unsigned long timeout;
+ long completion_rc;
+ u32 completion_value;
+ int rc = 0;
+
+ if (timeout_us == U32_MAX)
+ timeout = timeout_us;
+ else
+ timeout = usecs_to_jiffies(timeout_us);
+
+ hl_ctx_get(hdev, ctx);
+
+ pend = kmalloc(sizeof(*pend), GFP_KERNEL);
+ if (!pend) {
+ hl_ctx_put(ctx);
+ return -ENOMEM;
+ }
+
+ hl_fence_init(&pend->fence, ULONG_MAX);
+
+ if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
+ interrupt = &hdev->common_user_interrupt;
+ else
+ interrupt = &hdev->user_interrupt[interrupt_offset];
+
+ spin_lock(&interrupt->wait_list_lock);
+ if (!hl_device_operational(hdev, NULL)) {
+ rc = -EPERM;
+ goto unlock_and_free_fence;
+ }
+
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ dev_err(hdev->dev,
+ "Failed to copy completion value from user\n");
+ rc = -EFAULT;
+ goto unlock_and_free_fence;
+ }
+
+ if (completion_value >= target_value)
+ *status = CS_WAIT_STATUS_COMPLETED;
+ else
+ *status = CS_WAIT_STATUS_BUSY;
+
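+ /* A zero timeout means the caller only polls; in that case, or if
+ * the target value was already reached, return without registering
+ * on the interrupt's wait list
+ */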
+ if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
+ goto unlock_and_free_fence;
+
+ /* Add pending user interrupt to relevant list for the interrupt
+ * handler to monitor
+ */
+ list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ spin_unlock(&interrupt->wait_list_lock);
+
+wait_again:
+ /* Wait for interrupt handler to signal completion */
+ completion_rc =
+ wait_for_completion_interruptible_timeout(
+ &pend->fence.completion, timeout);
+
+ /* If the timeout did not expire, we need to perform the comparison.
+ * If the comparison fails, keep waiting until the timeout expires
+ */
+ if (completion_rc > 0) {
+ if (copy_from_user(&completion_value,
+ u64_to_user_ptr(user_address), 4)) {
+ dev_err(hdev->dev,
+ "Failed to copy completion value from user\n");
+ rc = -EFAULT;
+ goto remove_pending_user_interrupt;
+ }
+
+ if (completion_value >= target_value) {
+ *status = CS_WAIT_STATUS_COMPLETED;
+ } else {
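+ /* completion_rc holds the remaining timeout in jiffies;
+ * re-arm the wait with it so the total wait time never
+ * exceeds the user-requested timeout
+ */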
+ timeout = completion_rc;
+ goto wait_again;
+ }
+ } else {
+ *status = CS_WAIT_STATUS_BUSY;
+ }
+
+remove_pending_user_interrupt:
+ spin_lock(&interrupt->wait_list_lock);
+ list_del(&pend->wait_list_node);
+
+unlock_and_free_fence:
+ spin_unlock(&interrupt->wait_list_lock);
+ kfree(pend);
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
+ struct hl_device *hdev = hpriv->hdev;
+ struct asic_fixed_properties *prop;
+ union hl_wait_cs_args *args = data;
+ enum hl_cs_wait_status status;
+ int rc;
+
+ prop = &hdev->asic_prop;
+
+ if (!prop->user_interrupt_count) {
+ dev_err(hdev->dev, "no user interrupts allowed");
+ return -EPERM;
+ }
+
+ interrupt_id =
+ FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
+
+ first_interrupt = prop->first_available_user_msix_interrupt;
+ last_interrupt = prop->first_available_user_msix_interrupt +
+ prop->user_interrupt_count - 1;
+
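+ /* A valid ID lies either inside the ASIC's user MSI-X interrupt
+ * range or is the special common-interrupt ID
+ */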
+ if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
+ interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
+ dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
+ return -EINVAL;
+ }
+
+ if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
+ interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
+ else
+ interrupt_offset = interrupt_id - first_interrupt;
+
+ rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
+ args->in.interrupt_timeout_us, args->in.addr,
+ args->in.target, interrupt_offset, &status);
+
+ memset(args, 0, sizeof(*args));
+
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "interrupt_wait_ioctl failed (%d)\n", rc);
+
+ return rc;
+ }
+
+ switch (status) {
+ case CS_WAIT_STATUS_COMPLETED:
+ args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+ break;
+ case CS_WAIT_STATUS_BUSY:
+ default:
+ args->out.status = HL_WAIT_CS_STATUS_BUSY;
+ break;
+ }
+
+ return 0;
+}
+
+int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ union hl_wait_cs_args *args = data;
+ u32 flags = args->in.flags;
+ int rc;
+
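+ /* A single wait IOCTL serves two flows: waiting on a user
+ * interrupt signalled by the device, or waiting on a CS fence
+ */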
+ if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
+ rc = hl_interrupt_wait_ioctl(hpriv, data);
+ else
+ rc = hl_cs_wait_ioctl(hpriv, data);
+
+ return rc;
+}
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index cda871afb8f4..62d705889ca8 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -20,6 +20,11 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
*/
hl_pending_cb_list_flush(ctx);
+ /* Release all allocated HW block mapped list entries and destroy
+ * the mutex.
+ */
+ hl_hw_block_mem_fini(ctx);
+
/*
* If we arrived here, there are no jobs waiting for this context
* on its queues so we can safely remove it.
@@ -160,13 +165,15 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (!ctx->cs_pending)
return -ENOMEM;
+ hl_hw_block_mem_init(ctx);
+
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_vm_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM;
- goto err_free_cs_pending;
+ goto err_hw_block_mem_fini;
}
rc = hdev->asic_funcs->ctx_init(ctx);
@@ -179,7 +186,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (!ctx->asid) {
dev_err(hdev->dev, "No free ASID, failed to create context\n");
rc = -ENOMEM;
- goto err_free_cs_pending;
+ goto err_hw_block_mem_fini;
}
rc = hl_vm_ctx_init(ctx);
@@ -214,7 +221,8 @@ err_vm_ctx_fini:
err_asid_free:
if (ctx->asid != HL_KERNEL_ASID_ID)
hl_asid_free(hdev, ctx->asid);
-err_free_cs_pending:
+err_hw_block_mem_fini:
+ hl_hw_block_mem_fini(ctx);
kfree(ctx->cs_pending);
return rc;
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 9f19bee7b592..8381155578a0 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -9,8 +9,8 @@
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#define MMU_ADDR_BUF_SIZE 40
#define MMU_ASID_BUF_SIZE 10
@@ -229,6 +229,7 @@ static int vm_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_vm_hw_block_list_node *lnode;
struct hl_ctx *ctx;
struct hl_vm *vm;
struct hl_vm_hash_node *hnode;
@@ -272,6 +273,21 @@ static int vm_show(struct seq_file *s, void *data)
}
mutex_unlock(&ctx->mem_hash_lock);
+ if (ctx->asid != HL_KERNEL_ASID_ID &&
+ !list_empty(&ctx->hw_block_mem_list)) {
+ seq_puts(s, "\nhw_block mappings:\n\n");
+ seq_puts(s, " virtual address size HW block id\n");
+ seq_puts(s, "-------------------------------------------\n");
+ mutex_lock(&ctx->hw_block_list_lock);
+ list_for_each_entry(lnode, &ctx->hw_block_mem_list,
+ node) {
+ seq_printf(s,
+ " 0x%-14lx %-6u %-9u\n",
+ lnode->vaddr, lnode->size, lnode->id);
+ }
+ mutex_unlock(&ctx->hw_block_list_lock);
+ }
+
vm = &ctx->hdev->vm;
spin_lock(&vm->idr_lock);
@@ -441,21 +457,86 @@ out:
return false;
}
-static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
- u64 *phys_addr)
+static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
+ u32 size)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 dram_start_addr, dram_end_addr;
+
+ if (!hdev->mmu_enable)
+ return false;
+
+ if (prop->dram_supports_virtual_memory) {
+ dram_start_addr = prop->dmmu.start_addr;
+ dram_end_addr = prop->dmmu.end_addr;
+ } else {
+ dram_start_addr = prop->dram_base_address;
+ dram_end_addr = prop->dram_end_address;
+ }
+
+ if (hl_mem_area_inside_range(addr, size, dram_start_addr,
+ dram_end_addr))
+ return true;
+
+ if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
+ prop->sram_end_address))
+ return true;
+
+ return false;
+}
+
+static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
+ u64 *phys_addr)
+{
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_ctx *ctx = hdev->compute_ctx;
- int rc = 0;
+ struct hl_vm_hash_node *hnode;
+ struct hl_userptr *userptr;
+ enum vm_type_t *vm_type;
+ bool valid = false;
+ u64 end_address;
+ u32 range_size;
+ int i, rc = 0;
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
}
+ /* Verify address is mapped */
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_for_each(ctx->mem_hash, i, hnode, node) {
+ vm_type = hnode->ptr;
+
+ if (*vm_type == VM_TYPE_USERPTR) {
+ userptr = hnode->ptr;
+ range_size = userptr->size;
+ } else {
+ phys_pg_pack = hnode->ptr;
+ range_size = phys_pg_pack->total_size;
+ }
+
+ end_address = virt_addr + size;
+ if ((virt_addr >= hnode->vaddr) &&
+ (end_address <= hnode->vaddr + range_size)) {
+ valid = true;
+ break;
+ }
+ }
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ if (!valid) {
+ dev_err(hdev->dev,
+ "virt addr 0x%llx is not mapped\n",
+ virt_addr);
+ return -EINVAL;
+ }
+
rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
if (rc) {
- dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
- virt_addr);
+ dev_err(hdev->dev,
+ "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
rc = -EINVAL;
}
@@ -467,10 +548,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- char tmp_buf[32];
u64 addr = entry->addr;
- u32 val;
+ bool user_address;
+ char tmp_buf[32];
ssize_t rc;
+ u32 val;
if (atomic_read(&hdev->in_reset)) {
dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
@@ -480,13 +562,14 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
if (*ppos)
return 0;
- if (hl_is_device_va(hdev, addr)) {
- rc = device_va_to_pa(hdev, addr, &addr);
+ user_address = hl_is_device_va(hdev, addr);
+ if (user_address) {
+ rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
if (rc)
return rc;
}
- rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
+ rc = hdev->asic_funcs->debugfs_read32(hdev, addr, user_address, &val);
if (rc) {
dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
return rc;
@@ -503,6 +586,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
+ bool user_address;
u32 value;
ssize_t rc;
@@ -515,13 +599,14 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
if (rc)
return rc;
- if (hl_is_device_va(hdev, addr)) {
- rc = device_va_to_pa(hdev, addr, &addr);
+ user_address = hl_is_device_va(hdev, addr);
+ if (user_address) {
+ rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
if (rc)
return rc;
}
- rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
+ rc = hdev->asic_funcs->debugfs_write32(hdev, addr, user_address, value);
if (rc) {
dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
value, addr);
@@ -536,21 +621,28 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- char tmp_buf[32];
u64 addr = entry->addr;
- u64 val;
+ bool user_address;
+ char tmp_buf[32];
ssize_t rc;
+ u64 val;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
+ return 0;
+ }
if (*ppos)
return 0;
- if (hl_is_device_va(hdev, addr)) {
- rc = device_va_to_pa(hdev, addr, &addr);
+ user_address = hl_is_device_va(hdev, addr);
+ if (user_address) {
+ rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
if (rc)
return rc;
}
- rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
+ rc = hdev->asic_funcs->debugfs_read64(hdev, addr, user_address, &val);
if (rc) {
dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
return rc;
@@ -567,20 +659,27 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
+ bool user_address;
u64 value;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
+ return 0;
+ }
+
rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
- if (hl_is_device_va(hdev, addr)) {
- rc = device_va_to_pa(hdev, addr, &addr);
+ user_address = hl_is_device_va(hdev, addr);
+ if (user_address) {
+ rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
if (rc)
return rc;
}
- rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
+ rc = hdev->asic_funcs->debugfs_write64(hdev, addr, user_address, value);
if (rc) {
dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
value, addr);
@@ -590,6 +689,63 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 addr = entry->addr;
+ ssize_t rc;
+ u32 size;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
+ return 0;
+ }
+ rc = kstrtouint_from_user(buf, count, 16, &size);
+ if (rc)
+ return rc;
+
+ if (!size) {
+ dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
+ return -EINVAL;
+ }
+
+ if (size > SZ_128M) {
+ dev_err(hdev->dev,
+ "DMA read failed. size can't be larger than 128MB\n");
+ return -EINVAL;
+ }
+
+ if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
+ dev_err(hdev->dev,
+ "DMA read failed. Invalid 0x%010llx + 0x%08x\n",
+ addr, size);
+ return -EINVAL;
+ }
+
+ /* Free the previous allocation, if there was any */
+ entry->blob_desc.size = 0;
+ vfree(entry->blob_desc.data);
+
+ entry->blob_desc.data = vmalloc(size);
+ if (!entry->blob_desc.data)
+ return -ENOMEM;
+
+ rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
+ entry->blob_desc.data);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
+ vfree(entry->blob_desc.data);
+ entry->blob_desc.data = NULL;
+ return -EIO;
+ }
+
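+ /* Publish the buffer through the read-only "data_dma" blob; it is
+ * freed on the next write to "dma_size" or when the device's
+ * debugfs entries are removed
+ */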
+ entry->blob_desc.size = size;
+
+ return count;
+}
+
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -871,7 +1027,7 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
hdev->stop_on_err = value ? 1 : 0;
- hl_device_reset(hdev, false, false);
+ hl_device_reset(hdev, 0);
return count;
}
@@ -899,6 +1055,11 @@ static const struct file_operations hl_data64b_fops = {
.write = hl_data_write64
};
+static const struct file_operations hl_dma_size_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_dma_size_write
+};
+
static const struct file_operations hl_i2c_data_fops = {
.owner = THIS_MODULE,
.read = hl_i2c_data_read,
@@ -1001,6 +1162,9 @@ void hl_debugfs_add_device(struct hl_device *hdev)
if (!dev_entry->entry_arr)
return;
+ dev_entry->blob_desc.size = 0;
+ dev_entry->blob_desc.data = NULL;
+
INIT_LIST_HEAD(&dev_entry->file_list);
INIT_LIST_HEAD(&dev_entry->cb_list);
INIT_LIST_HEAD(&dev_entry->cs_list);
@@ -1103,6 +1267,17 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_security_violations_fops);
+ debugfs_create_file("dma_size",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_dma_size_fops);
+
+ debugfs_create_blob("data_dma",
+ 0400,
+ dev_entry->root,
+ &dev_entry->blob_desc);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
@@ -1121,6 +1296,9 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
debugfs_remove_recursive(entry->root);
mutex_destroy(&entry->file_mutex);
+
+ vfree(entry->blob_desc.data);
+
kfree(entry->entry_arr);
}
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 334009e83823..00e92b678828 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -70,6 +70,9 @@ static void hpriv_release(struct kref *ref)
mutex_unlock(&hdev->fpriv_list_lock);
kfree(hpriv);
+
+ if (hdev->reset_upon_device_release)
+ hl_device_reset(hdev, 0);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -77,9 +80,9 @@ void hl_hpriv_get(struct hl_fpriv *hpriv)
kref_get(&hpriv->refcount);
}
-void hl_hpriv_put(struct hl_fpriv *hpriv)
+int hl_hpriv_put(struct hl_fpriv *hpriv)
{
- kref_put(&hpriv->refcount, hpriv_release);
+ return kref_put(&hpriv->refcount, hpriv_release);
}
/*
@@ -103,10 +106,17 @@ static int hl_device_release(struct inode *inode, struct file *filp)
return 0;
}
- hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
- hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ /* Each pending user interrupt holds the user's context, hence we
+ * must release them all before calling hl_ctx_mgr_fini().
+ */
+ hl_release_pending_user_interrupts(hpriv->hdev);
+
+ hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
+ hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
- hl_hpriv_put(hpriv);
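+ /* kref_put() returns 1 only when the last reference was dropped,
+ * so a zero return means live CS or memory mappings still hold
+ * references to this process' private data
+ */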
+ if (!hl_hpriv_put(hpriv))
+ dev_warn(hdev->dev,
+ "Device is still in use because there are live CS and/or memory mappings\n");
return 0;
}
@@ -283,7 +293,7 @@ static void device_hard_reset_pending(struct work_struct *work)
struct hl_device *hdev = device_reset_work->hdev;
int rc;
- rc = hl_device_reset(hdev, true, true);
+ rc = hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
dev_info(hdev->dev,
"Could not reset device. will try again in %u seconds",
@@ -311,11 +321,15 @@ static int device_early_init(struct hl_device *hdev)
switch (hdev->asic_type) {
case ASIC_GOYA:
goya_set_asic_funcs(hdev);
- strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
+ strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
break;
case ASIC_GAUDI:
gaudi_set_asic_funcs(hdev);
- sprintf(hdev->asic_name, "GAUDI");
+ strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
+ break;
+ case ASIC_GAUDI_SEC:
+ gaudi_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@@ -334,7 +348,7 @@ static int device_early_init(struct hl_device *hdev)
if (hdev->asic_prop.completion_queues_count) {
hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
sizeof(*hdev->cq_wq),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!hdev->cq_wq) {
rc = -ENOMEM;
goto asid_fini;
@@ -358,24 +372,24 @@ static int device_early_init(struct hl_device *hdev)
goto free_cq_wq;
}
- hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
- GFP_KERNEL);
- if (!hdev->hl_chip_info) {
+ hdev->sob_reset_wq = alloc_workqueue("hl-sob-reset", WQ_UNBOUND, 0);
+ if (!hdev->sob_reset_wq) {
+ dev_err(hdev->dev,
+ "Failed to allocate SOB reset workqueue\n");
rc = -ENOMEM;
goto free_eq_wq;
}
- hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE,
- sizeof(struct hl_device_idle_busy_ts),
- (GFP_KERNEL | __GFP_ZERO));
- if (!hdev->idle_busy_ts_arr) {
+ hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
+ GFP_KERNEL);
+ if (!hdev->hl_chip_info) {
rc = -ENOMEM;
- goto free_chip_info;
+ goto free_sob_reset_wq;
}
rc = hl_mmu_if_set_funcs(hdev);
if (rc)
- goto free_idle_busy_ts_arr;
+ goto free_chip_info;
hl_cb_mgr_init(&hdev->kernel_cb_mgr);
@@ -404,10 +418,10 @@ static int device_early_init(struct hl_device *hdev)
free_cb_mgr:
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
-free_idle_busy_ts_arr:
- kfree(hdev->idle_busy_ts_arr);
free_chip_info:
kfree(hdev->hl_chip_info);
+free_sob_reset_wq:
+ destroy_workqueue(hdev->sob_reset_wq);
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
@@ -441,9 +455,9 @@ static void device_early_fini(struct hl_device *hdev)
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
- kfree(hdev->idle_busy_ts_arr);
kfree(hdev->hl_chip_info);
+ destroy_workqueue(hdev->sob_reset_wq);
destroy_workqueue(hdev->eq_wq);
destroy_workqueue(hdev->device_reset_work.wq);
@@ -485,7 +499,7 @@ static void hl_device_heartbeat(struct work_struct *work)
goto reschedule;
dev_err(hdev->dev, "Device heartbeat failed!\n");
- hl_device_reset(hdev, true, false);
+ hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_HEARTBEAT);
return;
@@ -561,100 +575,24 @@ static void device_late_fini(struct hl_device *hdev)
hdev->late_init_done = false;
}
-uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
+int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
{
- struct hl_device_idle_busy_ts *ts;
- ktime_t zero_ktime, curr = ktime_get();
- u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
- s64 period_us, last_start_us, last_end_us, last_busy_time_us,
- total_busy_time_us = 0, total_busy_time_ms;
-
- zero_ktime = ktime_set(0, 0);
- period_us = period_ms * USEC_PER_MSEC;
- ts = &hdev->idle_busy_ts_arr[last_index];
-
- /* check case that device is currently in idle */
- if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
- !ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {
-
- last_index--;
- /* Handle case idle_busy_ts_idx was 0 */
- if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
- last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
-
- ts = &hdev->idle_busy_ts_arr[last_index];
- }
-
- while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
- /* Check if we are in last sample case. i.e. if the sample
- * begun before the sampling period. This could be a real
- * sample or 0 so need to handle both cases
- */
- last_start_us = ktime_to_us(
- ktime_sub(curr, ts->idle_to_busy_ts));
-
- if (last_start_us > period_us) {
-
- /* First check two cases:
- * 1. If the device is currently busy
- * 2. If the device was idle during the whole sampling
- * period
- */
-
- if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) {
- /* Check if the device is currently busy */
- if (ktime_compare(ts->idle_to_busy_ts,
- zero_ktime))
- return 100;
-
- /* We either didn't have any activity or we
- * reached an entry which is 0. Either way,
- * exit and return what was accumulated so far
- */
- break;
- }
-
- /* If sample has finished, check it is relevant */
- last_end_us = ktime_to_us(
- ktime_sub(curr, ts->busy_to_idle_ts));
-
- if (last_end_us > period_us)
- break;
-
- /* It is relevant so add it but with adjustment */
- last_busy_time_us = ktime_to_us(
- ktime_sub(ts->busy_to_idle_ts,
- ts->idle_to_busy_ts));
- total_busy_time_us += last_busy_time_us -
- (last_start_us - period_us);
- break;
- }
-
- /* Check if the sample is finished or still open */
- if (ktime_compare(ts->busy_to_idle_ts, zero_ktime))
- last_busy_time_us = ktime_to_us(
- ktime_sub(ts->busy_to_idle_ts,
- ts->idle_to_busy_ts));
- else
- last_busy_time_us = ktime_to_us(
- ktime_sub(curr, ts->idle_to_busy_ts));
-
- total_busy_time_us += last_busy_time_us;
+ u64 max_power, curr_power, dc_power, dividend;
+ int rc;
- last_index--;
- /* Handle case idle_busy_ts_idx was 0 */
- if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
- last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+ max_power = hdev->asic_prop.max_power_default;
+ dc_power = hdev->asic_prop.dc_power_default;
+ rc = hl_fw_cpucp_power_get(hdev, &curr_power);
- ts = &hdev->idle_busy_ts_arr[last_index];
+ if (rc)
+ return rc;
- overlap_cnt++;
- }
+ curr_power = clamp(curr_power, dc_power, max_power);
- total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us,
- USEC_PER_MSEC);
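+ /* Utilization is reported as the current draw's position between
+ * the idle (DC) power and the maximum power, expressed as a
+ * percentage
+ */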
+ dividend = (curr_power - dc_power) * 100;
+ *utilization = (u32) div_u64(dividend, (max_power - dc_power));
- return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms);
+ return 0;
}
/*
@@ -809,7 +747,7 @@ int hl_device_resume(struct hl_device *hdev)
hdev->disabled = false;
atomic_set(&hdev->in_reset, 0);
- rc = hl_device_reset(hdev, true, false);
+ rc = hl_device_reset(hdev, HL_RESET_HARD);
if (rc) {
dev_err(hdev->dev, "Failed to reset device during resume\n");
goto disable_device;
@@ -915,9 +853,7 @@ static void device_disable_open_processes(struct hl_device *hdev)
* hl_device_reset - reset the device
*
* @hdev: pointer to habanalabs device structure
- * @hard_reset: should we do hard reset to all engines or just reset the
- * compute/dma engines
- * @from_hard_reset_thread: is the caller the hard-reset thread
+ * @flags: reset flags.
*
* Block future CS and wait for pending CS to be enqueued
* Call ASIC H/W fini
@@ -929,9 +865,10 @@ static void device_disable_open_processes(struct hl_device *hdev)
*
* Returns 0 for success or an error on failure.
*/
-int hl_device_reset(struct hl_device *hdev, bool hard_reset,
- bool from_hard_reset_thread)
+int hl_device_reset(struct hl_device *hdev, u32 flags)
{
+ u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
+ bool hard_reset, from_hard_reset_thread;
int i, rc;
if (!hdev->init_done) {
@@ -940,6 +877,9 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
return 0;
}
+ hard_reset = (flags & HL_RESET_HARD) != 0;
+ from_hard_reset_thread = (flags & HL_RESET_FROM_RESET_THREAD) != 0;
+
if ((!hard_reset) && (!hdev->supports_soft_reset)) {
dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
hard_reset = true;
@@ -960,7 +900,11 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
if (rc)
return 0;
- if (hard_reset) {
+ /*
+ * If the reset is due to a heartbeat failure, the device CPU is not
+ * responsive, in which case there is no point in sending it a PCI
+ * disable message
+ */
+ if (hard_reset && !(flags & HL_RESET_HEARTBEAT)) {
/* Disable PCI access from device F/W so it won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
@@ -1030,6 +974,11 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
+ /* Release all pending user interrupts, each pending user interrupt
+ * holds a reference to user context
+ */
+ hl_release_pending_user_interrupts(hdev);
+
kill_processes:
if (hard_reset) {
/* Kill processes here after CS rollback. This is because the
@@ -1078,14 +1027,6 @@ kill_processes:
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
- hdev->idle_busy_ts_idx = 0;
- hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0);
- hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0);
-
- if (hdev->cs_active_cnt)
- dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
- hdev->cs_active_cnt);
-
mutex_lock(&hdev->fpriv_list_lock);
/* Make sure the context switch phase will run again */
@@ -1151,6 +1092,16 @@ kill_processes:
goto out_err;
}
+ /* If device is not idle fail the reset process */
+ if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
+ HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
+ dev_err(hdev->dev,
+ "device is not idle (mask %#llx %#llx) after reset\n",
+ idle_mask[0], idle_mask[1]);
+ rc = -EIO;
+ goto out_err;
+ }
+
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
@@ -1235,7 +1186,7 @@ out_err:
*/
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
- int i, rc, cq_cnt, cq_ready_cnt;
+ int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
char *name;
bool add_cdev_sysfs_on_err = false;
@@ -1274,13 +1225,26 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto free_dev_ctrl;
+ user_interrupt_cnt = hdev->asic_prop.user_interrupt_count;
+
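+ /* Allocate a wait-list structure per dedicated user interrupt;
+ * the common user interrupt is embedded in hdev and needs no
+ * allocation
+ */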
+ if (user_interrupt_cnt) {
+ hdev->user_interrupt = kcalloc(user_interrupt_cnt,
+ sizeof(*hdev->user_interrupt),
+ GFP_KERNEL);
+
+ if (!hdev->user_interrupt) {
+ rc = -ENOMEM;
+ goto early_fini;
+ }
+ }
+
/*
* Start calling ASIC initialization. First S/W then H/W and finally
* late init
*/
rc = hdev->asic_funcs->sw_init(hdev);
if (rc)
- goto early_fini;
+ goto user_interrupts_fini;
/*
* Initialize the H/W queues. Must be done before hw_init, because
@@ -1478,6 +1442,8 @@ hw_queues_destroy:
hl_hw_queues_destroy(hdev);
sw_fini:
hdev->asic_funcs->sw_fini(hdev);
+user_interrupts_fini:
+ kfree(hdev->user_interrupt);
early_fini:
device_early_fini(hdev);
free_dev_ctrl:
@@ -1609,6 +1575,7 @@ void hl_device_fini(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
kfree(hdev->completion_queue);
+ kfree(hdev->user_interrupt);
hl_hw_queues_destroy(hdev);
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 09706c571e95..832dd5c5bb06 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -293,6 +293,7 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
u32 cpu_security_boot_status_reg)
{
u32 err_val, security_val;
+ bool err_exists = false;
/* Some of the firmware status codes are deprecated in newer f/w
* versions. In those versions, the errors are reported
@@ -307,48 +308,102 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
if (!(err_val & CPU_BOOT_ERR0_ENABLED))
return 0;
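+ /* From here on, fatal conditions set err_exists, while warning
+ * and info conditions clear their bit in err_val so they don't
+ * fail the boot
+ */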
- if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+ if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
dev_err(hdev->dev,
"Device boot error - DRAM initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
- if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
dev_err(hdev->dev,
"Device boot error - Thermal Sensor initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
dev_warn(hdev->dev,
"Device boot warning - Skipped DRAM initialization\n");
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
+ }
if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
- if (hdev->bmc_enable)
- dev_warn(hdev->dev,
+ if (hdev->bmc_enable) {
+ dev_err(hdev->dev,
"Device boot error - Skipped waiting for BMC\n");
- else
+ err_exists = true;
+ } else {
+ dev_info(hdev->dev,
+ "Device boot message - Skipped waiting for BMC\n");
+ /* This is an info so we don't want it to disable the
+ * device
+ */
err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
+ }
}
- if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
+ if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
dev_err(hdev->dev,
"Device boot error - Serdes data from BMC not available\n");
- if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
dev_err(hdev->dev,
"Device boot error - NIC F/W initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
dev_warn(hdev->dev,
"Device boot warning - security not ready\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_SECURITY_NOT_RDY;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
dev_err(hdev->dev, "Device boot error - security failure\n");
- if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
dev_err(hdev->dev, "Device boot error - eFuse failure\n");
- if (err_val & CPU_BOOT_ERR0_PLL_FAIL)
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
dev_err(hdev->dev, "Device boot error - PLL failure\n");
+ err_exists = true;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
+ dev_err(hdev->dev,
+ "Device boot error - device unusable\n");
+ err_exists = true;
+ }
security_val = RREG32(cpu_security_boot_status_reg);
if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device security status %#x\n",
security_val);
- if (err_val & ~CPU_BOOT_ERR0_ENABLED)
+ if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
+ dev_err(hdev->dev,
+ "Device boot error - unknown error 0x%08x\n",
+ err_val);
+ err_exists = true;
+ }
+
+ if (err_exists)
return -EIO;
return 0;
@@ -419,6 +474,73 @@ out:
return rc;
}
+static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
+{
+ struct cpucp_array_data_packet *pkt;
+ size_t total_pkt_size, data_size;
+ u64 result;
+ int rc;
+
+ /* skip sending this info for unsupported ASICs */
+ if (!hdev->asic_funcs->get_msi_info)
+ return 0;
+
+ data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
+ total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;
+
+ /* data should be aligned to 8 bytes in order for CPU-CP to copy it */
+ total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
+
+ /* total_pkt_size is cast to u16 later on */
+ if (total_pkt_size > USHRT_MAX) {
+ dev_err(hdev->dev, "CPUCP array data is too big\n");
+ return -EINVAL;
+ }
+
+ pkt = kzalloc(total_pkt_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
+
+ hdev->asic_funcs->get_msi_info((u32 *)&pkt->data);
+
+ pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
+ total_pkt_size, 0, &result);
+
+ /*
+ * An invalid packet result means the F/W does not support this
+ * feature and will fall back to default/hard-coded MSI values, so
+ * there is no reason to stop the boot
+ */
+ if (rc && result == cpucp_packet_invalid)
+ rc = 0;
+
+ if (rc)
+ dev_err(hdev->dev, "failed to send CPUCP array data\n");
+
+ kfree(pkt);
+
+ return rc;
+}
+
+int hl_fw_cpucp_handshake(struct hl_device *hdev,
+ u32 cpu_security_boot_status_reg,
+ u32 boot_err0_reg)
+{
+ int rc;
+
+ rc = hl_fw_cpucp_info_get(hdev, cpu_security_boot_status_reg,
+ boot_err0_reg);
+ if (rc)
+ return rc;
+
+ return hl_fw_send_msi_info_msg(hdev);
+}
+
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
{
struct cpucp_packet pkt = {};
@@ -539,18 +661,63 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
return rc;
}
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
+int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+ enum pll_index *pll_index)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u8 pll_byte, pll_bit_off;
+ bool dynamic_pll;
+
+ if (input_pll_index >= PLL_MAX) {
+ dev_err(hdev->dev, "PLL index %d is out of range\n",
+ input_pll_index);
+ return -EINVAL;
+ }
+
+ dynamic_pll = prop->fw_security_status_valid &&
+ (prop->fw_app_security_map & CPU_BOOT_DEV_STS0_DYN_PLL_EN);
+
+ if (!dynamic_pll) {
+ /*
+ * in case we are working with legacy FW (each asic has unique
+ * PLL numbering) extract the legacy numbering
+ */
+ *pll_index = hdev->legacy_pll_map[input_pll_index];
+ return 0;
+ }
+
+ /* PLL map is a u8 bitmap: bit N of byte N/8 marks PLL N as supported */
+ pll_byte = prop->cpucp_info.pll_map[input_pll_index >> 3];
+ pll_bit_off = input_pll_index & 0x7;
+
+ if (!(pll_byte & BIT(pll_bit_off))) {
+ dev_err(hdev->dev, "PLL index %d is not supported\n",
+ input_pll_index);
+ return -EINVAL;
+ }
+
+ *pll_index = input_pll_index;
+
+ return 0;
+}
+
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
u16 *pll_freq_arr)
{
struct cpucp_packet pkt;
+ enum pll_index used_pll_idx;
u64 result;
int rc;
+ rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
+ if (rc)
+ return rc;
+
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
- pkt.pll_type = __cpu_to_le16(pll_index);
+ pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
@@ -565,6 +732,29 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
return rc;
}
+int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
+{
+ struct cpucp_packet pkt;
+ u64 result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
+ return rc;
+ }
+
+ *power = result;
+
+ return rc;
+}
+
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
/* Some of the status codes below are deprecated in newer f/w
@@ -623,7 +813,11 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 status, security_status;
int rc;
- if (!hdev->cpu_enable)
+ /* The pldm check covers cases in which we run preboot on PLDM and
+ * want to load the boot fit, but can't wait for preboot because it
+ * runs very slowly
+ */
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) || hdev->pldm)
return 0;
/* Need to check two possible scenarios:
@@ -677,16 +871,16 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
prop->fw_security_status_valid = 1;
+ /* FW security should be derived from the PCI ID; we keep this
+ * check for backward compatibility
+ */
if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
prop->fw_security_disabled = false;
- else
- prop->fw_security_disabled = true;
if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
prop->hard_reset_done_by_fw = true;
} else {
prop->fw_security_status_valid = 0;
- prop->fw_security_disabled = true;
}
dev_dbg(hdev->dev, "Firmware preboot security status %#x\n",
@@ -710,7 +904,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 status;
int rc;
- if (!(hdev->fw_loading & FW_TYPE_BOOT_CPU))
+ if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
return 0;
dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
@@ -801,7 +995,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
- if (!(hdev->fw_loading & FW_TYPE_LINUX)) {
+ if (!(hdev->fw_components & FW_TYPE_LINUX)) {
dev_info(hdev->dev, "Skip loading Linux F/W\n");
goto out;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 4b321e4f8059..44e89da30b4a 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -19,6 +19,7 @@
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
+#include <linux/debugfs.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
@@ -61,7 +62,7 @@
#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
-#define HL_IDLE_BUSY_TS_ARR_SIZE 4096
+#define HL_COMMON_USER_INTERRUPT_ID 0xFFF
/* Memory */
#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -102,6 +103,23 @@ enum hl_mmu_page_table_location {
#define HL_MAX_DCORES 4
+/*
+ * Reset Flags
+ *
+ * - HL_RESET_HARD
+ * If set do hard reset to all engines. If not set reset just
+ * compute/DMA engines.
+ *
+ * - HL_RESET_FROM_RESET_THREAD
+ * Set if the caller is the hard-reset thread
+ *
+ * - HL_RESET_HEARTBEAT
+ * Set if reset is due to heartbeat
+ */
+#define HL_RESET_HARD (1 << 0)
+#define HL_RESET_FROM_RESET_THREAD (1 << 1)
+#define HL_RESET_HEARTBEAT (1 << 2)
+
#define HL_MAX_SOBS_PER_MONITOR 8
/**
@@ -169,15 +187,19 @@ enum hl_fw_component {
};
/**
- * enum hl_fw_types - F/W types to load
+ * enum hl_fw_types - F/W types present in the system
* @FW_TYPE_LINUX: Linux image for device CPU
* @FW_TYPE_BOOT_CPU: Boot image for device CPU
+ * @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
+ * (preboot, ppboot, etc.)
* @FW_TYPE_ALL_TYPES: Mask for all types
*/
enum hl_fw_types {
FW_TYPE_LINUX = 0x1,
FW_TYPE_BOOT_CPU = 0x2,
- FW_TYPE_ALL_TYPES = (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU)
+ FW_TYPE_PREBOOT_CPU = 0x4,
+ FW_TYPE_ALL_TYPES =
+ (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU | FW_TYPE_PREBOOT_CPU)
};
/**
@@ -368,6 +390,7 @@ struct hl_mmu_properties {
* @dram_size: DRAM total size.
* @dram_pci_bar_size: size of PCI bar towards DRAM.
* @max_power_default: max power of the device after reset
+ * @dc_power_default: power consumed by the device in idle mode.
* @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
* fault.
* @pcie_dbi_base_address: Base address of the PCIE_DBI block.
@@ -412,6 +435,7 @@ struct hl_mmu_properties {
* @first_available_user_msix_interrupt: first available msix interrupt
* reserved for the user
* @first_available_cq: first available CQ for the user.
+ * @user_interrupt_count: number of user interrupts.
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
* @fw_security_disabled: true if security measures are disabled in firmware,
@@ -421,6 +445,7 @@ struct hl_mmu_properties {
* @dram_supports_virtual_memory: is there an MMU towards the DRAM
* @hard_reset_done_by_fw: true if firmware is handling hard reset flow
* @num_functional_hbms: number of functional HBMs in each DCORE.
+ * @iatu_done_by_fw: true if iATU configuration is being done by FW.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -439,6 +464,7 @@ struct asic_fixed_properties {
u64 dram_size;
u64 dram_pci_bar_size;
u64 max_power_default;
+ u64 dc_power_default;
u64 dram_size_for_default_page_mapping;
u64 pcie_dbi_base_address;
u64 pcie_aux_dbi_reg_addr;
@@ -475,6 +501,7 @@ struct asic_fixed_properties {
u16 first_available_user_mon[HL_MAX_DCORES];
u16 first_available_user_msix_interrupt;
u16 first_available_cq[HL_MAX_DCORES];
+ u16 user_interrupt_count;
u8 tpc_enabled_mask;
u8 completion_queues_count;
u8 fw_security_disabled;
@@ -482,6 +509,7 @@ struct asic_fixed_properties {
u8 dram_supports_virtual_memory;
u8 hard_reset_done_by_fw;
u8 num_functional_hbms;
+ u8 iatu_done_by_fw;
};
/**
@@ -503,6 +531,7 @@ struct hl_fence {
/**
* struct hl_cs_compl - command submission completion object.
+ * @sob_reset_work: workqueue object to run SOB reset flow.
* @base_fence: hl fence object.
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
@@ -513,6 +542,7 @@ struct hl_fence {
* @sob_group: the SOB group that is used in this collective wait CS.
*/
struct hl_cs_compl {
+ struct work_struct sob_reset_work;
struct hl_fence base_fence;
spinlock_t lock;
struct hl_device *hdev;
@@ -690,6 +720,31 @@ struct hl_cq {
};
/**
+ * struct hl_user_interrupt - holds user interrupt information
+ * @hdev: pointer to the device structure
+ * @wait_list_head: head to the list of user threads pending on this interrupt
+ * @wait_list_lock: protects wait_list_head
+ * @interrupt_id: msix interrupt id
+ */
+struct hl_user_interrupt {
+ struct hl_device *hdev;
+ struct list_head wait_list_head;
+ spinlock_t wait_list_lock;
+ u32 interrupt_id;
+};
+
+/**
+ * struct hl_user_pending_interrupt - holds the context of a user thread
+ * pending on an interrupt
+ * @wait_list_node: node in the list of user threads pending on an interrupt
+ * @fence: hl fence object for interrupt completion
+ */
+struct hl_user_pending_interrupt {
+ struct list_head wait_list_node;
+ struct hl_fence fence;
+};
+
+/**
* struct hl_eq - describes the event queue (single one per device)
* @hdev: pointer to the device structure
* @kernel_address: holds the queue's kernel virtual address
@@ -713,11 +768,13 @@ struct hl_eq {
* @ASIC_INVALID: Invalid ASIC type.
* @ASIC_GOYA: Goya device.
* @ASIC_GAUDI: Gaudi device.
+ * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
*/
enum hl_asic_type {
ASIC_INVALID,
ASIC_GOYA,
- ASIC_GAUDI
+ ASIC_GAUDI,
+ ASIC_GAUDI_SEC
};
struct hl_cs_parser;
@@ -802,8 +859,12 @@ enum div_select_defs {
* @update_eq_ci: update event queue CI.
* @context_switch: called upon ASID context switch.
* @restore_phase_topology: clear all SOBs amd MONs.
- * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
- * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
+ * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
+ * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
+ * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
+ * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
+ * @debugfs_read_dma: debug interface for reading up to 128MB from the device's
+ * internal memory via DMA engine.
* @add_device_attr: add ASIC specific device attributes.
* @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
* @set_pll_profile: change PLL profile (manual/automatic).
@@ -919,10 +980,16 @@ struct hl_asic_funcs {
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
int (*context_switch)(struct hl_device *hdev, u32 asid);
void (*restore_phase_topology)(struct hl_device *hdev);
- int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
- int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
- int (*debugfs_read64)(struct hl_device *hdev, u64 addr, u64 *val);
- int (*debugfs_write64)(struct hl_device *hdev, u64 addr, u64 val);
+ int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
+ bool user_address, u32 *val);
+ int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
+ bool user_address, u32 val);
+ int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
+ bool user_address, u64 *val);
+ int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
+ bool user_address, u64 val);
+ int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
+ void *blob_addr);
void (*add_device_attr)(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
void (*handle_eqe)(struct hl_device *hdev,
@@ -986,6 +1053,7 @@ struct hl_asic_funcs {
int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
u32 block_id, u32 block_size);
void (*enable_events_from_fw)(struct hl_device *hdev);
+ void (*get_msi_info)(u32 *table);
};
@@ -1070,9 +1138,11 @@ struct hl_pending_cb {
* @mem_hash_lock: protects the mem_hash.
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
* MMU hash or walking the PGT requires taking this lock.
+ * @hw_block_list_lock: protects the HW block memory list.
* @debugfs_list: node in debugfs list of contexts.
* @pending_cb_list: list of pending command buffers waiting to be sent upon
* next user command submission context.
+ * @hw_block_mem_list: list of HW block virtual mapped addresses.
* @cs_counters: context command submission counters.
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
@@ -1109,8 +1179,10 @@ struct hl_ctx {
struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
struct mutex mem_hash_lock;
struct mutex mmu_lock;
+ struct mutex hw_block_list_lock;
struct list_head debugfs_list;
struct list_head pending_cb_list;
+ struct list_head hw_block_mem_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
u64 cs_sequence;
@@ -1185,6 +1257,7 @@ struct hl_userptr {
* @sequence: the sequence number of this CS.
* @staged_sequence: the sequence of the staged submission this CS is part of,
* relevant only if staged_cs is set.
+ * @timeout_jiffies: cs timeout in jiffies.
* @type: CS_TYPE_*.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
@@ -1213,6 +1286,7 @@ struct hl_cs {
struct list_head debugfs_list;
u64 sequence;
u64 staged_sequence;
+ u64 timeout_jiffies;
enum hl_cs_type type;
u8 submitted;
u8 completed;
@@ -1330,6 +1404,23 @@ struct hl_vm_hash_node {
};
/**
+ * struct hl_vm_hw_block_list_node - list element from user virtual address to
+ * HW block id.
+ * @node: node to hang on the list in context object.
+ * @ctx: the context this node belongs to.
+ * @vaddr: virtual address of the HW block.
+ * @size: size of the block.
+ * @id: HW block id (handle).
+ */
+struct hl_vm_hw_block_list_node {
+ struct list_head node;
+ struct hl_ctx *ctx;
+ unsigned long vaddr;
+ u32 size;
+ u32 id;
+};
+
+/**
* struct hl_vm_phys_pg_pack - physical page pack.
* @vm_type: describes the type of the virtual area descriptor.
* @pages: the physical page array.
@@ -1490,12 +1581,13 @@ struct hl_debugfs_entry {
* @userptr_spinlock: protects userptr_list.
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
* @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
+ * @blob_desc: descriptor of the debugfs data_dma blob.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
* @mmu_asid: ASID to use while translating in mmu_show.
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
- * @i2c_bus: generic u8 debugfs file for address value to use in i2c_data_read.
- * @i2c_bus: generic u8 debugfs file for register value to use in i2c_data_read.
+ * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
+ * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -1513,6 +1605,7 @@ struct hl_dbg_device_entry {
spinlock_t userptr_spinlock;
struct list_head ctx_mem_hash_list;
spinlock_t ctx_mem_hash_spinlock;
+ struct debugfs_blob_wrapper blob_desc;
u64 addr;
u64 mmu_addr;
u32 mmu_asid;
@@ -1684,16 +1777,6 @@ struct hl_device_reset_work {
};
/**
- * struct hl_device_idle_busy_ts - used for calculating device utilization rate.
- * @idle_to_busy_ts: timestamp where device changed from idle to busy.
- * @busy_to_idle_ts: timestamp where device changed from busy to idle.
- */
-struct hl_device_idle_busy_ts {
- ktime_t idle_to_busy_ts;
- ktime_t busy_to_idle_ts;
-};
-
-/**
* struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
* information.
* @virt_addr: the virtual address of the hop.
@@ -1821,9 +1904,16 @@ struct hl_mmu_funcs {
* @asic_name: ASIC specific name.
* @asic_type: ASIC specific type.
* @completion_queue: array of hl_cq.
+ * @user_interrupt: array of hl_user_interrupt. Upon the corresponding user
+ * interrupt, the driver will monitor the list of fences
+ * registered to this interrupt.
+ * @common_user_interrupt: common user interrupt for all user interrupts.
+ * Upon any user interrupt, the driver will monitor the
+ * list of fences registered to this common structure.
* @cq_wq: work queues of completion queues for executing work in process
* context.
* @eq_wq: work queue of event queue for executing work in process context.
+ * @sob_reset_wq: work queue for sob reset executions.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
* @cs_mirror_list: CS mirror list for TDR.
@@ -1857,11 +1947,11 @@ struct hl_mmu_funcs {
* when a user opens the device
* @fpriv_list_lock: protects the fpriv_list
* @compute_ctx: current compute context executing.
- * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
- * and vice-versa
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
+ * @legacy_pll_map: map between dynamic (common) PLL indexes and
+ * static (asic specific) PLL indexes.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1874,13 +1964,10 @@ struct hl_mmu_funcs {
* @curr_pll_profile: current PLL profile.
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
- * @cs_active_cnt: number of active command submissions on this device (active
- * means already in H/W queues)
* @major: habanalabs kernel driver major.
* @high_pll: high PLL profile frequency.
* @soft_reset_cnt: number of soft reset since the driver was loaded.
* @hard_reset_cnt: number of hard reset since the driver was loaded.
- * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
* @clk_throttling_reason: bitmask represents the current clk throttling reasons
* @id: device minor.
* @id_control: minor of the control device
@@ -1937,8 +2024,11 @@ struct hl_device {
char status[HL_DEV_STS_MAX][HL_STR_MAX];
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
+ struct hl_user_interrupt *user_interrupt;
+ struct hl_user_interrupt common_user_interrupt;
struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
+ struct workqueue_struct *sob_reset_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
struct list_head cs_mirror_list;
@@ -1976,13 +2066,13 @@ struct hl_device {
struct hl_ctx *compute_ctx;
- struct hl_device_idle_busy_ts *idle_busy_ts_arr;
-
struct hl_cs_counters_atomic aggregated_cs_counters;
struct hl_mmu_priv mmu_priv;
struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
+ enum pll_index *legacy_pll_map;
+
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -1990,12 +2080,10 @@ struct hl_device {
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
enum cpucp_card_types card_type;
- int cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
u32 hard_reset_cnt;
- u32 idle_busy_ts_idx;
u32 clk_throttling_reason;
u16 id;
u16 id_control;
@@ -2029,10 +2117,9 @@ struct hl_device {
/* Parameters for bring-up */
u64 nic_ports_mask;
- u64 fw_loading;
+ u64 fw_components;
u8 mmu_enable;
u8 mmu_huge_page_opt;
- u8 cpu_enable;
u8 reset_pcilink;
u8 cpu_queues_enable;
u8 pldm;
@@ -2043,6 +2130,7 @@ struct hl_device {
u8 bmc_enable;
u8 rl_enable;
u8 reset_on_preboot_fail;
+ u8 reset_upon_device_release;
};
@@ -2157,6 +2245,8 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
+irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
+irqreturn_t hl_irq_handler_default(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);
int hl_asid_init(struct hl_device *hdev);
@@ -2178,12 +2268,11 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass);
void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
-int hl_device_reset(struct hl_device *hdev, bool hard_reset,
- bool from_hard_reset_thread);
+int hl_device_reset(struct hl_device *hdev, u32 flags);
void hl_hpriv_get(struct hl_fpriv *hpriv);
-void hl_hpriv_put(struct hl_fpriv *hpriv);
+int hl_hpriv_put(struct hl_fpriv *hpriv);
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
-uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);
+int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
struct cpucp_sensor *sensors_arr);
@@ -2235,6 +2324,9 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx);
int hl_vm_init(struct hl_device *hdev);
void hl_vm_fini(struct hl_device *hdev);
+void hl_hw_block_mem_init(struct hl_ctx *ctx);
+void hl_hw_block_mem_fini(struct hl_ctx *ctx);
+
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_va_range_type type, u32 size, u32 alignment);
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
@@ -2287,13 +2379,19 @@ int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_cpucp_info_get(struct hl_device *hdev,
u32 cpu_security_boot_status_reg,
u32 boot_err0_reg);
+int hl_fw_cpucp_handshake(struct hl_device *hdev,
+ u32 cpu_security_boot_status_reg,
+ u32 boot_err0_reg);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
u64 *total_energy);
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
+int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+ enum pll_index *pll_index);
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
u16 *pll_freq_arr);
+int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
@@ -2304,6 +2402,7 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
+int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
struct hl_inbound_pci_region *pci_region);
@@ -2312,8 +2411,10 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
-long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
-void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
+long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+ bool curr);
+void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+ u64 freq);
int hl_get_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long *value);
int hl_set_temperature(struct hl_device *hdev,
@@ -2334,6 +2435,7 @@ int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
+void hl_release_pending_user_interrupts(struct hl_device *hdev);
#ifdef CONFIG_DEBUG_FS
@@ -2434,7 +2536,7 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
-int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
#endif /* HABANALABSP_H_ */
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 032d114f01ea..7135f1e03864 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -27,13 +27,13 @@ static struct class *hl_class;
static DEFINE_IDR(hl_devs_idr);
static DEFINE_MUTEX(hl_devs_idr_lock);
-static int timeout_locked = 5;
+static int timeout_locked = 30;
static int reset_on_lockup = 1;
static int memory_scrub = 1;
module_param(timeout_locked, int, 0444);
MODULE_PARM_DESC(timeout_locked,
- "Device lockup timeout in seconds (0 = disabled, default 5s)");
+ "Device lockup timeout in seconds (0 = disabled, default 30s)");
module_param(reset_on_lockup, int, 0444);
MODULE_PARM_DESC(reset_on_lockup,
@@ -47,10 +47,12 @@ MODULE_PARM_DESC(memory_scrub,
#define PCI_IDS_GOYA 0x0001
#define PCI_IDS_GAUDI 0x1000
+#define PCI_IDS_GAUDI_SEC 0x1010
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI_SEC), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
@@ -74,6 +76,9 @@ static enum hl_asic_type get_asic_type(u16 device)
case PCI_IDS_GAUDI:
asic_type = ASIC_GAUDI;
break;
+ case PCI_IDS_GAUDI_SEC:
+ asic_type = ASIC_GAUDI_SEC;
+ break;
default:
asic_type = ASIC_INVALID;
break;
@@ -82,6 +87,16 @@ static enum hl_asic_type get_asic_type(u16 device)
return asic_type;
}
+static bool is_asic_secured(enum hl_asic_type asic_type)
+{
+ switch (asic_type) {
+ case ASIC_GAUDI_SEC:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* hl_device_open - open function for habanalabs device
*
@@ -234,8 +249,7 @@ out_err:
static void set_driver_behavior_per_device(struct hl_device *hdev)
{
- hdev->cpu_enable = 1;
- hdev->fw_loading = FW_TYPE_ALL_TYPES;
+ hdev->fw_components = FW_TYPE_ALL_TYPES;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
hdev->mmu_enable = 1;
@@ -288,6 +302,12 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->asic_type = asic_type;
}
+ if (pdev)
+ hdev->asic_prop.fw_security_disabled =
+ !is_asic_secured(pdev->device);
+ else
+ hdev->asic_prop.fw_security_disabled = true;
+
/* Assign status description string */
strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
"disabled", HL_STR_MAX);
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 083a30969c5f..33841c272eb6 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -226,19 +226,14 @@ static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
struct hl_info_device_utilization device_util = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
if ((!max_size) || (!out))
return -EINVAL;
- if ((args->period_ms < 100) || (args->period_ms > 1000) ||
- (args->period_ms % 100)) {
- dev_err(hdev->dev,
- "period %u must be between 100 - 1000 and must be divisible by 100\n",
- args->period_ms);
+ rc = hl_device_utilization(hdev, &device_util.utilization);
+ if (rc)
return -EINVAL;
- }
-
- device_util.utilization = hl_device_utilization(hdev, args->period_ms);
return copy_to_user(out, &device_util,
min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
@@ -446,6 +441,25 @@ static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}
+static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct hl_power_info power_info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &power_info,
+ min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
+}
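
As a usage sketch for the new op (not part of this patch): HL_INFO_POWER and struct hl_power_info are expected to come from the habanalabs uapi header, and the hl_info_args field names below (op, return_pointer, return_size) are assumed from that header rather than shown in this hunk.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi header; defines HL_IOCTL_INFO */

/* Minimal sketch, assuming the uapi layout described above */
static int query_power(int fd)
{
	struct hl_power_info power;
	struct hl_info_args args;

	memset(&power, 0, sizeof(power));
	memset(&args, 0, sizeof(args));

	args.op = HL_INFO_POWER;
	args.return_pointer = (uint64_t) (uintptr_t) &power;
	args.return_size = sizeof(power);

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return -1;

	printf("power: %llu\n", (unsigned long long) power.power);
	return 0;
}

The kernel side copies min(return_size, sizeof(power_info)) bytes back, so passing a full-sized buffer is sufficient.
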
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -526,6 +540,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_PLL_FREQUENCY:
return pll_frequency_info(hpriv, args);
+ case HL_INFO_POWER:
+ return power_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
@@ -596,7 +613,7 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
- HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 0f335182267f..173438461835 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -629,20 +629,12 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
first_entry && cs_needs_timeout(cs)) {
cs->tdr_active = true;
- schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
+ schedule_delayed_work(&cs->work_tdr, cs->timeout_jiffies);
}
spin_unlock(&hdev->cs_mirror_lock);
- if (!hdev->cs_active_cnt++) {
- struct hl_device_idle_busy_ts *ts;
-
- ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
- ts->busy_to_idle_ts = ktime_set(0, 0);
- ts->idle_to_busy_ts = ktime_get();
- }
-
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
switch (job->queue_type) {
case QUEUE_TYPE_EXT:
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 44a0522b59b9..27129868c711 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -137,6 +137,62 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
return IRQ_HANDLED;
}
+static void handle_user_cq(struct hl_device *hdev,
+ struct hl_user_interrupt *user_cq)
+{
+ struct hl_user_pending_interrupt *pend;
+
+ spin_lock(&user_cq->wait_list_lock);
+ list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node)
+ complete_all(&pend->fence.completion);
+ spin_unlock(&user_cq->wait_list_lock);
+}
+
+/**
+ * hl_irq_handler_user_cq - irq handler for user completion queues
+ *
+ * @irq: irq number
+ * @arg: pointer to user interrupt structure
+ *
+ */
+irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
+{
+ struct hl_user_interrupt *user_cq = arg;
+ struct hl_device *hdev = user_cq->hdev;
+
+ dev_dbg(hdev->dev,
+		"got user completion interrupt id %u\n",
+ user_cq->interrupt_id);
+
+ /* Handle user cq interrupts registered on all interrupts */
+ handle_user_cq(hdev, &hdev->common_user_interrupt);
+
+ /* Handle user cq interrupts registered on this specific interrupt */
+ handle_user_cq(hdev, user_cq);
+
+ return IRQ_HANDLED;
+}
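
To make the pairing concrete, here is a hedged sketch of the waiter side that handle_user_cq() completes; the fence.completion and wait_list_node fields are taken from the handler above, while the overall shape of the wait path is an assumption (the real one lands with the wait-ioctl rework):

/*
 * Sketch only: queue a pending interrupt on the wait list and block on
 * its fence, which hl_irq_handler_user_cq() -> handle_user_cq() completes.
 */
static int wait_on_user_interrupt_sketch(struct hl_user_interrupt *intr,
					 unsigned long timeout_jiffies)
{
	struct hl_user_pending_interrupt pend;
	unsigned long flags;
	long rc;

	init_completion(&pend.fence.completion);

	spin_lock_irqsave(&intr->wait_list_lock, flags);
	list_add_tail(&pend.wait_list_node, &intr->wait_list_head);
	spin_unlock_irqrestore(&intr->wait_list_lock, flags);

	rc = wait_for_completion_interruptible_timeout(
			&pend.fence.completion, timeout_jiffies);

	spin_lock_irqsave(&intr->wait_list_lock, flags);
	list_del(&pend.wait_list_node);
	spin_unlock_irqrestore(&intr->wait_list_lock, flags);

	if (rc > 0)
		return 0;
	return rc ? (int) rc : -ETIMEDOUT;
}
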
+
+/**
+ * hl_irq_handler_default - default irq handler
+ *
+ * @irq: irq number
+ * @arg: pointer to user interrupt structure
+ *
+ */
+irqreturn_t hl_irq_handler_default(int irq, void *arg)
+{
+ struct hl_user_interrupt *user_interrupt = arg;
+ struct hl_device *hdev = user_interrupt->hdev;
+ u32 interrupt_id = user_interrupt->interrupt_id;
+
+ dev_err(hdev->dev,
+		"got invalid user interrupt %u\n",
+ interrupt_id);
+
+ return IRQ_HANDLED;
+}
+
/**
* hl_irq_handler_eq - irq handler for event queue
*
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 1f5910517b0e..2938cbbafbbc 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -81,16 +81,6 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
num_pgs, total_size);
return -ENOMEM;
}
-
- if (hdev->memory_scrub) {
- rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr,
- total_size);
- if (rc) {
- dev_err(hdev->dev,
- "Failed to scrub contiguous device memory\n");
- goto pages_pack_err;
- }
- }
}
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
@@ -128,24 +118,13 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
goto page_err;
}
- if (hdev->memory_scrub) {
- rc = hdev->asic_funcs->scrub_device_mem(hdev,
- phys_pg_pack->pages[i],
- page_size);
- if (rc) {
- dev_err(hdev->dev,
- "Failed to scrub device memory\n");
- goto page_err;
- }
- }
-
num_curr_pgs++;
}
}
spin_lock(&vm->idr_lock);
handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
- GFP_ATOMIC);
+ GFP_KERNEL);
spin_unlock(&vm->idr_lock);
if (handle < 0) {
@@ -280,37 +259,67 @@ static void dram_pg_pool_do_release(struct kref *ref)
* @phys_pg_pack: physical page pack to free.
*
* This function does the following:
- * - For DRAM memory only, iterate over the pack and free each physical block
- * structure by returning it to the general pool.
+ * - For DRAM memory only:
+ *   - iterate over the pack; scrub and free each physical block structure
+ *     by returning it to the general pool.
+ *     If scrubbing fails, a hard reset is initiated; once the reset is
+ *     triggered, scrubbing is bypassed while the memory is still freed.
* - Free the hl_vm_phys_pg_pack structure.
*/
-static void free_phys_pg_pack(struct hl_device *hdev,
+static int free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
+ int rc = 0;
- if (!phys_pg_pack->created_from_userptr) {
- if (phys_pg_pack->contiguous) {
- gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
+ if (phys_pg_pack->created_from_userptr)
+ goto end;
+
+ if (phys_pg_pack->contiguous) {
+ if (hdev->memory_scrub && !hdev->disabled) {
+ rc = hdev->asic_funcs->scrub_device_mem(hdev,
+ phys_pg_pack->pages[0],
phys_pg_pack->total_size);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to scrub contiguous device memory\n");
+ }
- for (i = 0; i < phys_pg_pack->npages ; i++)
- kref_put(&vm->dram_pg_pool_refcount,
- dram_pg_pool_do_release);
- } else {
- for (i = 0 ; i < phys_pg_pack->npages ; i++) {
- gen_pool_free(vm->dram_pg_pool,
+ gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
+ phys_pg_pack->total_size);
+
+ for (i = 0; i < phys_pg_pack->npages ; i++)
+ kref_put(&vm->dram_pg_pool_refcount,
+ dram_pg_pool_do_release);
+ } else {
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
+ rc = hdev->asic_funcs->scrub_device_mem(
+ hdev,
phys_pg_pack->pages[i],
phys_pg_pack->page_size);
- kref_put(&vm->dram_pg_pool_refcount,
- dram_pg_pool_do_release);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to scrub device memory\n");
}
+ gen_pool_free(vm->dram_pg_pool,
+ phys_pg_pack->pages[i],
+ phys_pg_pack->page_size);
+ kref_put(&vm->dram_pg_pool_refcount,
+ dram_pg_pool_do_release);
}
}
+ if (rc && !hdev->disabled)
+ hl_device_reset(hdev, HL_RESET_HARD);
+
+end:
kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
+
+ return rc;
}
/**
@@ -349,7 +358,7 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
- free_phys_pg_pack(hdev, phys_pg_pack);
+ return free_phys_pg_pack(hdev, phys_pg_pack);
} else {
spin_unlock(&vm->idr_lock);
dev_err(hdev->dev,
@@ -857,6 +866,7 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
u32 page_size = phys_pg_pack->page_size;
int rc = 0;
+ bool is_host_addr;
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
@@ -878,6 +888,8 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
return 0;
err:
+ is_host_addr = !hl_is_dram_va(hdev, vaddr);
+
next_vaddr = vaddr;
for (i = 0 ; i < mapped_pg_cnt ; i++) {
if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
@@ -888,6 +900,17 @@ err:
phys_pg_pack->pages[i], page_size);
next_vaddr += page_size;
+
+ /*
+ * unmapping on Palladium can be really long, so avoid a CPU
+ * soft lockup bug by sleeping a little between unmapping pages
+ *
+	 * In addition, the number of host pages can be huge because the
+	 * page size can be as small as 4KB, so when unmapping host pages,
+	 * sleep every 32K pages to avoid a soft lockup
+ */
+ if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+ usleep_range(50, 200);
}
return rc;
@@ -921,9 +944,9 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
*
- * In addition, when unmapping host memory we pass through
- * the Linux kernel to unpin the pages and that takes a long
- * time. Therefore, sleep every 32K pages to avoid soft lockup
+	 * In addition, the number of host pages can be huge because the
+	 * page size can be as small as 4KB, so when unmapping host pages,
+	 * sleep every 32K pages to avoid a soft lockup
*/
if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
usleep_range(50, 200);
@@ -1117,9 +1140,9 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
*device_addr = ret_vaddr;
if (is_userptr)
- free_phys_pg_pack(hdev, phys_pg_pack);
+ rc = free_phys_pg_pack(hdev, phys_pg_pack);
- return 0;
+ return rc;
map_err:
if (add_va_block(hdev, va_range, ret_vaddr,
@@ -1272,7 +1295,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
kfree(hnode);
if (is_userptr) {
- free_phys_pg_pack(hdev, phys_pg_pack);
+ rc = free_phys_pg_pack(hdev, phys_pg_pack);
dma_unmap_host_va(hdev, userptr);
}
@@ -1305,9 +1328,15 @@ static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
static void hw_block_vm_close(struct vm_area_struct *vma)
{
- struct hl_ctx *ctx = (struct hl_ctx *) vma->vm_private_data;
+ struct hl_vm_hw_block_list_node *lnode =
+ (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
+ struct hl_ctx *ctx = lnode->ctx;
+ mutex_lock(&ctx->hw_block_list_lock);
+ list_del(&lnode->node);
+ mutex_unlock(&ctx->hw_block_list_lock);
hl_ctx_put(ctx);
+ kfree(lnode);
vma->vm_private_data = NULL;
}
@@ -1325,7 +1354,9 @@ static const struct vm_operations_struct hw_block_vm_ops = {
*/
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
+ struct hl_vm_hw_block_list_node *lnode;
struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
u32 block_id, block_size;
int rc;
@@ -1351,17 +1382,31 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
return -EINVAL;
}
+ lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
+ if (!lnode)
+ return -ENOMEM;
+
vma->vm_ops = &hw_block_vm_ops;
- vma->vm_private_data = hpriv->ctx;
+ vma->vm_private_data = lnode;
- hl_ctx_get(hdev, hpriv->ctx);
+ hl_ctx_get(hdev, ctx);
rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
if (rc) {
- hl_ctx_put(hpriv->ctx);
+ hl_ctx_put(ctx);
+ kfree(lnode);
return rc;
}
+ lnode->ctx = ctx;
+ lnode->vaddr = vma->vm_start;
+ lnode->size = block_size;
+ lnode->id = block_id;
+
+ mutex_lock(&ctx->hw_block_list_lock);
+ list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
+ mutex_unlock(&ctx->hw_block_list_lock);
+
vma->vm_pgoff = block_id;
return 0;
@@ -1574,7 +1619,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
rc = sg_alloc_table_from_pages(userptr->sgt,
userptr->pages,
- npages, offset, size, GFP_ATOMIC);
+ npages, offset, size, GFP_KERNEL);
if (rc < 0) {
dev_err(hdev->dev, "failed to create SG table from pages\n");
goto put_pages;
@@ -1624,11 +1669,7 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
- /*
- * This function can be called also from data path, hence use atomic
- * always as it is not a big allocation.
- */
- userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+ userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
if (!userptr->sgt)
return -ENOMEM;
@@ -2122,3 +2163,38 @@ void hl_vm_fini(struct hl_device *hdev)
vm->init_done = false;
}
+
+/**
+ * hl_hw_block_mem_init() - HW block memory initialization.
+ * @ctx: pointer to the habanalabs context structure.
+ *
+ * This function initializes the HW block virtual mapped addresses list and
+ * its lock.
+ */
+void hl_hw_block_mem_init(struct hl_ctx *ctx)
+{
+ mutex_init(&ctx->hw_block_list_lock);
+ INIT_LIST_HEAD(&ctx->hw_block_mem_list);
+}
+
+/**
+ * hl_hw_block_mem_fini() - HW block memory teardown.
+ * @ctx: pointer to the habanalabs context structure.
+ *
+ * This function clears the HW block virtual mapped addresses list and destroys
+ * its lock.
+ */
+void hl_hw_block_mem_fini(struct hl_ctx *ctx)
+{
+ struct hl_vm_hw_block_list_node *lnode, *tmp;
+
+ if (!list_empty(&ctx->hw_block_mem_list))
+ dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
+
+ list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
+ list_del(&lnode->node);
+ kfree(lnode);
+ }
+
+ mutex_destroy(&ctx->hw_block_list_lock);
+}
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 93c9e5f587e1..b37189956b14 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -532,6 +532,8 @@ int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
struct hl_mmu_hop_info hops;
int rc;
+ memset(&hops, 0, sizeof(hops));
+
rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
if (rc)
return rc;
@@ -589,6 +591,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
switch (hdev->asic_type) {
case ASIC_GOYA:
case ASIC_GAUDI:
+ case ASIC_GAUDI_SEC:
hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
break;
default:
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index b799f9258fb0..e941b7eef346 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -85,6 +85,58 @@ static void hl_pci_bars_unmap(struct hl_device *hdev)
pci_release_regions(pdev);
}
+int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ ktime_t timeout;
+ u64 msec;
+ u32 val;
+
+ if (hdev->pldm)
+ msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
+ else
+ msec = HL_PCI_ELBI_TIMEOUT_MSEC;
+
+ /* Clear previous status */
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
+
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, 0);
+
+ timeout = ktime_add_ms(ktime_get(), msec);
+ for (;;) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
+ if (val & PCI_CONFIG_ELBI_STS_MASK)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
+ &val);
+ break;
+ }
+
+ usleep_range(300, 500);
+ }
+
+ if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+
+ return 0;
+ }
+
+ if (val & PCI_CONFIG_ELBI_STS_ERR) {
+ dev_err(hdev->dev, "Error reading from ELBI\n");
+ return -EIO;
+ }
+
+ if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
+ dev_err(hdev->dev, "ELBI read didn't finish in time\n");
+ return -EIO;
+ }
+
+ dev_err(hdev->dev, "ELBI read has undefined bits in status\n");
+ return -EIO;
+}
+
/**
* hl_pci_elbi_write() - Write through the ELBI interface.
* @hdev: Pointer to hl_device structure.
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 4366d8f93842..9fa61573a89d 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,12 +9,18 @@
#include <linux/pci.h>
-long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
+long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+ bool curr)
{
struct cpucp_packet pkt;
+ u32 used_pll_idx;
u64 result;
int rc;
+ rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
+ if (rc)
+ return rc;
+
memset(&pkt, 0, sizeof(pkt));
if (curr)
@@ -23,7 +29,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
else
pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
- pkt.pll_index = cpu_to_le32(pll_index);
+ pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
@@ -31,23 +37,29 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
if (rc) {
dev_err(hdev->dev,
"Failed to get frequency of PLL %d, error %d\n",
- pll_index, rc);
+ used_pll_idx, rc);
return rc;
}
return (long) result;
}
-void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
+void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+ u64 freq)
{
struct cpucp_packet pkt;
+ u32 used_pll_idx;
int rc;
+ rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
+ if (rc)
+ return;
+
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
- pkt.pll_index = cpu_to_le32(pll_index);
+ pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -56,7 +68,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
if (rc)
dev_err(hdev->dev,
"Failed to set frequency to PLL %d, error %d\n",
- pll_index, rc);
+ used_pll_idx, rc);
}
u64 hl_get_max_power(struct hl_device *hdev)
@@ -203,7 +215,7 @@ static ssize_t soft_reset_store(struct device *dev,
dev_warn(hdev->dev, "Soft-Reset requested through sysfs\n");
- hl_device_reset(hdev, false, false);
+ hl_device_reset(hdev, 0);
out:
return count;
@@ -226,7 +238,7 @@ static ssize_t hard_reset_store(struct device *dev,
dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n");
- hl_device_reset(hdev, true, false);
+ hl_device_reset(hdev, HL_RESET_HARD);
out:
return count;
@@ -245,6 +257,9 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI:
str = "GAUDI";
break;
+ case ASIC_GAUDI_SEC:
+ str = "GAUDI SEC";
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -344,7 +359,7 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset,
size_t max_size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct hl_device *hdev = dev_get_drvdata(dev);
char *data;
int rc;
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 9152242778f5..b751652f80a8 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -105,6 +105,36 @@
#define GAUDI_PLL_MAX 10
+/*
+ * This enum is kept here for compatibility with old FW (in which each ASIC
+ * has unique PLL numbering).
+ */
+enum gaudi_pll_index {
+ GAUDI_CPU_PLL = 0,
+ GAUDI_PCI_PLL,
+ GAUDI_SRAM_PLL,
+ GAUDI_HBM_PLL,
+ GAUDI_NIC_PLL,
+ GAUDI_DMA_PLL,
+ GAUDI_MESH_PLL,
+ GAUDI_MME_PLL,
+ GAUDI_TPC_PLL,
+ GAUDI_IF_PLL,
+};
+
+static enum pll_index gaudi_pll_map[PLL_MAX] = {
+ [CPU_PLL] = GAUDI_CPU_PLL,
+ [PCI_PLL] = GAUDI_PCI_PLL,
+ [SRAM_PLL] = GAUDI_SRAM_PLL,
+ [HBM_PLL] = GAUDI_HBM_PLL,
+ [NIC_PLL] = GAUDI_NIC_PLL,
+ [DMA_PLL] = GAUDI_DMA_PLL,
+ [MESH_PLL] = GAUDI_MESH_PLL,
+ [MME_PLL] = GAUDI_MME_PLL,
+ [TPC_PLL] = GAUDI_TPC_PLL,
+ [IF_PLL] = GAUDI_IF_PLL,
+};
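
The get_used_pll_index() helper declared in the common header exists to translate through this map. A sketch of that translation, under the assumption that the committed implementation additionally checks FW capability bits before falling back to the legacy numbering:

/*
 * Sketch, not the committed implementation: old FW expects the ASIC-unique
 * numbering (e.g. gaudi_pll_map above), so translate the generic index.
 */
static int get_used_pll_index_sketch(struct hl_device *hdev,
				     enum pll_index input_pll_index,
				     enum pll_index *pll_index)
{
	if (input_pll_index >= PLL_MAX)
		return -EINVAL;

	*pll_index = hdev->legacy_pll_map[input_pll_index];
	return 0;
}
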
+
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -396,6 +426,19 @@ get_collective_mode(struct hl_device *hdev, u32 queue_id)
return HL_COLLECTIVE_NOT_SUPPORTED;
}
+static inline void set_default_power_values(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (hdev->card_type == cpucp_card_type_pmc) {
+ prop->max_power_default = MAX_POWER_DEFAULT_PMC;
+ prop->dc_power_default = DC_POWER_DEFAULT_PMC;
+ } else {
+ prop->max_power_default = MAX_POWER_DEFAULT_PCI;
+ prop->dc_power_default = DC_POWER_DEFAULT_PCI;
+ }
+}
+
static int gaudi_get_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -507,7 +550,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->num_of_events = GAUDI_EVENT_SIZE;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
- prop->max_power_default = MAX_POWER_DEFAULT_PCI;
+ set_default_power_values(hdev);
prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
@@ -532,8 +575,6 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
- /* disable fw security for now, set it in a later stage */
- prop->fw_security_disabled = true;
prop->fw_security_status_valid = false;
prop->hard_reset_done_by_fw = false;
@@ -588,6 +629,11 @@ static int gaudi_init_iatu(struct hl_device *hdev)
struct hl_outbound_pci_region outbound_region;
int rc;
+ if (hdev->asic_prop.iatu_done_by_fw) {
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+ return 0;
+ }
+
/* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = SRAM_BAR_ID;
@@ -632,6 +678,7 @@ static int gaudi_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
+ u32 fw_boot_status;
int rc;
rc = gaudi_get_fixed_properties(hdev);
@@ -665,6 +712,23 @@ static int gaudi_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
+ /* If FW security is enabled at this point it means no access to ELBI */
+ if (!hdev->asic_prop.fw_security_disabled) {
+ hdev->asic_prop.iatu_done_by_fw = true;
+ goto pci_init;
+ }
+
+ rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
+ &fw_boot_status);
+ if (rc)
+ goto free_queue_props;
+
+ /* Check whether FW is configuring iATU */
+ if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
+ (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
+ hdev->asic_prop.iatu_done_by_fw = true;
+
+pci_init:
rc = hl_pci_init(hdev);
if (rc)
goto free_queue_props;
@@ -1588,6 +1652,9 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->asic_specific = gaudi;
+ /* store legacy PLL map */
+ hdev->legacy_pll_map = gaudi_pll_map;
+
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
@@ -1766,8 +1833,7 @@ static int gaudi_enable_msi(struct hl_device *hdev)
if (gaudi->hw_cap_initialized & HW_CAP_MSI)
return 0;
- rc = pci_alloc_irq_vectors(hdev->pdev, 1, GAUDI_MSI_ENTRIES,
- PCI_IRQ_MSI);
+ rc = pci_alloc_irq_vectors(hdev->pdev, 1, 1, PCI_IRQ_MSI);
if (rc < 0) {
dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);
return rc;
@@ -3701,7 +3767,7 @@ static int gaudi_init_cpu(struct hl_device *hdev)
struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
- if (!hdev->cpu_enable)
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
if (gaudi->hw_cap_initialized & HW_CAP_CPU)
@@ -4873,7 +4939,7 @@ static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
parser->job_userptr_list, &userptr))
goto already_pinned;
- userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+ userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
if (!userptr)
return -ENOMEM;
@@ -5684,18 +5750,26 @@ release_cb:
static int gaudi_schedule_register_memset(struct hl_device *hdev,
u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
{
- struct hl_ctx *ctx = hdev->compute_ctx;
+ struct hl_ctx *ctx;
struct hl_pending_cb *pending_cb;
struct packet_msg_long *pkt;
u32 cb_size, ctl;
struct hl_cb *cb;
- int i;
+ int i, rc;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+ ctx = hdev->compute_ctx;
/* If no compute context available or context is going down
* memset registers directly
*/
- if (!ctx || kref_read(&ctx->refcount) == 0)
- return gaudi_memset_registers(hdev, reg_base, num_regs, val);
+ if (!ctx || kref_read(&ctx->refcount) == 0) {
+ rc = gaudi_memset_registers(hdev, reg_base, num_regs, val);
+ mutex_unlock(&hdev->fpriv_list_lock);
+ return rc;
+ }
+
+ mutex_unlock(&hdev->fpriv_list_lock);
cb_size = (sizeof(*pkt) * num_regs) +
sizeof(struct packet_msg_prot) * 2;
@@ -5911,13 +5985,16 @@ static void gaudi_restore_phase_topology(struct hl_device *hdev)
}
-static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr,
+ bool user_address, u32 *val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 hbm_bar_addr;
+ u64 hbm_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -5949,6 +6026,9 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
} else {
rc = -EFAULT;
}
@@ -5956,13 +6036,16 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
return rc;
}
-static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr,
+ bool user_address, u32 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 hbm_bar_addr;
+ u64 hbm_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -5994,6 +6077,9 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
} else {
rc = -EFAULT;
}
@@ -6001,13 +6087,16 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
return rc;
}
-static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr,
+ bool user_address, u64 *val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 hbm_bar_addr;
+ u64 hbm_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -6043,6 +6132,9 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
} else {
rc = -EFAULT;
}
@@ -6050,13 +6142,16 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
return rc;
}
-static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr,
+ bool user_address, u64 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 hbm_bar_addr;
+ u64 hbm_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -6091,6 +6186,9 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
} else {
rc = -EFAULT;
}
@@ -6098,6 +6196,164 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
return rc;
}
+static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr,
+ u32 size_to_dma, dma_addr_t dma_addr)
+{
+ u32 err_cause, val;
+ u64 dma_offset;
+ int rc;
+
+ dma_offset = dma_id * DMA_CORE_OFFSET;
+
+ WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, lower_32_bits(addr));
+ WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, upper_32_bits(addr));
+ WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset, lower_32_bits(dma_addr));
+ WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset, upper_32_bits(dma_addr));
+ WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset, size_to_dma);
+ WREG32(mmDMA0_CORE_COMMIT + dma_offset,
+ (1 << DMA0_CORE_COMMIT_LIN_SHIFT));
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmDMA0_CORE_STS0 + dma_offset,
+ val,
+ ((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
+ 0,
+ 1000000);
+
+ if (rc) {
+ dev_err(hdev->dev,
+			"DMA %d timed out while reading from 0x%llx\n",
+ dma_id, addr);
+ return -EIO;
+ }
+
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
+ if (err_cause) {
+ dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
+ void *blob_addr)
+{
+ u32 dma_core_sts0, err_cause, cfg1, size_left, pos, size_to_dma;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 dma_offset, qm_offset;
+ dma_addr_t dma_addr;
+ void *kernel_addr;
+ bool is_eng_idle;
+ int rc = 0, dma_id;
+
+ kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
+ hdev, SZ_2M,
+ &dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+
+ if (!kernel_addr)
+ return -ENOMEM;
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
+ dma_offset = dma_id * DMA_CORE_OFFSET;
+ qm_offset = dma_id * DMA_QMAN_OFFSET;
+ dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
+ is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+
+ if (!is_eng_idle) {
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
+ dma_offset = dma_id * DMA_CORE_OFFSET;
+ qm_offset = dma_id * DMA_QMAN_OFFSET;
+ dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
+ is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+
+ if (!is_eng_idle) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't read via DMA because it is BUSY\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+ }
+
+ cfg1 = RREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset);
+ WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset,
+ 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+
+ /* TODO: remove this by mapping the DMA temporary buffer to the MMU
+ * using the compute ctx ASID, if exists. If not, use the kernel ctx
+ * ASID
+ */
+ WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
+ if (err_cause) {
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);
+ }
+
+ pos = 0;
+ size_left = size;
+ size_to_dma = SZ_2M;
+
+ while (size_left > 0) {
+
+ if (size_left < SZ_2M)
+ size_to_dma = size_left;
+
+ rc = gaudi_dma_core_transfer(hdev, dma_id, addr, size_to_dma,
+ dma_addr);
+ if (rc)
+ break;
+
+ memcpy(blob_addr + pos, kernel_addr, size_to_dma);
+
+ if (size_left <= SZ_2M)
+ break;
+
+ pos += SZ_2M;
+ addr += SZ_2M;
+ size_left -= SZ_2M;
+ }
+
+ /* TODO: remove this by mapping the DMA temporary buffer to the MMU
+ * using the compute ctx ASID, if exists. If not, use the kernel ctx
+ * ASID
+ */
+ WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
+ ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+ WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset, cfg1);
+
+out:
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ hdev->asic_funcs->set_clock_gating(hdev);
+
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, SZ_2M, kernel_addr,
+ dma_addr);
+
+ return rc;
+}
+
static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
@@ -6851,7 +7107,8 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
/* Write 1 clear errors */
- WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
+ if (!hdev->stop_on_err)
+ WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
}
arb_err_val = RREG32(arb_err_addr);
@@ -7097,6 +7354,15 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
}
}
+static void gaudi_print_out_of_sync_info(struct hl_device *hdev,
+ struct cpucp_pkt_sync_err *sync_err)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
+
+ dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
+ sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+}
+
static int gaudi_soft_reset_late_init(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
@@ -7371,18 +7637,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_MMU_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
- if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, true, false);
- break;
+ goto reset_device;
case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_AXI_ECC:
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
- if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, true, false);
- break;
+ goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
case GAUDI_EVENT_HBM1_SPI_0:
@@ -7392,9 +7654,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
- if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, true, false);
- break;
+ goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
case GAUDI_EVENT_HBM1_SPI_1:
@@ -7423,8 +7683,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
dev_err(hdev->dev, "hard reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, true, false);
+ goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7446,8 +7705,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
dev_err(hdev->dev, "hard reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, true, false);
+ goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7516,9 +7774,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true);
- if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, true, false);
- break;
+ goto reset_device;
case GAUDI_EVENT_TPC0_BMON_SPMU:
case GAUDI_EVENT_TPC1_BMON_SPMU:
@@ -7552,11 +7808,28 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
event_type, cause);
break;
+ case GAUDI_EVENT_DEV_RESET_REQ:
+ gaudi_print_irq_info(hdev, event_type, false);
+ goto reset_device;
+
+ case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ goto reset_device;
+
default:
dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
event_type);
break;
}
+
+ return;
+
+reset_device:
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, HL_RESET_HARD);
+ else
+ hl_fw_unmask_irq(hdev, event_type);
}
static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
@@ -7607,7 +7880,7 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
- hl_device_reset(hdev, true, false);
+ hl_device_reset(hdev, HL_RESET_HARD);
}
return rc;
@@ -7656,7 +7929,7 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
- hl_device_reset(hdev, true, false);
+ hl_device_reset(hdev, HL_RESET_HARD);
}
return rc;
@@ -7714,7 +7987,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
+ rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
if (rc)
return rc;
@@ -7724,10 +7997,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type);
- if (hdev->card_type == cpucp_card_type_pci)
- prop->max_power_default = MAX_POWER_DEFAULT_PCI;
- else if (hdev->card_type == cpucp_card_type_pmc)
- prop->max_power_default = MAX_POWER_DEFAULT_PMC;
+ set_default_power_values(hdev);
hdev->max_power = prop->max_power_default;
@@ -8549,6 +8819,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.debugfs_write32 = gaudi_debugfs_write32,
.debugfs_read64 = gaudi_debugfs_read64,
.debugfs_write64 = gaudi_debugfs_write64,
+ .debugfs_read_dma = gaudi_debugfs_read_dma,
.add_device_attr = gaudi_add_device_attr,
.handle_eqe = gaudi_handle_eqe,
.set_pll_profile = gaudi_set_pll_profile,
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 50bb4ad570fd..5929be81ec23 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -47,6 +47,9 @@
#define MAX_POWER_DEFAULT_PCI 200000 /* 200W */
#define MAX_POWER_DEFAULT_PMC 350000 /* 350W */
+#define DC_POWER_DEFAULT_PCI 60000 /* 60W */
+#define DC_POWER_DEFAULT_PMC 60000 /* 60W */
+
#define GAUDI_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define TPC_ENABLED_MASK 0xFF
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 7085f45814ae..9a706c5980ef 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -9556,7 +9556,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC0_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
@@ -10011,7 +10010,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC1_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
@@ -10465,7 +10463,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC2_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
@@ -10919,7 +10916,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
@@ -11373,7 +11369,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
@@ -11827,7 +11822,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC5_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
@@ -12283,7 +12277,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC6_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
@@ -12739,7 +12732,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1U << ((mmTPC7_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1U << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index ed566c52ccaa..e27338f4aad2 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -118,6 +118,29 @@
#define IS_MME_IDLE(mme_arch_sts) \
(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
+/*
+ * This enum is kept here for compatibility with old FW (in which each ASIC
+ * has unique PLL numbering).
+ */
+enum goya_pll_index {
+ GOYA_CPU_PLL = 0,
+ GOYA_IC_PLL,
+ GOYA_MC_PLL,
+ GOYA_MME_PLL,
+ GOYA_PCI_PLL,
+ GOYA_EMMC_PLL,
+ GOYA_TPC_PLL,
+};
+
+static enum pll_index goya_pll_map[PLL_MAX] = {
+ [CPU_PLL] = GOYA_CPU_PLL,
+ [IC_PLL] = GOYA_IC_PLL,
+ [MC_PLL] = GOYA_MC_PLL,
+ [MME_PLL] = GOYA_MME_PLL,
+ [PCI_PLL] = GOYA_PCI_PLL,
+ [EMMC_PLL] = GOYA_EMMC_PLL,
+ [TPC_PLL] = GOYA_TPC_PLL,
+};
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
@@ -446,6 +469,7 @@ int goya_get_fixed_properties(struct hl_device *hdev)
prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
prop->max_power_default = MAX_POWER_DEFAULT;
+ prop->dc_power_default = DC_POWER_DEFAULT;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
@@ -460,8 +484,6 @@ int goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
- /* disable fw security for now, set it in a later stage */
- prop->fw_security_disabled = true;
prop->fw_security_status_valid = false;
prop->hard_reset_done_by_fw = false;
@@ -533,6 +555,11 @@ static int goya_init_iatu(struct hl_device *hdev)
struct hl_outbound_pci_region outbound_region;
int rc;
+ if (hdev->asic_prop.iatu_done_by_fw) {
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+ return 0;
+ }
+
/* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = SRAM_CFG_BAR_ID;
@@ -580,7 +607,7 @@ static int goya_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
- u32 val;
+ u32 fw_boot_status, val;
int rc;
rc = goya_get_fixed_properties(hdev);
@@ -614,6 +641,23 @@ static int goya_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
+ /* If FW security is enabled at this point it means no access to ELBI */
+ if (!hdev->asic_prop.fw_security_disabled) {
+ hdev->asic_prop.iatu_done_by_fw = true;
+ goto pci_init;
+ }
+
+ rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
+ &fw_boot_status);
+ if (rc)
+ goto free_queue_props;
+
+ /* Check whether FW is configuring iATU */
+ if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
+ (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
+ hdev->asic_prop.iatu_done_by_fw = true;
+
+pci_init:
rc = hl_pci_init(hdev);
if (rc)
goto free_queue_props;
@@ -853,6 +897,9 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->asic_specific = goya;
+ /* store legacy PLL map */
+ hdev->legacy_pll_map = goya_pll_map;
+
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
@@ -2429,7 +2476,7 @@ static int goya_init_cpu(struct hl_device *hdev)
struct goya_device *goya = hdev->asic_specific;
int rc;
- if (!hdev->cpu_enable)
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
if (goya->hw_cap_initialized & HW_CAP_CPU)
@@ -3221,7 +3268,7 @@ static int goya_pin_memory_before_cs(struct hl_device *hdev,
parser->job_userptr_list, &userptr))
goto already_pinned;
- userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+ userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
if (!userptr)
return -ENOMEM;
@@ -4101,12 +4148,15 @@ static void goya_clear_sm_regs(struct hl_device *hdev)
* lead to undefined behavior and therefore, should be done with extreme care
*
*/
-static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+static int goya_debugfs_read32(struct hl_device *hdev, u64 addr,
+ bool user_address, u32 *val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr;
+ u64 ddr_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
*val = RREG32(addr - CFG_BASE);
@@ -4132,6 +4182,10 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
+
} else {
rc = -EFAULT;
}
@@ -4154,12 +4208,15 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
* lead to undefined behavior and therefore, should be done with extreme care
*
*/
-static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+static int goya_debugfs_write32(struct hl_device *hdev, u64 addr,
+ bool user_address, u32 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr;
+ u64 ddr_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
WREG32(addr - CFG_BASE, val);
@@ -4185,6 +4242,10 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+
} else {
rc = -EFAULT;
}
@@ -4192,12 +4253,15 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
return rc;
}
-static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+static int goya_debugfs_read64(struct hl_device *hdev, u64 addr,
+ bool user_address, u64 *val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr;
+ u64 ddr_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
u32 val_l = RREG32(addr - CFG_BASE);
u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
@@ -4227,6 +4291,10 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
+
} else {
rc = -EFAULT;
}
@@ -4234,12 +4302,15 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
return rc;
}
-static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+static int goya_debugfs_write64(struct hl_device *hdev, u64 addr,
+ bool user_address, u64 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr;
+ u64 ddr_bar_addr, host_phys_end;
int rc = 0;
+ host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
WREG32(addr - CFG_BASE, lower_32_bits(val));
WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
@@ -4267,6 +4338,10 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+ user_address && !iommu_present(&pci_bus_type)) {
+ *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+
} else {
rc = -EFAULT;
}
@@ -4274,6 +4349,13 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
return rc;
}
+static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
+ void *blob_addr)
+{
+ dev_err(hdev->dev, "Reading via DMA is unimplemented yet\n");
+ return -EPERM;
+}
+
static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
@@ -4401,6 +4483,8 @@ static const char *_goya_get_event_desc(u16 event_type)
return "THERMAL_ENV_S";
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
return "THERMAL_ENV_E";
+ case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
+ return "QUEUE_OUT_OF_SYNC";
default:
return "N/A";
}
@@ -4483,6 +4567,9 @@ static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
+ case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
+ snprintf(desc, size, _goya_get_event_desc(event_type));
+ break;
default:
snprintf(desc, size, _goya_get_event_desc(event_type));
break;
@@ -4534,6 +4621,15 @@ static void goya_print_mmu_error_info(struct hl_device *hdev)
}
}
+static void goya_print_out_of_sync_info(struct hl_device *hdev,
+ struct cpucp_pkt_sync_err *sync_err)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
+
+ dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
+ sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+}
+
static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
bool razwi)
{
@@ -4698,7 +4794,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, true, false);
+ hl_device_reset(hdev, HL_RESET_HARD);
break;
case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
@@ -4754,6 +4850,15 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
goya_unmask_irq(hdev, event_type);
break;
+ case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
+ goya_print_irq_info(hdev, event_type, false);
+ goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, HL_RESET_HARD);
+ else
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
default:
dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
event_type);
@@ -5083,7 +5188,7 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
- hl_device_reset(hdev, true, false);
+ hl_device_reset(hdev, HL_RESET_HARD);
}
return rc;
@@ -5134,7 +5239,7 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
- hl_device_reset(hdev, true, false);
+ hl_device_reset(hdev, HL_RESET_HARD);
}
return rc;
@@ -5160,7 +5265,7 @@ int goya_cpucp_info_get(struct hl_device *hdev)
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
+ rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
if (rc)
return rc;
@@ -5443,6 +5548,7 @@ static const struct hl_asic_funcs goya_funcs = {
.debugfs_write32 = goya_debugfs_write32,
.debugfs_read64 = goya_debugfs_read64,
.debugfs_write64 = goya_debugfs_write64,
+ .debugfs_read_dma = goya_debugfs_read_dma,
.add_device_attr = goya_add_device_attr,
.handle_eqe = goya_handle_eqe,
.set_pll_profile = goya_set_pll_profile,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 23fe099ed218..ef8c6c8b5c8d 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -49,6 +49,8 @@
#define MAX_POWER_DEFAULT 200000 /* 200W */
+#define DC_POWER_DEFAULT 20000 /* 20W */
+
#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
#define GOYA_DEFAULT_CARD_NAME "HL1000"
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index b77c1c16c32c..27cd0ba99aa3 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -11,6 +11,8 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include "hl_boot_if.h"
+
#define NUM_HBM_PSEUDO_CH 2
#define NUM_HBM_CH_PER_DEV 8
#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0
@@ -28,6 +30,17 @@
#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6
#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0
+#define PLL_MAP_MAX_BITS 128
+#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8)
+
+/*
+ * Info of the pkt queue pointers at the first out-of-sync async event
+ */
+struct cpucp_pkt_sync_err {
+ __le32 pi;
+ __le32 ci;
+};
+
struct hl_eq_hbm_ecc_data {
/* SERR counter */
__le32 sec_cnt;
@@ -77,6 +90,7 @@ struct hl_eq_entry {
struct hl_eq_ecc_data ecc_data;
struct hl_eq_hbm_ecc_data hbm_ecc_data;
struct hl_eq_sm_sei_data sm_sei_data;
+ struct cpucp_pkt_sync_err pkt_sync_err;
__le64 data[7];
};
};
@@ -287,6 +301,30 @@ enum pq_init_status {
* The result is composed of 4 outputs, each is 16-bit
* frequency in MHz.
*
+ * CPUCP_PACKET_POWER_GET
+ * Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ * Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ * NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ * Fetch the current indication for local/remote faults from the NIC MAC.
+ * The result is 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ * Enable/Disable the MAC loopback feature. The packet's arguments specify
+ * the NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ * Configure the NIC MAC channels. The packet's arguments specify the
+ * NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ *       Set the index number for each supported MSI type going from
+ *       host to device.
*/
enum cpucp_packet_id {
@@ -320,6 +358,13 @@ enum cpucp_packet_id {
CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */
CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */
CPUCP_PACKET_PLL_INFO_GET, /* internal */
+ CPUCP_PACKET_NIC_STATUS, /* internal */
+ CPUCP_PACKET_POWER_GET, /* internal */
+ CPUCP_PACKET_NIC_PFC_SET, /* internal */
+ CPUCP_PACKET_NIC_FAULT_GET, /* internal */
+ CPUCP_PACKET_NIC_LPBK_SET, /* internal */
+ CPUCP_PACKET_NIC_MAC_CFG, /* internal */
+ CPUCP_PACKET_MSI_INFO_SET, /* internal */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -391,6 +436,12 @@ struct cpucp_unmask_irq_arr_packet {
__le32 irqs[0];
};
+struct cpucp_array_data_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[0];
+};
+
enum cpucp_packet_rc {
cpucp_packet_success,
cpucp_packet_invalid,
@@ -459,6 +510,51 @@ enum cpucp_pll_type_attributes {
cpucp_pll_pci,
};
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+ CPUCP_EVENT_QUEUE_MSI_TYPE,
+ CPUCP_NIC_PORT1_MSI_TYPE,
+ CPUCP_NIC_PORT3_MSI_TYPE,
+ CPUCP_NIC_PORT5_MSI_TYPE,
+ CPUCP_NIC_PORT7_MSI_TYPE,
+ CPUCP_NIC_PORT9_MSI_TYPE,
+ CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum pll_index {
+ CPU_PLL = 0,
+ PCI_PLL = 1,
+ NIC_PLL = 2,
+ DMA_PLL = 3,
+ MESH_PLL = 4,
+ MME_PLL = 5,
+ TPC_PLL = 6,
+ IF_PLL = 7,
+ SRAM_PLL = 8,
+ NS_PLL = 9,
+ HBM_PLL = 10,
+ MSS_PLL = 11,
+ DDR_PLL = 12,
+ VID_PLL = 13,
+ BANK_PLL = 14,
+ MMU_PLL = 15,
+ IC_PLL = 16,
+ MC_PLL = 17,
+ EMMC_PLL = 18,
+ PLL_MAX
+};
+
/* Event Queue Packets */
struct eq_generic_event {
@@ -470,7 +566,6 @@ struct eq_generic_event {
*/
#define CARD_NAME_MAX_LEN 16
-#define VERSION_MAX_LEN 128
#define CPUCP_MAX_SENSORS 128
#define CPUCP_MAX_NICS 128
#define CPUCP_LANES_PER_NIC 4
@@ -533,6 +628,7 @@ struct cpucp_security_info {
* @dram_size: available DRAM size.
* @card_name: card name that will be displayed in HWMON subsystem on the host
* @sec_info: security information
+ * @pll_map: Bit map of supported PLLs for current ASIC version.
*/
struct cpucp_info {
struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -554,6 +650,7 @@ struct cpucp_info {
__u8 pad[7];
struct cpucp_security_info sec_info;
__le32 reserved6;
+ __u8 pll_map[PLL_MAP_LEN];
};
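A note on consuming pll_map: when the FW reports CPU_BOOT_DEV_STS0_DYN_PLL_EN (see hl_boot_if.h below), it sends the host a 128-bit bitmap of supported PLLs indexed by enum pll_index. A minimal sketch of a bit test follows; the LSB-first bit ordering within each byte is an assumption, and pll_map_has is a hypothetical helper, not part of this patch:

/* Sketch only: returns true if the given PLL index is flagged as
 * supported in the pll_map bitmap of struct cpucp_info.
 * Assumes LSB-first bit numbering within each byte.
 */
static bool pll_map_has(const __u8 *pll_map, enum pll_index idx)
{
	if (idx >= PLL_MAP_MAX_BITS)
		return false;

	return pll_map[idx / 8] & (1 << (idx % 8));
}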
struct cpucp_mac_addr {
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e87f5a98e193..e0a259e0495c 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -13,6 +13,8 @@
#define BOOT_FIT_SRAM_OFFSET 0x200000
+#define VERSION_MAX_LEN 128
+
/*
* CPU error bits in BOOT_ERROR registers
*
@@ -73,6 +75,9 @@
* CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
* of the PLLs remains in REF_CLK
*
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
+ * should be contacted.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -92,6 +97,7 @@
#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << 10)
#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << 13)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
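A brief sketch of how these bits are meant to be consumed: per the comment above, the err0 contents are meaningful only when CPU_BOOT_ERR0_ENABLED is set. boot_err0_device_unusable below is a hypothetical helper, not part of this patch:

/* Sketch only: a boot error bit is valid only when the running FW
 * has populated the error register, i.e. ERR0_ENABLED is set.
 */
static bool boot_err0_device_unusable(u32 err0)
{
	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
		return false;

	return err0 & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
}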
/*
@@ -170,6 +176,20 @@
* is set to the PI counter.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN Flexible FW loading communication
+ * protocol is enabled.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN FW iATU configuration is enabled.
+ *					If this bit is set, the iATU has been
+ * configured and is ready for use.
+ * Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
+ * FW sends to host a bitmap of supported
+ * PLLs.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -195,6 +215,9 @@
#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << 14)
#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << 16)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << 17)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << 19)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -230,6 +253,7 @@ enum kmd_msg {
KMD_MSG_SKIP_BMC,
RESERVED,
KMD_MSG_RST_DEV,
+ KMD_MSG_LAST
};
enum cpu_msg_status {
@@ -238,4 +262,199 @@ enum cpu_msg_status {
CPU_MSG_ERR,
};
+/* communication registers mapping - consider ABI when changing */
+struct cpu_dyn_regs {
+ uint32_t cpu_pq_base_addr_low;
+ uint32_t cpu_pq_base_addr_high;
+ uint32_t cpu_pq_length;
+ uint32_t cpu_pq_init_status;
+ uint32_t cpu_eq_base_addr_low;
+ uint32_t cpu_eq_base_addr_high;
+ uint32_t cpu_eq_length;
+ uint32_t cpu_eq_ci;
+ uint32_t cpu_cq_base_addr_low;
+ uint32_t cpu_cq_base_addr_high;
+ uint32_t cpu_cq_length;
+ uint32_t cpu_pf_pq_pi;
+ uint32_t cpu_boot_dev_sts0;
+ uint32_t cpu_boot_dev_sts1;
+ uint32_t cpu_boot_err0;
+ uint32_t cpu_boot_err1;
+ uint32_t cpu_boot_status;
+ uint32_t fw_upd_sts;
+ uint32_t fw_upd_cmd;
+ uint32_t fw_upd_pending_sts;
+ uint32_t fuse_ver_offset;
+ uint32_t preboot_ver_offset;
+ uint32_t uboot_ver_offset;
+ uint32_t hw_state;
+ uint32_t kmd_msg_to_cpu;
+ uint32_t cpu_cmd_status_to_host;
+	uint32_t reserved1[32];		/* reserved for future use */
+};
+
+/* HCDM - Habana Communications Descriptor Magic */
+#define HL_COMMS_DESC_MAGIC 0x4843444D
+#define HL_COMMS_DESC_VER 1
+
+/* this is the comms descriptor header - metadata */
+struct comms_desc_header {
+ uint32_t magic; /* magic for validation */
+ uint32_t crc32; /* CRC32 of the descriptor w/o header */
+ uint16_t size; /* size of the descriptor w/o header */
+ uint8_t version; /* descriptor version */
+ uint8_t reserved[5]; /* pad to 64 bit */
+};
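As a hedged illustration of the validation this header enables, a minimal sketch follows. The CRC32 variant and seed are assumptions (the comment only states the CRC covers the descriptor without its header), and comms_desc_validate is a hypothetical name:

#include <linux/crc32.h>

/* Sketch only: sanity-check a received comms descriptor. The choice of
 * crc32_le() with a ~0 seed over hdr->size payload bytes is an assumed
 * CRC parameterization; the FW-side variant is not specified here.
 */
static int comms_desc_validate(const struct comms_desc_header *hdr)
{
	const void *body = hdr + 1;	/* payload follows the header */

	if (hdr->magic != HL_COMMS_DESC_MAGIC)
		return -EINVAL;

	if (hdr->version != HL_COMMS_DESC_VER)
		return -EINVAL;

	if (crc32_le(~0U, body, hdr->size) != hdr->crc32)
		return -EINVAL;

	return 0;
}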
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+ struct comms_desc_header header;
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ uint64_t img_addr; /* address for next FW component load */
+};
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP Used to clear the command register and no actual
+ *				command is sent.
+ *
+ * COMMS_CLR_STS Clear status command - FW should clear the
+ * status register. Used for synchronization
+ *				between the commands as part of the race-free
+ * protocol.
+ *
+ * COMMS_RST_STATE Reset the current communication state which is
+ * kept by FW for proper responses.
+ * Should be used in the beginning of the
+ * communication cycle to clean any leftovers from
+ * previous communication attempts.
+ *
+ * COMMS_PREP_DESC Prepare descriptor for setting up the
+ * communication and other dynamic data:
+ * struct lkd_fw_comms_desc.
+ * This command has a parameter stating the next FW
+ * component size, so the FW can actually prepare a
+ * space for it and in the status response provide
+ * the descriptor offset. The Offset of the next FW
+ * data component is a part of the descriptor
+ * structure.
+ *
+ * COMMS_DATA_RDY The FW data has been uploaded and is ready for
+ * validation.
+ *
+ * COMMS_EXEC Execute the next FW component.
+ *
+ * COMMS_RST_DEV Reset the device.
+ *
+ * COMMS_GOTO_WFE Execute WFE command. Allowed only on non-secure
+ * devices.
+ *
+ * COMMS_SKIP_BMC Perform actions required for BMC-less servers.
+ * Do not wait for BMC response.
+ *
+ * COMMS_LOW_PLL_OPP Initialize PLLs for low OPP.
+ */
+enum comms_cmd {
+ COMMS_NOOP = 0,
+ COMMS_CLR_STS = 1,
+ COMMS_RST_STATE = 2,
+ COMMS_PREP_DESC = 3,
+ COMMS_DATA_RDY = 4,
+ COMMS_EXEC = 5,
+ COMMS_RST_DEV = 6,
+ COMMS_GOTO_WFE = 7,
+ COMMS_SKIP_BMC = 8,
+ COMMS_LOW_PLL_OPP = 9,
+ COMMS_INVLD_LAST
+};
+
+#define COMMS_COMMAND_SIZE_SHIFT 0
+#define COMMS_COMMAND_SIZE_MASK 0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT 27
+#define COMMS_COMMAND_CMD_MASK 0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size - FW component size
+ * @cmd - command from enum comms_cmd
+ */
+struct comms_command {
+ union { /* bit fields are only for FW use */
+ struct {
+ unsigned int size :25; /* 32MB max. */
+ unsigned int reserved :2;
+ enum comms_cmd cmd :5; /* 32 commands */
+ };
+ unsigned int val;
+ };
+};
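For illustration, a minimal sketch of packing a command word with the SHIFT/MASK pairs above, consistent with the note that the bit fields are reserved for FW use; comms_command_pack and cmd_reg are hypothetical names, not part of this patch:

/* Sketch only: compose the raw command register value on the host
 * (LKD) side using the SHIFT/MASK macros rather than the bit fields.
 */
static u32 comms_command_pack(enum comms_cmd cmd, u32 size)
{
	u32 val = 0;

	val |= (size << COMMS_COMMAND_SIZE_SHIFT) & COMMS_COMMAND_SIZE_MASK;
	val |= ((u32)cmd << COMMS_COMMAND_CMD_SHIFT) & COMMS_COMMAND_CMD_MASK;

	return val;
}

/* e.g. announcing the size of the next FW component before upload:
 *	writel(comms_command_pack(COMMS_PREP_DESC, fw_size), cmd_reg);
 * where cmd_reg is a hypothetical mapped command register.
 */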
+
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP Used to clear the status register and no actual
+ * status is provided.
+ *
+ * COMMS_STS_ACK Command has been received and recognized.
+ *
+ * COMMS_STS_OK Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR Command execution was unsuccessful and resulted
+ * in error.
+ *
+ * COMMS_STS_VALID_ERR FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR Command execution has timed out.
+ */
+enum comms_sts {
+ COMMS_STS_NOOP = 0,
+ COMMS_STS_ACK = 1,
+ COMMS_STS_OK = 2,
+ COMMS_STS_ERR = 3,
+ COMMS_STS_VALID_ERR = 4,
+ COMMS_STS_TIMEOUT_ERR = 5,
+ COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW components loading - defines the base address */
+enum comms_ram_types {
+ COMMS_SRAM = 0,
+ COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT 0
+#define COMMS_STATUS_OFFSET_MASK 0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT 2
+#define COMMS_STATUS_RAM_TYPE_SHIFT 26
+#define COMMS_STATUS_RAM_TYPE_MASK 0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT 28
+#define COMMS_STATUS_STATUS_MASK 0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset - an offset from the base of the ram_type shifted right by
+ * 2 bits (always aligned to 32 bits).
+ * Allows a maximum addressable offset of 256MB from RAM base.
+ * Example: for real offset in RAM of 0x800000 (8MB), the value
+ * in offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type - the RAM type that should be used for offset from
+ * enum comms_ram_types
+ * @status - status from enum comms_sts
+ */
+struct comms_status {
+ union { /* bit fields are only for FW use */
+ struct {
+ unsigned int offset :26;
+ unsigned int ram_type :2;
+ enum comms_sts status :4; /* 16 statuses */
+ };
+ unsigned int val;
+ };
+};
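And the matching decode direction: a sketch that recovers the byte offset per the worked example above (a field value of 0x200000 decodes back to 0x800000). comms_status_unpack is a hypothetical helper, not part of this patch:

/* Sketch only: unpack a comms status word received from FW. */
static void comms_status_unpack(u32 val, u32 *offset,
				enum comms_ram_types *ram_type,
				enum comms_sts *status)
{
	/* the field stores offset >> 2; shift back to a byte offset */
	*offset = ((val & COMMS_STATUS_OFFSET_MASK) >>
			COMMS_STATUS_OFFSET_SHIFT) <<
			COMMS_STATUS_OFFSET_ALIGN_SHIFT;
	*ram_type = (val & COMMS_STATUS_RAM_TYPE_MASK) >>
			COMMS_STATUS_RAM_TYPE_SHIFT;
	*status = (val & COMMS_STATUS_STATUS_MASK) >>
			COMMS_STATUS_STATUS_SHIFT;
}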
+
#endif /* HL_BOOT_IF_H */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/misc/habanalabs/include/gaudi/gaudi.h
index f9ea897ae42c..ffae107b1693 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi.h
@@ -38,7 +38,7 @@
#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
-#define MAX_ASID 1024
+#define MAX_ASID 2
#define PROT_BITS_OFFS 0xF80
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
index 49335e8334b4..e8651abf84f2 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
@@ -303,6 +303,8 @@ enum gaudi_async_event_id {
GAUDI_EVENT_NIC3_QP1 = 619,
GAUDI_EVENT_NIC4_QP0 = 620,
GAUDI_EVENT_NIC4_QP1 = 621,
+ GAUDI_EVENT_DEV_RESET_REQ = 646,
+ GAUDI_EVENT_PKT_QUEUE_OUT_SYNC = 647,
GAUDI_EVENT_FIX_POWER_ENV_S = 658,
GAUDI_EVENT_FIX_POWER_ENV_E = 659,
GAUDI_EVENT_FIX_THERMAL_ENV_S = 660,
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
index 737176ba06fb..3dc79c131805 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
@@ -301,10 +301,10 @@ static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
{ .fc_id = 274, .cpu_id = 128, .valid = 0, .name = "" },
{ .fc_id = 275, .cpu_id = 128, .valid = 0, .name = "" },
{ .fc_id = 276, .cpu_id = 128, .valid = 0, .name = "" },
- { .fc_id = 277, .cpu_id = 129, .valid = 0, .name = "" },
- { .fc_id = 278, .cpu_id = 129, .valid = 0, .name = "" },
- { .fc_id = 279, .cpu_id = 129, .valid = 0, .name = "" },
- { .fc_id = 280, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 277, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_0" },
+ { .fc_id = 278, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_1" },
+ { .fc_id = 279, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_2" },
+ { .fc_id = 280, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_3" },
{ .fc_id = 281, .cpu_id = 130, .valid = 0, .name = "" },
{ .fc_id = 282, .cpu_id = 131, .valid = 0, .name = "" },
{ .fc_id = 283, .cpu_id = 132, .valid = 0, .name = "" },
@@ -670,18 +670,29 @@ static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
{ .fc_id = 643, .cpu_id = 492, .valid = 0, .name = "" },
{ .fc_id = 644, .cpu_id = 493, .valid = 0, .name = "" },
{ .fc_id = 645, .cpu_id = 494, .valid = 0, .name = "" },
- { .fc_id = 646, .cpu_id = 495, .valid = 0, .name = "" },
- { .fc_id = 647, .cpu_id = 496, .valid = 0, .name = "" },
- { .fc_id = 648, .cpu_id = 497, .valid = 0, .name = "" },
- { .fc_id = 649, .cpu_id = 498, .valid = 0, .name = "" },
- { .fc_id = 650, .cpu_id = 499, .valid = 0, .name = "" },
- { .fc_id = 651, .cpu_id = 500, .valid = 0, .name = "" },
- { .fc_id = 652, .cpu_id = 501, .valid = 0, .name = "" },
- { .fc_id = 653, .cpu_id = 502, .valid = 0, .name = "" },
- { .fc_id = 654, .cpu_id = 503, .valid = 0, .name = "" },
- { .fc_id = 655, .cpu_id = 504, .valid = 0, .name = "" },
- { .fc_id = 656, .cpu_id = 505, .valid = 0, .name = "" },
- { .fc_id = 657, .cpu_id = 506, .valid = 0, .name = "" },
+ { .fc_id = 646, .cpu_id = 495, .valid = 1, .name = "DEV_RESET_REQ" },
+ { .fc_id = 647, .cpu_id = 496, .valid = 1,
+ .name = "PKT_QUEUE_OUT_SYNC" },
+ { .fc_id = 648, .cpu_id = 497, .valid = 1,
+ .name = "STATUS_NIC0_ENG0" },
+ { .fc_id = 649, .cpu_id = 498, .valid = 1,
+ .name = "STATUS_NIC0_ENG1" },
+ { .fc_id = 650, .cpu_id = 499, .valid = 1,
+ .name = "STATUS_NIC1_ENG0" },
+ { .fc_id = 651, .cpu_id = 500, .valid = 1,
+ .name = "STATUS_NIC1_ENG1" },
+ { .fc_id = 652, .cpu_id = 501, .valid = 1,
+ .name = "STATUS_NIC2_ENG0" },
+ { .fc_id = 653, .cpu_id = 502, .valid = 1,
+ .name = "STATUS_NIC2_ENG1" },
+ { .fc_id = 654, .cpu_id = 503, .valid = 1,
+ .name = "STATUS_NIC3_ENG0" },
+ { .fc_id = 655, .cpu_id = 504, .valid = 1,
+ .name = "STATUS_NIC3_ENG1" },
+ { .fc_id = 656, .cpu_id = 505, .valid = 1,
+ .name = "STATUS_NIC4_ENG0" },
+ { .fc_id = 657, .cpu_id = 506, .valid = 1,
+ .name = "STATUS_NIC4_ENG1" },
{ .fc_id = 658, .cpu_id = 507, .valid = 1, .name = "FIX_POWER_ENV_S" },
{ .fc_id = 659, .cpu_id = 508, .valid = 1, .name = "FIX_POWER_ENV_E" },
{ .fc_id = 660, .cpu_id = 509, .valid = 1,
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
index 25acd9e87e20..a9f51f9f9e92 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
@@ -20,20 +20,6 @@
#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
#define LINUX_FW_OFFSET 0x800000 /* 8MB in HBM */
-enum gaudi_pll_index {
- CPU_PLL = 0,
- PCI_PLL,
- SRAM_PLL,
- HBM_PLL,
- NIC_PLL,
- DMA_PLL,
- MESH_PLL,
- MME_PLL,
- TPC_PLL,
- IF_PLL,
- PLL_MAX
-};
-
enum gaudi_nic_axi_error {
RXB,
RXE,
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
index 43d241891e45..1b4ca435021d 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -30,7 +30,7 @@
#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
-#define MAX_ASID 1024
+#define MAX_ASID 2
#define PROT_BITS_OFFS 0xF80
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
index 5fb92362fc5f..09081401cb1d 100644
--- a/drivers/misc/habanalabs/include/goya/goya_async_events.h
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -188,6 +188,7 @@ enum goya_async_event_id {
GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485,
GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486,
GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487,
+ GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC = 506,
GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S = 507,
GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E = 508,
GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S = 509,
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
index daf8d8cd14be..bc05f86c73ac 100644
--- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h
+++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
@@ -15,17 +15,6 @@
#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
#define LINUX_FW_OFFSET 0x800000 /* 8MB in DDR */
-enum goya_pll_index {
- CPU_PLL = 0,
- IC_PLL,
- MC_PLL,
- MME_PLL,
- PCI_PLL,
- EMMC_PLL,
- TPC_PLL,
- PLL_MAX
-};
-
#define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */
#endif /* GOYA_FW_IF_H */
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 945701bce553..64d33e368509 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -92,22 +92,23 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched/task.h>
+#include <linux/kallsyms.h>
#include <asm/sections.h>
-#define v1printk(a...) do { \
- if (verbose) \
- printk(KERN_INFO a); \
- } while (0)
-#define v2printk(a...) do { \
- if (verbose > 1) \
- printk(KERN_INFO a); \
- touch_nmi_watchdog(); \
- } while (0)
-#define eprintk(a...) do { \
- printk(KERN_ERR a); \
- WARN_ON(1); \
- } while (0)
+#define v1printk(a...) do { \
+ if (verbose) \
+ printk(KERN_INFO a); \
+} while (0)
+#define v2printk(a...) do { \
+ if (verbose > 1) \
+ printk(KERN_INFO a); \
+ touch_nmi_watchdog(); \
+} while (0)
+#define eprintk(a...) do { \
+ printk(KERN_ERR a); \
+ WARN_ON(1); \
+} while (0)
#define MAX_CONFIG_LEN 40
static struct kgdb_io kgdbts_io_ops;
@@ -200,21 +201,30 @@ static noinline void kgdbts_break_test(void)
v2printk("kgdbts: breakpoint complete\n");
}
-/* Lookup symbol info in the kernel */
+/*
+ * This is a cached wrapper for kallsyms_lookup_name().
+ *
+ * The cache is a big win for several tests. For example, it more than
+ * doubles the cycles per second during the sys_open test. This is not
+ * theoretical; the improvement shows up at human scale, especially when
+ * testing using emulators.
+ *
+ * Obviously neither re-entrant nor thread-safe but that is OK since it
+ * can only be called from the debug trap (and therefore all other CPUs
+ * are halted).
+ */
static unsigned long lookup_addr(char *arg)
{
- unsigned long addr = 0;
-
- if (!strcmp(arg, "kgdbts_break_test"))
- addr = (unsigned long)kgdbts_break_test;
- else if (!strcmp(arg, "sys_open"))
- addr = (unsigned long)do_sys_open;
- else if (!strcmp(arg, "kernel_clone"))
- addr = (unsigned long)kernel_clone;
- else if (!strcmp(arg, "hw_break_val"))
- addr = (unsigned long)&hw_break_val;
- addr = (unsigned long) dereference_function_descriptor((void *)addr);
- return addr;
+ static char cached_arg[KSYM_NAME_LEN];
+ static unsigned long cached_addr;
+
+ if (strcmp(arg, cached_arg)) {
+ strscpy(cached_arg, arg, KSYM_NAME_LEN);
+ cached_addr = kallsyms_lookup_name(arg);
+ }
+
+ return (unsigned long)dereference_function_descriptor(
+ (void *)cached_addr);
}
static void break_helper(char *bp_type, char *arg, unsigned long vaddr)
@@ -310,7 +320,7 @@ static int check_and_rewind_pc(char *put_str, char *arg)
if (arch_needs_sstep_emulation && sstep_addr &&
ip + offset == sstep_addr &&
- ((!strcmp(arg, "sys_open") || !strcmp(arg, "kernel_clone")))) {
+ ((!strcmp(arg, "do_sys_openat2") || !strcmp(arg, "kernel_clone")))) {
/* This is special case for emulated single step */
v2printk("Emul: rewind hit single step bp\n");
restart_from_top_after_write = 1;
@@ -619,14 +629,14 @@ static struct test_struct do_kernel_clone_test[] = {
*/
static struct test_struct sys_open_test[] = {
{ "?", "S0*" }, /* Clear break points */
- { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+ { "do_sys_openat2", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
- { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
- { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
+ { "do_sys_openat2", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "do_sys_openat2", NULL, check_and_rewind_pc }, /* check location */
{ "write", "OK", write_regs, emul_reset }, /* Write registers */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
- { "g", "sys_open", NULL, check_single_step },
- { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+ { "g", "do_sys_openat2", NULL, check_single_step },
+ { "do_sys_openat2", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
{ "", "", get_cont_catch, put_cont_catch },
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index dd65cedf3b12..70c5bb1e6f49 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
/* ODR is Output Data Rate */
-static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
+static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
{
u8 ctrl;
int shift;
@@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
lis3->read(lis3, CTRL_REG1, &ctrl);
ctrl &= lis3->odr_mask;
shift = ffs(lis3->odr_mask) - 1;
- return lis3->odrs[(ctrl >> shift)];
+ return (ctrl >> shift);
}
static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
{
- int div = lis3lv02d_get_odr(lis3);
+ int odr_idx = lis3lv02d_get_odr_index(lis3);
+ int div = lis3->odrs[odr_idx];
- if (WARN_ONCE(div == 0, "device returned spurious data"))
+ if (div == 0) {
+ if (odr_idx == 0) {
+			/* Power-down mode, not sampling, no need to sleep */
+ return 0;
+ }
+
+		dev_err(&lis3->pdev->dev, "Error: unknown odr index %d\n", odr_idx);
return -ENXIO;
+ }
/* LIS3 power on delay is quite long */
msleep(lis3->pwron_delay / div);
@@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+ int odr_idx;
lis3lv02d_sysfs_poweron(lis3);
- return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
+
+ odr_idx = lis3lv02d_get_odr_index(lis3);
+ return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
}
static ssize_t lis3lv02d_rate_set(struct device *dev,
@@ -1162,16 +1173,14 @@ int lis3lv02d_init_device(struct lis3lv02d *lis3)
break;
default:
pr_err("unknown sensor type 0x%X\n", lis3->whoami);
- return -EINVAL;
+ return -ENODEV;
}
lis3->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
sizeof(lis3_wai12_regs)), GFP_KERNEL);
- if (lis3->reg_cache == NULL) {
- printk(KERN_ERR DRIVER_NAME "out of memory\n");
+ if (lis3->reg_cache == NULL)
return -ENOMEM;
- }
mutex_init(&lis3->mutex);
atomic_set(&lis3->wake_thread, 0);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 110f5a8538e9..0e8254d0cf0b 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -134,6 +134,23 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
__lkdtm_CORRUPT_STACK((void *)&data);
}
+static pid_t stack_pid;
+static unsigned long stack_addr;
+
+void lkdtm_REPORT_STACK(void)
+{
+ volatile uintptr_t magic;
+ pid_t pid = task_pid_nr(current);
+
+ if (pid != stack_pid) {
+ pr_info("Starting stack offset tracking for pid %d\n", pid);
+ stack_pid = pid;
+ stack_addr = (uintptr_t)&magic;
+ }
+
+ pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
+}
+
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index b2aff4d87c01..8024b6a5cc7f 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -110,6 +110,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(EXHAUST_STACK),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(REPORT_STACK),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 5ae48c64df24..99f90d3e5e9c 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -17,6 +17,7 @@ void lkdtm_LOOP(void);
void lkdtm_EXHAUST_STACK(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_CORRUPT_STACK_STRONG(void);
+void lkdtm_REPORT_STACK(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 14be76d4c2e6..cb34925e10f1 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -105,6 +105,7 @@
#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */
#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */
+#define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */
/*
* MEI HW Section
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a7e179626b63..c3393b383e59 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
diff --git a/drivers/misc/pvpanic/Kconfig b/drivers/misc/pvpanic/Kconfig
new file mode 100644
index 000000000000..12d40a21f698
--- /dev/null
+++ b/drivers/misc/pvpanic/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Pvpanic Kconfig
+#
+# Copyright (C) 2021 Oracle.
+#
+
+config PVPANIC
+ bool "pvpanic device support"
+ help
+	  This option allows you to select a specific pvpanic device driver.
+ pvpanic is a paravirtualized device provided by QEMU; it lets
+ a virtual machine (guest) communicate panic events to the host.
+
+config PVPANIC_MMIO
+ tristate "pvpanic MMIO device support"
+ depends on HAS_IOMEM && (ACPI || OF) && PVPANIC
+ help
+ This driver provides support for the MMIO pvpanic device.
+
+config PVPANIC_PCI
+ tristate "pvpanic PCI device support"
+ depends on PCI && PVPANIC
+ help
+ This driver provides support for the PCI pvpanic device.
+ pvpanic is a paravirtualized device provided by QEMU which
+ forwards the panic events from the guest to the host.
diff --git a/drivers/misc/pvpanic/Makefile b/drivers/misc/pvpanic/Makefile
new file mode 100644
index 000000000000..9471df7d4f9c
--- /dev/null
+++ b/drivers/misc/pvpanic/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Pvpanic Makefile
+#
+# Copyright (C) 2021 Oracle.
+#
+obj-$(CONFIG_PVPANIC_MMIO) += pvpanic.o pvpanic-mmio.o
+obj-$(CONFIG_PVPANIC_PCI) += pvpanic.o pvpanic-pci.o
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index f1655f5ca016..4c0841776087 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -1,13 +1,12 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Pvpanic Device Support
+ * Pvpanic MMIO Device Support
*
* Copyright (C) 2013 Fujitsu.
* Copyright (C) 2018 ZTE.
+ * Copyright (C) 2021 Oracle.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
@@ -15,28 +14,36 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <uapi/misc/pvpanic.h>
-static void __iomem *base;
-static unsigned int capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
-static unsigned int events;
+#include "pvpanic.h"
+
+MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+MODULE_DESCRIPTION("pvpanic-mmio device driver");
+MODULE_LICENSE("GPL");
static ssize_t capability_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%x\n", capability);
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%x\n", pi->capability);
}
static DEVICE_ATTR_RO(capability);
static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%x\n", events);
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%x\n", pi->events);
}
static ssize_t events_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
unsigned int tmp;
int err;
@@ -44,57 +51,28 @@ static ssize_t events_store(struct device *dev, struct device_attribute *attr,
if (err)
return err;
- if ((tmp & capability) != tmp)
+ if ((tmp & pi->capability) != tmp)
return -EINVAL;
- events = tmp;
+ pi->events = tmp;
return count;
-
}
static DEVICE_ATTR_RW(events);
-static struct attribute *pvpanic_dev_attrs[] = {
+static struct attribute *pvpanic_mmio_dev_attrs[] = {
&dev_attr_capability.attr,
&dev_attr_events.attr,
NULL
};
-ATTRIBUTE_GROUPS(pvpanic_dev);
-
-MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
-MODULE_DESCRIPTION("pvpanic device driver");
-MODULE_LICENSE("GPL");
-
-static void
-pvpanic_send_event(unsigned int event)
-{
- if (event & capability & events)
- iowrite8(event, base);
-}
-
-static int
-pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
- void *unused)
-{
- unsigned int event = PVPANIC_PANICKED;
-
- if (kexec_crash_loaded())
- event = PVPANIC_CRASH_LOADED;
-
- pvpanic_send_event(event);
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block pvpanic_panic_nb = {
- .notifier_call = pvpanic_panic_notify,
- .priority = 1, /* let this called before broken drm_fb_helper */
-};
+ATTRIBUTE_GROUPS(pvpanic_mmio_dev);
static int pvpanic_mmio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pvpanic_instance *pi;
struct resource *res;
+ void __iomem *base;
res = platform_get_mem_or_io(pdev, 0);
if (!res)
@@ -115,23 +93,28 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
return -EINVAL;
}
+ pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->base = base;
+ pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+
	/* initialize capability by RDPT */
- capability &= ioread8(base);
- events = capability;
+ pi->capability &= ioread8(base);
+ pi->events = pi->capability;
- if (capability)
- atomic_notifier_chain_register(&panic_notifier_list,
- &pvpanic_panic_nb);
+ dev_set_drvdata(dev, pi);
- return 0;
+ return pvpanic_probe(pi);
}
static int pvpanic_mmio_remove(struct platform_device *pdev)
{
+ struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
- if (capability)
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &pvpanic_panic_nb);
+ pvpanic_remove(pi);
+ kfree(pi);
return 0;
}
@@ -153,7 +136,7 @@ static struct platform_driver pvpanic_mmio_driver = {
.name = "pvpanic-mmio",
.of_match_table = pvpanic_mmio_match,
.acpi_match_table = pvpanic_device_ids,
- .dev_groups = pvpanic_dev_groups,
+ .dev_groups = pvpanic_mmio_dev_groups,
},
.probe = pvpanic_mmio_probe,
.remove = pvpanic_mmio_remove,
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
new file mode 100644
index 000000000000..9ecc4e8559d5
--- /dev/null
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Pvpanic PCI Device Support
+ *
+ * Copyright (C) 2021 Oracle.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include <uapi/misc/pvpanic.h>
+
+#include "pvpanic.h"
+
+#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
+
+MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_LICENSE("GPL");
+
+static const struct pci_device_id pvpanic_pci_id_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_PVPANIC)},
+ {}
+};
+
+static ssize_t capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%x\n", pi->capability);
+}
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%x\n", pi->events);
+}
+
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+ unsigned int tmp;
+ int err;
+
+ err = kstrtouint(buf, 16, &tmp);
+ if (err)
+ return err;
+
+ if ((tmp & pi->capability) != tmp)
+ return -EINVAL;
+
+ pi->events = tmp;
+
+ return count;
+}
+static DEVICE_ATTR_RW(events);
+
+static struct attribute *pvpanic_pci_dev_attrs[] = {
+ &dev_attr_capability.attr,
+ &dev_attr_events.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(pvpanic_pci_dev);
+
+static int pvpanic_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct pvpanic_instance *pi;
+ void __iomem *base;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ return ret;
+
+ base = pci_iomap(pdev, 0, 0);
+ if (!base)
+ return -ENOMEM;
+
+ pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->base = base;
+ pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+
+	/* initialize capability by RDPT */
+ pi->capability &= ioread8(base);
+ pi->events = pi->capability;
+
+ dev_set_drvdata(dev, pi);
+
+ return pvpanic_probe(pi);
+}
+
+static void pvpanic_pci_remove(struct pci_dev *pdev)
+{
+ struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
+
+ pvpanic_remove(pi);
+ iounmap(pi->base);
+ kfree(pi);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver pvpanic_pci_driver = {
+ .name = "pvpanic-pci",
+ .id_table = pvpanic_pci_id_tbl,
+ .probe = pvpanic_pci_probe,
+ .remove = pvpanic_pci_remove,
+ .driver = {
+ .dev_groups = pvpanic_pci_dev_groups,
+ },
+};
+
+module_pci_driver(pvpanic_pci_driver);
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
new file mode 100644
index 000000000000..65f70a4da8c0
--- /dev/null
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Pvpanic Device Support
+ *
+ * Copyright (C) 2013 Fujitsu.
+ * Copyright (C) 2018 ZTE.
+ * Copyright (C) 2021 Oracle.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+
+#include <uapi/misc/pvpanic.h>
+
+#include "pvpanic.h"
+
+MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_LICENSE("GPL");
+
+static struct list_head pvpanic_list;
+static spinlock_t pvpanic_lock;
+
+static void
+pvpanic_send_event(unsigned int event)
+{
+ struct pvpanic_instance *pi_cur;
+
+ spin_lock(&pvpanic_lock);
+ list_for_each_entry(pi_cur, &pvpanic_list, list) {
+ if (event & pi_cur->capability & pi_cur->events)
+ iowrite8(event, pi_cur->base);
+ }
+ spin_unlock(&pvpanic_lock);
+}
+
+static int
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ unsigned int event = PVPANIC_PANICKED;
+
+ if (kexec_crash_loaded())
+ event = PVPANIC_CRASH_LOADED;
+
+ pvpanic_send_event(event);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pvpanic_panic_nb = {
+ .notifier_call = pvpanic_panic_notify,
+	.priority = 1, /* let this be called before broken drm_fb_helper */
+};
+
+int pvpanic_probe(struct pvpanic_instance *pi)
+{
+ if (!pi || !pi->base)
+ return -EINVAL;
+
+ spin_lock(&pvpanic_lock);
+ list_add(&pi->list, &pvpanic_list);
+ spin_unlock(&pvpanic_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pvpanic_probe);
+
+void pvpanic_remove(struct pvpanic_instance *pi)
+{
+ struct pvpanic_instance *pi_cur, *pi_next;
+
+ if (!pi)
+ return;
+
+ spin_lock(&pvpanic_lock);
+ list_for_each_entry_safe(pi_cur, pi_next, &pvpanic_list, list) {
+ if (pi_cur == pi) {
+ list_del(&pi_cur->list);
+ break;
+ }
+ }
+ spin_unlock(&pvpanic_lock);
+}
+EXPORT_SYMBOL_GPL(pvpanic_remove);
+
+static int pvpanic_init(void)
+{
+ INIT_LIST_HEAD(&pvpanic_list);
+ spin_lock_init(&pvpanic_lock);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pvpanic_panic_nb);
+
+ return 0;
+}
+
+static void pvpanic_exit(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &pvpanic_panic_nb);
+
+}
+
+module_init(pvpanic_init);
+module_exit(pvpanic_exit);
diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
new file mode 100644
index 000000000000..1afccc2e9fec
--- /dev/null
+++ b/drivers/misc/pvpanic/pvpanic.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Pvpanic Device Support
+ *
+ * Copyright (C) 2021 Oracle.
+ */
+
+#ifndef PVPANIC_H_
+#define PVPANIC_H_
+
+struct pvpanic_instance {
+ void __iomem *base;
+ unsigned int capability;
+ unsigned int events;
+ struct list_head list;
+};
+
+int pvpanic_probe(struct pvpanic_instance *pi);
+void pvpanic_remove(struct pvpanic_instance *pi);
+
+#endif /* PVPANIC_H_ */
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index cf2965aa5c05..87d156c15f35 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -21,11 +21,11 @@
/* define the XP debug device structures to be used with dev_dbg() et al */
-struct device_driver xp_dbg_name = {
+static struct device_driver xp_dbg_name = {
.name = "xp"
};
-struct device xp_dbg_subname = {
+static struct device xp_dbg_subname = {
.init_name = "", /* set to "" */
.driver = &xp_dbg_name
};
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 84610bbcc131..b2c3c22fc13c 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -207,7 +207,7 @@ xpc_start_hb_beater(void)
{
xpc_arch_ops.heartbeat_init();
timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
- xpc_hb_beater(0);
+ xpc_hb_beater(NULL);
}
static void
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index d07af4edfcac..94843e0e51c6 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -126,7 +126,7 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
{
struct uacce_device *uacce;
struct uacce_queue *q;
- int ret = 0;
+ int ret;
uacce = xa_load(&uacce_xa, iminor(inode));
if (!uacce)
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index b837e7eba5f7..f1d8ba6d4857 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -346,11 +346,6 @@ struct vmballoon {
/* statistics */
struct vmballoon_stats *stats;
-#ifdef CONFIG_DEBUG_FS
- /* debugfs file exporting statistics */
- struct dentry *dbg_entry;
-#endif
-
/**
* @b_dev_info: balloon device information descriptor.
*/
@@ -1709,14 +1704,14 @@ DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
- b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
- &vmballoon_debug_fops);
+ debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
+ &vmballoon_debug_fops);
}
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
static_key_disable(&balloon_stat_enabled.key);
- debugfs_remove(b->dbg_entry);
+ debugfs_remove(debugfs_lookup("vmmemctl", NULL));
kfree(b->stats);
b->stats = NULL;
}
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index 345addd9306d..fa8a7fce4481 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
{
int result;
- struct vmci_notify_bm_set_msg bitmap_set_msg;
+ struct vmci_notify_bm_set_msg bitmap_set_msg = { };
bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_SET_NOTIFY_BITMAP);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index cc8eeb361fcd..1018dc77269d 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
struct vmci_datagram *check_msg;
- check_msg = kmalloc(msg_size, GFP_KERNEL);
+ check_msg = kzalloc(msg_size, GFP_KERNEL);
if (!check_msg) {
dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
return -ENOMEM;
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 2d8328d928d5..da1e2a773823 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -908,7 +908,7 @@ static long vmci_host_unlocked_ioctl(struct file *filp,
unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \
- char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \
+ char *name = "IOCTL_VMCI_" # ioctl_name; \
return vmci_host_do_ ## ioctl_fn( \
vmci_host_dev, name, uptr); \
} while (0)
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index eb6c02bc4a02..b8b771b643cc 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -247,8 +247,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
*/
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->length % data->blksz) {
- WARN_ONCE(1, "unaligned sg len %u blksize %u\n",
- sg->length, data->blksz);
+ dev_warn_once(mmc_dev(mmc),
+ "unaligned sg len %u blksize %u, disabling descriptor DMA for transfer\n",
+ sg->length, data->blksz);
return;
}
}
diff --git a/drivers/most/most_cdev.c b/drivers/most/most_cdev.c
index 044880760b58..8908b9363a96 100644
--- a/drivers/most/most_cdev.c
+++ b/drivers/most/most_cdev.c
@@ -44,8 +44,8 @@ struct comp_channel {
};
#define to_channel(d) container_of(d, struct comp_channel, cdev)
-static struct list_head channel_list;
-static spinlock_t ch_list_lock;
+static LIST_HEAD(channel_list);
+static DEFINE_SPINLOCK(ch_list_lock);
static inline bool ch_has_mbo(struct comp_channel *c)
{
@@ -494,8 +494,6 @@ static int __init mod_init(void)
if (IS_ERR(comp.class))
return PTR_ERR(comp.class);
- INIT_LIST_HEAD(&channel_list);
- spin_lock_init(&ch_list_lock);
ida_init(&comp.minor_id);
err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 57f1f1708994..5c5c92132287 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
return 0;
case NAND_OP_WAITRDY_INSTR:
return readl_poll_timeout(nfc->regs + NFI_STA, status,
- status & STA_BUSY, 20,
- instr->ctx.waitrdy.timeout_ms);
+ !(status & STA_BUSY), 20,
+ instr->ctx.waitrdy.timeout_ms * 1000);
default:
break;
}
diff --git a/drivers/mux/gpio.c b/drivers/mux/gpio.c
index 02c1f2c014e8..cc5f2c1861d4 100644
--- a/drivers/mux/gpio.c
+++ b/drivers/mux/gpio.c
@@ -7,11 +7,12 @@
* Author: Peter Rosin <peda@axentia.se>
*/
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -23,8 +24,9 @@ static int mux_gpio_set(struct mux_control *mux, int state)
{
struct mux_gpio *mux_gpio = mux_chip_priv(mux->chip);
DECLARE_BITMAP(values, BITS_PER_TYPE(state));
+ u32 value = state;
- values[0] = state;
+ bitmap_from_arr32(values, &value, BITS_PER_TYPE(value));
gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
mux_gpio->gpios->desc,
@@ -64,14 +66,11 @@ static int mux_gpio_probe(struct platform_device *pdev)
mux_chip->ops = &mux_gpio_ops;
mux_gpio->gpios = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
- if (IS_ERR(mux_gpio->gpios)) {
- ret = PTR_ERR(mux_gpio->gpios);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get gpios\n");
- return ret;
- }
+ if (IS_ERR(mux_gpio->gpios))
+ return dev_err_probe(dev, PTR_ERR(mux_gpio->gpios),
+ "failed to get gpios\n");
WARN_ON(pins != mux_gpio->gpios->ndescs);
- mux_chip->mux->states = 1 << pins;
+ mux_chip->mux->states = BIT(pins);
ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
if (ret >= 0 && idle_state != MUX_IDLE_AS_IS) {
@@ -96,7 +95,7 @@ static int mux_gpio_probe(struct platform_device *pdev)
static struct platform_driver mux_gpio_driver = {
.driver = {
.name = "gpio-mux",
- .of_match_table = of_match_ptr(mux_gpio_dt_ids),
+ .of_match_table = mux_gpio_dt_ids,
},
.probe = mux_gpio_probe,
};
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 903d619e08ed..e08bf9377140 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3026,10 +3026,17 @@ out_resources:
return err;
}
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+};
+
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 prod_id;
u16 val;
int err;
@@ -3040,23 +3047,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mv88e6xxx_reg_unlock(chip);
- if (reg == MII_PHYSID2) {
- /* Some internal PHYs don't have a model number. */
- if (chip->info->family != MV88E6XXX_FAMILY_6165)
- /* Then there is the 6165 family. It gets is
- * PHYs correct. But it can also have two
- * SERDES interfaces in the PHY address
- * space. And these don't have a model
- * number. But they are not PHYs, so we don't
- * want to give them something a PHY driver
- * will recognise.
- *
- * Use the mv88e6390 family model number
- * instead, for anything which really could be
- * a PHY,
- */
- if (!(val & 0x3f0))
- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+ /* Some internal PHYs don't have a model number. */
+ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+ prod_id = family_prod_id_table[chip->info->family];
+ if (prod_id)
+ val |= prod_id >> 4;
}
return err ? err : val;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6e5cf490c01d..0f6a6cb7e98d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3918,6 +3918,7 @@ static int macb_init(struct platform_device *pdev)
reg = gem_readl(bp, DCFG8);
bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
GEM_BFEXT(T2SCR, reg));
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3928,7 +3929,6 @@ static int macb_init(struct platform_device *pdev)
/* Filtering is supported in hw but don't enable it in kernel now */
dev->hw_features |= NETIF_F_NTUPLE;
/* init Rx flow definitions */
- INIT_LIST_HEAD(&bp->rx_fs_list.list);
bp->rx_fs_list.count = 0;
spin_lock_init(&bp->rx_fs_lock);
} else
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
index b248966837b4..7aad40b2aa73 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -412,7 +412,7 @@
| CN6XXX_INTR_M0UNWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UPWI_ERR \
- | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UNB0_ERR \
| CN6XXX_INTR_M1UNWI_ERR \
| CN6XXX_INTR_INSTR_DB_OF_ERR \
| CN6XXX_INTR_SLIST_DB_OF_ERR \
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 1115b8f9ea4e..a3f5b80888e5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -350,18 +350,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
}
/*
- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
- * @tx_info - driver specific tls info.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
-{
- return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
- TCB_T_STATE_V(TCB_T_STATE_M),
- CHCR_TCB_STATE_CLOSED, 1);
-}
-
-/*
 * chcr_ktls_dev_del: callback for tls_dev_del.
 * Remove the tid and l2t entry and close the connection.
 * It is done on a per-connection basis.
@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
/* clear tid */
if (tx_info->tid != -1) {
- /* clear tcb state and then release tid */
- chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
}
@@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
return 0;
free_tid:
- chcr_ktls_mark_tcb_close(tx_info);
#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
@@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (tx_info->pending_close) {
spin_unlock(&tx_info->lock);
if (!status) {
- /* it's a late success, tcb status is established,
- * mark it close.
- */
- chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tid, tx_info->ip_family);
}
@@ -1664,54 +1645,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
}
/*
- * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
- * sending the same segment again. It will discard the segment which is before
- * the current tx max.
- * @tx_info - driver specific tls info.
- * @q - TX queue.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
- struct sge_eth_txq *q)
-{
- struct fw_ulptx_wr *wr;
- unsigned int ndesc;
- int credits;
- void *pos;
- u32 len;
-
- len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
- ndesc = DIV_ROUND_UP(len, 64);
-
- credits = chcr_txq_avail(&q->q) - ndesc;
- if (unlikely(credits < 0)) {
- chcr_eth_txq_stop(q);
- return NETDEV_TX_BUSY;
- }
-
- pos = &q->q.desc[q->q.pidx];
-
- wr = pos;
- /* ULPTX wr */
- wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
- wr->cookie = 0;
- /* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
-
- pos += sizeof(*wr);
-
- pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
- TCB_SND_UNA_RAW_W,
- TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
- TCB_SND_UNA_RAW_V(0), 0);
-
- chcr_txq_advance(&q->q, ndesc);
- cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
-
- return 0;
-}
-
-/*
* chcr_end_part_handler: This handler will handle the record which
 * is complete or if record's end part is received. T6 adapter has an issue that
 * it can't send out TAG with partial record so if it's an end part then we have
@@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
+ bool free_skb_if_tx_fails = false;
struct sk_buff *nskb = NULL;
+
/* check if it is a complete record */
if (tls_end_offset == record->len) {
nskb = skb;
@@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
if (last_wr)
dev_kfree_skb_any(skb);
+ else
+ free_skb_if_tx_fails = true;
last_wr = true;
@@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
+ if (free_skb_if_tx_fails)
+ dev_kfree_skb_any(skb);
goto out;
}
tx_info->prev_seq = record->end_seq;
@@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
}
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again.
*/
+ spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
do {
- int i;
cxgb4_reclaim_completed_tx(adap, &q->q, true);
- /* lock taken */
- spin_lock_irqsave(&tx_ctx->base.lock, flags);
/* fetch the tls record */
record = tls_get_record(&tx_ctx->base, tcp_seq,
&tx_info->record_no);
@@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset, skb_offset,
0);
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (ret) {
/* free the refcount taken earlier */
if (tls_end_offset < data_len)
dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
goto out;
}
@@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
continue;
}
- /* increase page reference count of the record, so that there
- * won't be any chance of page free in middle if in case stack
- * receives ACK and try to delete the record.
- */
- for (i = 0; i < record->num_frags; i++)
- __skb_frag_ref(&record->frags[i]);
- /* lock cleared */
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
-
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
data_len = 0;
}
- /* clear the frag ref count which increased locally before */
- for (i = 0; i < record->num_frags; i++) {
- /* clear the frag ref count */
- __skb_frag_unref(&record->frags[i]);
- }
/* if any failure, come out from the loop. */
if (ret) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (th->fin)
dev_kfree_skb_any(skb);
@@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
} while (data_len > 0);
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
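
A sketch of the locking scheme this rework moves to, under hypothetical names: instead of taking per-fragment page references and dropping the context lock on every iteration, the transmit path now holds the lock across the whole record walk, which pins the TLS records for the duration and removes the ref/unref churn.

#include <linux/spinlock.h>

struct tx_ctx {
	spinlock_t lock;
};

static int send_one_record(struct tx_ctx *ctx, int *remaining); /* hypothetical */

static int xmit_all_records(struct tx_ctx *ctx, int remaining)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ctx->lock, flags);
	while (remaining > 0) {
		/* the lock pins the current record's pages */
		ret = send_one_record(ctx, &remaining);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);	/* single unlock point */
	return ret;
}
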
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 252adfa5d837..8a9096aa85cd 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto out_regulator_disable;
+ }
SET_NETDEV_DEV(ndev, &pdev->dev);
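
The dm9000 fix restores the usual goto-unwind discipline: once the regulator has been enabled, every later failure must branch to the label that disables it rather than returning directly. A self-contained sketch (names are illustrative, not dm9000 symbols):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

static int example_probe(struct device *dev)
{
	struct regulator *power;
	void *priv;
	int ret;

	power = devm_regulator_get(dev, "vcc");
	if (IS_ERR(power))
		return PTR_ERR(power);

	ret = regulator_enable(power);
	if (ret)
		return ret;

	priv = kzalloc(64, GFP_KERNEL);		/* any later allocation */
	if (!priv) {
		ret = -ENOMEM;
		goto out_regulator_disable;	/* never a bare return here */
	}

	kfree(priv);
	return 0;

out_regulator_disable:
	regulator_disable(power);
	return ret;
}
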
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index e3a8858915b3..df0eab479d51 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -963,7 +963,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
unsigned long flag;
netif_stop_queue(dev);
- tasklet_disable(&np->tx_tasklet);
+ tasklet_disable_in_atomic(&np->tx_tasklet);
iowrite16(0, ioaddr + IntrEnable);
printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
"TxFrameId %2.2x,"
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9c6438d3b3a5..ffb2a91750c7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1149,19 +1149,13 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
+ ibmvnic_napi_disable(adapter);
release_resources(adapter);
return rc;
}
netif_tx_start_all_queues(netdev);
- if (prev_state == VNIC_CLOSED) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
- }
-
adapter->state = VNIC_OPEN;
return rc;
}
@@ -1922,7 +1916,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
- int i, rc;
+ int rc;
netdev_dbg(adapter->netdev,
"[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
@@ -2111,10 +2105,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* refresh device's multicast list */
ibmvnic_set_multi(netdev);
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
adapter->reset_reason == VNIC_RESET_MOBILITY)
__netdev_notify_peers(netdev);
@@ -3204,9 +3194,6 @@ restart_loop:
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
- if (next->tx_comp.rcs[i])
- dev_err(dev, "tx error %x\n",
- next->tx_comp.rcs[i]);
index = be32_to_cpu(next->tx_comp.correlators[i]);
if (index & IBMVNIC_TSO_POOL_MASK) {
tx_pool = &adapter->tso_pool[pool];
@@ -3220,7 +3207,13 @@ restart_loop:
num_entries += txbuff->num_entries;
if (txbuff->skb) {
total_bytes += txbuff->skb->len;
- dev_consume_skb_irq(txbuff->skb);
+ if (next->tx_comp.rcs[i]) {
+ dev_err(dev, "tx error %x\n",
+ next->tx_comp.rcs[i]);
+ dev_kfree_skb_irq(txbuff->skb);
+ } else {
+ dev_consume_skb_irq(txbuff->skb);
+ }
txbuff->skb = NULL;
} else {
netdev_warn(adapter->netdev,
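
The ibmvnic change distinguishes the two skb-free flavors: dev_consume_skb_irq() for normal completions and dev_kfree_skb_irq() for errored ones, so drop-monitoring tools only report genuine losses. A minimal sketch with a hypothetical completion handler:

#include <linux/skbuff.h>

static void tx_complete_one(struct sk_buff *skb, int rc)
{
	if (rc) {
		pr_err("tx error %x\n", rc);
		dev_kfree_skb_irq(skb);		/* accounted as a drop */
	} else {
		dev_consume_skb_irq(skb);	/* normal completion */
	}
}
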
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 30ad7c08d0fb..527023ee4c07 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12357,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
{
int err = 0;
int size;
+ u16 pow;
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -12375,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
+
+ /* find the next higher power-of-2 of num cpus */
+ pow = roundup_pow_of_two(num_online_cpus());
+ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
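
The i40e hunk caps the RSS spread at the next power of two of the online CPU count, avoiding queues that no CPU will ever service. A sketch of the sizing rule (the helper name is hypothetical):

#include <linux/cpumask.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/types.h>

static u16 example_rss_size(u16 hw_max)
{
	/* next power of two >= number of online CPUs */
	u16 pow = roundup_pow_of_two(num_online_cpus());

	return min_t(u16, hw_max, pow);
}
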
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 211ac6f907ad..28e834a128c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -747,8 +747,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
struct ice_port_info *pi)
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
- u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
- u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
+ u8 i, err, sync, oper, app_index, ice_app_sel_type;
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 03d9aad516d4..cffb95f8f632 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6536,6 +6536,13 @@ err_setup_tx:
return err;
}
+static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
+{
+ struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
+
+ return q_vector ? q_vector->napi.napi_id : 0;
+}
+
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: pointer to ixgbe_adapter
@@ -6583,7 +6590,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
+ rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
goto err;
rx_ring->xdp_prog = adapter->xdp_prog;
@@ -6892,6 +6899,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
adapter->hw.hw_addr = adapter->io_addr;
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ e_dev_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
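
Two independent hardenings in the ixgbe hunk: a NULL-safe accessor for the ring's napi_id (rings without a q_vector fall back to id 0), and re-enabling the PCI device before the resume path touches it. The accessor pattern, with stand-in types:

/* Stand-in types; the real driver keys this off the q_vector pointer. */
struct example_vector { unsigned int napi_id; };
struct example_ring   { struct example_vector *vec; };

static unsigned int example_ring_napi_id(const struct example_ring *r)
{
	return r->vec ? r->vec->napi_id : 0;	/* 0 = "no NAPI context" */
}
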
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index e9efe074edc1..f1b9284e0bea 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1265,9 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme)
jwrite32f(jme, JME_APMC, apmc);
}
-static void jme_link_change_tasklet(struct tasklet_struct *t)
+static void jme_link_change_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
+ struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task);
struct net_device *netdev = jme->dev;
int rc;
@@ -1510,7 +1510,7 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
* all other events are ignored
*/
jwrite32(jme, JME_IEVE, intrstat);
- tasklet_schedule(&jme->linkch_task);
+ schedule_work(&jme->linkch_task);
goto out_reenable;
}
@@ -1832,7 +1832,6 @@ jme_open(struct net_device *netdev)
jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
- tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
@@ -1920,7 +1919,7 @@ jme_close(struct net_device *netdev)
JME_NAPI_DISABLE(jme);
- tasklet_kill(&jme->linkch_task);
+ cancel_work_sync(&jme->linkch_task);
tasklet_kill(&jme->txclean_task);
tasklet_kill(&jme->rxclean_task);
tasklet_kill(&jme->rxempty_task);
@@ -3035,6 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
atomic_set(&jme->rx_empty, 1);
tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+ INIT_WORK(&jme->linkch_task, jme_link_change_work);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index a2c3b00d939d..2af76329b4a2 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -411,7 +411,7 @@ struct jme_adapter {
struct tasklet_struct rxempty_task;
struct tasklet_struct rxclean_task;
struct tasklet_struct txclean_task;
- struct tasklet_struct linkch_task;
+ struct work_struct linkch_task;
struct tasklet_struct pcc_task;
unsigned long flags;
u32 reg_txcs;
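
The jme conversion swaps a tasklet for a work item because link-change handling may sleep (for example during MDIO access), which is illegal in the atomic tasklet context. A compact sketch of the same conversion, with hypothetical names:

#include <linux/workqueue.h>

struct example_adapter {
	struct work_struct linkch_task;
};

static void example_link_change_work(struct work_struct *work)
{
	struct example_adapter *ad =
		container_of(work, struct example_adapter, linkch_task);

	/* process context: sleeping (mutexes, MDIO I/O) is allowed here */
	(void)ad;
}

/* setup, typically in probe */
static void example_init(struct example_adapter *ad)
{
	INIT_WORK(&ad->linkch_task, example_link_change_work);
}

/* from the interrupt path, replacing tasklet_schedule() */
static void example_on_link_irq(struct example_adapter *ad)
{
	schedule_work(&ad->linkch_task);
}

/* teardown must use cancel_work_sync(), not tasklet_kill() */
static void example_close(struct example_adapter *ad)
{
	cancel_work_sync(&ad->linkch_task);
}
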
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d7d8a68ef23d..d0f9d3cee97d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -246,6 +246,11 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
struct mlx5_devlink_trap *dl_trap;
int err = 0;
+ if (is_mdev_switchdev_mode(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
+ return -EOPNOTSUPP;
+ }
+
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 308fd279669e..89510cac46c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
- do { \
- unsigned long policy_long; \
- u16 *__policy = &(policy); \
- bool _write = (write); \
- \
- policy_long = *__policy; \
- if (_write && *__policy) \
- *__policy = find_first_bit(&policy_long, \
- sizeof(policy_long) * BITS_PER_BYTE);\
- MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
- if (!_write && *__policy) \
- *__policy = 1 << *__policy; \
- } while (0)
-
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
enum mlx5e_fec_supported_link_mode link_mode)
@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index df2a0af854bb..d675107d9eca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1895,6 +1895,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
return 0;
flow_rule_match_meta(rule, &match);
+ if (!match.mask->ingress_ifindex)
+ return 0;
+
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 581a92fc3292..1df2c002c9f6 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2350,6 +2350,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, readrq);
+
+ /* Chip doesn't support pause in jumbo mode */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ tp->phydev->advertising, !jumbo);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ tp->phydev->advertising, !jumbo);
+ phy_start_aneg(tp->phydev);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4630,8 +4637,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
- phy_support_asym_pause(phydev);
-
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 208cae344ffa..4749bd0af160 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1380,88 +1380,6 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
}
/**
- * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
- * @priv: driver private structure
- * Description: this function is called to re-allocate a receive buffer, perform
- * the DMA mapping and init the descriptor.
- */
-static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
-{
- u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
- int i;
-
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (buf->page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
- }
-
- if (priv->sph && buf->sec_page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
- buf->sec_page = NULL;
- }
- }
- }
-
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- struct dma_desc *p;
-
- if (priv->extend_desc)
- p = &((rx_q->dma_erx + i)->basic);
- else
- p = rx_q->dma_rx + i;
-
- if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
- if (!buf->page)
- goto err_reinit_rx_buffers;
-
- buf->addr = page_pool_get_dma_addr(buf->page);
- }
-
- if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
- if (!buf->sec_page)
- goto err_reinit_rx_buffers;
-
- buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
- }
-
- stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
- stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
- else
- stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
- stmmac_init_desc3(priv, p);
- }
- }
-
- return;
-
-err_reinit_rx_buffers:
- do {
- while (--i >= 0)
- stmmac_free_rx_buffer(priv, queue, i);
-
- if (queue == 0)
- break;
-
- i = priv->dma_rx_size;
- } while (queue-- > 0);
-}
-
-/**
* init_dma_rx_desc_rings - init the RX descriptor rings
* @dev: net device structure
* @flags: gfp flag.
@@ -5428,7 +5346,7 @@ int stmmac_resume(struct device *dev)
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
- stmmac_reinit_rx_buffers(priv);
+
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index d5b1e48e0c09..42f31c681846 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return -EINVAL;
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
geneve->cfg.info.key.tp_dst, sport);
@@ -985,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
geneve->cfg.info.key.tp_dst, sport);
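
The geneve fix validates that a full IP header is actually present and linear before the transmit path reads it. A short illustration of the same guard (the function name is hypothetical):

#include <linux/ip.h>
#include <linux/skbuff.h>

static int example_xmit_check(struct sk_buff *skb)
{
	/* refuse packets too short to carry a full IPv4 header */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;

	/* ip_hdr(skb) fields are now safe to dereference */
	return 0;
}
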
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e26a5d663f8a..8018ddf7f316 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -3021,9 +3021,34 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
- .phy_id = MARVELL_PHY_ID_88E6390,
+ .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88E6390",
+ .name = "Marvell 88E6341 Family",
+ /* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = m88e1510_probe,
+ .config_init = marvell_config_init,
+ .config_aneg = m88e6390_config_aneg,
+ .read_status = marvell_read_status,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6390 Family",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = m88e6390_probe,
@@ -3107,7 +3132,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
- { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
{ }
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 6d9130859c55..503e2fd7ce51 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -471,9 +471,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
skb_dst_drop(skb);
- /* if dst.dev is loopback or the VRF device again this is locally
- * originated traffic destined to a local address. Short circuit
- * to Rx path
+ /* if dst.dev is the VRF device again this is locally originated traffic
+ * destined to a local address. Short circuit to Rx path.
*/
if (dst->dev == dev)
return vrf_local_xmit(skb, dev, dst);
@@ -547,9 +546,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
skb_dst_drop(skb);
- /* if dst.dev is loopback or the VRF device again this is locally
- * originated traffic destined to a local address. Short circuit
- * to Rx path
+ /* if dst.dev is the VRF device again this is locally originated traffic
+ * destined to a local address. Short circuit to Rx path.
*/
if (rt->dst.dev == vrf_dev)
return vrf_local_xmit(skb, vrf_dev, &rt->dst);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 71e2ada86793..72e2e71aac0e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -251,7 +251,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc)
int first_slot = ATH_BCBUF;
int slot;
- tasklet_disable(&sc->bcon_tasklet);
+ tasklet_disable_in_atomic(&sc->bcon_tasklet);
/* Find first taken slot. */
for (slot = 0; slot < ATH_BCBUF; slot++) {
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index a5439c130130..d24b7a7993aa 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
- hotplug_status_changed,
- "%s/%s", dev->nodename, "hotplug-status");
- if (!err)
+ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+ NULL, hotplug_status_changed,
+ "%s/%s", dev->nodename,
+ "hotplug-status");
+ if (err)
+ goto err;
be->have_hotplug_status_watch = 1;
+ }
netif_tx_wake_all_queues(be->vif->dev);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 48f0985ca8a0..3a777d0073b7 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -631,16 +631,14 @@ void nvdimm_check_and_set_ro(struct gendisk *disk)
struct nd_region *nd_region = to_nd_region(dev->parent);
int disk_ro = get_disk_ro(disk);
- /*
- * Upgrade to read-only if the region is read-only preserve as
- * read-only if the disk is already read-only.
- */
- if (disk_ro || nd_region->ro == disk_ro)
+ /* catch the disk up with the region ro state */
+ if (disk_ro == nd_region->ro)
return;
- dev_info(dev, "%s read-only, marking %s read-only\n",
- dev_name(&nd_region->dev), disk->disk_name);
- set_disk_ro(disk, 1);
+ dev_info(dev, "%s read-%s, marking %s read-%s\n",
+ dev_name(&nd_region->dev), nd_region->ro ? "only" : "write",
+ disk->disk_name, nd_region->ro ? "only" : "write");
+ set_disk_ro(disk, nd_region->ro);
}
EXPORT_SYMBOL(nvdimm_check_and_set_ro);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b8a85bfb2e95..7daac795db39 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -26,6 +26,7 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
+#include "btt.h"
#include "pfn.h"
#include "nd.h"
@@ -585,7 +586,7 @@ static void nd_pmem_shutdown(struct device *dev)
nvdimm_flush(to_nd_region(dev->parent), NULL);
}
-static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+static void pmem_revalidate_poison(struct device *dev)
{
struct nd_region *nd_region;
resource_size_t offset = 0, end_trunc = 0;
@@ -595,9 +596,6 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
struct range range;
struct kernfs_node *bb_state;
- if (event != NVDIMM_REVALIDATE_POISON)
- return;
-
if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
@@ -635,6 +633,37 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
sysfs_notify_dirent(bb_state);
}
+static void pmem_revalidate_region(struct device *dev)
+{
+ struct pmem_device *pmem;
+
+ if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+ struct btt *btt = nd_btt->btt;
+
+ nvdimm_check_and_set_ro(btt->btt_disk);
+ return;
+ }
+
+ pmem = dev_get_drvdata(dev);
+ nvdimm_check_and_set_ro(pmem->disk);
+}
+
+static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+{
+ switch (event) {
+ case NVDIMM_REVALIDATE_POISON:
+ pmem_revalidate_poison(dev);
+ break;
+ case NVDIMM_REVALIDATE_REGION:
+ pmem_revalidate_region(dev);
+ break;
+ default:
+ dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
+ break;
+ }
+}
+
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ef23119db574..9ccf3d608799 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -518,6 +518,12 @@ static ssize_t read_only_show(struct device *dev,
return sprintf(buf, "%d\n", nd_region->ro);
}
+static int revalidate_read_only(struct device *dev, void *data)
+{
+ nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
+ return 0;
+}
+
static ssize_t read_only_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
@@ -529,6 +535,7 @@ static ssize_t read_only_store(struct device *dev,
return rc;
nd_region->ro = ro;
+ device_for_each_child(dev, NULL, revalidate_read_only);
return len;
}
static DEVICE_ATTR_RW(read_only);
@@ -1239,6 +1246,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
+ /* Test if an explicit flush function is defined */
+ if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+ return 1;
+
+ /* Test if any flush hints for the region are available */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1249,8 +1261,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
}
/*
- * The platform defines dimm devices without hints, assume
- * platform persistence mechanism like ADR
+	 * The platform defines dimm devices without flush hints or an
+	 * explicit flush; assume a platform persistence mechanism like ADR.
*/
return 0;
}
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 75d2594c16e1..dd2019006838 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -272,10 +272,20 @@ config SPRD_EFUSE
config NVMEM_RMEM
tristate "Reserved Memory Based Driver Support"
+ depends on HAS_IOMEM
help
This driver maps reserved memory into an nvmem device. It might be
useful to expose information left by firmware in memory.
This driver can also be built as a module. If so, the module
will be called nvmem-rmem.
+
+config NVMEM_BRCM_NVRAM
+ tristate "Broadcom's NVRAM support"
+ depends on ARCH_BCM_5301X || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver provides support for Broadcom's NVRAM that can be accessed
+ using I/O mapping.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 5376b8e0dae5..bbea1410240a 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -57,3 +57,5 @@ obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
nvmem_sprd_efuse-y := sprd-efuse.o
obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
nvmem-rmem-y := rmem.o
+obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
+nvmem_brcm_nvram-y := brcm_nvram.o
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
new file mode 100644
index 000000000000..bd2ecaaf4585
--- /dev/null
+++ b/drivers/nvmem/brcm_nvram.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct brcm_nvram {
+ struct device *dev;
+ void __iomem *base;
+};
+
+static int brcm_nvram_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct brcm_nvram *priv = context;
+ u8 *dst = val;
+
+ while (bytes--)
+ *dst++ = readb(priv->base + offset++);
+
+ return 0;
+}
+
+static int brcm_nvram_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .name = "brcm-nvram",
+ .reg_read = brcm_nvram_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct brcm_nvram *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ config.dev = dev;
+ config.priv = priv;
+ config.size = resource_size(res);
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id brcm_nvram_of_match_table[] = {
+ { .compatible = "brcm,nvram", },
+ {},
+};
+
+static struct platform_driver brcm_nvram_driver = {
+ .probe = brcm_nvram_probe,
+ .driver = {
+ .name = "brcm_nvram",
+ .of_match_table = brcm_nvram_of_match_table,
+ },
+};
+
+static int __init brcm_nvram_init(void)
+{
+ return platform_driver_register(&brcm_nvram_driver);
+}
+
+subsys_initcall_sync(brcm_nvram_init);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, brcm_nvram_of_match_table);
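
A hedged consumer-side sketch for a provider like the one above: the lookup name mirrors the nvmem_config name the driver registers, but the offset and buffer size here are purely illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int example_read_nvram(struct device *dev)
{
	struct nvmem_device *nvmem;
	u8 buf[16];
	int ret;

	/* lookup by provider name, matching the nvmem_config above */
	nvmem = devm_nvmem_device_get(dev, "brcm-nvram");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* read 16 raw bytes from offset 0 (offset/length illustrative) */
	ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
	return ret < 0 ? ret : 0;
}
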
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index a5ab1e0c74cf..bca671ff4e54 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1606,6 +1606,101 @@ int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
+static void *nvmem_cell_read_variable_common(struct device *dev,
+ const char *cell_id,
+ size_t max_len, size_t *len)
+{
+ struct nvmem_cell *cell;
+ int nbits;
+ void *buf;
+
+ cell = nvmem_cell_get(dev, cell_id);
+ if (IS_ERR(cell))
+ return cell;
+
+ nbits = cell->nbits;
+ buf = nvmem_cell_read(cell, len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(buf))
+ return buf;
+
+ /*
+ * If nbits is set then nvmem_cell_read() can significantly exaggerate
+ * the length of the real data. Throw away the extra junk.
+ */
+ if (nbits)
+ *len = DIV_ROUND_UP(nbits, 8);
+
+ if (*len > max_len) {
+ kfree(buf);
+ return ERR_PTR(-ERANGE);
+ }
+
+ return buf;
+}
+
+/**
+ * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
+ u32 *val)
+{
+ size_t len;
+ u8 *buf;
+ int i;
+
+ buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* Copy w/ implicit endian conversion */
+ *val = 0;
+ for (i = 0; i < len; i++)
+ *val |= buf[i] << (8 * i);
+
+ kfree(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
+
+/**
+ * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
+ u64 *val)
+{
+ size_t len;
+ u8 *buf;
+ int i;
+
+ buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* Copy w/ implicit endian conversion */
+ *val = 0;
+ for (i = 0; i < len; i++)
+ *val |= (uint64_t)buf[i] << (8 * i);
+
+ kfree(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
+
/**
* nvmem_device_cell_read() - Read a given nvmem device and cell
*
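
A brief, hypothetical consumer of the new helper: it reads a cell of one to four bytes as a little-endian u32, and gets -ERANGE back if the cell turns out wider than the destination. The cell name below is an assumption for illustration only.

#include <linux/device.h>
#include <linux/nvmem-consumer.h>

static int example_read_speed_bin(struct device *dev, u32 *bin)
{
	/* "speed-bin" is a hypothetical 1..4 byte cell */
	return nvmem_cell_read_variable_le_u32(dev, "speed-bin", bin);
}
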
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index f6e9f96933ca..4fcb63507ecd 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -141,7 +141,7 @@ static int sdam_probe(struct platform_device *pdev)
sdam->sdam_config.dev = &pdev->dev;
sdam->sdam_config.name = "spmi_sdam";
sdam->sdam_config.id = NVMEM_DEVID_AUTO;
- sdam->sdam_config.owner = THIS_MODULE,
+ sdam->sdam_config.owner = THIS_MODULE;
sdam->sdam_config.stride = 1;
sdam->sdam_config.word_size = 1;
sdam->sdam_config.reg_read = sdam_read;
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 6cace24dfbf7..d6d3f24685a8 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -45,11 +45,13 @@ MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data");
* @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow.
* @qfprom_blow_set_freq: The frequency required to set when we start the
* fuse blowing.
+ * @qfprom_blow_uV: LDO voltage to be set when doing efuse blow
*/
struct qfprom_soc_data {
u32 accel_value;
u32 qfprom_blow_timer_value;
u32 qfprom_blow_set_freq;
+ int qfprom_blow_uV;
};
/**
@@ -111,6 +113,15 @@ static const struct qfprom_soc_compatible_data sc7180_qfprom = {
.nkeepout = ARRAY_SIZE(sc7180_qfprom_keepout)
};
+static const struct nvmem_keepout sc7280_qfprom_keepout[] = {
+ {.start = 0x128, .end = 0x148},
+ {.start = 0x238, .end = 0x248}
+};
+
+static const struct qfprom_soc_compatible_data sc7280_qfprom = {
+ .keepout = sc7280_qfprom_keepout,
+ .nkeepout = ARRAY_SIZE(sc7280_qfprom_keepout)
+};
/**
* qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
* @priv: Our driver data.
@@ -127,6 +138,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
{
int ret;
+ /*
+ * This may be a shared rail and may be able to run at a lower rate
+ * when we're not blowing fuses. At the moment, the regulator framework
+ * applies voltage constraints even on disabled rails, so remove our
+ * constraints and allow the rail to be adjusted by other users.
+ */
+ ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
+ if (ret)
+ dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
+
ret = regulator_disable(priv->vcc);
if (ret)
dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
@@ -158,6 +179,7 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
struct qfprom_touched_values *old)
{
int ret;
+ int qfprom_blow_uV = priv->soc_data->qfprom_blow_uV;
ret = clk_prepare_enable(priv->secclk);
if (ret) {
@@ -172,6 +194,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
goto err_clk_prepared;
}
+ /*
+	 * Hardware requires 1.8V min for fuse blowing; this may be
+	 * a shared rail, so don't specify a max; the regulator
+	 * constraints will handle it.
+ */
+ ret = regulator_set_voltage(priv->vcc, qfprom_blow_uV, INT_MAX);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set %duV\n", qfprom_blow_uV);
+ goto err_clk_rate_set;
+ }
+
ret = regulator_enable(priv->vcc);
if (ret) {
dev_err(priv->dev, "Failed to enable regulator\n");
@@ -290,6 +323,14 @@ static const struct qfprom_soc_data qfprom_7_8_data = {
.accel_value = 0xD10,
.qfprom_blow_timer_value = 25,
.qfprom_blow_set_freq = 4800000,
+ .qfprom_blow_uV = 1800000,
+};
+
+static const struct qfprom_soc_data qfprom_7_15_data = {
+ .accel_value = 0xD08,
+ .qfprom_blow_timer_value = 24,
+ .qfprom_blow_set_freq = 4800000,
+ .qfprom_blow_uV = 1900000,
};
static int qfprom_probe(struct platform_device *pdev)
@@ -358,6 +399,8 @@ static int qfprom_probe(struct platform_device *pdev)
if (major_version == 7 && minor_version == 8)
priv->soc_data = &qfprom_7_8_data;
+ if (major_version == 7 && minor_version == 15)
+ priv->soc_data = &qfprom_7_15_data;
priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
if (IS_ERR(priv->vcc))
@@ -384,6 +427,7 @@ static int qfprom_probe(struct platform_device *pdev)
static const struct of_device_id qfprom_of_match[] = {
{ .compatible = "qcom,qfprom",},
{ .compatible = "qcom,sc7180-qfprom", .data = &sc7180_qfprom},
+ { .compatible = "qcom,sc7280-qfprom", .data = &sc7280_qfprom},
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, qfprom_of_match);
diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c
index c527d26ca6ac..4692aa985bd6 100644
--- a/drivers/nvmem/snvs_lpgpr.c
+++ b/drivers/nvmem/snvs_lpgpr.c
@@ -123,7 +123,7 @@ static int snvs_lpgpr_probe(struct platform_device *pdev)
cfg->dev = dev;
cfg->stride = 4;
cfg->word_size = 4;
- cfg->size = dcfg->size,
+ cfg->size = dcfg->size;
cfg->owner = THIS_MODULE;
cfg->reg_read = snvs_lpgpr_read;
cfg->reg_write = snvs_lpgpr_write;
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 27a17a1e4a7c..1ff4ce24f4b3 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1292,7 +1292,7 @@ exit_unlock:
* resumes, hv_pci_restore_msi_state() is able to correctly restore
* the interrupt with the correct affinity.
*/
- if (res && hbus->state != hv_pcibus_removing)
+ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
@@ -1458,7 +1458,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* Prevents hv_pci_onchannelcallback() from running concurrently
* in the tasklet.
*/
- tasklet_disable(&channel->callback_event);
+ tasklet_disable_in_atomic(&channel->callback_event);
/*
* Since this function is called with IRQ locks held, can't
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index f81e2ec90005..666d8a9b557f 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -306,7 +306,7 @@ static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
+ return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
}
static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
@@ -525,8 +525,8 @@ static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
/* Global events have single fixed source code */
- return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
- (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
+ return sysfs_emit(buf, "event=0x%lx,source=0x%x\n",
+ (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
/*
@@ -696,7 +696,7 @@ static ssize_t cci_pmu_format_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
static ssize_t cci_pmu_event_show(struct device *dev,
@@ -705,8 +705,8 @@ static ssize_t cci_pmu_event_show(struct device *dev,
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
/* source parameter is mandatory for normal PMU events */
- return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
- (unsigned long)eattr->var);
+ return sysfs_emit(buf, "source=?,event=0x%lx\n",
+ (unsigned long)eattr->var);
}
static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index a0a71c1df042..96d47cb302dd 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -221,7 +221,7 @@ static ssize_t arm_ccn_pmu_format_show(struct device *dev,
struct dev_ext_attribute *ea = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
+ return sysfs_emit(buf, "%s\n", (char *)ea->var);
}
#define CCN_FORMAT_ATTR(_name, _config) \
@@ -326,43 +326,38 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
struct arm_ccn_pmu_event *event = container_of(attr,
struct arm_ccn_pmu_event, attr);
- ssize_t res;
+ int res;
- res = scnprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+ res = sysfs_emit(buf, "type=0x%x", event->type);
if (event->event)
- res += scnprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
- event->event);
+ res += sysfs_emit_at(buf, res, ",event=0x%x", event->event);
if (event->def)
- res += scnprintf(buf + res, PAGE_SIZE - res, ",%s",
- event->def);
+ res += sysfs_emit_at(buf, res, ",%s", event->def);
if (event->mask)
- res += scnprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
- event->mask);
+ res += sysfs_emit_at(buf, res, ",mask=0x%x", event->mask);
/* Arguments required by an event */
switch (event->type) {
case CCN_TYPE_CYCLES:
break;
case CCN_TYPE_XP:
- res += scnprintf(buf + res, PAGE_SIZE - res,
- ",xp=?,vc=?");
+ res += sysfs_emit_at(buf, res, ",xp=?,vc=?");
if (event->event == CCN_EVENT_WATCHPOINT)
- res += scnprintf(buf + res, PAGE_SIZE - res,
+ res += sysfs_emit_at(buf, res,
",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
else
- res += scnprintf(buf + res, PAGE_SIZE - res,
- ",bus=?");
+ res += sysfs_emit_at(buf, res, ",bus=?");
break;
case CCN_TYPE_MN:
- res += scnprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
+ res += sysfs_emit_at(buf, res, ",node=%d", ccn->mn_id);
break;
default:
- res += scnprintf(buf + res, PAGE_SIZE - res, ",node=?");
+ res += sysfs_emit_at(buf, res, ",node=?");
break;
}
- res += scnprintf(buf + res, PAGE_SIZE - res, "\n");
+ res += sysfs_emit_at(buf, res, "\n");
return res;
}
@@ -476,7 +471,7 @@ static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
- return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
+ return mask ? sysfs_emit(buf, "0x%016llx\n", *mask) : -EINVAL;
}
static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
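
These perf conversions all follow one pattern: sysfs_emit() bounds the write to the sysfs page for you, and sysfs_emit_at() appends at a running offset, so the manual "PAGE_SIZE - res" arithmetic goes away. A minimal sketch (the attribute and values are hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int at = sysfs_emit(buf, "type=0x%x", 0x1);	/* bounded to one page */

	at += sysfs_emit_at(buf, at, ",event=0x%x", 0x2);
	at += sysfs_emit_at(buf, at, "\n");
	return at;
}
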
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 1328159fe564..56a5c355701d 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -348,19 +348,19 @@ static ssize_t arm_cmn_event_show(struct device *dev,
eattr = container_of(attr, typeof(*eattr), attr);
if (eattr->type == CMN_TYPE_DTC)
- return snprintf(buf, PAGE_SIZE, "type=0x%x\n", eattr->type);
+ return sysfs_emit(buf, "type=0x%x\n", eattr->type);
if (eattr->type == CMN_TYPE_WP)
- return snprintf(buf, PAGE_SIZE,
- "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
- eattr->type, eattr->eventid);
+ return sysfs_emit(buf,
+ "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
+ eattr->type, eattr->eventid);
if (arm_cmn_is_occup_event(eattr->type, eattr->eventid))
- return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
- eattr->type, eattr->eventid, eattr->occupid);
+ return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
+ eattr->type, eattr->eventid, eattr->occupid);
- return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x\n",
- eattr->type, eattr->eventid);
+ return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
+ eattr->eventid);
}
static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
@@ -560,12 +560,12 @@ static ssize_t arm_cmn_format_show(struct device *dev,
int lo = __ffs(fmt->field), hi = __fls(fmt->field);
if (lo == hi)
- return snprintf(buf, PAGE_SIZE, "config:%d\n", lo);
+ return sysfs_emit(buf, "config:%d\n", lo);
if (!fmt->config)
- return snprintf(buf, PAGE_SIZE, "config:%d-%d\n", lo, hi);
+ return sysfs_emit(buf, "config:%d-%d\n", lo, hi);
- return snprintf(buf, PAGE_SIZE, "config%d:%d-%d\n", fmt->config, lo, hi);
+ return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
}
#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index f2a85500258d..b6c2511d59af 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -113,7 +113,7 @@ dmc620_pmu_event_show(struct device *dev,
eattr = container_of(attr, typeof(*eattr), attr);
- return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
+ return sysfs_emit(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
}
#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2) \
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 0459a3403469..196faea074d0 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -136,8 +136,7 @@ static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
- (unsigned long)eattr->var);
+ return sysfs_emit(buf, "event=0x%lx\n", (unsigned long)eattr->var);
}
static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
@@ -146,7 +145,7 @@ static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
static ssize_t dsu_pmu_cpumask_show(struct device *dev,
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 933bd8410fc2..513de1f54e2d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#define dev_fmt pr_fmt
#include <linux/bug.h>
#include <linux/cpumask.h>
@@ -62,7 +63,7 @@ static bool pmu_has_irq_affinity(struct device_node *node)
return !!of_find_property(node, "interrupt-affinity", NULL);
}
-static int pmu_parse_irq_affinity(struct device_node *node, int i)
+static int pmu_parse_irq_affinity(struct device *dev, int i)
{
struct device_node *dn;
int cpu;
@@ -72,19 +73,18 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
* affinity matches our logical CPU order, as we used to assume.
* This is fragile, so we'll warn in pmu_parse_irqs().
*/
- if (!pmu_has_irq_affinity(node))
+ if (!pmu_has_irq_affinity(dev->of_node))
return i;
- dn = of_parse_phandle(node, "interrupt-affinity", i);
+ dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i);
if (!dn) {
- pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
- i, node);
+ dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i);
return -EINVAL;
}
cpu = of_cpu_node_to_id(dn);
if (cpu < 0) {
- pr_warn("failed to find logical CPU for %pOFn\n", dn);
+ dev_warn(dev, "failed to find logical CPU for %pOFn\n", dn);
cpu = nr_cpu_ids;
}
@@ -98,19 +98,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
int i = 0, num_irqs;
struct platform_device *pdev = pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ struct device *dev = &pdev->dev;
num_irqs = platform_irq_count(pdev);
- if (num_irqs < 0) {
- pr_err("unable to count PMU IRQs\n");
- return num_irqs;
- }
+ if (num_irqs < 0)
+ return dev_err_probe(dev, num_irqs, "unable to count PMU IRQs\n");
/*
* In this case we have no idea which CPUs are covered by the PMU.
* To match our prior behaviour, we assume all CPUs in this case.
*/
if (num_irqs == 0) {
- pr_warn("no irqs for PMU, sampling events not supported\n");
+ dev_warn(dev, "no irqs for PMU, sampling events not supported\n");
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
cpumask_setall(&pmu->supported_cpus);
return 0;
@@ -122,10 +121,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
return pmu_parse_percpu_irq(pmu, irq);
}
- if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
- pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
- pdev->dev.of_node);
- }
+ if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
+ dev_warn(dev, "no interrupt-affinity property, guessing.\n");
for (i = 0; i < num_irqs; i++) {
int cpu, irq;
@@ -135,18 +132,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
continue;
if (irq_is_percpu_devid(irq)) {
- pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
+ dev_warn(dev, "multiple PPIs or mismatched SPI/PPI detected\n");
return -EINVAL;
}
- cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
+ cpu = pmu_parse_irq_affinity(dev, i);
if (cpu < 0)
return cpu;
if (cpu >= nr_cpu_ids)
continue;
if (per_cpu(hw_events->irq, cpu)) {
- pr_warn("multiple PMU IRQs for the same CPU detected\n");
+ dev_warn(dev, "multiple PMU IRQs for the same CPU detected\n");
return -EINVAL;
}
@@ -191,9 +188,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table)
{
- const struct of_device_id *of_id;
armpmu_init_fn init_fn;
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct arm_pmu *pmu;
int ret = -ENODEV;
@@ -207,15 +203,14 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (ret)
goto out_free;
- if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
- init_fn = of_id->data;
-
- pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+ init_fn = of_device_get_match_data(dev);
+ if (init_fn) {
+ pmu->secure_access = of_property_read_bool(dev->of_node,
"secure-reg-access");
/* arm64 systems boot only as non-secure */
if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
- pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+ dev_warn(dev, "ignoring \"secure-reg-access\" property for arm64\n");
pmu->secure_access = false;
}
@@ -226,7 +221,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
}
if (ret) {
- pr_info("%pOF: failed to probe PMU!\n", node);
+ dev_err(dev, "failed to probe PMU!\n");
goto out_free;
}
@@ -235,15 +230,16 @@ int arm_pmu_device_probe(struct platform_device *pdev,
goto out_free_irqs;
ret = armpmu_register(pmu);
- if (ret)
- goto out_free;
+ if (ret) {
+ dev_err(dev, "failed to register PMU devices!\n");
+ goto out_free_irqs;
+ }
return 0;
out_free_irqs:
armpmu_free_irqs(pmu);
out_free:
- pr_info("%pOF: failed to register PMU devices!\n", node);
armpmu_free(pmu);
return ret;
}
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 8ff7a67f691c..ff6fab4bae30 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -506,30 +506,24 @@ static ssize_t smmu_pmu_event_show(struct device *dev,
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
-#define SMMU_EVENT_ATTR(name, config) \
- PMU_EVENT_ATTR(name, smmu_event_attr_##name, \
- config, smmu_pmu_event_show)
-SMMU_EVENT_ATTR(cycles, 0);
-SMMU_EVENT_ATTR(transaction, 1);
-SMMU_EVENT_ATTR(tlb_miss, 2);
-SMMU_EVENT_ATTR(config_cache_miss, 3);
-SMMU_EVENT_ATTR(trans_table_walk_access, 4);
-SMMU_EVENT_ATTR(config_struct_access, 5);
-SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
-SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);
+#define SMMU_EVENT_ATTR(name, config) \
+ (&((struct perf_pmu_events_attr) { \
+ .attr = __ATTR(name, 0444, smmu_pmu_event_show, NULL), \
+ .id = config, \
+ }).attr.attr)
static struct attribute *smmu_pmu_events[] = {
- &smmu_event_attr_cycles.attr.attr,
- &smmu_event_attr_transaction.attr.attr,
- &smmu_event_attr_tlb_miss.attr.attr,
- &smmu_event_attr_config_cache_miss.attr.attr,
- &smmu_event_attr_trans_table_walk_access.attr.attr,
- &smmu_event_attr_config_struct_access.attr.attr,
- &smmu_event_attr_pcie_ats_trans_rq.attr.attr,
- &smmu_event_attr_pcie_ats_trans_passed.attr.attr,
+ SMMU_EVENT_ATTR(cycles, 0),
+ SMMU_EVENT_ATTR(transaction, 1),
+ SMMU_EVENT_ATTR(tlb_miss, 2),
+ SMMU_EVENT_ATTR(config_cache_miss, 3),
+ SMMU_EVENT_ATTR(trans_table_walk_access, 4),
+ SMMU_EVENT_ATTR(config_struct_access, 5),
+ SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
+ SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
NULL
};
@@ -560,7 +554,7 @@ static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
- return snprintf(page, PAGE_SIZE, "0x%08x\n", smmu_pmu->iidr);
+ return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
}
static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
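
The new SMMU_EVENT_ATTR builds each attribute as an anonymous compound literal directly inside the events array, eliminating one named global per event. The same trick in miniature, with made-up event names and a stand-in show routine:

#include <linux/device.h>
#include <linux/perf_event.h>
#include <linux/sysfs.h>

static ssize_t example_event_show(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

/* one anonymous, statically allocated attribute per use site */
#define EXAMPLE_EVENT_ATTR(_name, _id)					\
	(&((struct perf_pmu_events_attr) {				\
		.attr = __ATTR(_name, 0444, example_event_show, NULL),	\
		.id = _id,						\
	}).attr.attr)

static struct attribute *example_events[] = {
	EXAMPLE_EVENT_ATTR(cycles, 0),
	EXAMPLE_EVENT_ATTR(tlb_miss, 2),
	NULL
};
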
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d3929ccebfd2..8a1e86ab2d8e 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -126,8 +126,7 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- arm_spe_pmu_cap_get(spe_pmu, cap));
+ return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}
#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index be1f26b62ddb..2bbb93188064 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -110,7 +110,7 @@ static ssize_t ddr_perf_identifier_show(struct device *dev,
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
- return sprintf(page, "%s\n", pmu->devtype_data->identifier);
+ return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}
static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
@@ -170,8 +170,7 @@ static ssize_t ddr_perf_filter_cap_show(struct device *dev,
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- ddr_perf_filter_cap_get(pmu, cap));
+ return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}
#define PERF_EXT_ATTR_ENTRY(_name, _func, _var) \
@@ -220,7 +219,7 @@ ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index e8377061845f..7643c9f93e36 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
- hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o
+ hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
+ hisi_uncore_pa_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index ac1a8c120a00..7c8a4bc21db4 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -14,12 +14,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
-#include <linux/platform_device.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
-/* DDRC register definition */
+/* DDRC register definition in v1 */
#define DDRC_PERF_CTRL 0x010
#define DDRC_FLUX_WR 0x380
#define DDRC_FLUX_RD 0x384
@@ -35,12 +34,24 @@
#define DDRC_INT_CLEAR 0x6d0
#define DDRC_VERSION 0x710
+/* DDRC register definition in v2 */
+#define DDRC_V2_INT_MASK 0x528
+#define DDRC_V2_INT_STATUS 0x52c
+#define DDRC_V2_INT_CLEAR 0x530
+#define DDRC_V2_EVENT_CNT 0xe00
+#define DDRC_V2_EVENT_CTRL 0xe70
+#define DDRC_V2_EVENT_TYPE 0xe74
+#define DDRC_V2_PERF_CTRL 0xea0
+
/* DDRC has 8-counters */
#define DDRC_NR_COUNTERS 0x8
-#define DDRC_PERF_CTRL_EN 0x2
+#define DDRC_V1_PERF_CTRL_EN 0x2
+#define DDRC_V2_PERF_CTRL_EN 0x1
+#define DDRC_V1_NR_EVENTS 0x7
+#define DDRC_V2_NR_EVENTS 0x90
/*
- * For DDRC PMU, there are eight-events and every event has been mapped
+ * For PMU v1, there are eight events and every event has been mapped
* to fixed-purpose counters which register offset is not consistent.
* Therefore there is no write event type and we assume that event
* code (0 to 7) is equal to counter index in PMU driver.
@@ -54,73 +65,85 @@ static const u32 ddrc_reg_off[] = {
/*
* Select the counter register offset using the counter index.
- * In DDRC there are no programmable counter, the count
- * is readed form the statistics counter register itself.
+ * In PMU v1, there are no programmable counters; the count
+ * is read from the statistics counter register itself.
*/
-static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
+static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx)
{
return ddrc_reg_off[cntr_idx];
}
-static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx)
{
- /* Use event code as counter index */
- u32 idx = GET_DDRC_EVENTID(hwc);
-
- if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
- dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
- return 0;
- }
+ return DDRC_V2_EVENT_CNT + cntr_idx * 8;
+}
- return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readl(ddrc_pmu->base +
+ hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
}
-static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
+static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc, u64 val)
{
- u32 idx = GET_DDRC_EVENTID(hwc);
+ writel((u32)val,
+ ddrc_pmu->base + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
+}
- if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
- dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
- }
+static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(ddrc_pmu->base +
+ hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
+}
- writel((u32)val,
- ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val,
+ ddrc_pmu->base + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
}
/*
- * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware,
- * so there is no need to write event type.
+ * For DDRC PMU v1, each event is mapped to a fixed-purpose counter by
+ * hardware, so there is no need to write the event type; in PMU v2 the
+ * counters are programmable.
*/
static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
u32 type)
{
+ u32 offset;
+
+ if (hha_pmu->identifier >= HISI_PMU_V2) {
+ offset = DDRC_V2_EVENT_TYPE + 4 * idx;
+ writel(type, hha_pmu->base + offset);
+ }
}
-static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
- val |= DDRC_PERF_CTRL_EN;
+ val |= DDRC_V1_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}
-static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
- val &= ~DDRC_PERF_CTRL_EN;
+ val &= ~DDRC_V1_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}
-static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
u32 val;
@@ -130,8 +153,8 @@ static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}
-static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
u32 val;
@@ -141,7 +164,7 @@ static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}
-static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
+static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
@@ -157,87 +180,117 @@ static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
return idx;
}
-static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event)
+{
+ return hisi_uncore_pmu_get_event_idx(event);
+}
+
+static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+ val |= DDRC_V2_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+ val &= ~DDRC_V2_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
+ val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
/* Write 0 to enable interrupt */
val = readl(ddrc_pmu->base + DDRC_INT_MASK);
- val &= ~(1 << GET_DDRC_EVENTID(hwc));
+ val &= ~(1 << hwc->idx);
writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}
-static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
u32 val;
/* Write 1 to mask interrupt */
val = readl(ddrc_pmu->base + DDRC_INT_MASK);
- val |= (1 << GET_DDRC_EVENTID(hwc));
+ val |= 1 << hwc->idx;
writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}
-static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
+static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
- struct hisi_pmu *ddrc_pmu = dev_id;
- struct perf_event *event;
- unsigned long overflown;
- int idx;
-
- /* Read the DDRC_INT_STATUS register */
- overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
- if (!overflown)
- return IRQ_NONE;
+ u32 val;
- /*
- * Find the counter index which overflowed if the bit was set
- * and handle it
- */
- for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
- /* Write 1 to clear the IRQ status flag */
- writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
+ val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
+ val &= ~(1 << hwc->idx);
+ writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+}
- /* Get the corresponding event struct */
- event = ddrc_pmu->pmu_events.hw_events[idx];
- if (!event)
- continue;
+static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
- hisi_uncore_pmu_event_update(event);
- hisi_uncore_pmu_set_event_period(event);
- }
+ val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
+ val |= 1 << hwc->idx;
+ writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+}
- return IRQ_HANDLED;
+static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu)
+{
+ return readl(ddrc_pmu->base + DDRC_INT_STATUS);
}
-static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
- struct platform_device *pdev)
+static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu,
+ int idx)
{
- int irq, ret;
-
- /* Read and init IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
- IRQF_NOBALANCING | IRQF_NO_THREAD,
- dev_name(&pdev->dev), ddrc_pmu);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Fail to request IRQ:%d ret:%d\n", irq, ret);
- return ret;
- }
+ writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR);
+}
- ddrc_pmu->irq = irq;
+static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu)
+{
+ return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS);
+}
- return 0;
+static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu,
+ int idx)
+{
+ writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR);
}
static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
{ "HISI0233", },
- {},
+ { "HISI0234", },
+ {}
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
@@ -269,21 +322,38 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
}
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
+ if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
+ &ddrc_pmu->sub_id)) {
+ dev_err(&pdev->dev, "Can not read sub-id!\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
-static struct attribute *hisi_ddrc_pmu_format_attr[] = {
+static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
NULL,
};
-static const struct attribute_group hisi_ddrc_pmu_format_group = {
+static const struct attribute_group hisi_ddrc_pmu_v1_format_group = {
+ .name = "format",
+ .attrs = hisi_ddrc_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group hisi_ddrc_pmu_v2_format_group = {
.name = "format",
- .attrs = hisi_ddrc_pmu_format_attr,
+ .attrs = hisi_ddrc_pmu_v2_format_attr,
};
-static struct attribute *hisi_ddrc_pmu_events_attr[] = {
+static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
@@ -295,9 +365,21 @@ static struct attribute *hisi_ddrc_pmu_events_attr[] = {
NULL,
};
-static const struct attribute_group hisi_ddrc_pmu_events_group = {
+static const struct attribute_group hisi_ddrc_pmu_v1_events_group = {
.name = "events",
- .attrs = hisi_ddrc_pmu_events_attr,
+ .attrs = hisi_ddrc_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(cycles, 0x00),
+ HISI_PMU_EVENT_ATTR(flux_wr, 0x83),
+ HISI_PMU_EVENT_ATTR(flux_rd, 0x84),
+ NULL
+};
+
+static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_ddrc_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -323,25 +405,50 @@ static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
.attrs = hisi_ddrc_pmu_identifier_attrs,
};
-static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
- &hisi_ddrc_pmu_format_group,
- &hisi_ddrc_pmu_events_group,
+static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
+ &hisi_ddrc_pmu_v1_format_group,
+ &hisi_ddrc_pmu_v1_events_group,
&hisi_ddrc_pmu_cpumask_attr_group,
&hisi_ddrc_pmu_identifier_group,
NULL,
};
-static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
+static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
+ &hisi_ddrc_pmu_v2_format_group,
+ &hisi_ddrc_pmu_v2_events_group,
+ &hisi_ddrc_pmu_cpumask_attr_group,
+ &hisi_ddrc_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = {
+ .write_evtype = hisi_ddrc_pmu_write_evtype,
+ .get_event_idx = hisi_ddrc_pmu_v1_get_event_idx,
+ .start_counters = hisi_ddrc_pmu_v1_start_counters,
+ .stop_counters = hisi_ddrc_pmu_v1_stop_counters,
+ .enable_counter = hisi_ddrc_pmu_v1_enable_counter,
+ .disable_counter = hisi_ddrc_pmu_v1_disable_counter,
+ .enable_counter_int = hisi_ddrc_pmu_v1_enable_counter_int,
+ .disable_counter_int = hisi_ddrc_pmu_v1_disable_counter_int,
+ .write_counter = hisi_ddrc_pmu_v1_write_counter,
+ .read_counter = hisi_ddrc_pmu_v1_read_counter,
+ .get_int_status = hisi_ddrc_pmu_v1_get_int_status,
+ .clear_int_status = hisi_ddrc_pmu_v1_clear_int_status,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = {
.write_evtype = hisi_ddrc_pmu_write_evtype,
- .get_event_idx = hisi_ddrc_pmu_get_event_idx,
- .start_counters = hisi_ddrc_pmu_start_counters,
- .stop_counters = hisi_ddrc_pmu_stop_counters,
- .enable_counter = hisi_ddrc_pmu_enable_counter,
- .disable_counter = hisi_ddrc_pmu_disable_counter,
- .enable_counter_int = hisi_ddrc_pmu_enable_counter_int,
- .disable_counter_int = hisi_ddrc_pmu_disable_counter_int,
- .write_counter = hisi_ddrc_pmu_write_counter,
- .read_counter = hisi_ddrc_pmu_read_counter,
+ .get_event_idx = hisi_ddrc_pmu_v2_get_event_idx,
+ .start_counters = hisi_ddrc_pmu_v2_start_counters,
+ .stop_counters = hisi_ddrc_pmu_v2_stop_counters,
+ .enable_counter = hisi_ddrc_pmu_v2_enable_counter,
+ .disable_counter = hisi_ddrc_pmu_v2_disable_counter,
+ .enable_counter_int = hisi_ddrc_pmu_v2_enable_counter_int,
+ .disable_counter_int = hisi_ddrc_pmu_v2_disable_counter_int,
+ .write_counter = hisi_ddrc_pmu_v2_write_counter,
+ .read_counter = hisi_ddrc_pmu_v2_read_counter,
+ .get_int_status = hisi_ddrc_pmu_v2_get_int_status,
+ .clear_int_status = hisi_ddrc_pmu_v2_clear_int_status,
};
static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
@@ -353,16 +460,25 @@ static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
+ ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev);
if (ret)
return ret;
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
+ ddrc_pmu->counter_bits = 48;
+ ddrc_pmu->check_event = DDRC_V2_NR_EVENTS;
+ ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups;
+ ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops;
+ } else {
+ ddrc_pmu->counter_bits = 32;
+ ddrc_pmu->check_event = DDRC_V1_NR_EVENTS;
+ ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups;
+ ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops;
+ }
+
ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
- ddrc_pmu->counter_bits = 32;
- ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
ddrc_pmu->dev = &pdev->dev;
ddrc_pmu->on_cpu = -1;
- ddrc_pmu->check_event = 7;
return 0;
}
@@ -390,8 +506,16 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
return ret;
}
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
- ddrc_pmu->sccl_id, ddrc_pmu->index_id);
+ if (ddrc_pmu->identifier >= HISI_PMU_V2)
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "hisi_sccl%u_ddrc%u_%u",
+ ddrc_pmu->sccl_id, ddrc_pmu->index_id,
+ ddrc_pmu->sub_id);
+ else
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
+ ddrc_pmu->index_id);
+
ddrc_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
@@ -404,7 +528,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
.start = hisi_uncore_pmu_start,
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
- .attr_groups = hisi_ddrc_pmu_attr_groups,
+ .attr_groups = ddrc_pmu->pmu_events.attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
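The heart of the DDRC rework is this probe-time dispatch: DDRC_VERSION selects one of two hisi_uncore_ops tables once, and the shared core never branches on the version again. A condensed illustration of how the vtable is consumed (hisi_pmu and hisi_uncore_ops as declared in hisi_uncore_pmu.h; the wrapper itself is illustrative, not from the patch):

	/* Illustrative wrapper: callers stay version-agnostic. */
	static u64 ddrc_read_hw(struct hisi_pmu *pmu, struct hw_perf_event *hwc)
	{
		/* v1 resolves to a readl() of a fixed per-event offset,
		 * v2 to a readq() at DDRC_V2_EVENT_CNT + idx * 8. */
		return pmu->ops->read_counter(pmu, hwc);
	}

With the sub-id folded into the name, a v2 instance appears as, e.g., hisi_sccl1_ddrc0_0, so perf can address one DDRC unambiguously: perf stat -a -e hisi_sccl1_ddrc0_0/flux_wr/.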
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 3402f1a395a8..0316fabe32f1 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
-#include <linux/platform_device.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
@@ -26,18 +25,136 @@
#define HHA_VERSION 0x1cf0
#define HHA_PERF_CTRL 0x1E00
#define HHA_EVENT_CTRL 0x1E04
+#define HHA_SRCID_CTRL 0x1E08
+#define HHA_DATSRC_CTRL 0x1BF0
#define HHA_EVENT_TYPE0 0x1E80
/*
- * Each counter is 48-bits and [48:63] are reserved
- * which are Read-As-Zero and Writes-Ignored.
+ * If the HW version only supports a 48-bit counter, then
+ * bits [63:48] are reserved, which are Read-As-Zero and
+ * Writes-Ignored.
*/
#define HHA_CNT0_LOWER 0x1F00
-/* HHA has 16-counters */
-#define HHA_NR_COUNTERS 0x10
+/* HHA PMU v1 has 16 counters and v2 only has 8 counters */
+#define HHA_V1_NR_COUNTERS 0x10
+#define HHA_V2_NR_COUNTERS 0x8
#define HHA_PERF_CTRL_EN 0x1
+#define HHA_TRACETAG_EN BIT(31)
+#define HHA_SRCID_EN BIT(2)
+#define HHA_SRCID_CMD_SHIFT 6
+#define HHA_SRCID_MSK_SHIFT 20
+#define HHA_SRCID_CMD GENMASK(16, 6)
+#define HHA_SRCID_MSK GENMASK(30, 20)
+#define HHA_DATSRC_SKT_EN BIT(23)
#define HHA_EVTYPE_NONE 0xff
+#define HHA_V1_NR_EVENT 0x65
+#define HHA_V2_NR_EVENT 0xCE
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 22, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 23, 23);
+
+static void hisi_hha_pmu_enable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val |= HHA_TRACETAG_EN;
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_clear_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val &= ~HHA_TRACETAG_EN;
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+}
+
+static void hisi_hha_pmu_config_ds(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
+ val |= HHA_DATSRC_SKT_EN;
+ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_clear_ds(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
+ val &= ~HHA_DATSRC_SKT_EN;
+ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_config_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val, msk;
+
+ msk = hisi_get_srcid_msk(event);
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val |= HHA_SRCID_EN | (cmd << HHA_SRCID_CMD_SHIFT) |
+ (msk << HHA_SRCID_MSK_SHIFT);
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_disable_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val &= ~(HHA_SRCID_EN | HHA_SRCID_MSK | HHA_SRCID_CMD);
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_hha_pmu_enable_tracetag(event);
+ hisi_hha_pmu_config_ds(event);
+ hisi_hha_pmu_config_srcid(event);
+ }
+}
+
+static void hisi_hha_pmu_disable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_hha_pmu_disable_srcid(event);
+ hisi_hha_pmu_clear_ds(event);
+ hisi_hha_pmu_clear_tracetag(event);
+ }
+}
/*
* Select the counter register offset using the counter index
@@ -51,29 +168,15 @@ static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
- dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
- return 0;
- }
-
/* Read 64 bits and like L3C, top 16 bits are RAZ */
- return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+ return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}
static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc, u64 val)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
- dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
- }
-
/* Write 64 bits and like L3C, top 16 bits are WI */
- writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+ writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}
static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
@@ -169,65 +272,20 @@ static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
writel(val, hha_pmu->base + HHA_INT_MASK);
}
-static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
+static u32 hisi_hha_pmu_get_int_status(struct hisi_pmu *hha_pmu)
{
- struct hisi_pmu *hha_pmu = dev_id;
- struct perf_event *event;
- unsigned long overflown;
- int idx;
-
- /* Read HHA_INT_STATUS register */
- overflown = readl(hha_pmu->base + HHA_INT_STATUS);
- if (!overflown)
- return IRQ_NONE;
-
- /*
- * Find the counter index which overflowed if the bit was set
- * and handle it
- */
- for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
- /* Write 1 to clear the IRQ status flag */
- writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
-
- /* Get the corresponding event struct */
- event = hha_pmu->pmu_events.hw_events[idx];
- if (!event)
- continue;
-
- hisi_uncore_pmu_event_update(event);
- hisi_uncore_pmu_set_event_period(event);
- }
-
- return IRQ_HANDLED;
+ return readl(hha_pmu->base + HHA_INT_STATUS);
}
-static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
- struct platform_device *pdev)
+static void hisi_hha_pmu_clear_int_status(struct hisi_pmu *hha_pmu, int idx)
{
- int irq, ret;
-
- /* Read and init IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
- IRQF_NOBALANCING | IRQF_NO_THREAD,
- dev_name(&pdev->dev), hha_pmu);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Fail to request IRQ:%d ret:%d\n", irq, ret);
- return ret;
- }
-
- hha_pmu->irq = irq;
-
- return 0;
+ writel(1 << idx, hha_pmu->base + HHA_INT_CLEAR);
}
static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
{ "HISI0243", },
- {},
+ { "HISI0244", },
+ {}
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
@@ -237,13 +295,6 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
unsigned long long id;
acpi_status status;
- status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
- "_UID", NULL, &id);
- if (ACPI_FAILURE(status))
- return -EINVAL;
-
- hha_pmu->index_id = id;
-
/*
* Use SCCL_ID and UID to identify the HHA PMU, while
* SCCL_ID is in MPIDR[aff2].
@@ -253,6 +304,22 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
return -EINVAL;
}
+
+ /*
+ * Early versions of the BIOS supported _UID by mistake; accept both,
+ * preferring "hisilicon,idx-id" when it is available.
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &hha_pmu->index_id)) {
+ status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ "_UID", NULL, &id);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev, "Cannot read idx-id!\n");
+ return -EINVAL;
+ }
+
+ hha_pmu->index_id = id;
+ }
/* HHA PMUs only share the same SCCL */
hha_pmu->ccl_id = -1;
@@ -267,17 +334,31 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
return 0;
}
-static struct attribute *hisi_hha_pmu_format_attr[] = {
+static struct attribute *hisi_hha_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
};
-static const struct attribute_group hisi_hha_pmu_format_group = {
+static const struct attribute_group hisi_hha_pmu_v1_format_group = {
+ .name = "format",
+ .attrs = hisi_hha_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_hha_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:0-10"),
+ HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:11-21"),
+ HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:22"),
+ HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:23"),
+ NULL
+};
+
+static const struct attribute_group hisi_hha_pmu_v2_format_group = {
.name = "format",
- .attrs = hisi_hha_pmu_format_attr,
+ .attrs = hisi_hha_pmu_v2_format_attr,
};
-static struct attribute *hisi_hha_pmu_events_attr[] = {
+static struct attribute *hisi_hha_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
@@ -307,9 +388,23 @@ static struct attribute *hisi_hha_pmu_events_attr[] = {
NULL,
};
-static const struct attribute_group hisi_hha_pmu_events_group = {
+static const struct attribute_group hisi_hha_pmu_v1_events_group = {
.name = "events",
- .attrs = hisi_hha_pmu_events_attr,
+ .attrs = hisi_hha_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_hha_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
+ HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
+ HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
+ HISI_PMU_EVENT_ATTR(hha_retry, 0x2e),
+ HISI_PMU_EVENT_ATTR(cycles, 0x55),
+ NULL
+};
+
+static const struct attribute_group hisi_hha_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_hha_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -335,14 +430,22 @@ static const struct attribute_group hisi_hha_pmu_identifier_group = {
.attrs = hisi_hha_pmu_identifier_attrs,
};
-static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
- &hisi_hha_pmu_format_group,
- &hisi_hha_pmu_events_group,
+static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
+ &hisi_hha_pmu_v1_format_group,
+ &hisi_hha_pmu_v1_events_group,
&hisi_hha_pmu_cpumask_attr_group,
&hisi_hha_pmu_identifier_group,
NULL,
};
+static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
+ &hisi_hha_pmu_v2_format_group,
+ &hisi_hha_pmu_v2_events_group,
+ &hisi_hha_pmu_cpumask_attr_group,
+ &hisi_hha_pmu_identifier_group,
+ NULL
+};
+
static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
.write_evtype = hisi_hha_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
@@ -354,6 +457,10 @@ static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
.disable_counter_int = hisi_hha_pmu_disable_counter_int,
.write_counter = hisi_hha_pmu_write_counter,
.read_counter = hisi_hha_pmu_read_counter,
+ .get_int_status = hisi_hha_pmu_get_int_status,
+ .clear_int_status = hisi_hha_pmu_clear_int_status,
+ .enable_filter = hisi_hha_pmu_enable_filter,
+ .disable_filter = hisi_hha_pmu_disable_filter,
};
static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
@@ -365,16 +472,24 @@ static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
+ ret = hisi_uncore_pmu_init_irq(hha_pmu, pdev);
if (ret)
return ret;
- hha_pmu->num_counters = HHA_NR_COUNTERS;
- hha_pmu->counter_bits = 48;
+ if (hha_pmu->identifier >= HISI_PMU_V2) {
+ hha_pmu->counter_bits = 64;
+ hha_pmu->check_event = HHA_V2_NR_EVENT;
+ hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v2_attr_groups;
+ hha_pmu->num_counters = HHA_V2_NR_COUNTERS;
+ } else {
+ hha_pmu->counter_bits = 48;
+ hha_pmu->check_event = HHA_V1_NR_EVENT;
+ hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v1_attr_groups;
+ hha_pmu->num_counters = HHA_V1_NR_COUNTERS;
+ }
hha_pmu->ops = &hisi_uncore_hha_ops;
hha_pmu->dev = &pdev->dev;
hha_pmu->on_cpu = -1;
- hha_pmu->check_event = 0x65;
return 0;
}
@@ -416,7 +531,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
.start = hisi_uncore_pmu_start,
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
- .attr_groups = hisi_hha_pmu_attr_groups,
+ .attr_groups = hha_pmu->pmu_events.attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
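The new HHA filters all flow through HISI_PMU_EVENT_ATTR_EXTRACTOR(), which carves a named bit-field out of event->attr.config1; the field boundaries match the format strings above (srcid_cmd in config1:0-10, srcid_msk in config1:11-21, and so on). A standalone sketch of the same extraction, assuming the macro reduces to a shift-and-mask:

	#include <stdint.h>
	#include <stdio.h>

	/* Extract bits [hi:lo] of config1, as the extractor macro is assumed to do. */
	static uint64_t field(uint64_t config1, unsigned int hi, unsigned int lo)
	{
		return (config1 >> lo) & ((1ULL << (hi - lo + 1)) - 1);
	}

	int main(void)
	{
		uint64_t config1 = 0x3ff800;	/* srcid_msk = 0x7ff, all else 0 */

		printf("srcid_cmd=%llx srcid_msk=%llx tracetag_en=%llu\n",
		       (unsigned long long)field(config1, 10, 0),
		       (unsigned long long)field(config1, 21, 11),
		       (unsigned long long)field(config1, 22, 22));
		return 0;	/* prints: srcid_cmd=0 srcid_msk=7ff tracetag_en=0 */
	}

On the command line the same value could be supplied either as a raw config1 or through the named format fields.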
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 7d792435c2aa..bf9f7772cac9 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
-#include <linux/platform_device.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
@@ -24,12 +23,17 @@
#define L3C_INT_MASK 0x0800
#define L3C_INT_STATUS 0x0808
#define L3C_INT_CLEAR 0x080c
+#define L3C_CORE_CTRL 0x1b04
+#define L3C_TRACETAG_CTRL 0x1b20
+#define L3C_DATSRC_TYPE 0x1b48
+#define L3C_DATSRC_CTRL 0x1bf0
#define L3C_EVENT_CTRL 0x1c00
#define L3C_VERSION 0x1cf0
#define L3C_EVENT_TYPE0 0x1d00
/*
- * Each counter is 48-bits and [48:63] are reserved
- * which are Read-As-Zero and Writes-Ignored.
+ * If the HW version only supports a 48-bit counter, then
+ * bits [63:48] are reserved, which are Read-As-Zero and
+ * Writes-Ignored.
*/
#define L3C_CNTR0_LOWER 0x1e00
@@ -37,7 +41,186 @@
#define L3C_NR_COUNTERS 0x8
#define L3C_PERF_CTRL_EN 0x10000
+#define L3C_TRACETAG_EN BIT(31)
+#define L3C_TRACETAG_REQ_SHIFT 7
+#define L3C_TRACETAG_MARK_EN BIT(0)
+#define L3C_TRACETAG_REQ_EN (L3C_TRACETAG_MARK_EN | BIT(2))
+#define L3C_TRACETAG_CORE_EN (L3C_TRACETAG_MARK_EN | BIT(3))
+#define L3C_CORE_EN BIT(20)
+#define L3C_CORE_NONE 0x0
+#define L3C_DATSRC_MASK 0xFF
+#define L3C_DATSRC_SKT_EN BIT(23)
+#define L3C_DATSRC_NONE 0x0
#define L3C_EVTYPE_NONE 0xff
+#define L3C_V1_NR_EVENTS 0x59
+#define L3C_V2_NR_EVENTS 0xFF
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
+
+static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_req = hisi_get_tt_req(event);
+
+ if (tt_req) {
+ u32 val;
+
+ /* Set request-type for tracetag */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
+ val |= L3C_TRACETAG_REQ_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+
+ /* Enable request-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_TRACETAG_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_req = hisi_get_tt_req(event);
+
+ if (tt_req) {
+ u32 val;
+
+ /* Clear request-type */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT);
+ val &= ~L3C_TRACETAG_REQ_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+
+ /* Disable request-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~L3C_TRACETAG_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 reg, reg_idx, shift, val;
+ int idx = hwc->idx;
+
+ /*
+ * Select the appropriate datasource register (L3C_DATSRC_TYPE0/1).
+ * There are 2 datasource ctrl registers for the 8 hardware counters.
+ * Each datasrc field is 8 bits: the first 4 hardware counters use
+ * L3C_DATSRC_TYPE0 and the last 4 use L3C_DATSRC_TYPE1.
+ */
+ reg = L3C_DATSRC_TYPE + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ val = readl(l3c_pmu->base + reg);
+ val &= ~(L3C_DATSRC_MASK << shift);
+ val |= ds_cfg << shift;
+ writel(val, l3c_pmu->base + reg);
+}
+
+static void hisi_l3c_pmu_config_ds(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_cfg = hisi_get_datasrc_cfg(event);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_cfg)
+ hisi_l3c_pmu_write_ds(event, ds_cfg);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val |= L3C_DATSRC_SKT_EN;
+ writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_cfg = hisi_get_datasrc_cfg(event);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_cfg)
+ hisi_l3c_pmu_write_ds(event, L3C_DATSRC_NONE);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val &= ~L3C_DATSRC_SKT_EN;
+ writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 core = hisi_get_tt_core(event);
+
+ if (core) {
+ u32 val;
+
+ /* Config and enable core information */
+ writel(core, l3c_pmu->base + L3C_CORE_CTRL);
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+
+ /* Enable core-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val |= L3C_TRACETAG_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 core = hisi_get_tt_core(event);
+
+ if (core) {
+ u32 val;
+
+ /* Clear core information */
+ writel(L3C_CORE_NONE, l3c_pmu->base + L3C_CORE_CTRL);
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~L3C_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+
+ /* Disable core-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val &= ~L3C_TRACETAG_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_l3c_pmu_config_req_tracetag(event);
+ hisi_l3c_pmu_config_core_tracetag(event);
+ hisi_l3c_pmu_config_ds(event);
+ }
+}
+
+static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_l3c_pmu_clear_ds(event);
+ hisi_l3c_pmu_clear_core_tracetag(event);
+ hisi_l3c_pmu_clear_req_tracetag(event);
+ }
+}
/*
* Select the counter register offset using the counter index
@@ -50,29 +233,13 @@ static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
- dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
- return 0;
- }
-
- /* Read 64-bits and the upper 16 bits are RAZ */
- return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+ return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc, u64 val)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
- dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
- }
-
- /* Write 64-bits and the upper 16 bits are WI */
- writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+ writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
@@ -168,81 +335,26 @@ static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
writel(val, l3c_pmu->base + L3C_INT_MASK);
}
-static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
+static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
{
- struct hisi_pmu *l3c_pmu = dev_id;
- struct perf_event *event;
- unsigned long overflown;
- int idx;
-
- /* Read L3C_INT_STATUS register */
- overflown = readl(l3c_pmu->base + L3C_INT_STATUS);
- if (!overflown)
- return IRQ_NONE;
-
- /*
- * Find the counter index which overflowed if the bit was set
- * and handle it.
- */
- for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) {
- /* Write 1 to clear the IRQ status flag */
- writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR);
-
- /* Get the corresponding event struct */
- event = l3c_pmu->pmu_events.hw_events[idx];
- if (!event)
- continue;
-
- hisi_uncore_pmu_event_update(event);
- hisi_uncore_pmu_set_event_period(event);
- }
-
- return IRQ_HANDLED;
+ return readl(l3c_pmu->base + L3C_INT_STATUS);
}
-static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
- struct platform_device *pdev)
+static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
{
- int irq, ret;
-
- /* Read and init IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
- IRQF_NOBALANCING | IRQF_NO_THREAD,
- dev_name(&pdev->dev), l3c_pmu);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Fail to request IRQ:%d ret:%d\n", irq, ret);
- return ret;
- }
-
- l3c_pmu->irq = irq;
-
- return 0;
+ writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR);
}
static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
{ "HISI0213", },
- {},
+ { "HISI0214", },
+ {}
};
MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
- unsigned long long id;
- acpi_status status;
-
- status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
- "_UID", NULL, &id);
- if (ACPI_FAILURE(status))
- return -EINVAL;
-
- l3c_pmu->index_id = id;
-
/*
* Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
* SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
@@ -270,17 +382,31 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return 0;
}
-static struct attribute *hisi_l3c_pmu_format_attr[] = {
+static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
};
-static const struct attribute_group hisi_l3c_pmu_format_group = {
+static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
+ .name = "format",
+ .attrs = hisi_l3c_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
+ HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
+ HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
.name = "format",
- .attrs = hisi_l3c_pmu_format_attr,
+ .attrs = hisi_l3c_pmu_v2_format_attr,
};
-static struct attribute *hisi_l3c_pmu_events_attr[] = {
+static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02),
@@ -297,9 +423,22 @@ static struct attribute *hisi_l3c_pmu_events_attr[] = {
NULL,
};
-static const struct attribute_group hisi_l3c_pmu_events_group = {
+static const struct attribute_group hisi_l3c_pmu_v1_events_group = {
+ .name = "events",
+ .attrs = hisi_l3c_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(l3c_hit, 0x48),
+ HISI_PMU_EVENT_ATTR(cycles, 0x7f),
+ HISI_PMU_EVENT_ATTR(l3c_ref, 0xb8),
+ HISI_PMU_EVENT_ATTR(dat_access, 0xb9),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.name = "events",
- .attrs = hisi_l3c_pmu_events_attr,
+ .attrs = hisi_l3c_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -325,14 +464,22 @@ static const struct attribute_group hisi_l3c_pmu_identifier_group = {
.attrs = hisi_l3c_pmu_identifier_attrs,
};
-static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
- &hisi_l3c_pmu_format_group,
- &hisi_l3c_pmu_events_group,
+static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
+ &hisi_l3c_pmu_v1_format_group,
+ &hisi_l3c_pmu_v1_events_group,
&hisi_l3c_pmu_cpumask_attr_group,
&hisi_l3c_pmu_identifier_group,
NULL,
};
+static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
+ &hisi_l3c_pmu_v2_format_group,
+ &hisi_l3c_pmu_v2_events_group,
+ &hisi_l3c_pmu_cpumask_attr_group,
+ &hisi_l3c_pmu_identifier_group,
+ NULL
+};
+
static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.write_evtype = hisi_l3c_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
@@ -344,6 +491,10 @@ static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.disable_counter_int = hisi_l3c_pmu_disable_counter_int,
.write_counter = hisi_l3c_pmu_write_counter,
.read_counter = hisi_l3c_pmu_read_counter,
+ .get_int_status = hisi_l3c_pmu_get_int_status,
+ .clear_int_status = hisi_l3c_pmu_clear_int_status,
+ .enable_filter = hisi_l3c_pmu_enable_filter,
+ .disable_filter = hisi_l3c_pmu_disable_filter,
};
static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
@@ -355,16 +506,24 @@ static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
+ ret = hisi_uncore_pmu_init_irq(l3c_pmu, pdev);
if (ret)
return ret;
+ if (l3c_pmu->identifier >= HISI_PMU_V2) {
+ l3c_pmu->counter_bits = 64;
+ l3c_pmu->check_event = L3C_V2_NR_EVENTS;
+ l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups;
+ } else {
+ l3c_pmu->counter_bits = 48;
+ l3c_pmu->check_event = L3C_V1_NR_EVENTS;
+ l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups;
+ }
+
l3c_pmu->num_counters = L3C_NR_COUNTERS;
- l3c_pmu->counter_bits = 48;
l3c_pmu->ops = &hisi_uncore_l3c_ops;
l3c_pmu->dev = &pdev->dev;
l3c_pmu->on_cpu = -1;
- l3c_pmu->check_event = 0x59;
return 0;
}
@@ -392,8 +551,12 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
return ret;
}
+ /*
+ * CCL_ID is used to identify the L3C within the same SCCL; early
+ * BIOS versions used _UID for this by mistake.
+ */
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
- l3c_pmu->sccl_id, l3c_pmu->index_id);
+ l3c_pmu->sccl_id, l3c_pmu->ccl_id);
l3c_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
@@ -406,7 +569,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
.start = hisi_uncore_pmu_start,
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
- .attr_groups = hisi_l3c_pmu_attr_groups,
+ .attr_groups = l3c_pmu->pmu_events.attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
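hisi_l3c_pmu_write_ds() above packs four 8-bit datasrc fields into each 32-bit L3C_DATSRC_TYPE register: counter idx lands in register L3C_DATSRC_TYPE + (idx / 4) * 4 at bit offset 8 * (idx % 4). A quick standalone check of that arithmetic, using the offset from the hunk:

	#include <stdio.h>

	#define L3C_DATSRC_TYPE	0x1b48	/* base offset, from the patch above */

	int main(void)
	{
		for (int idx = 0; idx < 8; idx++)
			printf("idx=%d -> reg=0x%x shift=%d\n", idx,
			       L3C_DATSRC_TYPE + (idx / 4) * 4, 8 * (idx % 4));
		return 0;	/* e.g. idx=5 -> reg=0x1b4c shift=8 */
	}

The PA driver's hisi_pa_pmu_write_evtype() in the new file below uses the identical layout for its PA_EVENT_TYPE0/1 registers.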
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
new file mode 100644
index 000000000000..14f23eb31248
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon PA uncore Hardware event counters support
+ *
+ * Copyright (C) 2020 HiSilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* PA register definition */
+#define PA_PERF_CTRL 0x1c00
+#define PA_EVENT_CTRL 0x1c04
+#define PA_TT_CTRL 0x1c08
+#define PA_TGTID_CTRL 0x1c14
+#define PA_SRCID_CTRL 0x1c18
+#define PA_INT_MASK 0x1c70
+#define PA_INT_STATUS 0x1c78
+#define PA_INT_CLEAR 0x1c7c
+#define PA_EVENT_TYPE0 0x1c80
+#define PA_PMU_VERSION 0x1cf0
+#define PA_EVENT_CNT0_L 0x1f00
+
+#define PA_EVTYPE_MASK 0xff
+#define PA_NR_COUNTERS 0x8
+#define PA_PERF_CTRL_EN BIT(0)
+#define PA_TRACETAG_EN BIT(4)
+#define PA_TGTID_EN BIT(11)
+#define PA_SRCID_EN BIT(11)
+#define PA_TGTID_NONE 0
+#define PA_SRCID_NONE 0
+#define PA_TGTID_MSK_SHIFT 12
+#define PA_SRCID_MSK_SHIFT 12
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_cmd, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_msk, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+
+static void hisi_pa_pmu_enable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_TT_CTRL);
+ val |= PA_TRACETAG_EN;
+ writel(val, pa_pmu->base + PA_TT_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_clear_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_TT_CTRL);
+ val &= ~PA_TRACETAG_EN;
+ writel(val, pa_pmu->base + PA_TT_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_config_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_tgtid_cmd(event);
+
+ if (cmd) {
+ u32 msk = hisi_get_tgtid_msk(event);
+ u32 val = cmd | PA_TGTID_EN | (msk << PA_TGTID_MSK_SHIFT);
+
+ writel(val, pa_pmu->base + PA_TGTID_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_clear_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_tgtid_cmd(event);
+
+ if (cmd)
+ writel(PA_TGTID_NONE, pa_pmu->base + PA_TGTID_CTRL);
+}
+
+static void hisi_pa_pmu_config_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 msk = hisi_get_srcid_msk(event);
+ u32 val = cmd | PA_SRCID_EN | (msk << PA_SRCID_MSK_SHIFT);
+
+ writel(val, pa_pmu->base + PA_SRCID_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_clear_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd)
+ writel(PA_SRCID_NONE, pa_pmu->base + PA_SRCID_CTRL);
+}
+
+static void hisi_pa_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_pa_pmu_enable_tracetag(event);
+ hisi_pa_pmu_config_srcid(event);
+ hisi_pa_pmu_config_tgtid(event);
+ }
+}
+
+static void hisi_pa_pmu_disable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_pa_pmu_clear_tgtid(event);
+ hisi_pa_pmu_clear_srcid(event);
+ hisi_pa_pmu_clear_tracetag(event);
+ }
+}
+
+static u32 hisi_pa_pmu_get_counter_offset(int idx)
+{
+ return (PA_EVENT_CNT0_L + idx * 8);
+}
+
+static u64 hisi_pa_pmu_read_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_pa_pmu_write_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val, pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_pa_pmu_write_evtype(struct hisi_pmu *pa_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+ * Select the appropriate event select register (PA_EVENT_TYPE0/1).
+ * There are 2 event select registers for the 8 hardware counters.
+ * Each event code is 8 bits: the first 4 hardware counters use
+ * PA_EVENT_TYPE0 and the last 4 use PA_EVENT_TYPE1.
+ */
+ reg = PA_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ /* Write the event code to the PA_EVENT_TYPEx register */
+ val = readl(pa_pmu->base + reg);
+ val &= ~(PA_EVTYPE_MASK << shift);
+ val |= (type << shift);
+ writel(val, pa_pmu->base + reg);
+}
+
+static void hisi_pa_pmu_start_counters(struct hisi_pmu *pa_pmu)
+{
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_PERF_CTRL);
+ val |= PA_PERF_CTRL_EN;
+ writel(val, pa_pmu->base + PA_PERF_CTRL);
+}
+
+static void hisi_pa_pmu_stop_counters(struct hisi_pmu *pa_pmu)
+{
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_PERF_CTRL);
+ val &= ~(PA_PERF_CTRL_EN);
+ writel(val, pa_pmu->base + PA_PERF_CTRL);
+}
+
+static void hisi_pa_pmu_enable_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in PA_EVENT_CTRL register */
+ val = readl(pa_pmu->base + PA_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, pa_pmu->base + PA_EVENT_CTRL);
+}
+
+static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in PA_EVENT_CTRL register */
+ val = readl(pa_pmu->base + PA_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, pa_pmu->base + PA_EVENT_CTRL);
+}
+
+static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(pa_pmu->base + PA_INT_MASK);
+ val &= ~(1 << hwc->idx);
+ writel(val, pa_pmu->base + PA_INT_MASK);
+}
+
+static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(pa_pmu->base + PA_INT_MASK);
+ val |= 1 << hwc->idx;
+ writel(val, pa_pmu->base + PA_INT_MASK);
+}
+
+static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu)
+{
+ return readl(pa_pmu->base + PA_INT_STATUS);
+}
+
+static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
+{
+ writel(1 << idx, pa_pmu->base + PA_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
+ { "HISI0273", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
+
+static int hisi_pa_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *pa_pmu)
+{
+ /*
+ * Use the SCCL_ID and the index ID to identify the PA PMU. SCCL_ID
+ * is the ID of the SCCL nearest this SICL, and a CPU core from that
+ * SCCL is chosen to manage this PMU.
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &pa_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &pa_pmu->index_id)) {
+ dev_err(&pdev->dev, "Cannot read idx-id!\n");
+ return -EINVAL;
+ }
+
+ pa_pmu->ccl_id = -1;
+
+ pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pa_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n");
+ return PTR_ERR(pa_pmu->base);
+ }
+
+ pa_pmu->identifier = readl(pa_pmu->base + PA_PMU_VERSION);
+
+ return 0;
+}
+
+static struct attribute *hisi_pa_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(tgtid_cmd, "config1:0-10"),
+ HISI_PMU_FORMAT_ATTR(tgtid_msk, "config1:11-21"),
+ HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
+ HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
+ HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
+ NULL,
+};
+
+static const struct attribute_group hisi_pa_pmu_v2_format_group = {
+ .name = "format",
+ .attrs = hisi_pa_pmu_v2_format_attr,
+};
+
+static struct attribute *hisi_pa_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_req, 0x40),
+ HISI_PMU_EVENT_ATTR(tx_req, 0x5c),
+ HISI_PMU_EVENT_ATTR(cycle, 0x78),
+ NULL
+};
+
+static const struct attribute_group hisi_pa_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_pa_pmu_v2_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
+ .attrs = hisi_pa_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_pa_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
+ &hisi_pa_pmu_identifier_attr.attr,
+ NULL
+};
+
+static struct attribute_group hisi_pa_pmu_identifier_group = {
+ .attrs = hisi_pa_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
+ &hisi_pa_pmu_v2_format_group,
+ &hisi_pa_pmu_v2_events_group,
+ &hisi_pa_pmu_cpumask_attr_group,
+ &hisi_pa_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_pa_ops = {
+ .write_evtype = hisi_pa_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_pa_pmu_start_counters,
+ .stop_counters = hisi_pa_pmu_stop_counters,
+ .enable_counter = hisi_pa_pmu_enable_counter,
+ .disable_counter = hisi_pa_pmu_disable_counter,
+ .enable_counter_int = hisi_pa_pmu_enable_counter_int,
+ .disable_counter_int = hisi_pa_pmu_disable_counter_int,
+ .write_counter = hisi_pa_pmu_write_counter,
+ .read_counter = hisi_pa_pmu_read_counter,
+ .get_int_status = hisi_pa_pmu_get_int_status,
+ .clear_int_status = hisi_pa_pmu_clear_int_status,
+ .enable_filter = hisi_pa_pmu_enable_filter,
+ .disable_filter = hisi_pa_pmu_disable_filter,
+};
+
+static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *pa_pmu)
+{
+ int ret;
+
+ ret = hisi_pa_pmu_init_data(pdev, pa_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_uncore_pmu_init_irq(pa_pmu, pdev);
+ if (ret)
+ return ret;
+
+ pa_pmu->pmu_events.attr_groups = hisi_pa_pmu_v2_attr_groups;
+ pa_pmu->num_counters = PA_NR_COUNTERS;
+ pa_pmu->ops = &hisi_uncore_pa_ops;
+ pa_pmu->check_event = 0xB0;
+ pa_pmu->counter_bits = 64;
+ pa_pmu->dev = &pdev->dev;
+ pa_pmu->on_cpu = -1;
+
+ return 0;
+}
+
+static int hisi_pa_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *pa_pmu;
+ char *name;
+ int ret;
+
+ pa_pmu = devm_kzalloc(&pdev->dev, sizeof(*pa_pmu), GFP_KERNEL);
+ if (!pa_pmu)
+ return -ENOMEM;
+
+ ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
+ if (ret)
+ return ret;
+ /*
+ * The PA is attached to a SICL, and the CPU core chosen to manage
+ * this PMU comes from the nearest SCCL, whose SCCL_ID is one greater
+ * than the SICL_ID.
+ */
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u",
+ pa_pmu->sccl_id - 1, pa_pmu->index_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ &pa_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ pa_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = pa_pmu->pmu_events.attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ &pa_pmu->node);
+ irq_set_affinity_hint(pa_pmu->irq, NULL);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pa_pmu);
+ return ret;
+}
+
+static int hisi_pa_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&pa_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ &pa_pmu->node);
+ irq_set_affinity_hint(pa_pmu->irq, NULL);
+
+ return 0;
+}
+
+static struct platform_driver hisi_pa_pmu_driver = {
+ .driver = {
+ .name = "hisi_pa_pmu",
+ .acpi_match_table = hisi_pa_pmu_acpi_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_pa_pmu_probe,
+ .remove = hisi_pa_pmu_remove,
+};
+
+static int __init hisi_pa_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ "AP_PERF_ARM_HISI_PA_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("PA PMU: cpuhp state setup failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_pa_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
+
+ return ret;
+}
+module_init(hisi_pa_pmu_module_init);
+
+static void __exit hisi_pa_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_pa_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
+}
+module_exit(hisi_pa_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 9dbdc3fc3bb4..13c68b5e39c4 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -21,7 +21,7 @@
#include "hisi_uncore_pmu.h"
#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
-#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1)
+#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0))
/*
* PMU format attributes
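The HISI_MAX_PERIOD() change is behaviour-preserving: for a counter width nr, GENMASK_ULL((nr) - 1, 0) evaluates to the same all-ones mask as BIT_ULL(nr) - 1, but it stays well-defined at nr == 64 (the width the new v2 PMUs use), where the old form would shift by the full type width. A worked sketch of the arithmetic, with illustrative widths:

#include <linux/bits.h>
#include <linux/types.h>

/* 48-bit counter: both forms evaluate to 0x0000ffffffffffff */
static const u64 mask48_old = BIT_ULL(48) - 1;
static const u64 mask48_new = GENMASK_ULL(47, 0);

/*
 * 64-bit counter: BIT_ULL(64) would shift by the type width, which is
 * undefined behaviour, while GENMASK_ULL(63, 0) is simply ~0ULL.
 */
static const u64 mask64_new = GENMASK_ULL(63, 0);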
@@ -33,7 +33,7 @@ ssize_t hisi_format_sysfs_show(struct device *dev,
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "%s\n", (char *)eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
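The sprintf() to sysfs_emit() conversions in this hunk, and in the qcom/thunderx2/xgene hunks later in the patch, are mechanical: sysfs_emit() takes the same format arguments but additionally verifies that the buffer is the page-aligned PAGE_SIZE buffer sysfs passes to ->show() and bounds the output to it. A minimal sketch of the pattern, using a hypothetical attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical ->show() callback: sysfs_emit() is a drop-in for
 * sprintf() here and still returns the number of bytes written.
 */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}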
@@ -47,7 +47,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+ return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
@@ -59,7 +59,7 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
- return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
+ return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
@@ -96,12 +96,6 @@ static bool hisi_validate_event_group(struct perf_event *event)
return counters <= hisi_pmu->num_counters;
}
-int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx)
-{
- return idx >= 0 && idx < hisi_pmu->num_counters;
-}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_counter_valid);
-
int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
@@ -125,19 +119,68 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
- return snprintf(page, PAGE_SIZE, "0x%08x\n", hisi_pmu->identifier);
+ return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
- if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
- dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
+ clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+}
+
+static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
+{
+ struct hisi_pmu *hisi_pmu = data;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ overflown = hisi_pmu->ops->get_int_status(hisi_pmu);
+ if (!overflown)
+ return IRQ_NONE;
+
+	/*
+	 * Each set bit in the overflow status marks a counter that
+	 * overflowed; update and re-arm the corresponding event.
+	 */
+ for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) {
+ /* Write 1 to clear the IRQ status flag */
+ hisi_pmu->ops->clear_int_status(hisi_pmu, idx);
+ /* Get the corresponding event struct */
+ event = hisi_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
}
- clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+ return IRQ_HANDLED;
+}
+
+int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), hisi_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+			"Failed to request IRQ: %d, ret: %d.\n", irq, ret);
+ return ret;
+ }
+
+ hisi_pmu->irq = irq;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
int hisi_uncore_pmu_event_init(struct perf_event *event)
{
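The shared hisi_uncore_pmu_isr() above only works for uncores that populate the two new callbacks; each driver maps them onto its own status and clear registers, as the PA and SLLC drivers in this patch do. A minimal sketch of the contract (the FOO_* offsets are illustrative, not from the patch):

/* Read the per-counter overflow bitmap for the shared ISR */
static u32 foo_pmu_get_int_status(struct hisi_pmu *foo_pmu)
{
	return readl(foo_pmu->base + FOO_INT_STATUS);
}

/* Acknowledge one counter by writing 1 to its clear bit */
static void foo_pmu_clear_int_status(struct hisi_pmu *foo_pmu, int idx)
{
	writel(1 << idx, foo_pmu->base + FOO_INT_CLEAR);
}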
@@ -202,6 +245,9 @@ static void hisi_uncore_pmu_enable_event(struct perf_event *event)
hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
HISI_GET_EVENTID(event));
+ if (hisi_pmu->ops->enable_filter)
+ hisi_pmu->ops->enable_filter(event);
+
hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
}
@@ -216,6 +262,9 @@ static void hisi_uncore_pmu_disable_event(struct perf_event *event)
hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);
+
+ if (hisi_pmu->ops->disable_filter)
+ hisi_pmu->ops->disable_filter(event);
}
void hisi_uncore_pmu_set_event_period(struct perf_event *event)
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 25b7cbe1f818..ea9d89bbc1ea 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -11,16 +11,19 @@
#ifndef __HISI_UNCORE_PMU_H__
#define __HISI_UNCORE_PMU_H__
+#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#undef pr_fmt
#define pr_fmt(fmt) "hisi_pmu: " fmt
+#define HISI_PMU_V2 0x30
#define HISI_MAX_COUNTERS 0x10
#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
@@ -34,6 +37,12 @@
#define HISI_PMU_EVENT_ATTR(_name, _config) \
HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config)
+#define HISI_PMU_EVENT_ATTR_EXTRACTOR(name, config, hi, lo) \
+ static inline u32 hisi_get_##name(struct perf_event *event) \
+ { \
+ return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \
+ }
+
struct hisi_pmu;
struct hisi_uncore_ops {
@@ -47,11 +56,16 @@ struct hisi_uncore_ops {
void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
void (*start_counters)(struct hisi_pmu *);
void (*stop_counters)(struct hisi_pmu *);
+ u32 (*get_int_status)(struct hisi_pmu *hisi_pmu);
+ void (*clear_int_status)(struct hisi_pmu *hisi_pmu, int idx);
+ void (*enable_filter)(struct perf_event *event);
+ void (*disable_filter)(struct perf_event *event);
};
struct hisi_pmu_hwevents {
struct perf_event *hw_events[HISI_MAX_COUNTERS];
DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS);
+ const struct attribute_group **attr_groups;
};
/* Generic pmu struct for different pmu types */
@@ -71,6 +85,8 @@ struct hisi_pmu {
void __iomem *base;
/* the ID of the PMU modules */
u32 index_id;
+ /* For DDRC PMU v2: each DDRC has more than one DMC */
+ u32 sub_id;
int num_counters;
int counter_bits;
/* check event code range */
@@ -78,7 +94,6 @@ struct hisi_pmu {
u32 identifier;
};
-int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
void hisi_uncore_pmu_read(struct perf_event *event);
int hisi_uncore_pmu_add(struct perf_event *event, int flags);
@@ -102,6 +117,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page);
-
+int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
+ struct platform_device *pdev);
#endif /* __HISI_UNCORE_PMU_H__ */
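For reference, HISI_PMU_EVENT_ATTR_EXTRACTOR() generates a FIELD_GET() wrapper per filter field; the SLLC driver's srcid_cmd declaration below, for example, expands to roughly:

/* Expansion of HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22) */
static inline u32 hisi_get_srcid_cmd(struct perf_event *event)
{
	return FIELD_GET(GENMASK_ULL(32, 22), event->attr.config1);
}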
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
new file mode 100644
index 000000000000..46be312fa126
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SLLC uncore hardware event counters support
+ *
+ * Copyright (C) 2020 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* SLLC register definition */
+#define SLLC_INT_MASK 0x0814
+#define SLLC_INT_STATUS 0x0818
+#define SLLC_INT_CLEAR 0x081c
+#define SLLC_PERF_CTRL 0x1c00
+#define SLLC_SRCID_CTRL 0x1c04
+#define SLLC_TGTID_CTRL 0x1c08
+#define SLLC_EVENT_CTRL 0x1c14
+#define SLLC_EVENT_TYPE0 0x1c18
+#define SLLC_VERSION 0x1cf0
+#define SLLC_EVENT_CNT0_L 0x1d00
+
+#define SLLC_EVTYPE_MASK 0xff
+#define SLLC_PERF_CTRL_EN BIT(0)
+#define SLLC_FILT_EN BIT(1)
+#define SLLC_TRACETAG_EN BIT(2)
+#define SLLC_SRCID_EN BIT(4)
+#define SLLC_SRCID_NONE 0x0
+#define SLLC_TGTID_EN BIT(5)
+#define SLLC_TGTID_NONE 0x0
+#define SLLC_TGTID_MIN_SHIFT 1
+#define SLLC_TGTID_MAX_SHIFT 12
+#define SLLC_SRCID_CMD_SHIFT 1
+#define SLLC_SRCID_MSK_SHIFT 12
+#define SLLC_NR_EVENTS 0x80
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_min, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_max, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+
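+/*
+ * Filter layout of perf_event_attr::config1, matching the extractors
+ * above and the "format" attribute group below:
+ *
+ *   bits  0-10  tgtid_min
+ *   bits 11-21  tgtid_max
+ *   bits 22-32  srcid_cmd
+ *   bits 33-43  srcid_msk
+ *   bit     44  tracetag_en
+ */
+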
+static bool tgtid_is_valid(u32 max, u32 min)
+{
+ return max > 0 && max >= min;
+}
+
+static void hisi_sllc_pmu_enable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_TRACETAG_EN | SLLC_FILT_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_disable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_TRACETAG_EN | SLLC_FILT_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_config_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 min = hisi_get_tgtid_min(event);
+ u32 max = hisi_get_tgtid_max(event);
+
+ if (tgtid_is_valid(max, min)) {
+ u32 val = (max << SLLC_TGTID_MAX_SHIFT) | (min << SLLC_TGTID_MIN_SHIFT);
+
+ writel(val, sllc_pmu->base + SLLC_TGTID_CTRL);
+ /* Enable the tgtid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_TGTID_EN | SLLC_FILT_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_clear_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 min = hisi_get_tgtid_min(event);
+ u32 max = hisi_get_tgtid_max(event);
+
+ if (tgtid_is_valid(max, min)) {
+ u32 val;
+
+ writel(SLLC_TGTID_NONE, sllc_pmu->base + SLLC_TGTID_CTRL);
+ /* Disable the tgtid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_TGTID_EN | SLLC_FILT_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_config_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val, msk;
+
+ msk = hisi_get_srcid_msk(event);
+ val = (cmd << SLLC_SRCID_CMD_SHIFT) | (msk << SLLC_SRCID_MSK_SHIFT);
+ writel(val, sllc_pmu->base + SLLC_SRCID_CTRL);
+ /* Enable the srcid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_SRCID_EN | SLLC_FILT_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_clear_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val;
+
+ writel(SLLC_SRCID_NONE, sllc_pmu->base + SLLC_SRCID_CTRL);
+ /* Disable the srcid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_SRCID_EN | SLLC_FILT_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_sllc_pmu_enable_tracetag(event);
+ hisi_sllc_pmu_config_srcid(event);
+ hisi_sllc_pmu_config_tgtid(event);
+ }
+}
+
+static void hisi_sllc_pmu_clear_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_sllc_pmu_disable_tracetag(event);
+ hisi_sllc_pmu_clear_srcid(event);
+ hisi_sllc_pmu_clear_tgtid(event);
+ }
+}
+
+static u32 hisi_sllc_pmu_get_counter_offset(int idx)
+{
+ return (SLLC_EVENT_CNT0_L + idx * 8);
+}
+
+static u64 hisi_sllc_pmu_read_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(sllc_pmu->base +
+ hisi_sllc_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_sllc_pmu_write_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val, sllc_pmu->base +
+ hisi_sllc_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_sllc_pmu_write_evtype(struct hisi_pmu *sllc_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+	/*
+	 * Select the appropriate event select register (SLLC_EVENT_TYPE0/1).
+	 * There are two event select registers for the eight hardware
+	 * counters; each event code is 8 bits, so counters 0-3 are
+	 * programmed via SLLC_EVENT_TYPE0 and counters 4-7 via
+	 * SLLC_EVENT_TYPE1 (e.g. idx = 6 selects SLLC_EVENT_TYPE1 with
+	 * shift = 16).
+	 */
+ reg = SLLC_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ /* Write event code to SLLC_EVENT_TYPEx Register */
+ val = readl(sllc_pmu->base + reg);
+ val &= ~(SLLC_EVTYPE_MASK << shift);
+ val |= (type << shift);
+ writel(val, sllc_pmu->base + reg);
+}
+
+static void hisi_sllc_pmu_start_counters(struct hisi_pmu *sllc_pmu)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_PERF_CTRL_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+}
+
+static void hisi_sllc_pmu_stop_counters(struct hisi_pmu *sllc_pmu)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_PERF_CTRL_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+}
+
+static void hisi_sllc_pmu_enable_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+}
+
+static void hisi_sllc_pmu_disable_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+}
+
+static void hisi_sllc_pmu_enable_counter_int(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_INT_MASK);
+ /* Write 0 to enable interrupt */
+ val &= ~(1 << hwc->idx);
+ writel(val, sllc_pmu->base + SLLC_INT_MASK);
+}
+
+static void hisi_sllc_pmu_disable_counter_int(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_INT_MASK);
+ /* Write 1 to mask interrupt */
+ val |= 1 << hwc->idx;
+ writel(val, sllc_pmu->base + SLLC_INT_MASK);
+}
+
+static u32 hisi_sllc_pmu_get_int_status(struct hisi_pmu *sllc_pmu)
+{
+ return readl(sllc_pmu->base + SLLC_INT_STATUS);
+}
+
+static void hisi_sllc_pmu_clear_int_status(struct hisi_pmu *sllc_pmu, int idx)
+{
+ writel(1 << idx, sllc_pmu->base + SLLC_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_sllc_pmu_acpi_match[] = {
+ { "HISI0263", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
+
+static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *sllc_pmu)
+{
+	/*
+	 * Use the SCCL_ID and the index ID to identify the SLLC PMU;
+	 * the SCCL_ID matches the SCCL field each CPU reports in MPIDR_EL1.
+	 */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &sllc_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &sllc_pmu->index_id)) {
+ dev_err(&pdev->dev, "Cannot read idx-id!\n");
+ return -EINVAL;
+ }
+
+	/* SLLC PMUs belong to a whole SCCL rather than a particular CCL */
+ sllc_pmu->ccl_id = -1;
+
+ sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sllc_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n");
+ return PTR_ERR(sllc_pmu->base);
+ }
+
+ sllc_pmu->identifier = readl(sllc_pmu->base + SLLC_VERSION);
+
+ return 0;
+}
+
+static struct attribute *hisi_sllc_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(tgtid_min, "config1:0-10"),
+ HISI_PMU_FORMAT_ATTR(tgtid_max, "config1:11-21"),
+ HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
+ HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
+ HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
+ NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_v2_format_group = {
+ .name = "format",
+ .attrs = hisi_sllc_pmu_v2_format_attr,
+};
+
+static struct attribute *hisi_sllc_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_req, 0x30),
+ HISI_PMU_EVENT_ATTR(rx_data, 0x31),
+ HISI_PMU_EVENT_ATTR(tx_req, 0x34),
+ HISI_PMU_EVENT_ATTR(tx_data, 0x35),
+ HISI_PMU_EVENT_ATTR(cycles, 0x09),
+ NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_sllc_pmu_v2_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
+ .attrs = hisi_sllc_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_sllc_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
+ &hisi_sllc_pmu_identifier_attr.attr,
+ NULL
+};
+
+static struct attribute_group hisi_sllc_pmu_identifier_group = {
+ .attrs = hisi_sllc_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
+ &hisi_sllc_pmu_v2_format_group,
+ &hisi_sllc_pmu_v2_events_group,
+ &hisi_sllc_pmu_cpumask_attr_group,
+ &hisi_sllc_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_sllc_ops = {
+ .write_evtype = hisi_sllc_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_sllc_pmu_start_counters,
+ .stop_counters = hisi_sllc_pmu_stop_counters,
+ .enable_counter = hisi_sllc_pmu_enable_counter,
+ .disable_counter = hisi_sllc_pmu_disable_counter,
+ .enable_counter_int = hisi_sllc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_sllc_pmu_disable_counter_int,
+ .write_counter = hisi_sllc_pmu_write_counter,
+ .read_counter = hisi_sllc_pmu_read_counter,
+ .get_int_status = hisi_sllc_pmu_get_int_status,
+ .clear_int_status = hisi_sllc_pmu_clear_int_status,
+ .enable_filter = hisi_sllc_pmu_enable_filter,
+ .disable_filter = hisi_sllc_pmu_clear_filter,
+};
+
+static int hisi_sllc_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *sllc_pmu)
+{
+ int ret;
+
+ ret = hisi_sllc_pmu_init_data(pdev, sllc_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_uncore_pmu_init_irq(sllc_pmu, pdev);
+ if (ret)
+ return ret;
+
+ sllc_pmu->pmu_events.attr_groups = hisi_sllc_pmu_v2_attr_groups;
+ sllc_pmu->ops = &hisi_uncore_sllc_ops;
+ sllc_pmu->check_event = SLLC_NR_EVENTS;
+ sllc_pmu->counter_bits = 64;
+ sllc_pmu->num_counters = 8;
+ sllc_pmu->dev = &pdev->dev;
+ sllc_pmu->on_cpu = -1;
+
+ return 0;
+}
+
+static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *sllc_pmu;
+ char *name;
+ int ret;
+
+ sllc_pmu = devm_kzalloc(&pdev->dev, sizeof(*sllc_pmu), GFP_KERNEL);
+ if (!sllc_pmu)
+ return -ENOMEM;
+
+ ret = hisi_sllc_pmu_dev_probe(pdev, sllc_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
+ sllc_pmu->sccl_id, sllc_pmu->index_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ &sllc_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ sllc_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = sllc_pmu->pmu_events.attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ &sllc_pmu->node);
+ irq_set_affinity_hint(sllc_pmu->irq, NULL);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, sllc_pmu);
+
+ return ret;
+}
+
+static int hisi_sllc_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&sllc_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ &sllc_pmu->node);
+ irq_set_affinity_hint(sllc_pmu->irq, NULL);
+
+ return 0;
+}
+
+static struct platform_driver hisi_sllc_pmu_driver = {
+ .driver = {
+ .name = "hisi_sllc_pmu",
+ .acpi_match_table = hisi_sllc_pmu_acpi_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_sllc_pmu_probe,
+ .remove = hisi_sllc_pmu_remove,
+};
+
+static int __init hisi_sllc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ "AP_PERF_ARM_HISI_SLLC_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("SLLC PMU: cpuhp state setup failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_sllc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
+
+ return ret;
+}
+module_init(hisi_sllc_pmu_module_init);
+
+static void __exit hisi_sllc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_sllc_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
+}
+module_exit(hisi_sllc_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
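As a usage note for the attribute groups above: user space opens these uncore events through perf_event_open(), placing the event code in config and the filters in config1 per the declared format fields. A hypothetical sketch (the dynamic PMU type must be read from /sys/bus/event_source/devices/<pmu>/type at runtime, and the instance name is illustrative):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Count SLLC rx_req (event 0x30): uncore events are system-wide,
 * so pid is -1 and a specific CPU is targeted.
 */
static int open_sllc_rx_req(int pmu_type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;	/* e.g. from .../hisi_sccl1_sllc0/type */
	attr.config = 0x30;	/* rx_req */

	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}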
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 8883af955a2a..fc54a80f9c5c 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -676,7 +676,7 @@ static ssize_t l2cache_pmu_event_show(struct device *dev,
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define L2CACHE_EVENT_ATTR(_name, _id) \
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index fb34b87b9471..bba078077c93 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -615,7 +615,7 @@ static ssize_t l3cache_pmu_format_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "%s\n", (char *) eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}
#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \
@@ -643,7 +643,7 @@ static ssize_t l3cache_pmu_event_show(struct device *dev,
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define L3CACHE_EVENT_ATTR(_name, _id) \
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index e116815fa809..06a6d569b0b5 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -128,7 +128,7 @@ __tx2_pmu_##_var##_show(struct device *dev, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
- return sprintf(page, _format "\n"); \
+ return sysfs_emit(page, _format "\n"); \
} \
\
static struct device_attribute format_attr_##_var = \
@@ -176,7 +176,7 @@ static ssize_t tx2_pmu_event_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
+ return sysfs_emit(buf, "event=0x%lx\n", (unsigned long) eattr->var);
}
#define TX2_EVENT_ATTR(name, config) \
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 44faa51ba799..ffe3bdeec845 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -170,7 +170,7 @@ static ssize_t xgene_pmu_format_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "%s\n", (char *) eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}
#define XGENE_PMU_FORMAT_ATTR(_name, _config) \
@@ -281,7 +281,7 @@ static ssize_t xgene_pmu_event_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+ return sysfs_emit(buf, "config=0x%lx\n", (unsigned long) eattr->var);
}
#define XGENE_PMU_EVENT_ATTR(_name, _config) \
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 68d9c2f6a5ca..54c1f2f0985f 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -71,6 +71,7 @@ source "drivers/phy/ingenic/Kconfig"
source "drivers/phy/lantiq/Kconfig"
source "drivers/phy/marvell/Kconfig"
source "drivers/phy/mediatek/Kconfig"
+source "drivers/phy/microchip/Kconfig"
source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 32261e164abd..adac1b1a39d1 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -20,6 +20,7 @@ obj-y += allwinner/ \
lantiq/ \
marvell/ \
mediatek/ \
+ microchip/ \
motorola/ \
mscc/ \
qualcomm/ \
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 09256339bd04..fd92b73b7109 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -94,7 +94,7 @@ config PHY_BRCM_USB
depends on ARCH_BCM4908 || ARCH_BRCMSTB || COMPILE_TEST
depends on OF
select GENERIC_PHY
- select SOC_BRCMSTB
+ select SOC_BRCMSTB if ARCH_BRCMSTB
default ARCH_BCM4908
default ARCH_BRCMSTB
help
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
index 432832bdbd16..a62910ff5591 100644
--- a/drivers/phy/cadence/Kconfig
+++ b/drivers/phy/cadence/Kconfig
@@ -7,6 +7,7 @@ config PHY_CADENCE_TORRENT
tristate "Cadence Torrent PHY driver"
depends on OF
depends on HAS_IOMEM
+ depends on COMMON_CLK
select GENERIC_PHY
help
Support for Cadence Torrent PHY.
@@ -24,6 +25,7 @@ config PHY_CADENCE_DPHY
config PHY_CADENCE_SIERRA
tristate "Cadence Sierra PHY Driver"
depends on OF && HAS_IOMEM && RESET_CONTROLLER
+ depends on COMMON_CLK
select GENERIC_PHY
help
Enable this to support the Cadence Sierra PHY driver
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 26a0badabe38..5c68e31c5939 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -7,6 +7,7 @@
*
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -20,10 +21,12 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
+#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
@@ -31,6 +34,9 @@
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+#define SIERRA_CMN_REFRCV_PREG 0x98
+#define SIERRA_CMN_REFRCV1_PREG 0xB8
+#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0x4000 << (block_offset)) + \
@@ -144,6 +150,19 @@
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100000
+#define CDNS_SIERRA_OUTPUT_CLOCKS 2
+#define CDNS_SIERRA_INPUT_CLOCKS 5
+enum cdns_sierra_clock_input {
+ PHY_CLK,
+ CMN_REFCLK_DIG_DIV,
+ CMN_REFCLK1_DIG_DIV,
+ PLL0_REFCLK,
+ PLL1_REFCLK,
+};
+
+#define SIERRA_NUM_CMN_PLLC 2
+#define SIERRA_NUM_CMN_PLLC_PARENTS 2
+
static const struct reg_field macro_id_type =
REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
static const struct reg_field phy_pll_cfg_1 =
@@ -151,6 +170,53 @@ static const struct reg_field phy_pll_cfg_1 =
static const struct reg_field pllctrl_lock =
REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
+static const char * const clk_names[] = {
+ [CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
+ [CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
+};
+
+enum cdns_sierra_cmn_plllc {
+ CMN_PLLLC,
+ CMN_PLLLC1,
+};
+
+struct cdns_sierra_pll_mux_reg_fields {
+ struct reg_field pfdclk_sel_preg;
+ struct reg_field plllc1en_field;
+ struct reg_field termen_field;
+};
+
+static const struct cdns_sierra_pll_mux_reg_fields cmn_plllc_pfdclk1_sel_preg[] = {
+ [CMN_PLLLC] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 0, 0),
+ },
+ [CMN_PLLLC1] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC1_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 0, 0),
+ },
+};
+
+struct cdns_sierra_pll_mux {
+ struct clk_hw hw;
+ struct regmap_field *pfdclk_sel_preg;
+ struct regmap_field *plllc1en_field;
+ struct regmap_field *termen_field;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_sierra_pll_mux(_hw) \
+ container_of(_hw, struct cdns_sierra_pll_mux, hw)
+
+static const int pll_mux_parent_index[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+ [CMN_PLLLC] = { PLL0_REFCLK, PLL1_REFCLK },
+ [CMN_PLLLC1] = { PLL1_REFCLK, PLL0_REFCLK },
+};
+
+static u32 cdns_sierra_pll_mux_table[] = { 0, 1 };
+
struct cdns_sierra_inst {
struct phy *phy;
u32 phy_type;
@@ -197,12 +263,15 @@ struct cdns_sierra_phy {
struct regmap_field *macro_id_type;
struct regmap_field *phy_pll_cfg_1;
struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
- struct clk *clk;
- struct clk *cmn_refclk_dig_div;
- struct clk *cmn_refclk1_dig_div;
+ struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
+ struct clk *input_clks[CDNS_SIERRA_INPUT_CLOCKS];
int nsubnodes;
u32 num_lanes;
bool autoconf;
+ struct clk_onecell_data clk_data;
+ struct clk *output_clks[CDNS_SIERRA_OUTPUT_CLOCKS];
};
static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
@@ -281,8 +350,8 @@ static int cdns_sierra_phy_init(struct phy *gphy)
if (phy->autoconf)
return 0;
- clk_set_rate(phy->cmn_refclk_dig_div, 25000000);
- clk_set_rate(phy->cmn_refclk1_dig_div, 25000000);
+ clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
if (ins->phy_type == PHY_TYPE_PCIE) {
num_cmn_regs = phy->init_data->pcie_cmn_regs;
num_ln_regs = phy->init_data->pcie_ln_regs;
@@ -319,6 +388,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
u32 val;
int ret;
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY out of reset\n");
+ return ret;
+ }
+
/* Take the PHY lane group out of reset */
ret = reset_control_deassert(ins->lnk_rst);
if (ret) {
@@ -358,6 +433,153 @@ static const struct phy_ops ops = {
.owner = THIS_MODULE,
};
+static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw)
+{
+ struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *field = mux->pfdclk_sel_preg;
+ unsigned int val;
+
+ regmap_field_read(field, &val);
+ return clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table, 0, val);
+}
+
+static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *plllc1en_field = mux->plllc1en_field;
+ struct regmap_field *termen_field = mux->termen_field;
+ struct regmap_field *field = mux->pfdclk_sel_preg;
+ int val, ret;
+
+ ret = regmap_field_write(plllc1en_field, 0);
+ ret |= regmap_field_write(termen_field, 0);
+ if (index == 1) {
+ ret |= regmap_field_write(plllc1en_field, 1);
+ ret |= regmap_field_write(termen_field, 1);
+ }
+
+ val = cdns_sierra_pll_mux_table[index];
+ ret |= regmap_field_write(field, val);
+
+ return ret;
+}
+
+static const struct clk_ops cdns_sierra_pll_mux_ops = {
+ .set_parent = cdns_sierra_pll_mux_set_parent,
+ .get_parent = cdns_sierra_pll_mux_get_parent,
+};
+
+static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
+ struct regmap_field *pfdclk1_sel_field,
+ struct regmap_field *plllc1en_field,
+ struct regmap_field *termen_field,
+ int clk_index)
+{
+ struct cdns_sierra_pll_mux *mux;
+ struct device *dev = sp->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ char clk_name[100];
+ struct clk *clk;
+ int i;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = SIERRA_NUM_CMN_PLLC_PARENTS;
+ parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++) {
+ clk = sp->input_clks[pll_mux_parent_index[clk_index][i]];
+		if (IS_ERR_OR_NULL(clk)) {
+			dev_err(dev, "No parent clock for PLL mux\n");
+			return clk ? PTR_ERR(clk) : -ENOENT;
+ }
+ parent_names[i] = __clk_get_name(clk);
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), clk_names[clk_index]);
+
+ init = &mux->clk_data;
+
+ init->ops = &cdns_sierra_pll_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->pfdclk_sel_preg = pfdclk1_sel_field;
+ mux->plllc1en_field = plllc1en_field;
+ mux->termen_field = termen_field;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ sp->output_clks[clk_index] = clk;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
+{
+ struct regmap_field *pfdclk1_sel_field;
+ struct regmap_field *plllc1en_field;
+ struct regmap_field *termen_field;
+ struct device *dev = sp->dev;
+ int ret = 0, i, clk_index;
+
+ clk_index = CDNS_SIERRA_PLL_CMNLC;
+ for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++, clk_index++) {
+ pfdclk1_sel_field = sp->cmn_plllc_pfdclk1_sel_preg[i];
+ plllc1en_field = sp->cmn_refrcv_refclk_plllc1en_preg[i];
+ termen_field = sp->cmn_refrcv_refclk_termen_preg[i];
+
+ ret = cdns_sierra_pll_mux_register(sp, pfdclk1_sel_field, plllc1en_field,
+ termen_field, clk_index);
+ if (ret) {
+ dev_err(dev, "Fail to register cmn plllc mux\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct device_node *node = dev->of_node;
+
+ of_clk_del_provider(node);
+}
+
+static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = cdns_sierra_phy_register_pll_mux(sp);
+ if (ret) {
+		dev_err(dev, "Failed to register pll mux clocks\n");
+ return ret;
+ }
+
+ sp->clk_data.clks = sp->output_clks;
+ sp->clk_data.clk_num = CDNS_SIERRA_OUTPUT_CLOCKS;
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &sp->clk_data);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+
+ return ret;
+}
+
static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
struct device_node *child)
{
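The two muxes registered above are exported through of_clk_add_provider() under the CDNS_SIERRA_PLL_CMNLC/CDNS_SIERRA_PLL_CMNLC1 indices from dt-bindings/phy/phy-cadence.h, so reparenting goes through the ordinary clk consumer API and lands in cdns_sierra_pll_mux_set_parent(). An illustrative consumer sketch (the clock handles would come from clk_get() against this provider):

#include <linux/clk.h>

/* Route pll1_refclk into the CMNLC mux; handles are illustrative. */
static int route_cmnlc_to_pll1(struct clk *pll_cmnlc, struct clk *pll1_refclk)
{
	return clk_set_parent(pll_cmnlc, pll1_refclk);
}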
@@ -396,6 +618,7 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
struct regmap_field *field;
+ struct reg_field reg_field;
struct regmap *regmap;
int i;
@@ -407,6 +630,32 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
}
sp->macro_id_type = field;
+ for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++) {
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].pfdclk_sel_preg;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PLLLC%d_PFDCLK1_SEL failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_plllc_pfdclk1_sel_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].plllc1en_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_PLLLC1EN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_plllc1en_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].termen_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_TERMEN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_termen_preg[i] = field;
+ }
+
regmap = sp->regmap_phy_config_ctrl;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
if (IS_ERR(field)) {
@@ -471,6 +720,110 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
return 0;
}
+static int cdns_sierra_phy_get_clocks(struct cdns_sierra_phy *sp,
+ struct device *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, "phy_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock phy_clk\n");
+ return PTR_ERR(clk);
+ }
+ sp->input_clks[PHY_CLK] = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK1_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll0_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll0_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL0_REFCLK] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll1_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll1_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL1_REFCLK] = clk;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_enable_clocks(struct cdns_sierra_phy *sp)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sp->input_clks[PHY_CLK]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+ if (ret)
+ goto err_pll_cmnlc;
+
+ ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+ if (ret)
+ goto err_pll_cmnlc1;
+
+ return 0;
+
+err_pll_cmnlc1:
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+
+err_pll_cmnlc:
+ clk_disable_unprepare(sp->input_clks[PHY_CLK]);
+
+ return ret;
+}
+
+static void cdns_sierra_phy_disable_clocks(struct cdns_sierra_phy *sp)
+{
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+ clk_disable_unprepare(sp->input_clks[PHY_CLK]);
+}
+
+static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
+ struct device *dev)
+{
+ struct reset_control *rst;
+
+ rst = devm_reset_control_get_exclusive(dev, "sierra_reset");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get reset\n");
+ return PTR_ERR(rst);
+ }
+ sp->phy_rst = rst;
+
+ rst = devm_reset_control_get_optional_exclusive(dev, "sierra_apb");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get apb reset\n");
+ return PTR_ERR(rst);
+ }
+ sp->apb_rst = rst;
+
+ return 0;
+}
+
static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
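A detail the clock helpers above rely on: devm_clk_get_optional() returns NULL rather than an error when the clock is not described, and the clk API accepts a NULL clk as a no-op, which is why cdns_sierra_phy_enable_clocks() can call clk_prepare_enable() unconditionally. Sketch of the contract:

#include <linux/clk.h>

/* clk may be NULL for an absent optional clock; clk_prepare_enable()
 * then returns 0 without touching any hardware.
 */
static int enable_optional_clk(struct clk *clk)
{
	return clk_prepare_enable(clk);
}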
@@ -481,7 +834,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
unsigned int id_value;
int i, ret, node = 0;
void __iomem *base;
- struct clk *clk;
struct device_node *dn = dev->of_node, *child;
if (of_get_child_count(dn) == 0)
@@ -518,43 +870,21 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sp);
- sp->clk = devm_clk_get_optional(dev, "phy_clk");
- if (IS_ERR(sp->clk)) {
- dev_err(dev, "failed to get clock phy_clk\n");
- return PTR_ERR(sp->clk);
- }
-
- sp->phy_rst = devm_reset_control_get(dev, "sierra_reset");
- if (IS_ERR(sp->phy_rst)) {
- dev_err(dev, "failed to get reset\n");
- return PTR_ERR(sp->phy_rst);
- }
-
- sp->apb_rst = devm_reset_control_get_optional(dev, "sierra_apb");
- if (IS_ERR(sp->apb_rst)) {
- dev_err(dev, "failed to get apb reset\n");
- return PTR_ERR(sp->apb_rst);
- }
-
- clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
- if (IS_ERR(clk)) {
- dev_err(dev, "cmn_refclk_dig_div clock not found\n");
- ret = PTR_ERR(clk);
+ ret = cdns_sierra_phy_get_clocks(sp, dev);
+ if (ret)
return ret;
- }
- sp->cmn_refclk_dig_div = clk;
- clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
- if (IS_ERR(clk)) {
- dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
- ret = PTR_ERR(clk);
+ ret = cdns_sierra_clk_register(sp);
+ if (ret)
return ret;
- }
- sp->cmn_refclk1_dig_div = clk;
- ret = clk_prepare_enable(sp->clk);
+ ret = cdns_sierra_phy_get_resets(sp, dev);
if (ret)
- return ret;
+ goto unregister_clk;
+
+ ret = cdns_sierra_phy_enable_clocks(sp);
+ if (ret)
+ goto unregister_clk;
/* Enable APB */
reset_control_deassert(sp->apb_rst);
@@ -571,6 +901,10 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
for_each_available_child_of_node(dn, child) {
struct phy *gphy;
+ if (!(of_node_name_eq(child, "phy") ||
+ of_node_name_eq(child, "link")))
+ continue;
+
sp->phys[node].lnk_rst =
of_reset_control_array_get_exclusive(child);
@@ -616,7 +950,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- reset_control_deassert(sp->phy_rst);
return PTR_ERR_OR_ZERO(phy_provider);
put_child:
@@ -626,8 +959,10 @@ put_child2:
reset_control_put(sp->phys[i].lnk_rst);
of_node_put(child);
clk_disable:
- clk_disable_unprepare(sp->clk);
+ cdns_sierra_phy_disable_clocks(sp);
reset_control_assert(sp->apb_rst);
+unregister_clk:
+ cdns_sierra_clk_unregister(sp);
return ret;
}
@@ -640,6 +975,7 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
reset_control_assert(phy->apb_rst);
pm_runtime_disable(&pdev->dev);
+ cdns_sierra_phy_disable_clocks(phy);
/*
* The device level resets will be put automatically.
* Need to put the subnode resets here though.
@@ -648,6 +984,9 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
reset_control_assert(phy->phys[i].lnk_rst);
reset_control_put(phy->phys[i].lnk_rst);
}
+
+ cdns_sierra_clk_unregister(phy);
+
return 0;
}
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 591a15834b48..0477e7beebbf 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -7,7 +7,9 @@
*/
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -84,6 +86,8 @@
#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U
#define CMN_CDIAG_CDB_PWRI_OVRD 0x0041U
#define CMN_CDIAG_XCVRC_PWRI_OVRD 0x0047U
+#define CMN_CDIAG_REFCLK_OVRD 0x004CU
+#define CMN_CDIAG_REFCLK_DRV0_CTRL 0x0050U
#define CMN_BGCAL_INIT_TMR 0x0064U
#define CMN_BGCAL_ITER_TMR 0x0065U
#define CMN_IBCAL_INIT_TMR 0x0074U
@@ -122,6 +126,8 @@
#define CMN_PLL1_FRACDIVH_M0 0x00D2U
#define CMN_PLL1_HIGH_THR_M0 0x00D3U
#define CMN_PLL1_DSM_DIAG_M0 0x00D4U
+#define CMN_PLL1_DSM_FBH_OVRD_M0 0x00D5U
+#define CMN_PLL1_DSM_FBL_OVRD_M0 0x00D6U
#define CMN_PLL1_SS_CTRL1_M0 0x00D8U
#define CMN_PLL1_SS_CTRL2_M0 0x00D9U
#define CMN_PLL1_SS_CTRL3_M0 0x00DAU
@@ -163,10 +169,12 @@
#define TX_TXCC_CPOST_MULT_00 0x004CU
#define TX_TXCC_CPOST_MULT_01 0x004DU
#define TX_TXCC_MGNFS_MULT_000 0x0050U
+#define TX_TXCC_MGNFS_MULT_100 0x0054U
#define DRV_DIAG_TX_DRV 0x00C6U
#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U
#define XCVR_DIAG_HSCLK_SEL 0x00E6U
#define XCVR_DIAG_HSCLK_DIV 0x00E7U
+#define XCVR_DIAG_RXCLK_CTRL 0x00E9U
#define XCVR_DIAG_BIDI_CTRL 0x00EAU
#define XCVR_DIAG_PSC_OVRD 0x00EBU
#define TX_PSC_A0 0x0100U
@@ -206,6 +214,7 @@
#define RX_DIAG_ACYA 0x01FFU
/* PHY PCS common registers */
+#define PHY_PIPE_CMN_CTRL1 0x0000U
#define PHY_PLL_CFG 0x000EU
#define PHY_PIPE_USB3_GEN2_PRE_CFG0 0x0020U
#define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U
@@ -216,6 +225,10 @@
#define PHY_PMA_CMN_CTRL2 0x0001U
#define PHY_PMA_PLL_RAW_CTRL 0x0003U
+static const char * const clk_names[] = {
+ [CDNS_TORRENT_REFCLK_DRIVER] = "refclk-driver",
+};
+
static const struct reg_field phy_pll_cfg =
REG_FIELD(PHY_PLL_CFG, 0, 1);
@@ -231,6 +244,26 @@ static const struct reg_field phy_pma_pll_raw_ctrl =
static const struct reg_field phy_reset_ctrl =
REG_FIELD(PHY_RESET, 8, 8);
+static const struct reg_field phy_pipe_cmn_ctrl1_0 = REG_FIELD(PHY_PIPE_CMN_CTRL1, 0, 0);
+
+#define REFCLK_OUT_NUM_CMN_CONFIG 5
+
+enum cdns_torrent_refclk_out_cmn {
+ CMN_CDIAG_REFCLK_OVRD_4,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_1,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_4,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_5,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_6,
+};
+
+static const struct reg_field refclk_out_cmn_cfg[] = {
+ [CMN_CDIAG_REFCLK_OVRD_4] = REG_FIELD(CMN_CDIAG_REFCLK_OVRD, 4, 4),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_1] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 1, 1),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_4] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 4, 4),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_5] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 5, 5),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_6] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 6, 6),
+};
+
enum cdns_torrent_phy_type {
TYPE_NONE,
TYPE_DP,
@@ -279,6 +312,8 @@ struct cdns_torrent_phy {
struct regmap_field *phy_pma_cmn_ctrl_2;
struct regmap_field *phy_pma_pll_raw_ctrl;
struct regmap_field *phy_reset_ctrl;
+ struct clk *clks[CDNS_TORRENT_REFCLK_DRIVER + 1];
+ struct clk_onecell_data clk_data;
};
enum phy_powerstate {
@@ -288,6 +323,16 @@ enum phy_powerstate {
POWERSTATE_A3 = 3,
};
+struct cdns_torrent_derived_refclk {
+ struct clk_hw hw;
+ struct regmap_field *phy_pipe_cmn_ctrl1_0;
+ struct regmap_field *cmn_fields[REFCLK_OUT_NUM_CMN_CONFIG];
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_torrent_derived_refclk(_hw) \
+ container_of(_hw, struct cdns_torrent_derived_refclk, hw)
+
static int cdns_torrent_phy_init(struct phy *phy);
static int cdns_torrent_dp_init(struct phy *phy);
static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
@@ -326,6 +371,19 @@ static const struct phy_ops cdns_torrent_phy_ops = {
.owner = THIS_MODULE,
};
+static int cdns_torrent_noop_phy_on(struct phy *phy)
+{
+	/* Allow 5 ms to 10 ms for the PIPE clock to stabilize */
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static const struct phy_ops noop_ops = {
+ .power_on = cdns_torrent_noop_phy_on,
+ .owner = THIS_MODULE,
+};
+
struct cdns_reg_pairs {
u32 val;
u32 off;
@@ -1604,6 +1662,108 @@ static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
return ret;
}
+static int cdns_torrent_derived_refclk_enable(struct clk_hw *hw)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_6], 0);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], 1);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_5], 1);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], 0);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_OVRD_4], 1);
+ regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 1);
+
+ return 0;
+}
+
+static void cdns_torrent_derived_refclk_disable(struct clk_hw *hw)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 0);
+}
+
+static int cdns_torrent_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+ int val;
+
+ regmap_field_read(derived_refclk->phy_pipe_cmn_ctrl1_0, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops cdns_torrent_derived_refclk_ops = {
+ .enable = cdns_torrent_derived_refclk_enable,
+ .disable = cdns_torrent_derived_refclk_disable,
+ .is_enabled = cdns_torrent_derived_refclk_is_enabled,
+};
+
+static int cdns_torrent_derived_refclk_register(struct cdns_torrent_phy *cdns_phy)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk;
+ struct device *dev = cdns_phy->dev;
+ struct regmap_field *field;
+ struct clk_init_data *init;
+ const char *parent_name;
+ struct regmap *regmap;
+ char clk_name[100];
+ struct clk *clk;
+ int i;
+
+ derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+ if (!derived_refclk)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_TORRENT_REFCLK_DRIVER]);
+
+ clk = devm_clk_get_optional(dev, "phy_en_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No parent clock for derived_refclk\n");
+ return PTR_ERR(clk);
+ }
+
+ init = &derived_refclk->clk_data;
+
+ if (clk) {
+ parent_name = __clk_get_name(clk);
+ init->parent_names = &parent_name;
+ init->num_parents = 1;
+ }
+ init->ops = &cdns_torrent_derived_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
+
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pipe_cmn_ctrl1_0);
+ if (IS_ERR(field)) {
+ dev_err(dev, "phy_pipe_cmn_ctrl1_0 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->phy_pipe_cmn_ctrl1_0 = field;
+
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < REFCLK_OUT_NUM_CMN_CONFIG; i++) {
+ field = devm_regmap_field_alloc(dev, regmap, refclk_out_cmn_cfg[i]);
+ if (IS_ERR(field)) {
+ dev_err(dev, "CMN reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_fields[i] = field;
+ }
+
+ derived_refclk->hw.init = init;
+
+ clk = devm_clk_register(dev, &derived_refclk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ cdns_phy->clks[CDNS_TORRENT_REFCLK_DRIVER] = clk;
+
+ return 0;
+}
+
static int cdns_torrent_phy_on(struct phy *phy)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
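The derived refclk registered above is a pure gate: enable primes the CMN_CDIAG_REFCLK* overrides and then sets PHY_PIPE_CMN_CTRL1 bit 0, disable clears that bit, and is_enabled() reads it back, so the clk framework's view always mirrors the register. A small sketch of querying it (handle acquisition is illustrative):

#include <linux/clk-provider.h>

/* Backed by cdns_torrent_derived_refclk_is_enabled() above */
static bool refclk_out_active(struct clk *derived_refclk)
{
	return __clk_is_enabled(derived_refclk);
}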
@@ -2071,6 +2231,85 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
return 0;
}
+static void cdns_torrent_clk_cleanup(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+
+ of_clk_del_provider(dev->of_node);
+}
+
+static int cdns_torrent_clk_register(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = cdns_torrent_derived_refclk_register(cdns_phy);
+ if (ret) {
+ dev_err(dev, "failed to register derived refclk\n");
+ return ret;
+ }
+
+ cdns_phy->clk_data.clks = cdns_phy->clks;
+ cdns_phy->clk_data.clk_num = CDNS_TORRENT_REFCLK_DRIVER + 1;
+
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &cdns_phy->clk_data);
+ if (ret) {
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+
+ cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
+ if (IS_ERR(cdns_phy->phy_rst)) {
+ dev_err(dev, "%s: failed to get reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->phy_rst);
+ }
+
+ cdns_phy->apb_rst = devm_reset_control_get_optional_exclusive(dev, "torrent_apb");
+ if (IS_ERR(cdns_phy->apb_rst)) {
+ dev_err(dev, "%s: failed to get apb reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->apb_rst);
+ }
+
+ return 0;
+}
+
+static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ int ret;
+
+ cdns_phy->clk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(cdns_phy->clk)) {
+ dev_err(dev, "phy ref clock not found\n");
+ return PTR_ERR(cdns_phy->clk);
+ }
+
+ ret = clk_prepare_enable(cdns_phy->clk);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ return ret;
+ }
+
+ cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
+ if (!(cdns_phy->ref_clk_rate)) {
+ dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+ clk_disable_unprepare(cdns_phy->clk);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int cdns_torrent_phy_probe(struct platform_device *pdev)
{
struct cdns_torrent_phy *cdns_phy;
@@ -2080,6 +2319,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
struct device_node *child;
int ret, subnodes, node = 0, i;
u32 total_num_lanes = 0;
+	unsigned int already_configured;
u8 init_dp_regmap = 0;
u32 phy_type;
@@ -2096,26 +2336,6 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
cdns_phy->dev = dev;
cdns_phy->init_data = data;
- cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
- if (IS_ERR(cdns_phy->phy_rst)) {
- dev_err(dev, "%s: failed to get reset\n",
- dev->of_node->full_name);
- return PTR_ERR(cdns_phy->phy_rst);
- }
-
- cdns_phy->apb_rst = devm_reset_control_get_optional(dev, "torrent_apb");
- if (IS_ERR(cdns_phy->apb_rst)) {
- dev_err(dev, "%s: failed to get apb reset\n",
- dev->of_node->full_name);
- return PTR_ERR(cdns_phy->apb_rst);
- }
-
- cdns_phy->clk = devm_clk_get(dev, "refclk");
- if (IS_ERR(cdns_phy->clk)) {
- dev_err(dev, "phy ref clock not found\n");
- return PTR_ERR(cdns_phy->clk);
- }
-
cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cdns_phy->sd_base))
return PTR_ERR(cdns_phy->sd_base);
@@ -2134,21 +2354,24 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(cdns_phy->clk);
- if (ret) {
- dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ ret = cdns_torrent_clk_register(cdns_phy);
+ if (ret)
return ret;
- }
- cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
- if (!(cdns_phy->ref_clk_rate)) {
- dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
- clk_disable_unprepare(cdns_phy->clk);
- return -EINVAL;
- }
+ regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured);
- /* Enable APB */
- reset_control_deassert(cdns_phy->apb_rst);
+ if (!already_configured) {
+ ret = cdns_torrent_reset(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
+
+ ret = cdns_torrent_clk(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
+
+ /* Enable APB */
+ reset_control_deassert(cdns_phy->apb_rst);
+ }
for_each_available_child_of_node(dev->of_node, child) {
struct phy *gphy;
@@ -2218,7 +2441,10 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
of_property_read_u32(child, "cdns,ssc-mode",
&cdns_phy->phys[node].ssc_mode);
- gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+ if (!already_configured)
+ gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+ else
+ gphy = devm_phy_create(dev, child, &noop_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
goto put_child;
@@ -2302,7 +2528,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
goto put_lnk_rst;
}
- if (cdns_phy->nsubnodes > 1) {
+ if (cdns_phy->nsubnodes > 1 && !already_configured) {
ret = cdns_torrent_phy_configure_multilink(cdns_phy);
if (ret)
goto put_lnk_rst;
@@ -2324,6 +2550,8 @@ put_lnk_rst:
of_node_put(child);
reset_control_assert(cdns_phy->apb_rst);
clk_disable_unprepare(cdns_phy->clk);
+clk_cleanup:
+ cdns_torrent_clk_cleanup(cdns_phy);
return ret;
}
@@ -2340,6 +2568,7 @@ static int cdns_torrent_phy_remove(struct platform_device *pdev)
}
clk_disable_unprepare(cdns_phy->clk);
+ cdns_torrent_clk_cleanup(cdns_phy);
return 0;
}
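A reading aid for the register-table churn that follows: cdns_reg_pairs entries are ordered {value, register offset}, per the struct defined earlier in this file, and the new sl_-prefixed tables appear to carry the single-link variants split out from the shared multi-link ones. For example:

/* Programs CMN_TXPUCAL_TUNE with 0x007F ({.val, .off} ordering) */
static struct cdns_reg_pairs example_regs[] = {
	{0x007F, CMN_TXPUCAL_TUNE},
};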
@@ -2455,8 +2684,6 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2464,7 +2691,9 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
- {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
@@ -2507,13 +2736,28 @@ static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
};
/* USB 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};
+static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_usb_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
{0x02FF, TX_PSC_A0},
{0x06AF, TX_PSC_A1},
@@ -2645,12 +2889,22 @@ static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
};
/* SGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
- {0x3700, CMN_DIAG_BIAS_OVRD1},
- {0x0008, CMN_TXPUCAL_TUNE},
- {0x0008, CMN_TXPDCAL_TUNE}
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_sgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
@@ -2661,6 +2915,15 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00B3, DRV_DIAG_TX_DRV}
};
+static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x00B3, DRV_DIAG_TX_DRV},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
@@ -2689,6 +2952,11 @@ static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
};
+static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs),
+};
+
static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
@@ -2736,17 +3004,14 @@ static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
- {0x3700, CMN_DIAG_BIAS_OVRD1},
- {0x0008, CMN_TXPUCAL_TUNE},
- {0x0008, CMN_TXPDCAL_TUNE}
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
@@ -2755,19 +3020,43 @@ static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
};
/* QSGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
+static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
{0x0003, DRV_DIAG_TX_DRV}
};
+static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
+ {0x0003, DRV_DIAG_TX_DRV},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
@@ -2796,6 +3085,11 @@ static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
};
+static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs),
+};
+
static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
@@ -2843,14 +3137,14 @@ static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
- {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
@@ -2922,8 +3216,6 @@ static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2979,8 +3271,6 @@ static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2996,8 +3286,9 @@ static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
/* PCIe, 100 MHz Ref clk, no SSC & external SSC */
static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}
};
static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
@@ -3198,8 +3489,8 @@ static const struct cdns_torrent_data cdns_map_torrent = {
.cmn_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
[INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
},
[TYPE_SGMII] = {
@@ -3220,7 +3511,7 @@ static const struct cdns_torrent_data cdns_map_torrent = {
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
},
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
@@ -3235,7 +3526,7 @@ static const struct cdns_torrent_data cdns_map_torrent = {
},
[TYPE_QSGMII] = {
[TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
},
[TYPE_PCIE] = {
[NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
@@ -3250,8 +3541,8 @@ static const struct cdns_torrent_data cdns_map_torrent = {
},
[TYPE_USB] = {
[TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
[TYPE_PCIE] = {
@@ -3260,13 +3551,13 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
},
[TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
[TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
},
@@ -3607,8 +3898,8 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
.cmn_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
[INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
},
[TYPE_SGMII] = {
@@ -3629,7 +3920,7 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
},
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
@@ -3644,7 +3935,7 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
},
[TYPE_QSGMII] = {
[TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
},
[TYPE_PCIE] = {
[NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
@@ -3659,8 +3950,8 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
},
[TYPE_USB] = {
[TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
[TYPE_PCIE] = {
@@ -3669,13 +3960,13 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
},
[TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
[TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
},
@@ -3705,32 +3996,32 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
},
[TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
},
[TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
},
},
[TYPE_QSGMII] = {
[TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
},
[TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
},
[TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
},
},
[TYPE_USB] = {
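A note on the table reshuffle above: the cmn_vals (and lane) tables are indexed by the subnode's own PHY type, the PHY type of the other subnode sharing the link, and the SSC mode, which is why single-link entries now point at dedicated sl_* tables while multilink combinations keep the shared ones. A minimal sketch of the lookup, with names assumed from the driver:

/* Hypothetical illustration of the three-level lookup; phy_type, phy_t2
 * and ssc are assumed to describe the subnode being configured.
 */
struct cdns_torrent_vals *cmn_vals;

if (cdns_phy->nsubnodes == 1)	/* single link: neighbour is TYPE_NONE */
	cmn_vals = cdns_phy->init_data->cmn_vals[phy_type][TYPE_NONE][ssc];
else
	cmn_vals = cdns_phy->init_data->cmn_vals[phy_type][phy_t2][ssc];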
diff --git a/drivers/phy/hisilicon/phy-hi6220-usb.c b/drivers/phy/hisilicon/phy-hi6220-usb.c
index be05292df8b8..e92ba78da4c8 100644
--- a/drivers/phy/hisilicon/phy-hi6220-usb.c
+++ b/drivers/phy/hisilicon/phy-hi6220-usb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2015 Linaro Ltd.
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 HiSilicon Limited.
*/
#include <linux/mfd/syscon.h>
diff --git a/drivers/phy/hisilicon/phy-hix5hd2-sata.c b/drivers/phy/hisilicon/phy-hix5hd2-sata.c
index c67b78cd2602..b0f99a9ac857 100644
--- a/drivers/phy/hisilicon/phy-hix5hd2-sata.c
+++ b/drivers/phy/hisilicon/phy-hix5hd2-sata.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
+ * Copyright (c) 2014 HiSilicon Limited.
*/
#include <linux/delay.h>
diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
index ea127b177f46..28c28d816484 100644
--- a/drivers/phy/ingenic/phy-ingenic-usb.c
+++ b/drivers/phy/ingenic/phy-ingenic-usb.c
@@ -352,8 +352,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
}
priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
- if (IS_ERR(priv))
- return PTR_ERR(priv);
+ if (IS_ERR(priv->phy))
+ return PTR_ERR(priv->phy);
phy_set_drvdata(priv->phy, priv);
diff --git a/drivers/phy/intel/phy-intel-lgm-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c
index 360b1eb2ebd6..157683d10367 100644
--- a/drivers/phy/intel/phy-intel-lgm-combo.c
+++ b/drivers/phy/intel/phy-intel-lgm-combo.c
@@ -462,7 +462,7 @@ static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
/*
* syscfg and hsiocfg variables stores the handle of the registers set
- * in which ComboPhy subsytem specific registers are subset. Using
+ * in which ComboPhy subsystem specific registers are subset. Using
* Register map framework to access the registers set.
*/
ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 6c96f2bf5266..bdb87c976243 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -3,8 +3,8 @@
# Phy drivers for Marvell platforms
#
config ARMADA375_USBCLUSTER_PHY
- def_bool y
- depends on MACH_ARMADA_375 || COMPILE_TEST
+ bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
+ default y if MACH_ARMADA_375
depends on OF && HAS_IOMEM
select GENERIC_PHY
@@ -67,6 +67,14 @@ config PHY_MVEBU_CP110_COMPHY
lanes can be used by various controllers (Ethernet, sata, usb,
PCIe...).
+config PHY_MVEBU_CP110_UTMI
+ tristate "Marvell CP110 UTMI driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF && USB_COMMON
+ select GENERIC_PHY
+ help
+	  Enable this to support the Marvell CP110 UTMI PHY driver.
+
config PHY_MVEBU_SATA
def_bool y
depends on ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 7f296ef02829..90862c4daa26 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY) += phy-mvebu-a3700-comphy.o
obj-$(CONFIG_PHY_MVEBU_A3700_UTMI) += phy-mvebu-a3700-utmi.o
obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY) += phy-armada38x-comphy.o
obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o
+obj-$(CONFIG_PHY_MVEBU_CP110_UTMI) += phy-mvebu-cp110-utmi.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
new file mode 100644
index 000000000000..08d178a4dc13
--- /dev/null
+++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ * Authors:
+ * Konstantin Porotchkin <kostap@marvell.com>
+ *
+ * Marvell CP110 UTMI PHY driver
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+
+#define UTMI_PHY_PORTS 2
+
+/* CP110 UTMI register macro definitions */
+#define SYSCON_USB_CFG_REG 0x420
+#define USB_CFG_DEVICE_EN_MASK BIT(0)
+#define USB_CFG_DEVICE_MUX_OFFSET 1
+#define USB_CFG_DEVICE_MUX_MASK BIT(1)
+#define USB_CFG_PLL_MASK BIT(25)
+
+#define SYSCON_UTMI_CFG_REG(id) (0x440 + (id) * 4)
+#define UTMI_PHY_CFG_PU_MASK BIT(5)
+
+#define UTMI_PLL_CTRL_REG 0x0
+#define PLL_REFDIV_OFFSET 0
+#define PLL_REFDIV_MASK GENMASK(6, 0)
+#define PLL_REFDIV_VAL 0x5
+#define PLL_FBDIV_OFFSET 16
+#define PLL_FBDIV_MASK GENMASK(24, 16)
+#define PLL_FBDIV_VAL 0x60
+#define PLL_SEL_LPFR_MASK GENMASK(29, 28)
+#define PLL_RDY BIT(31)
+#define UTMI_CAL_CTRL_REG 0x8
+#define IMPCAL_VTH_OFFSET 8
+#define IMPCAL_VTH_MASK GENMASK(10, 8)
+#define IMPCAL_VTH_VAL 0x7
+#define IMPCAL_DONE BIT(23)
+#define PLLCAL_DONE BIT(31)
+#define UTMI_TX_CH_CTRL_REG 0xC
+#define DRV_EN_LS_OFFSET 12
+#define DRV_EN_LS_MASK GENMASK(15, 12)
+#define IMP_SEL_LS_OFFSET 16
+#define IMP_SEL_LS_MASK GENMASK(19, 16)
+#define TX_AMP_OFFSET 20
+#define TX_AMP_MASK GENMASK(22, 20)
+#define TX_AMP_VAL 0x4
+#define UTMI_RX_CH_CTRL0_REG 0x14
+#define SQ_DET_EN BIT(15)
+#define SQ_ANA_DTC_SEL BIT(28)
+#define UTMI_RX_CH_CTRL1_REG 0x18
+#define SQ_AMP_CAL_OFFSET 0
+#define SQ_AMP_CAL_MASK GENMASK(2, 0)
+#define SQ_AMP_CAL_VAL 1
+#define SQ_AMP_CAL_EN BIT(3)
+#define UTMI_CTRL_STATUS0_REG 0x24
+#define SUSPENDM BIT(22)
+#define TEST_SEL BIT(25)
+#define UTMI_CHGDTC_CTRL_REG 0x38
+#define VDAT_OFFSET 8
+#define VDAT_MASK GENMASK(9, 8)
+#define VDAT_VAL 1
+#define VSRC_OFFSET 10
+#define VSRC_MASK GENMASK(11, 10)
+#define VSRC_VAL 1
+
+#define PLL_LOCK_DELAY_US 10000
+#define PLL_LOCK_TIMEOUT_US 1000000
+
+#define PORT_REGS(p) ((p)->priv->regs + (p)->id * 0x1000)
+
+/**
+ * struct mvebu_cp110_utmi - PHY driver data
+ *
+ * @regs: PHY registers
+ * @syscon: Regmap with system controller registers
+ * @dev: device driver handle
+ * @ops: PHY operations
+ */
+struct mvebu_cp110_utmi {
+ void __iomem *regs;
+ struct regmap *syscon;
+ struct device *dev;
+ const struct phy_ops *ops;
+};
+
+/**
+ * struct mvebu_cp110_utmi_port - PHY port data
+ *
+ * @priv: PHY driver data
+ * @id: PHY port ID
+ * @dr_mode: PHY connection: USB_DR_MODE_HOST or USB_DR_MODE_PERIPHERAL
+ */
+struct mvebu_cp110_utmi_port {
+ struct mvebu_cp110_utmi *priv;
+ u32 id;
+ enum usb_dr_mode dr_mode;
+};
+
+static void mvebu_cp110_utmi_port_setup(struct mvebu_cp110_utmi_port *port)
+{
+ u32 reg;
+
+ /*
+ * Setup PLL.
+ * The reference clock is the frequency of quartz resonator
+ * connected to pins REFCLK_XIN and REFCLK_XOUT of the SoC.
+	 * The register init values match the 40 MHz default clock,
+	 * but the crystal used on all platform boards is 25 MHz.
+ * See the functional specification for details.
+ */
+ reg = readl(PORT_REGS(port) + UTMI_PLL_CTRL_REG);
+ reg &= ~(PLL_REFDIV_MASK | PLL_FBDIV_MASK | PLL_SEL_LPFR_MASK);
+ reg |= (PLL_REFDIV_VAL << PLL_REFDIV_OFFSET) |
+ (PLL_FBDIV_VAL << PLL_FBDIV_OFFSET);
+ writel(reg, PORT_REGS(port) + UTMI_PLL_CTRL_REG);
+
+ /* Impedance Calibration Threshold Setting */
+ reg = readl(PORT_REGS(port) + UTMI_CAL_CTRL_REG);
+ reg &= ~IMPCAL_VTH_MASK;
+ reg |= IMPCAL_VTH_VAL << IMPCAL_VTH_OFFSET;
+ writel(reg, PORT_REGS(port) + UTMI_CAL_CTRL_REG);
+
+ /* Set LS TX driver strength coarse control */
+ reg = readl(PORT_REGS(port) + UTMI_TX_CH_CTRL_REG);
+ reg &= ~TX_AMP_MASK;
+ reg |= TX_AMP_VAL << TX_AMP_OFFSET;
+ writel(reg, PORT_REGS(port) + UTMI_TX_CH_CTRL_REG);
+
+ /* Disable SQ and enable analog squelch detect */
+ reg = readl(PORT_REGS(port) + UTMI_RX_CH_CTRL0_REG);
+ reg &= ~SQ_DET_EN;
+ reg |= SQ_ANA_DTC_SEL;
+ writel(reg, PORT_REGS(port) + UTMI_RX_CH_CTRL0_REG);
+
+ /*
+ * Set External squelch calibration number and
+ * enable the External squelch calibration
+ */
+ reg = readl(PORT_REGS(port) + UTMI_RX_CH_CTRL1_REG);
+ reg &= ~SQ_AMP_CAL_MASK;
+ reg |= (SQ_AMP_CAL_VAL << SQ_AMP_CAL_OFFSET) | SQ_AMP_CAL_EN;
+ writel(reg, PORT_REGS(port) + UTMI_RX_CH_CTRL1_REG);
+
+ /*
+ * Set Control VDAT Reference Voltage - 0.325V and
+ * Control VSRC Reference Voltage - 0.6V
+ */
+ reg = readl(PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG);
+ reg &= ~(VDAT_MASK | VSRC_MASK);
+ reg |= (VDAT_VAL << VDAT_OFFSET) | (VSRC_VAL << VSRC_OFFSET);
+ writel(reg, PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG);
+}
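As a worked check of the divider values programmed above, assuming the 25 MHz crystal noted in the comment: 25 MHz / PLL_REFDIV_VAL (5) * PLL_FBDIV_VAL (0x60 = 96) gives 480 MHz, the USB 2.0 high-speed bit rate. A hypothetical helper expressing the same arithmetic:

/* Sketch only: PLL output rate implied by the dividers above.
 * 25000000 / 5 * 96 = 480000000 (480 MHz).
 */
static unsigned long utmi_pll_rate_hz(unsigned long refclk_hz)
{
	return refclk_hz / PLL_REFDIV_VAL * PLL_FBDIV_VAL;
}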
+
+static int mvebu_cp110_utmi_phy_power_off(struct phy *phy)
+{
+ struct mvebu_cp110_utmi_port *port = phy_get_drvdata(phy);
+ struct mvebu_cp110_utmi *utmi = port->priv;
+ int i;
+
+ /* Power down UTMI PHY port */
+ regmap_clear_bits(utmi->syscon, SYSCON_UTMI_CFG_REG(port->id),
+ UTMI_PHY_CFG_PU_MASK);
+
+ for (i = 0; i < UTMI_PHY_PORTS; i++) {
+ int test = regmap_test_bits(utmi->syscon,
+ SYSCON_UTMI_CFG_REG(i),
+ UTMI_PHY_CFG_PU_MASK);
+ /* skip PLL shutdown if there are active UTMI PHY ports */
+ if (test != 0)
+ return 0;
+ }
+
+ /* PLL Power down if all UTMI PHYs are down */
+ regmap_clear_bits(utmi->syscon, SYSCON_USB_CFG_REG, USB_CFG_PLL_MASK);
+
+ return 0;
+}
+
+static int mvebu_cp110_utmi_phy_power_on(struct phy *phy)
+{
+ struct mvebu_cp110_utmi_port *port = phy_get_drvdata(phy);
+ struct mvebu_cp110_utmi *utmi = port->priv;
+ struct device *dev = &phy->dev;
+ int ret;
+ u32 reg;
+
+ /* It is necessary to power off UTMI before configuration */
+ ret = mvebu_cp110_utmi_phy_power_off(phy);
+ if (ret) {
+ dev_err(dev, "UTMI power OFF before power ON failed\n");
+ return ret;
+ }
+
+ /*
+ * If UTMI port is connected to USB Device controller,
+ * configure the USB MUX prior to UTMI PHY initialization.
+ * The single USB device controller can be connected
+ * to UTMI0 or to UTMI1 PHY port, but not to both.
+ */
+ if (port->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ regmap_update_bits(utmi->syscon, SYSCON_USB_CFG_REG,
+ USB_CFG_DEVICE_EN_MASK | USB_CFG_DEVICE_MUX_MASK,
+ USB_CFG_DEVICE_EN_MASK |
+ (port->id << USB_CFG_DEVICE_MUX_OFFSET));
+ }
+
+ /* Set Test suspendm mode and enable Test UTMI select */
+ reg = readl(PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+ reg |= SUSPENDM | TEST_SEL;
+ writel(reg, PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+
+ /* Wait for UTMI power down */
+ mdelay(1);
+
+ /* PHY port setup first */
+ mvebu_cp110_utmi_port_setup(port);
+
+ /* Power UP UTMI PHY */
+ regmap_set_bits(utmi->syscon, SYSCON_UTMI_CFG_REG(port->id),
+ UTMI_PHY_CFG_PU_MASK);
+
+ /* Disable Test UTMI select */
+ reg = readl(PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+ reg &= ~TEST_SEL;
+ writel(reg, PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+
+ /* Wait for impedance calibration */
+ ret = readl_poll_timeout(PORT_REGS(port) + UTMI_CAL_CTRL_REG, reg,
+ reg & IMPCAL_DONE,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Failed to end UTMI impedance calibration\n");
+ return ret;
+ }
+
+ /* Wait for PLL calibration */
+ ret = readl_poll_timeout(PORT_REGS(port) + UTMI_CAL_CTRL_REG, reg,
+ reg & PLLCAL_DONE,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Failed to end UTMI PLL calibration\n");
+ return ret;
+ }
+
+ /* Wait for PLL ready */
+ ret = readl_poll_timeout(PORT_REGS(port) + UTMI_PLL_CTRL_REG, reg,
+ reg & PLL_RDY,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "PLL is not ready\n");
+ return ret;
+ }
+
+ /* PLL Power up */
+ regmap_set_bits(utmi->syscon, SYSCON_USB_CFG_REG, USB_CFG_PLL_MASK);
+
+ return 0;
+}
+
+static const struct phy_ops mvebu_cp110_utmi_phy_ops = {
+ .power_on = mvebu_cp110_utmi_phy_power_on,
+ .power_off = mvebu_cp110_utmi_phy_power_off,
+ .owner = THIS_MODULE,
+};
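Since only power_on/power_off are implemented, a consumer such as the USB host glue drives this PHY through the generic PHY framework; a minimal consumer-side sketch, where the function name and the "usb" con_id are purely illustrative:

/* Hypothetical consumer probe fragment; devm_phy_get() and
 * phy_power_on() are the generic PHY framework calls that land in the
 * ops table above.
 */
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_usb_glue_probe(struct platform_device *pdev)
{
	struct phy *utmi_phy;
	int ret;

	utmi_phy = devm_phy_get(&pdev->dev, "usb"); /* con_id illustrative */
	if (IS_ERR(utmi_phy))
		return PTR_ERR(utmi_phy);

	ret = phy_power_on(utmi_phy); /* -> mvebu_cp110_utmi_phy_power_on() */
	if (ret)
		return ret;

	return 0;
}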
+
+static const struct of_device_id mvebu_cp110_utmi_of_match[] = {
+ { .compatible = "marvell,cp110-utmi-phy" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_cp110_utmi_of_match);
+
+static int mvebu_cp110_utmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mvebu_cp110_utmi *utmi;
+ struct phy_provider *provider;
+ struct device_node *child;
+ u32 usb_devices = 0;
+
+ utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
+ if (!utmi)
+ return -ENOMEM;
+
+ utmi->dev = dev;
+
+ /* Get system controller region */
+ utmi->syscon = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "marvell,system-controller");
+ if (IS_ERR(utmi->syscon)) {
+ dev_err(dev, "Missing UTMI system controller\n");
+ return PTR_ERR(utmi->syscon);
+ }
+
+ /* Get UTMI memory region */
+ utmi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(utmi->regs))
+ return PTR_ERR(utmi->regs);
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ struct mvebu_cp110_utmi_port *port;
+ struct phy *phy;
+ int ret;
+ u32 port_id;
+
+ ret = of_property_read_u32(child, "reg", &port_id);
+ if ((ret < 0) || (port_id >= UTMI_PHY_PORTS)) {
+ dev_err(dev,
+ "invalid 'reg' property on child %pOF\n",
+ child);
+ continue;
+ }
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ port->dr_mode = of_usb_get_dr_mode_by_phy(child, -1);
+ if ((port->dr_mode != USB_DR_MODE_HOST) &&
+ (port->dr_mode != USB_DR_MODE_PERIPHERAL)) {
+ dev_err(&pdev->dev,
+				"Missing dual-role setting for port%d, defaulting to HOST mode\n",
+ port_id);
+ port->dr_mode = USB_DR_MODE_HOST;
+ }
+
+ if (port->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ usb_devices++;
+ if (usb_devices > 1) {
+ dev_err(dev,
+					"Only one USB device port is allowed; port%d will use HOST mode\n",
+ port_id);
+ port->dr_mode = USB_DR_MODE_HOST;
+ }
+ }
+
+		/* Assign the PHY operations */
+ utmi->ops = &mvebu_cp110_utmi_phy_ops;
+
+ /* Instantiate the PHY */
+ phy = devm_phy_create(dev, child, utmi->ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Failed to create the UTMI PHY\n");
+ of_node_put(child);
+ return PTR_ERR(phy);
+ }
+
+ port->priv = utmi;
+ port->id = port_id;
+ phy_set_drvdata(phy, port);
+
+ /* Ensure the PHY is powered off */
+ mvebu_cp110_utmi_phy_power_off(phy);
+ }
+
+ dev_set_drvdata(dev, utmi);
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static struct platform_driver mvebu_cp110_utmi_driver = {
+ .probe = mvebu_cp110_utmi_phy_probe,
+ .driver = {
+ .name = "mvebu-cp110-utmi-phy",
+ .of_match_table = mvebu_cp110_utmi_of_match,
+ },
+};
+module_platform_driver(mvebu_cp110_utmi_driver);
+
+MODULE_AUTHOR("Konstantin Porotchkin <kostap@marvell.com>");
+MODULE_DESCRIPTION("Marvell Armada CP110 UTMI PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/microchip/Kconfig b/drivers/phy/microchip/Kconfig
new file mode 100644
index 000000000000..3728a284bf64
--- /dev/null
+++ b/drivers/phy/microchip/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phy drivers for Microchip devices
+#
+
+config PHY_SPARX5_SERDES
+ tristate "Microchip Sparx5 SerDes PHY driver"
+ select GENERIC_PHY
+ depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Enable this for support of the 10G/25G SerDes on Microchip Sparx5.
diff --git a/drivers/phy/microchip/Makefile b/drivers/phy/microchip/Makefile
new file mode 100644
index 000000000000..7b98345712aa
--- /dev/null
+++ b/drivers/phy/microchip/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip phy drivers.
+#
+
+obj-$(CONFIG_PHY_SPARX5_SERDES) += sparx5_serdes.o
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
new file mode 100644
index 000000000000..c8a7d0927ced
--- /dev/null
+++ b/drivers/phy/microchip/sparx5_serdes.c
@@ -0,0 +1,2513 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Microchip Sparx5 Switch SerDes driver
+ *
+ * Copyright (c) 2020 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ * and the datasheet is available here:
+ * https://ww1.microchip.com/downloads/en/DeviceDoc/SparX-5_Family_L2L3_Enterprise_10G_Ethernet_Switches_Datasheet_00003822B.pdf
+ */
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+
+#include "sparx5_serdes.h"
+
+#define SPX5_CMU_MAX 14
+
+#define SPX5_SERDES_10G_START 13
+#define SPX5_SERDES_25G_START 25
+
+enum sparx5_10g28cmu_mode {
+ SPX5_SD10G28_CMU_MAIN = 0,
+ SPX5_SD10G28_CMU_AUX1 = 1,
+ SPX5_SD10G28_CMU_AUX2 = 3,
+ SPX5_SD10G28_CMU_NONE = 4,
+};
+
+enum sparx5_sd25g28_mode_preset_type {
+ SPX5_SD25G28_MODE_PRESET_25000,
+ SPX5_SD25G28_MODE_PRESET_10000,
+ SPX5_SD25G28_MODE_PRESET_5000,
+ SPX5_SD25G28_MODE_PRESET_SD_2G5,
+ SPX5_SD25G28_MODE_PRESET_1000BASEX,
+};
+
+enum sparx5_sd10g28_mode_preset_type {
+ SPX5_SD10G28_MODE_PRESET_10000,
+ SPX5_SD10G28_MODE_PRESET_SFI_5000_6G,
+ SPX5_SD10G28_MODE_PRESET_SFI_5000_10G,
+ SPX5_SD10G28_MODE_PRESET_QSGMII,
+ SPX5_SD10G28_MODE_PRESET_SD_2G5,
+ SPX5_SD10G28_MODE_PRESET_1000BASEX,
+};
+
+struct sparx5_serdes_io_resource {
+ enum sparx5_serdes_target id;
+ phys_addr_t offset;
+};
+
+struct sparx5_sd25g28_mode_preset {
+ u8 bitwidth;
+ u8 tx_pre_div;
+ u8 fifo_ck_div;
+ u8 pre_divsel;
+ u8 vco_div_mode;
+ u8 sel_div;
+ u8 ck_bitwidth;
+ u8 subrate;
+ u8 com_txcal_en;
+ u8 com_tx_reserve_msb;
+ u8 com_tx_reserve_lsb;
+ u8 cfg_itx_ipcml_base;
+ u8 tx_reserve_lsb;
+ u8 tx_reserve_msb;
+ u8 bw;
+ u8 rxterm;
+ u8 dfe_tap;
+ u8 dfe_enable;
+ bool txmargin;
+ u8 cfg_ctle_rstn;
+ u8 r_dfe_rstn;
+ u8 cfg_pi_bw_3_0;
+ u8 tx_tap_dly;
+ u8 tx_tap_adv;
+};
+
+struct sparx5_sd25g28_media_preset {
+ u8 cfg_eq_c_force_3_0;
+ u8 cfg_vga_ctrl_byp_4_0;
+ u8 cfg_eq_r_force_3_0;
+ u8 cfg_en_adv;
+ u8 cfg_en_main;
+ u8 cfg_en_dly;
+ u8 cfg_tap_adv_3_0;
+ u8 cfg_tap_main;
+ u8 cfg_tap_dly_4_0;
+ u8 cfg_alos_thr_2_0;
+};
+
+struct sparx5_sd25g28_args {
+ u8 if_width; /* UDL if-width: 10/16/20/32/64 */
+ bool skip_cmu_cfg:1; /* Enable/disable CMU cfg */
+ enum sparx5_10g28cmu_mode cmu_sel; /* Device/Mode serdes uses */
+ bool no_pwrcycle:1; /* Omit initial power-cycle */
+ bool txinvert:1; /* Enable inversion of output data */
+ bool rxinvert:1; /* Enable inversion of input data */
+ u16 txswing; /* Set output level */
+ u8 rate; /* Rate of network interface */
+ u8 pi_bw_gen1;
+ u8 duty_cycle; /* Set output level to half/full */
+ bool mute:1; /* Mute Output Buffer */
+ bool reg_rst:1;
+ u8 com_pll_reserve;
+};
+
+struct sparx5_sd25g28_params {
+ u8 reg_rst;
+ u8 cfg_jc_byp;
+ u8 cfg_common_reserve_7_0;
+ u8 r_reg_manual;
+ u8 r_d_width_ctrl_from_hwt;
+ u8 r_d_width_ctrl_2_0;
+ u8 r_txfifo_ck_div_pmad_2_0;
+ u8 r_rxfifo_ck_div_pmad_2_0;
+ u8 cfg_pll_lol_set;
+ u8 cfg_vco_div_mode_1_0;
+ u8 cfg_pre_divsel_1_0;
+ u8 cfg_sel_div_3_0;
+ u8 cfg_vco_start_code_3_0;
+ u8 cfg_pma_tx_ck_bitwidth_2_0;
+ u8 cfg_tx_prediv_1_0;
+ u8 cfg_rxdiv_sel_2_0;
+ u8 cfg_tx_subrate_2_0;
+ u8 cfg_rx_subrate_2_0;
+ u8 r_multi_lane_mode;
+ u8 cfg_cdrck_en;
+ u8 cfg_dfeck_en;
+ u8 cfg_dfe_pd;
+ u8 cfg_dfedmx_pd;
+ u8 cfg_dfetap_en_5_1;
+ u8 cfg_dmux_pd;
+ u8 cfg_dmux_clk_pd;
+ u8 cfg_erramp_pd;
+ u8 cfg_pi_dfe_en;
+ u8 cfg_pi_en;
+ u8 cfg_pd_ctle;
+ u8 cfg_summer_en;
+ u8 cfg_pmad_ck_pd;
+ u8 cfg_pd_clk;
+ u8 cfg_pd_cml;
+ u8 cfg_pd_driver;
+ u8 cfg_rx_reg_pu;
+ u8 cfg_pd_rms_det;
+ u8 cfg_dcdr_pd;
+ u8 cfg_ecdr_pd;
+ u8 cfg_pd_sq;
+ u8 cfg_itx_ipdriver_base_2_0;
+ u8 cfg_tap_dly_4_0;
+ u8 cfg_tap_main;
+ u8 cfg_en_main;
+ u8 cfg_tap_adv_3_0;
+ u8 cfg_en_adv;
+ u8 cfg_en_dly;
+ u8 cfg_iscan_en;
+ u8 l1_pcs_en_fast_iscan;
+ u8 l0_cfg_bw_1_0;
+ u8 l0_cfg_txcal_en;
+ u8 cfg_en_dummy;
+ u8 cfg_pll_reserve_3_0;
+ u8 l0_cfg_tx_reserve_15_8;
+ u8 l0_cfg_tx_reserve_7_0;
+ u8 cfg_tx_reserve_15_8;
+ u8 cfg_tx_reserve_7_0;
+ u8 cfg_bw_1_0;
+ u8 cfg_txcal_man_en;
+ u8 cfg_phase_man_4_0;
+ u8 cfg_quad_man_1_0;
+ u8 cfg_txcal_shift_code_5_0;
+ u8 cfg_txcal_valid_sel_3_0;
+ u8 cfg_txcal_en;
+ u8 cfg_cdr_kf_2_0;
+ u8 cfg_cdr_m_7_0;
+ u8 cfg_pi_bw_3_0;
+ u8 cfg_pi_steps_1_0;
+ u8 cfg_dis_2ndorder;
+ u8 cfg_ctle_rstn;
+ u8 r_dfe_rstn;
+ u8 cfg_alos_thr_2_0;
+ u8 cfg_itx_ipcml_base_1_0;
+ u8 cfg_rx_reserve_7_0;
+ u8 cfg_rx_reserve_15_8;
+ u8 cfg_rxterm_2_0;
+ u8 cfg_fom_selm;
+ u8 cfg_rx_sp_ctle_1_0;
+ u8 cfg_isel_ctle_1_0;
+ u8 cfg_vga_ctrl_byp_4_0;
+ u8 cfg_vga_byp;
+ u8 cfg_agc_adpt_byp;
+ u8 cfg_eqr_byp;
+ u8 cfg_eqr_force_3_0;
+ u8 cfg_eqc_force_3_0;
+ u8 cfg_sum_setcm_en;
+ u8 cfg_init_pos_iscan_6_0;
+ u8 cfg_init_pos_ipi_6_0;
+ u8 cfg_dfedig_m_2_0;
+ u8 cfg_en_dfedig;
+ u8 cfg_pi_DFE_en;
+ u8 cfg_tx2rx_lp_en;
+ u8 cfg_txlb_en;
+ u8 cfg_rx2tx_lp_en;
+ u8 cfg_rxlb_en;
+ u8 r_tx_pol_inv;
+ u8 r_rx_pol_inv;
+};
+
+struct sparx5_sd10g28_media_preset {
+ u8 cfg_en_adv;
+ u8 cfg_en_main;
+ u8 cfg_en_dly;
+ u8 cfg_tap_adv_3_0;
+ u8 cfg_tap_main;
+ u8 cfg_tap_dly_4_0;
+ u8 cfg_vga_ctrl_3_0;
+ u8 cfg_vga_cp_2_0;
+ u8 cfg_eq_res_3_0;
+ u8 cfg_eq_r_byp;
+ u8 cfg_eq_c_force_3_0;
+ u8 cfg_alos_thr_3_0;
+};
+
+struct sparx5_sd10g28_mode_preset {
+ u8 bwidth; /* interface width: 10/16/20/32/64 */
+ enum sparx5_10g28cmu_mode cmu_sel; /* Device/Mode serdes uses */
+ u8 rate; /* Rate of network interface */
+ u8 dfe_tap;
+ u8 dfe_enable;
+ u8 pi_bw_gen1;
+ u8 duty_cycle; /* Set output level to half/full */
+};
+
+struct sparx5_sd10g28_args {
+ bool skip_cmu_cfg:1; /* Enable/disable CMU cfg */
+ bool no_pwrcycle:1; /* Omit initial power-cycle */
+ bool txinvert:1; /* Enable inversion of output data */
+ bool rxinvert:1; /* Enable inversion of input data */
+ bool txmargin:1; /* Set output level to half/full */
+ u16 txswing; /* Set output level */
+ bool mute:1; /* Mute Output Buffer */
+ bool is_6g:1;
+ bool reg_rst:1;
+};
+
+struct sparx5_sd10g28_params {
+ u8 cmu_sel;
+ u8 is_6g;
+ u8 skip_cmu_cfg;
+ u8 cfg_lane_reserve_7_0;
+ u8 cfg_ssc_rtl_clk_sel;
+ u8 cfg_lane_reserve_15_8;
+ u8 cfg_txrate_1_0;
+ u8 cfg_rxrate_1_0;
+ u8 r_d_width_ctrl_2_0;
+ u8 cfg_pma_tx_ck_bitwidth_2_0;
+ u8 cfg_rxdiv_sel_2_0;
+ u8 r_pcs2pma_phymode_4_0;
+ u8 cfg_lane_id_2_0;
+ u8 cfg_cdrck_en;
+ u8 cfg_dfeck_en;
+ u8 cfg_dfe_pd;
+ u8 cfg_dfetap_en_5_1;
+ u8 cfg_erramp_pd;
+ u8 cfg_pi_DFE_en;
+ u8 cfg_pi_en;
+ u8 cfg_pd_ctle;
+ u8 cfg_summer_en;
+ u8 cfg_pd_rx_cktree;
+ u8 cfg_pd_clk;
+ u8 cfg_pd_cml;
+ u8 cfg_pd_driver;
+ u8 cfg_rx_reg_pu;
+ u8 cfg_d_cdr_pd;
+ u8 cfg_pd_sq;
+ u8 cfg_rxdet_en;
+ u8 cfg_rxdet_str;
+ u8 r_multi_lane_mode;
+ u8 cfg_en_adv;
+ u8 cfg_en_main;
+ u8 cfg_en_dly;
+ u8 cfg_tap_adv_3_0;
+ u8 cfg_tap_main;
+ u8 cfg_tap_dly_4_0;
+ u8 cfg_vga_ctrl_3_0;
+ u8 cfg_vga_cp_2_0;
+ u8 cfg_eq_res_3_0;
+ u8 cfg_eq_r_byp;
+ u8 cfg_eq_c_force_3_0;
+ u8 cfg_en_dfedig;
+ u8 cfg_sum_setcm_en;
+ u8 cfg_en_preemph;
+ u8 cfg_itx_ippreemp_base_1_0;
+ u8 cfg_itx_ipdriver_base_2_0;
+ u8 cfg_ibias_tune_reserve_5_0;
+ u8 cfg_txswing_half;
+ u8 cfg_dis_2nd_order;
+ u8 cfg_rx_ssc_lh;
+ u8 cfg_pi_floop_steps_1_0;
+ u8 cfg_pi_ext_dac_23_16;
+ u8 cfg_pi_ext_dac_15_8;
+ u8 cfg_iscan_ext_dac_7_0;
+ u8 cfg_cdr_kf_gen1_2_0;
+ u8 cfg_cdr_kf_gen2_2_0;
+ u8 cfg_cdr_kf_gen3_2_0;
+ u8 cfg_cdr_kf_gen4_2_0;
+ u8 r_cdr_m_gen1_7_0;
+ u8 cfg_pi_bw_gen1_3_0;
+ u8 cfg_pi_bw_gen2;
+ u8 cfg_pi_bw_gen3;
+ u8 cfg_pi_bw_gen4;
+ u8 cfg_pi_ext_dac_7_0;
+ u8 cfg_pi_steps;
+ u8 cfg_mp_max_3_0;
+ u8 cfg_rstn_dfedig;
+ u8 cfg_alos_thr_3_0;
+ u8 cfg_predrv_slewrate_1_0;
+ u8 cfg_itx_ipcml_base_1_0;
+ u8 cfg_ip_pre_base_1_0;
+ u8 r_cdr_m_gen2_7_0;
+ u8 r_cdr_m_gen3_7_0;
+ u8 r_cdr_m_gen4_7_0;
+ u8 r_en_auto_cdr_rstn;
+ u8 cfg_oscal_afe;
+ u8 cfg_pd_osdac_afe;
+ u8 cfg_resetb_oscal_afe[2];
+ u8 cfg_center_spreading;
+ u8 cfg_m_cnt_maxval_4_0;
+ u8 cfg_ncnt_maxval_7_0;
+ u8 cfg_ncnt_maxval_10_8;
+ u8 cfg_ssc_en;
+ u8 cfg_tx2rx_lp_en;
+ u8 cfg_txlb_en;
+ u8 cfg_rx2tx_lp_en;
+ u8 cfg_rxlb_en;
+ u8 r_tx_pol_inv;
+ u8 r_rx_pol_inv;
+ u8 fx_100;
+};
+
+static struct sparx5_sd25g28_media_preset media_presets_25g[] = {
+ { /* ETH_MEDIA_DEFAULT */
+ .cfg_en_adv = 0,
+ .cfg_en_main = 1,
+ .cfg_en_dly = 0,
+ .cfg_tap_adv_3_0 = 0,
+ .cfg_tap_main = 1,
+ .cfg_tap_dly_4_0 = 0,
+ .cfg_eq_c_force_3_0 = 0xf,
+ .cfg_vga_ctrl_byp_4_0 = 4,
+ .cfg_eq_r_force_3_0 = 12,
+ .cfg_alos_thr_2_0 = 7,
+ },
+ { /* ETH_MEDIA_SR */
+ .cfg_en_adv = 1,
+ .cfg_en_main = 1,
+ .cfg_en_dly = 1,
+ .cfg_tap_adv_3_0 = 0,
+ .cfg_tap_main = 1,
+ .cfg_tap_dly_4_0 = 0x10,
+ .cfg_eq_c_force_3_0 = 0xf,
+ .cfg_vga_ctrl_byp_4_0 = 8,
+ .cfg_eq_r_force_3_0 = 4,
+ .cfg_alos_thr_2_0 = 0,
+ },
+ { /* ETH_MEDIA_DAC */
+ .cfg_en_adv = 0,
+ .cfg_en_main = 1,
+ .cfg_en_dly = 0,
+ .cfg_tap_adv_3_0 = 0,
+ .cfg_tap_main = 1,
+ .cfg_tap_dly_4_0 = 0,
+ .cfg_eq_c_force_3_0 = 0xf,
+ .cfg_vga_ctrl_byp_4_0 = 8,
+ .cfg_eq_r_force_3_0 = 0xc,
+ .cfg_alos_thr_2_0 = 0,
+ },
+};
+
+static struct sparx5_sd25g28_mode_preset mode_presets_25g[] = {
+ { /* SPX5_SD25G28_MODE_PRESET_25000 */
+ .bitwidth = 40,
+ .tx_pre_div = 0,
+ .fifo_ck_div = 0,
+ .pre_divsel = 1,
+ .vco_div_mode = 0,
+ .sel_div = 15,
+ .ck_bitwidth = 3,
+ .subrate = 0,
+ .com_txcal_en = 0,
+ .com_tx_reserve_msb = (0x26 << 1),
+ .com_tx_reserve_lsb = 0xf0,
+ .cfg_itx_ipcml_base = 0,
+ .tx_reserve_msb = 0xcc,
+ .tx_reserve_lsb = 0xfe,
+ .bw = 3,
+ .rxterm = 0,
+ .dfe_enable = 1,
+ .dfe_tap = 0x1f,
+ .txmargin = 1,
+ .cfg_ctle_rstn = 1,
+ .r_dfe_rstn = 1,
+ .cfg_pi_bw_3_0 = 0,
+ .tx_tap_dly = 8,
+ .tx_tap_adv = 0xc,
+ },
+ { /* SPX5_SD25G28_MODE_PRESET_10000 */
+ .bitwidth = 64,
+ .tx_pre_div = 0,
+ .fifo_ck_div = 2,
+ .pre_divsel = 0,
+ .vco_div_mode = 1,
+ .sel_div = 9,
+ .ck_bitwidth = 0,
+ .subrate = 0,
+ .com_txcal_en = 1,
+ .com_tx_reserve_msb = (0x20 << 1),
+ .com_tx_reserve_lsb = 0x40,
+ .cfg_itx_ipcml_base = 0,
+ .tx_reserve_msb = 0x4c,
+ .tx_reserve_lsb = 0x44,
+ .bw = 3,
+ .cfg_pi_bw_3_0 = 0,
+ .rxterm = 3,
+ .dfe_enable = 1,
+ .dfe_tap = 0x1f,
+ .txmargin = 0,
+ .cfg_ctle_rstn = 1,
+ .r_dfe_rstn = 1,
+ .tx_tap_dly = 0,
+ .tx_tap_adv = 0,
+ },
+ { /* SPX5_SD25G28_MODE_PRESET_5000 */
+ .bitwidth = 64,
+ .tx_pre_div = 0,
+ .fifo_ck_div = 2,
+ .pre_divsel = 0,
+ .vco_div_mode = 2,
+ .sel_div = 9,
+ .ck_bitwidth = 0,
+ .subrate = 0,
+ .com_txcal_en = 1,
+ .com_tx_reserve_msb = (0x20 << 1),
+ .com_tx_reserve_lsb = 0,
+ .cfg_itx_ipcml_base = 0,
+ .tx_reserve_msb = 0xe,
+ .tx_reserve_lsb = 0x80,
+ .bw = 0,
+ .rxterm = 0,
+ .cfg_pi_bw_3_0 = 6,
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .tx_tap_dly = 0,
+ .tx_tap_adv = 0,
+ },
+ { /* SPX5_SD25G28_MODE_PRESET_SD_2G5 */
+ .bitwidth = 10,
+ .tx_pre_div = 0,
+ .fifo_ck_div = 0,
+ .pre_divsel = 0,
+ .vco_div_mode = 1,
+ .sel_div = 6,
+ .ck_bitwidth = 3,
+ .subrate = 2,
+ .com_txcal_en = 1,
+ .com_tx_reserve_msb = (0x26 << 1),
+ .com_tx_reserve_lsb = (0xf << 4),
+ .cfg_itx_ipcml_base = 2,
+ .tx_reserve_msb = 0x8,
+ .tx_reserve_lsb = 0x8a,
+ .bw = 0,
+ .cfg_pi_bw_3_0 = 0,
+ .rxterm = (1 << 2),
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .tx_tap_dly = 0,
+ .tx_tap_adv = 0,
+ },
+ { /* SPX5_SD25G28_MODE_PRESET_1000BASEX */
+ .bitwidth = 10,
+ .tx_pre_div = 0,
+ .fifo_ck_div = 1,
+ .pre_divsel = 0,
+ .vco_div_mode = 1,
+ .sel_div = 8,
+ .ck_bitwidth = 3,
+ .subrate = 3,
+ .com_txcal_en = 1,
+ .com_tx_reserve_msb = (0x26 << 1),
+ .com_tx_reserve_lsb = 0xf0,
+ .cfg_itx_ipcml_base = 0,
+ .tx_reserve_msb = 0x8,
+ .tx_reserve_lsb = 0xce,
+ .bw = 0,
+ .rxterm = 0,
+ .cfg_pi_bw_3_0 = 0,
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .tx_tap_dly = 0,
+ .tx_tap_adv = 0,
+ },
+};
+
+static struct sparx5_sd10g28_media_preset media_presets_10g[] = {
+ { /* ETH_MEDIA_DEFAULT */
+ .cfg_en_adv = 0,
+ .cfg_en_main = 1,
+ .cfg_en_dly = 0,
+ .cfg_tap_adv_3_0 = 0,
+ .cfg_tap_main = 1,
+ .cfg_tap_dly_4_0 = 0,
+ .cfg_vga_ctrl_3_0 = 5,
+ .cfg_vga_cp_2_0 = 0,
+ .cfg_eq_res_3_0 = 0xa,
+ .cfg_eq_r_byp = 1,
+ .cfg_eq_c_force_3_0 = 0x8,
+ .cfg_alos_thr_3_0 = 0x3,
+ },
+ { /* ETH_MEDIA_SR */
+ .cfg_en_adv = 1,
+ .cfg_en_main = 1,
+ .cfg_en_dly = 1,
+ .cfg_tap_adv_3_0 = 0,
+ .cfg_tap_main = 1,
+ .cfg_tap_dly_4_0 = 0xc,
+ .cfg_vga_ctrl_3_0 = 0xa,
+ .cfg_vga_cp_2_0 = 0x4,
+ .cfg_eq_res_3_0 = 0xa,
+ .cfg_eq_r_byp = 1,
+ .cfg_eq_c_force_3_0 = 0xF,
+ .cfg_alos_thr_3_0 = 0x3,
+ },
+ { /* ETH_MEDIA_DAC */
+ .cfg_en_adv = 1,
+ .cfg_en_main = 1,
+ .cfg_en_dly = 1,
+ .cfg_tap_adv_3_0 = 12,
+ .cfg_tap_main = 1,
+ .cfg_tap_dly_4_0 = 8,
+ .cfg_vga_ctrl_3_0 = 0xa,
+ .cfg_vga_cp_2_0 = 4,
+ .cfg_eq_res_3_0 = 0xa,
+ .cfg_eq_r_byp = 1,
+ .cfg_eq_c_force_3_0 = 0xf,
+ .cfg_alos_thr_3_0 = 0x0,
+ }
+};
+
+static struct sparx5_sd10g28_mode_preset mode_presets_10g[] = {
+ { /* SPX5_SD10G28_MODE_PRESET_10000 */
+ .bwidth = 64,
+ .cmu_sel = SPX5_SD10G28_CMU_MAIN,
+ .rate = 0x0,
+ .dfe_enable = 1,
+ .dfe_tap = 0x1f,
+ .pi_bw_gen1 = 0x0,
+ .duty_cycle = 0x2,
+ },
+ { /* SPX5_SD10G28_MODE_PRESET_SFI_5000_6G */
+ .bwidth = 16,
+ .cmu_sel = SPX5_SD10G28_CMU_MAIN,
+ .rate = 0x1,
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .pi_bw_gen1 = 0x5,
+ .duty_cycle = 0x0,
+ },
+ { /* SPX5_SD10G28_MODE_PRESET_SFI_5000_10G */
+ .bwidth = 64,
+ .cmu_sel = SPX5_SD10G28_CMU_MAIN,
+ .rate = 0x1,
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .pi_bw_gen1 = 0x5,
+ .duty_cycle = 0x0,
+ },
+ { /* SPX5_SD10G28_MODE_PRESET_QSGMII */
+ .bwidth = 20,
+ .cmu_sel = SPX5_SD10G28_CMU_AUX1,
+ .rate = 0x1,
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .pi_bw_gen1 = 0x5,
+ .duty_cycle = 0x0,
+ },
+ { /* SPX5_SD10G28_MODE_PRESET_SD_2G5 */
+ .bwidth = 10,
+ .cmu_sel = SPX5_SD10G28_CMU_AUX2,
+ .rate = 0x2,
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .pi_bw_gen1 = 0x7,
+ .duty_cycle = 0x0,
+ },
+ { /* SPX5_SD10G28_MODE_PRESET_1000BASEX */
+ .bwidth = 10,
+ .cmu_sel = SPX5_SD10G28_CMU_AUX1,
+ .rate = 0x3,
+ .dfe_enable = 0,
+ .dfe_tap = 0,
+ .pi_bw_gen1 = 0x7,
+ .duty_cycle = 0x0,
+ },
+};
+
+/* map from SD25G28 interface width to configuration value */
+static u8 sd25g28_get_iw_setting(struct device *dev, const u8 interface_width)
+{
+ switch (interface_width) {
+ case 10: return 0;
+ case 16: return 1;
+ case 32: return 3;
+ case 40: return 4;
+ case 64: return 5;
+ default:
+ dev_err(dev, "%s: Illegal value %d for interface width\n",
+ __func__, interface_width);
+ }
+ return 0;
+}
+
+/* map from SD10G28 interface width to configuration value */
+static u8 sd10g28_get_iw_setting(struct device *dev, const u8 interface_width)
+{
+ switch (interface_width) {
+ case 10: return 0;
+ case 16: return 1;
+ case 20: return 2;
+ case 32: return 3;
+ case 40: return 4;
+ case 64: return 7;
+ default:
+ dev_err(dev, "%s: Illegal value %d for interface width\n", __func__,
+ interface_width);
+ return 0;
+ }
+}
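Note that the same interface width encodes differently on the two macro families (64 bits is 5 on the 25G macro but 7 on the 10G macro), which is why the helpers stay separate; an illustration:

/* Illustration only, values read off the switch statements above;
 * dev is assumed to be the serdes device.
 */
u8 iw25 = sd25g28_get_iw_setting(dev, 64);	/* returns 5 */
u8 iw10 = sd10g28_get_iw_setting(dev, 64);	/* returns 7 */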
+
+static int sparx5_sd10g25_get_mode_preset(struct sparx5_serdes_macro *macro,
+ struct sparx5_sd25g28_mode_preset *mode)
+{
+ switch (macro->serdesmode) {
+ case SPX5_SD_MODE_SFI:
+ if (macro->speed == SPEED_25000)
+ *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_25000];
+ else if (macro->speed == SPEED_10000)
+ *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_10000];
+ else if (macro->speed == SPEED_5000)
+ *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_5000];
+ break;
+ case SPX5_SD_MODE_2G5:
+ *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_SD_2G5];
+ break;
+ case SPX5_SD_MODE_1000BASEX:
+ *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_1000BASEX];
+ break;
+ case SPX5_SD_MODE_100FX:
+ /* Not supported */
+ return -EINVAL;
+ default:
+ *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_25000];
+ break;
+ }
+ return 0;
+}
+
+static int sparx5_sd10g28_get_mode_preset(struct sparx5_serdes_macro *macro,
+ struct sparx5_sd10g28_mode_preset *mode,
+ struct sparx5_sd10g28_args *args)
+{
+ switch (macro->serdesmode) {
+ case SPX5_SD_MODE_SFI:
+ if (macro->speed == SPEED_10000) {
+ *mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_10000];
+ } else if (macro->speed == SPEED_5000) {
+ if (args->is_6g)
+ *mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_SFI_5000_6G];
+ else
+ *mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_SFI_5000_10G];
+ } else {
+ dev_err(macro->priv->dev, "%s: Illegal speed: %02u, sidx: %02u, mode (%u)",
+ __func__, macro->speed, macro->sidx,
+ macro->serdesmode);
+ return -EINVAL;
+ }
+ break;
+ case SPX5_SD_MODE_QSGMII:
+ *mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_QSGMII];
+ break;
+ case SPX5_SD_MODE_2G5:
+ *mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_SD_2G5];
+ break;
+ case SPX5_SD_MODE_100FX:
+ case SPX5_SD_MODE_1000BASEX:
+ *mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_1000BASEX];
+ break;
+ default:
+ *mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_10000];
+ break;
+ }
+ return 0;
+}
+
+static void sparx5_sd25g28_get_params(struct sparx5_serdes_macro *macro,
+ struct sparx5_sd25g28_media_preset *media,
+ struct sparx5_sd25g28_mode_preset *mode,
+ struct sparx5_sd25g28_args *args,
+ struct sparx5_sd25g28_params *params)
+{
+ u8 iw = sd25g28_get_iw_setting(macro->priv->dev, mode->bitwidth);
+ struct sparx5_sd25g28_params init = {
+ .r_d_width_ctrl_2_0 = iw,
+ .r_txfifo_ck_div_pmad_2_0 = mode->fifo_ck_div,
+ .r_rxfifo_ck_div_pmad_2_0 = mode->fifo_ck_div,
+ .cfg_vco_div_mode_1_0 = mode->vco_div_mode,
+ .cfg_pre_divsel_1_0 = mode->pre_divsel,
+ .cfg_sel_div_3_0 = mode->sel_div,
+ .cfg_vco_start_code_3_0 = 0,
+ .cfg_pma_tx_ck_bitwidth_2_0 = mode->ck_bitwidth,
+ .cfg_tx_prediv_1_0 = mode->tx_pre_div,
+ .cfg_rxdiv_sel_2_0 = mode->ck_bitwidth,
+ .cfg_tx_subrate_2_0 = mode->subrate,
+ .cfg_rx_subrate_2_0 = mode->subrate,
+ .r_multi_lane_mode = 0,
+ .cfg_cdrck_en = 1,
+ .cfg_dfeck_en = mode->dfe_enable,
+ .cfg_dfe_pd = mode->dfe_enable == 1 ? 0 : 1,
+ .cfg_dfedmx_pd = 1,
+ .cfg_dfetap_en_5_1 = mode->dfe_tap,
+ .cfg_dmux_pd = 0,
+ .cfg_dmux_clk_pd = 1,
+ .cfg_erramp_pd = mode->dfe_enable == 1 ? 0 : 1,
+ .cfg_pi_DFE_en = mode->dfe_enable,
+ .cfg_pi_en = 1,
+ .cfg_pd_ctle = 0,
+ .cfg_summer_en = 1,
+ .cfg_pmad_ck_pd = 0,
+ .cfg_pd_clk = 0,
+ .cfg_pd_cml = 0,
+ .cfg_pd_driver = 0,
+ .cfg_rx_reg_pu = 1,
+ .cfg_pd_rms_det = 1,
+ .cfg_dcdr_pd = 0,
+ .cfg_ecdr_pd = 1,
+ .cfg_pd_sq = 1,
+ .cfg_itx_ipdriver_base_2_0 = mode->txmargin,
+ .cfg_tap_dly_4_0 = media->cfg_tap_dly_4_0,
+ .cfg_tap_main = media->cfg_tap_main,
+ .cfg_en_main = media->cfg_en_main,
+ .cfg_tap_adv_3_0 = media->cfg_tap_adv_3_0,
+ .cfg_en_adv = media->cfg_en_adv,
+ .cfg_en_dly = media->cfg_en_dly,
+ .cfg_iscan_en = 0,
+ .l1_pcs_en_fast_iscan = 0,
+ .l0_cfg_bw_1_0 = 0,
+ .cfg_en_dummy = 0,
+ .cfg_pll_reserve_3_0 = args->com_pll_reserve,
+ .l0_cfg_txcal_en = mode->com_txcal_en,
+ .l0_cfg_tx_reserve_15_8 = mode->com_tx_reserve_msb,
+ .l0_cfg_tx_reserve_7_0 = mode->com_tx_reserve_lsb,
+ .cfg_tx_reserve_15_8 = mode->tx_reserve_msb,
+ .cfg_tx_reserve_7_0 = mode->tx_reserve_lsb,
+ .cfg_bw_1_0 = mode->bw,
+ .cfg_txcal_man_en = 1,
+ .cfg_phase_man_4_0 = 0,
+ .cfg_quad_man_1_0 = 0,
+ .cfg_txcal_shift_code_5_0 = 2,
+ .cfg_txcal_valid_sel_3_0 = 4,
+ .cfg_txcal_en = 0,
+ .cfg_cdr_kf_2_0 = 1,
+ .cfg_cdr_m_7_0 = 6,
+ .cfg_pi_bw_3_0 = mode->cfg_pi_bw_3_0,
+ .cfg_pi_steps_1_0 = 0,
+ .cfg_dis_2ndorder = 1,
+ .cfg_ctle_rstn = mode->cfg_ctle_rstn,
+ .r_dfe_rstn = mode->r_dfe_rstn,
+ .cfg_alos_thr_2_0 = media->cfg_alos_thr_2_0,
+ .cfg_itx_ipcml_base_1_0 = mode->cfg_itx_ipcml_base,
+ .cfg_rx_reserve_7_0 = 0xbf,
+ .cfg_rx_reserve_15_8 = 0x61,
+ .cfg_rxterm_2_0 = mode->rxterm,
+ .cfg_fom_selm = 0,
+ .cfg_rx_sp_ctle_1_0 = 0,
+ .cfg_isel_ctle_1_0 = 0,
+ .cfg_vga_ctrl_byp_4_0 = media->cfg_vga_ctrl_byp_4_0,
+ .cfg_vga_byp = 1,
+ .cfg_agc_adpt_byp = 1,
+ .cfg_eqr_byp = 1,
+ .cfg_eqr_force_3_0 = media->cfg_eq_r_force_3_0,
+ .cfg_eqc_force_3_0 = media->cfg_eq_c_force_3_0,
+ .cfg_sum_setcm_en = 1,
+ .cfg_pi_dfe_en = 1,
+ .cfg_init_pos_iscan_6_0 = 6,
+ .cfg_init_pos_ipi_6_0 = 9,
+ .cfg_dfedig_m_2_0 = 6,
+ .cfg_en_dfedig = mode->dfe_enable,
+ .r_d_width_ctrl_from_hwt = 0,
+ .r_reg_manual = 1,
+ .reg_rst = args->reg_rst,
+ .cfg_jc_byp = 1,
+ .cfg_common_reserve_7_0 = 1,
+ .cfg_pll_lol_set = 1,
+ .cfg_tx2rx_lp_en = 0,
+ .cfg_txlb_en = 0,
+ .cfg_rx2tx_lp_en = 0,
+ .cfg_rxlb_en = 0,
+ .r_tx_pol_inv = args->txinvert,
+ .r_rx_pol_inv = args->rxinvert,
+ };
+
+ *params = init;
+}
+
+static void sparx5_sd10g28_get_params(struct sparx5_serdes_macro *macro,
+ struct sparx5_sd10g28_media_preset *media,
+ struct sparx5_sd10g28_mode_preset *mode,
+ struct sparx5_sd10g28_args *args,
+ struct sparx5_sd10g28_params *params)
+{
+ u8 iw = sd10g28_get_iw_setting(macro->priv->dev, mode->bwidth);
+ struct sparx5_sd10g28_params init = {
+ .skip_cmu_cfg = args->skip_cmu_cfg,
+ .is_6g = args->is_6g,
+ .cmu_sel = mode->cmu_sel,
+ .cfg_lane_reserve_7_0 = (mode->cmu_sel % 2) << 6,
+ .cfg_ssc_rtl_clk_sel = (mode->cmu_sel / 2),
+ .cfg_lane_reserve_15_8 = mode->duty_cycle,
+ .cfg_txrate_1_0 = mode->rate,
+ .cfg_rxrate_1_0 = mode->rate,
+ .fx_100 = macro->serdesmode == SPX5_SD_MODE_100FX,
+ .r_d_width_ctrl_2_0 = iw,
+ .cfg_pma_tx_ck_bitwidth_2_0 = iw,
+ .cfg_rxdiv_sel_2_0 = iw,
+ .r_pcs2pma_phymode_4_0 = 0,
+ .cfg_lane_id_2_0 = 0,
+ .cfg_cdrck_en = 1,
+ .cfg_dfeck_en = mode->dfe_enable,
+ .cfg_dfe_pd = (mode->dfe_enable == 1) ? 0 : 1,
+ .cfg_dfetap_en_5_1 = mode->dfe_tap,
+ .cfg_erramp_pd = (mode->dfe_enable == 1) ? 0 : 1,
+ .cfg_pi_DFE_en = mode->dfe_enable,
+ .cfg_pi_en = 1,
+ .cfg_pd_ctle = 0,
+ .cfg_summer_en = 1,
+ .cfg_pd_rx_cktree = 0,
+ .cfg_pd_clk = 0,
+ .cfg_pd_cml = 0,
+ .cfg_pd_driver = 0,
+ .cfg_rx_reg_pu = 1,
+ .cfg_d_cdr_pd = 0,
+ .cfg_pd_sq = mode->dfe_enable,
+ .cfg_rxdet_en = 0,
+ .cfg_rxdet_str = 0,
+ .r_multi_lane_mode = 0,
+ .cfg_en_adv = media->cfg_en_adv,
+ .cfg_en_main = 1,
+ .cfg_en_dly = media->cfg_en_dly,
+ .cfg_tap_adv_3_0 = media->cfg_tap_adv_3_0,
+ .cfg_tap_main = media->cfg_tap_main,
+ .cfg_tap_dly_4_0 = media->cfg_tap_dly_4_0,
+ .cfg_vga_ctrl_3_0 = media->cfg_vga_ctrl_3_0,
+ .cfg_vga_cp_2_0 = media->cfg_vga_cp_2_0,
+ .cfg_eq_res_3_0 = media->cfg_eq_res_3_0,
+ .cfg_eq_r_byp = media->cfg_eq_r_byp,
+ .cfg_eq_c_force_3_0 = media->cfg_eq_c_force_3_0,
+ .cfg_en_dfedig = mode->dfe_enable,
+ .cfg_sum_setcm_en = 1,
+ .cfg_en_preemph = 0,
+ .cfg_itx_ippreemp_base_1_0 = 0,
+ .cfg_itx_ipdriver_base_2_0 = (args->txswing >> 6),
+ .cfg_ibias_tune_reserve_5_0 = (args->txswing & 63),
+ .cfg_txswing_half = (args->txmargin),
+ .cfg_dis_2nd_order = 0x1,
+ .cfg_rx_ssc_lh = 0x0,
+ .cfg_pi_floop_steps_1_0 = 0x0,
+ .cfg_pi_ext_dac_23_16 = (1 << 5),
+ .cfg_pi_ext_dac_15_8 = (0 << 6),
+ .cfg_iscan_ext_dac_7_0 = (1 << 7) + 9,
+ .cfg_cdr_kf_gen1_2_0 = 1,
+ .cfg_cdr_kf_gen2_2_0 = 1,
+ .cfg_cdr_kf_gen3_2_0 = 1,
+ .cfg_cdr_kf_gen4_2_0 = 1,
+ .r_cdr_m_gen1_7_0 = 4,
+ .cfg_pi_bw_gen1_3_0 = mode->pi_bw_gen1,
+ .cfg_pi_bw_gen2 = mode->pi_bw_gen1,
+ .cfg_pi_bw_gen3 = mode->pi_bw_gen1,
+ .cfg_pi_bw_gen4 = mode->pi_bw_gen1,
+ .cfg_pi_ext_dac_7_0 = 3,
+ .cfg_pi_steps = 0,
+ .cfg_mp_max_3_0 = 1,
+ .cfg_rstn_dfedig = mode->dfe_enable,
+ .cfg_alos_thr_3_0 = media->cfg_alos_thr_3_0,
+ .cfg_predrv_slewrate_1_0 = 3,
+ .cfg_itx_ipcml_base_1_0 = 0,
+ .cfg_ip_pre_base_1_0 = 0,
+ .r_cdr_m_gen2_7_0 = 2,
+ .r_cdr_m_gen3_7_0 = 2,
+ .r_cdr_m_gen4_7_0 = 2,
+ .r_en_auto_cdr_rstn = 0,
+ .cfg_oscal_afe = 1,
+ .cfg_pd_osdac_afe = 0,
+ .cfg_resetb_oscal_afe[0] = 0,
+ .cfg_resetb_oscal_afe[1] = 1,
+ .cfg_center_spreading = 0,
+ .cfg_m_cnt_maxval_4_0 = 15,
+ .cfg_ncnt_maxval_7_0 = 32,
+ .cfg_ncnt_maxval_10_8 = 6,
+ .cfg_ssc_en = 1,
+ .cfg_tx2rx_lp_en = 0,
+ .cfg_txlb_en = 0,
+ .cfg_rx2tx_lp_en = 0,
+ .cfg_rxlb_en = 0,
+ .r_tx_pol_inv = args->txinvert,
+ .r_rx_pol_inv = args->rxinvert,
+ };
+
+ *params = init;
+}
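The cmu_sel encoding in the initializer above is compact enough to deserve a worked evaluation; the results below follow purely from the (cmu_sel % 2) << 6 and cmu_sel / 2 arithmetic:

/* Worked evaluation of the cmu_sel arithmetic above:
 *   SPX5_SD10G28_CMU_MAIN (0): cfg_lane_reserve_7_0 bit 6 = 0, cfg_ssc_rtl_clk_sel = 0
 *   SPX5_SD10G28_CMU_AUX1 (1): cfg_lane_reserve_7_0 bit 6 = 1, cfg_ssc_rtl_clk_sel = 0
 *   SPX5_SD10G28_CMU_AUX2 (3): cfg_lane_reserve_7_0 bit 6 = 1, cfg_ssc_rtl_clk_sel = 1
 */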
+
+static void sparx5_sd25g28_reset(void __iomem *regs[],
+ struct sparx5_sd25g28_params *params,
+ u32 sd_index)
+{
+ if (params->reg_rst == 1) {
+ sdx5_rmw_addr(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(1),
+ SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST,
+ sdx5_addr(regs, SD_LANE_25G_SD_LANE_CFG(sd_index)));
+
+ usleep_range(1000, 2000);
+
+ sdx5_rmw_addr(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+ SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST,
+ sdx5_addr(regs, SD_LANE_25G_SD_LANE_CFG(sd_index)));
+ }
+}
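sdx5_rmw() and sdx5_rmw_addr() are driver-local helpers declared in sparx5_serdes.h (not shown in this excerpt); from the call sites they perform a masked read-modify-write on a serdes register. A plausible sketch, assuming readl()/writel()-backed register I/O:

/* Plausible shape of the helper, inferred from its call sites above. */
static void sdx5_rmw_addr(u32 val, u32 mask, void __iomem *addr)
{
	u32 nval = readl(addr);

	nval = (nval & ~mask) | (val & mask);
	writel(nval, addr);
}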
+
+static int sparx5_sd25g28_apply_params(struct sparx5_serdes_macro *macro,
+ struct sparx5_sd25g28_params *params)
+{
+ struct sparx5_serdes_private *priv = macro->priv;
+ void __iomem **regs = priv->regs;
+ struct device *dev = priv->dev;
+ u32 sd_index = macro->stpidx;
+ u32 value;
+
+ sdx5_rmw(SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(1),
+ SD_LANE_25G_SD_LANE_CFG_MACRO_RST,
+ priv,
+ SD_LANE_25G_SD_LANE_CFG(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0xFF),
+ SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+ priv,
+ SD25G_LANE_CMU_FF(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT_SET
+ (params->r_d_width_ctrl_from_hwt) |
+ SD25G_LANE_CMU_1A_R_REG_MANUAL_SET(params->r_reg_manual),
+ SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT |
+ SD25G_LANE_CMU_1A_R_REG_MANUAL,
+ priv,
+ SD25G_LANE_CMU_1A(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_SET
+ (params->cfg_common_reserve_7_0),
+ SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0,
+ priv,
+ SD25G_LANE_CMU_31(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_09_CFG_EN_DUMMY_SET(params->cfg_en_dummy),
+ SD25G_LANE_CMU_09_CFG_EN_DUMMY,
+ priv,
+ SD25G_LANE_CMU_09(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_SET
+ (params->cfg_pll_reserve_3_0),
+ SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0,
+ priv,
+ SD25G_LANE_CMU_13(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN_SET(params->l0_cfg_txcal_en),
+ SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN,
+ priv,
+ SD25G_LANE_CMU_40(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_SET
+ (params->l0_cfg_tx_reserve_15_8),
+ SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8,
+ priv,
+ SD25G_LANE_CMU_46(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_SET
+ (params->l0_cfg_tx_reserve_7_0),
+ SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0,
+ priv,
+ SD25G_LANE_CMU_45(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(0),
+ SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN,
+ priv,
+ SD25G_LANE_CMU_0B(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(1),
+ SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN,
+ priv,
+ SD25G_LANE_CMU_0B(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_19_R_CK_RESETB_SET(0),
+ SD25G_LANE_CMU_19_R_CK_RESETB,
+ priv,
+ SD25G_LANE_CMU_19(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_19_R_CK_RESETB_SET(1),
+ SD25G_LANE_CMU_19_R_CK_RESETB,
+ priv,
+ SD25G_LANE_CMU_19(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_18_R_PLL_RSTN_SET(0),
+ SD25G_LANE_CMU_18_R_PLL_RSTN,
+ priv,
+ SD25G_LANE_CMU_18(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_18_R_PLL_RSTN_SET(1),
+ SD25G_LANE_CMU_18_R_PLL_RSTN,
+ priv,
+ SD25G_LANE_CMU_18(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_SET(params->r_d_width_ctrl_2_0),
+ SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0,
+ priv,
+ SD25G_LANE_CMU_1A(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_SET
+ (params->r_txfifo_ck_div_pmad_2_0) |
+ SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_SET
+ (params->r_rxfifo_ck_div_pmad_2_0),
+ SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0 |
+ SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0,
+ priv,
+ SD25G_LANE_CMU_30(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_SET(params->cfg_pll_lol_set) |
+ SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_SET
+ (params->cfg_vco_div_mode_1_0),
+ SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET |
+ SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0,
+ priv,
+ SD25G_LANE_CMU_0C(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_SET
+ (params->cfg_pre_divsel_1_0),
+ SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0,
+ priv,
+ SD25G_LANE_CMU_0D(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_SET(params->cfg_sel_div_3_0),
+ SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0,
+ priv,
+ SD25G_LANE_CMU_0E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0x00),
+ SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+ priv,
+ SD25G_LANE_CMU_FF(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_SET
+ (params->cfg_pma_tx_ck_bitwidth_2_0),
+ SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0,
+ priv,
+ SD25G_LANE_LANE_0C(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_SET
+ (params->cfg_tx_prediv_1_0),
+ SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0,
+ priv,
+ SD25G_LANE_LANE_01(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_SET
+ (params->cfg_rxdiv_sel_2_0),
+ SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0,
+ priv,
+ SD25G_LANE_LANE_18(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_SET
+ (params->cfg_tx_subrate_2_0),
+ SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0,
+ priv,
+ SD25G_LANE_LANE_2C(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_SET
+ (params->cfg_rx_subrate_2_0),
+ SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0,
+ priv,
+ SD25G_LANE_LANE_28(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_SET(params->cfg_cdrck_en),
+ SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN,
+ priv,
+ SD25G_LANE_LANE_18(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_SET
+ (params->cfg_dfetap_en_5_1),
+ SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1,
+ priv,
+ SD25G_LANE_LANE_0F(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(params->cfg_erramp_pd),
+ SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD,
+ priv,
+ SD25G_LANE_LANE_18(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN_SET(params->cfg_pi_dfe_en),
+ SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN,
+ priv,
+ SD25G_LANE_LANE_1D(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_19_LN_CFG_ECDR_PD_SET(params->cfg_ecdr_pd),
+ SD25G_LANE_LANE_19_LN_CFG_ECDR_PD,
+ priv,
+ SD25G_LANE_LANE_19(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_SET
+ (params->cfg_itx_ipdriver_base_2_0),
+ SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0,
+ priv,
+ SD25G_LANE_LANE_01(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_SET(params->cfg_tap_dly_4_0),
+ SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0,
+ priv,
+ SD25G_LANE_LANE_03(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_SET(params->cfg_tap_adv_3_0),
+ SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0,
+ priv,
+ SD25G_LANE_LANE_06(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_07_LN_CFG_EN_ADV_SET(params->cfg_en_adv) |
+ SD25G_LANE_LANE_07_LN_CFG_EN_DLY_SET(params->cfg_en_dly),
+ SD25G_LANE_LANE_07_LN_CFG_EN_ADV |
+ SD25G_LANE_LANE_07_LN_CFG_EN_DLY,
+ priv,
+ SD25G_LANE_LANE_07(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_SET
+ (params->cfg_tx_reserve_15_8),
+ SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8,
+ priv,
+ SD25G_LANE_LANE_43(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_SET
+ (params->cfg_tx_reserve_7_0),
+ SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0,
+ priv,
+ SD25G_LANE_LANE_42(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_05_LN_CFG_BW_1_0_SET(params->cfg_bw_1_0),
+ SD25G_LANE_LANE_05_LN_CFG_BW_1_0,
+ priv,
+ SD25G_LANE_LANE_05(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_SET
+ (params->cfg_txcal_man_en),
+ SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN,
+ priv,
+ SD25G_LANE_LANE_0B(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_SET
+ (params->cfg_txcal_shift_code_5_0),
+ SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0,
+ priv,
+ SD25G_LANE_LANE_0A(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_SET
+ (params->cfg_txcal_valid_sel_3_0),
+ SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0,
+ priv,
+ SD25G_LANE_LANE_09(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_SET(params->cfg_cdr_kf_2_0),
+ SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0,
+ priv,
+ SD25G_LANE_LANE_1A(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_SET(params->cfg_cdr_m_7_0),
+ SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0,
+ priv,
+ SD25G_LANE_LANE_1B(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_SET(params->cfg_pi_bw_3_0),
+ SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0,
+ priv,
+ SD25G_LANE_LANE_2B(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_SET
+ (params->cfg_dis_2ndorder),
+ SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER,
+ priv,
+ SD25G_LANE_LANE_2C(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_SET(params->cfg_ctle_rstn),
+ SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN,
+ priv,
+ SD25G_LANE_LANE_2E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_SET
+ (params->cfg_itx_ipcml_base_1_0),
+ SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0,
+ priv,
+ SD25G_LANE_LANE_00(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_SET
+ (params->cfg_rx_reserve_7_0),
+ SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0,
+ priv,
+ SD25G_LANE_LANE_44(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_SET
+ (params->cfg_rx_reserve_15_8),
+ SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8,
+ priv,
+ SD25G_LANE_LANE_45(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_SET(params->cfg_dfeck_en) |
+ SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_SET(params->cfg_rxterm_2_0),
+ SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN |
+ SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0,
+ priv,
+ SD25G_LANE_LANE_0D(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_SET
+ (params->cfg_vga_ctrl_byp_4_0),
+ SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0,
+ priv,
+ SD25G_LANE_LANE_21(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_SET
+ (params->cfg_eqr_force_3_0),
+ SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0,
+ priv,
+ SD25G_LANE_LANE_22(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_SET
+ (params->cfg_eqc_force_3_0) |
+ SD25G_LANE_LANE_1C_LN_CFG_DFE_PD_SET(params->cfg_dfe_pd),
+ SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0 |
+ SD25G_LANE_LANE_1C_LN_CFG_DFE_PD,
+ priv,
+ SD25G_LANE_LANE_1C(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN_SET
+ (params->cfg_sum_setcm_en),
+ SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN,
+ priv,
+ SD25G_LANE_LANE_1E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_SET
+ (params->cfg_init_pos_iscan_6_0),
+ SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0,
+ priv,
+ SD25G_LANE_LANE_25(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_SET
+ (params->cfg_init_pos_ipi_6_0),
+ SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0,
+ priv,
+ SD25G_LANE_LANE_26(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(params->cfg_erramp_pd),
+ SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD,
+ priv,
+ SD25G_LANE_LANE_18(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_SET
+ (params->cfg_dfedig_m_2_0),
+ SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0,
+ priv,
+ SD25G_LANE_LANE_0E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG_SET(params->cfg_en_dfedig),
+ SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG,
+ priv,
+ SD25G_LANE_LANE_0E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_40_LN_R_TX_POL_INV_SET(params->r_tx_pol_inv) |
+ SD25G_LANE_LANE_40_LN_R_RX_POL_INV_SET(params->r_rx_pol_inv),
+ SD25G_LANE_LANE_40_LN_R_TX_POL_INV |
+ SD25G_LANE_LANE_40_LN_R_RX_POL_INV,
+ priv,
+ SD25G_LANE_LANE_40(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN_SET(params->cfg_rx2tx_lp_en) |
+ SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_SET(params->cfg_tx2rx_lp_en),
+ SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN |
+ SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN,
+ priv,
+ SD25G_LANE_LANE_04(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN_SET(params->cfg_rxlb_en),
+ SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN,
+ priv,
+ SD25G_LANE_LANE_1E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_19_LN_CFG_TXLB_EN_SET(params->cfg_txlb_en),
+ SD25G_LANE_LANE_19_LN_CFG_TXLB_EN,
+ priv,
+ SD25G_LANE_LANE_19(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(0),
+ SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG,
+ priv,
+ SD25G_LANE_LANE_2E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(1),
+ SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG,
+ priv,
+ SD25G_LANE_LANE_2E(sd_index));
+
+ sdx5_rmw(SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(0),
+ SD_LANE_25G_SD_LANE_CFG_MACRO_RST,
+ priv,
+ SD_LANE_25G_SD_LANE_CFG(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(0),
+ SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN,
+ priv,
+ SD25G_LANE_LANE_1C(sd_index));
+
+ usleep_range(1000, 2000);
+
+ sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(1),
+ SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN,
+ priv,
+ SD25G_LANE_LANE_1C(sd_index));
+
+ usleep_range(10000, 20000);
+
+ sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0xff),
+ SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+ priv,
+ SD25G_LANE_CMU_FF(sd_index));
+
+ value = readl(sdx5_addr(regs, SD25G_LANE_CMU_C0(sd_index)));
+ value = SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(value);
+
+ if (value) {
+ dev_err(dev, "25G PLL Loss of Lock: 0x%x\n", value);
+ return -EINVAL;
+ }
+
+ value = readl(sdx5_addr(regs, SD_LANE_25G_SD_LANE_STAT(sd_index)));
+ value = SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_GET(value);
+
+ if (value != 0x1) {
+ dev_err(dev, "25G PMA Reset failed: 0x%x\n", value);
+ return -EINVAL;
+ }
+
+ sdx5_rmw(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_SET(0x1),
+ SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS,
+ priv,
+ SD25G_LANE_CMU_2A(sd_index));
+
+ sdx5_rmw(SD_LANE_25G_SD_SER_RST_SER_RST_SET(0x0),
+ SD_LANE_25G_SD_SER_RST_SER_RST,
+ priv,
+ SD_LANE_25G_SD_SER_RST(sd_index));
+
+ sdx5_rmw(SD_LANE_25G_SD_DES_RST_DES_RST_SET(0x0),
+ SD_LANE_25G_SD_DES_RST_DES_RST,
+ priv,
+ SD_LANE_25G_SD_DES_RST(sd_index));
+
+ sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0),
+ SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+ priv,
+ SD25G_LANE_CMU_FF(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_SET
+ (params->cfg_alos_thr_2_0),
+ SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0,
+ priv,
+ SD25G_LANE_LANE_2D(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ_SET(0),
+ SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ,
+ priv,
+ SD25G_LANE_LANE_2E(sd_index));
+
+ sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_PD_SQ_SET(0),
+ SD25G_LANE_LANE_2E_LN_CFG_PD_SQ,
+ priv,
+ SD25G_LANE_LANE_2E(sd_index));
+
+ return 0;
+}
+
+static void sparx5_sd10g28_reset(void __iomem *regs[], u32 lane_index)
+{
+ /* Note: SerDes SD10G_LANE_1 is configured in 10G_LAN mode */
+ sdx5_rmw_addr(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(1),
+ SD_LANE_SD_LANE_CFG_EXT_CFG_RST,
+ sdx5_addr(regs, SD_LANE_SD_LANE_CFG(lane_index)));
+
+ usleep_range(1000, 2000);
+
+ sdx5_rmw_addr(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+ SD_LANE_SD_LANE_CFG_EXT_CFG_RST,
+ sdx5_addr(regs, SD_LANE_SD_LANE_CFG(lane_index)));
+}
+
+static int sparx5_sd10g28_apply_params(struct sparx5_serdes_macro *macro,
+ struct sparx5_sd10g28_params *params)
+{
+ struct sparx5_serdes_private *priv = macro->priv;
+ void __iomem **regs = priv->regs;
+ struct device *dev = priv->dev;
+ u32 lane_index = macro->sidx;
+ u32 sd_index = macro->stpidx;
+ void __iomem *sd_inst;
+ u32 value;
+
+ if (params->is_6g)
+ sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, sd_index);
+ else
+ sd_inst = sdx5_inst_get(priv, TARGET_SD10G_LANE, sd_index);
+
+ sdx5_rmw(SD_LANE_SD_LANE_CFG_MACRO_RST_SET(1),
+ SD_LANE_SD_LANE_CFG_MACRO_RST,
+ priv,
+ SD_LANE_SD_LANE_CFG(lane_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT_SET(0x0) |
+ SD10G_LANE_LANE_93_R_REG_MANUAL_SET(0x1) |
+ SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT_SET(0x1) |
+ SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT_SET(0x1) |
+ SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL_SET(0x0),
+ SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT |
+ SD10G_LANE_LANE_93_R_REG_MANUAL |
+ SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT |
+ SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT |
+ SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL,
+ sd_inst,
+ SD10G_LANE_LANE_93(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_94_R_ISCAN_REG_SET(0x1) |
+ SD10G_LANE_LANE_94_R_TXEQ_REG_SET(0x1) |
+ SD10G_LANE_LANE_94_R_MISC_REG_SET(0x1) |
+ SD10G_LANE_LANE_94_R_SWING_REG_SET(0x1),
+ SD10G_LANE_LANE_94_R_ISCAN_REG |
+ SD10G_LANE_LANE_94_R_TXEQ_REG |
+ SD10G_LANE_LANE_94_R_MISC_REG |
+ SD10G_LANE_LANE_94_R_SWING_REG,
+ sd_inst,
+ SD10G_LANE_LANE_94(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_9E_R_RXEQ_REG_SET(0x1),
+ SD10G_LANE_LANE_9E_R_RXEQ_REG,
+ sd_inst,
+ SD10G_LANE_LANE_9E(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_A1_R_SSC_FROM_HWT_SET(0x0) |
+ SD10G_LANE_LANE_A1_R_CDR_FROM_HWT_SET(0x0) |
+ SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT_SET(0x1),
+ SD10G_LANE_LANE_A1_R_SSC_FROM_HWT |
+ SD10G_LANE_LANE_A1_R_CDR_FROM_HWT |
+ SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT,
+ sd_inst,
+ SD10G_LANE_LANE_A1(sd_index));
+
+ sdx5_rmw(SD_LANE_SD_LANE_CFG_RX_REF_SEL_SET(params->cmu_sel) |
+ SD_LANE_SD_LANE_CFG_TX_REF_SEL_SET(params->cmu_sel),
+ SD_LANE_SD_LANE_CFG_RX_REF_SEL |
+ SD_LANE_SD_LANE_CFG_TX_REF_SEL,
+ priv,
+ SD_LANE_SD_LANE_CFG(lane_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_SET
+ (params->cfg_lane_reserve_7_0),
+ SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0,
+ sd_inst,
+ SD10G_LANE_LANE_40(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL_SET
+ (params->cfg_ssc_rtl_clk_sel),
+ SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL,
+ sd_inst,
+ SD10G_LANE_LANE_50(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_35_CFG_TXRATE_1_0_SET
+ (params->cfg_txrate_1_0) |
+ SD10G_LANE_LANE_35_CFG_RXRATE_1_0_SET
+ (params->cfg_rxrate_1_0),
+ SD10G_LANE_LANE_35_CFG_TXRATE_1_0 |
+ SD10G_LANE_LANE_35_CFG_RXRATE_1_0,
+ sd_inst,
+ SD10G_LANE_LANE_35(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_SET
+ (params->r_d_width_ctrl_2_0),
+ SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0,
+ sd_inst,
+ SD10G_LANE_LANE_94(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET
+ (params->cfg_pma_tx_ck_bitwidth_2_0),
+ SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0,
+ sd_inst,
+ SD10G_LANE_LANE_01(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_SET
+ (params->cfg_rxdiv_sel_2_0),
+ SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0,
+ sd_inst,
+ SD10G_LANE_LANE_30(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_SET
+ (params->r_pcs2pma_phymode_4_0),
+ SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0,
+ sd_inst,
+ SD10G_LANE_LANE_A2(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_13_CFG_CDRCK_EN_SET(params->cfg_cdrck_en),
+ SD10G_LANE_LANE_13_CFG_CDRCK_EN,
+ sd_inst,
+ SD10G_LANE_LANE_13(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_23_CFG_DFECK_EN_SET
+ (params->cfg_dfeck_en) |
+ SD10G_LANE_LANE_23_CFG_DFE_PD_SET(params->cfg_dfe_pd) |
+ SD10G_LANE_LANE_23_CFG_ERRAMP_PD_SET
+ (params->cfg_erramp_pd),
+ SD10G_LANE_LANE_23_CFG_DFECK_EN |
+ SD10G_LANE_LANE_23_CFG_DFE_PD |
+ SD10G_LANE_LANE_23_CFG_ERRAMP_PD,
+ sd_inst,
+ SD10G_LANE_LANE_23(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_SET
+ (params->cfg_dfetap_en_5_1),
+ SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1,
+ sd_inst,
+ SD10G_LANE_LANE_22(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_DFE_EN_SET
+ (params->cfg_pi_DFE_en),
+ SD10G_LANE_LANE_1A_CFG_PI_DFE_EN,
+ sd_inst,
+ SD10G_LANE_LANE_1A(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_02_CFG_EN_ADV_SET(params->cfg_en_adv) |
+ SD10G_LANE_LANE_02_CFG_EN_MAIN_SET(params->cfg_en_main) |
+ SD10G_LANE_LANE_02_CFG_EN_DLY_SET(params->cfg_en_dly) |
+ SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_SET
+ (params->cfg_tap_adv_3_0),
+ SD10G_LANE_LANE_02_CFG_EN_ADV |
+ SD10G_LANE_LANE_02_CFG_EN_MAIN |
+ SD10G_LANE_LANE_02_CFG_EN_DLY |
+ SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0,
+ sd_inst,
+ SD10G_LANE_LANE_02(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_03_CFG_TAP_MAIN_SET(params->cfg_tap_main),
+ SD10G_LANE_LANE_03_CFG_TAP_MAIN,
+ sd_inst,
+ SD10G_LANE_LANE_03(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_SET
+ (params->cfg_tap_dly_4_0),
+ SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0,
+ sd_inst,
+ SD10G_LANE_LANE_04(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_SET
+ (params->cfg_vga_ctrl_3_0),
+ SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0,
+ sd_inst,
+ SD10G_LANE_LANE_2F(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_SET
+ (params->cfg_vga_cp_2_0),
+ SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0,
+ sd_inst,
+ SD10G_LANE_LANE_2F(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_SET
+ (params->cfg_eq_res_3_0),
+ SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0,
+ sd_inst,
+ SD10G_LANE_LANE_0B(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0D_CFG_EQR_BYP_SET(params->cfg_eq_r_byp),
+ SD10G_LANE_LANE_0D_CFG_EQR_BYP,
+ sd_inst,
+ SD10G_LANE_LANE_0D(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_SET
+ (params->cfg_eq_c_force_3_0) |
+ SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_SET
+ (params->cfg_sum_setcm_en),
+ SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0 |
+ SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN,
+ sd_inst,
+ SD10G_LANE_LANE_0E(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_23_CFG_EN_DFEDIG_SET
+ (params->cfg_en_dfedig),
+ SD10G_LANE_LANE_23_CFG_EN_DFEDIG,
+ sd_inst,
+ SD10G_LANE_LANE_23(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_EN_PREEMPH_SET
+ (params->cfg_en_preemph),
+ SD10G_LANE_LANE_06_CFG_EN_PREEMPH,
+ sd_inst,
+ SD10G_LANE_LANE_06(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_SET
+ (params->cfg_itx_ippreemp_base_1_0) |
+ SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_SET
+ (params->cfg_itx_ipdriver_base_2_0),
+ SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0 |
+ SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0,
+ sd_inst,
+ SD10G_LANE_LANE_33(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_SET
+ (params->cfg_ibias_tune_reserve_5_0),
+ SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0,
+ sd_inst,
+ SD10G_LANE_LANE_52(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_37_CFG_TXSWING_HALF_SET
+ (params->cfg_txswing_half),
+ SD10G_LANE_LANE_37_CFG_TXSWING_HALF,
+ sd_inst,
+ SD10G_LANE_LANE_37(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_SET
+ (params->cfg_dis_2nd_order),
+ SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER,
+ sd_inst,
+ SD10G_LANE_LANE_3C(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_39_CFG_RX_SSC_LH_SET
+ (params->cfg_rx_ssc_lh),
+ SD10G_LANE_LANE_39_CFG_RX_SSC_LH,
+ sd_inst,
+ SD10G_LANE_LANE_39(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_SET
+ (params->cfg_pi_floop_steps_1_0),
+ SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0,
+ sd_inst,
+ SD10G_LANE_LANE_1A(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_SET
+ (params->cfg_pi_ext_dac_23_16),
+ SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16,
+ sd_inst,
+ SD10G_LANE_LANE_16(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_SET
+ (params->cfg_pi_ext_dac_15_8),
+ SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8,
+ sd_inst,
+ SD10G_LANE_LANE_15(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_SET
+ (params->cfg_iscan_ext_dac_7_0),
+ SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0,
+ sd_inst,
+ SD10G_LANE_LANE_26(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_SET
+ (params->cfg_cdr_kf_gen1_2_0),
+ SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0,
+ sd_inst,
+ SD10G_LANE_LANE_42(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_SET
+ (params->r_cdr_m_gen1_7_0),
+ SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0,
+ sd_inst,
+ SD10G_LANE_LANE_0F(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_SET
+ (params->cfg_pi_bw_gen1_3_0),
+ SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0,
+ sd_inst,
+ SD10G_LANE_LANE_24(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_SET
+ (params->cfg_pi_ext_dac_7_0),
+ SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0,
+ sd_inst,
+ SD10G_LANE_LANE_14(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_STEPS_SET(params->cfg_pi_steps),
+ SD10G_LANE_LANE_1A_CFG_PI_STEPS,
+ sd_inst,
+ SD10G_LANE_LANE_1A(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_SET
+ (params->cfg_mp_max_3_0),
+ SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0,
+ sd_inst,
+ SD10G_LANE_LANE_3A(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG_SET
+ (params->cfg_rstn_dfedig),
+ SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG,
+ sd_inst,
+ SD10G_LANE_LANE_31(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_SET
+ (params->cfg_alos_thr_3_0),
+ SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0,
+ sd_inst,
+ SD10G_LANE_LANE_48(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_SET
+ (params->cfg_predrv_slewrate_1_0),
+ SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0,
+ sd_inst,
+ SD10G_LANE_LANE_36(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_SET
+ (params->cfg_itx_ipcml_base_1_0),
+ SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0,
+ sd_inst,
+ SD10G_LANE_LANE_32(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_SET
+ (params->cfg_ip_pre_base_1_0),
+ SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0,
+ sd_inst,
+ SD10G_LANE_LANE_37(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_SET
+ (params->cfg_lane_reserve_15_8),
+ SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8,
+ sd_inst,
+ SD10G_LANE_LANE_41(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_SET
+ (params->r_en_auto_cdr_rstn),
+ SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN,
+ sd_inst,
+ SD10G_LANE_LANE_9E(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET
+ (params->cfg_oscal_afe) |
+ SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE_SET
+ (params->cfg_pd_osdac_afe),
+ SD10G_LANE_LANE_0C_CFG_OSCAL_AFE |
+ SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE,
+ sd_inst,
+ SD10G_LANE_LANE_0C(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET
+ (params->cfg_resetb_oscal_afe[0]),
+ SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE,
+ sd_inst,
+ SD10G_LANE_LANE_0B(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET
+ (params->cfg_resetb_oscal_afe[1]),
+ SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE,
+ sd_inst,
+ SD10G_LANE_LANE_0B(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_83_R_TX_POL_INV_SET
+ (params->r_tx_pol_inv) |
+ SD10G_LANE_LANE_83_R_RX_POL_INV_SET
+ (params->r_rx_pol_inv),
+ SD10G_LANE_LANE_83_R_TX_POL_INV |
+ SD10G_LANE_LANE_83_R_RX_POL_INV,
+ sd_inst,
+ SD10G_LANE_LANE_83(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN_SET
+ (params->cfg_rx2tx_lp_en) |
+ SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN_SET
+ (params->cfg_tx2rx_lp_en),
+ SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN |
+ SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN,
+ sd_inst,
+ SD10G_LANE_LANE_06(sd_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_0E_CFG_RXLB_EN_SET(params->cfg_rxlb_en) |
+ SD10G_LANE_LANE_0E_CFG_TXLB_EN_SET(params->cfg_txlb_en),
+ SD10G_LANE_LANE_0E_CFG_RXLB_EN |
+ SD10G_LANE_LANE_0E_CFG_TXLB_EN,
+ sd_inst,
+ SD10G_LANE_LANE_0E(sd_index));
+
+ sdx5_rmw(SD_LANE_SD_LANE_CFG_MACRO_RST_SET(0),
+ SD_LANE_SD_LANE_CFG_MACRO_RST,
+ priv,
+ SD_LANE_SD_LANE_CFG(lane_index));
+
+ sdx5_inst_rmw(SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(1),
+ SD10G_LANE_LANE_50_CFG_SSC_RESETB,
+ sd_inst,
+ SD10G_LANE_LANE_50(sd_index));
+
+ sdx5_rmw(SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(1),
+ SD10G_LANE_LANE_50_CFG_SSC_RESETB,
+ priv,
+ SD10G_LANE_LANE_50(sd_index));
+
+ sdx5_rmw(SD_LANE_MISC_SD_125_RST_DIS_SET(params->fx_100),
+ SD_LANE_MISC_SD_125_RST_DIS,
+ priv,
+ SD_LANE_MISC(lane_index));
+
+ sdx5_rmw(SD_LANE_MISC_RX_ENA_SET(params->fx_100),
+ SD_LANE_MISC_RX_ENA,
+ priv,
+ SD_LANE_MISC(lane_index));
+
+ sdx5_rmw(SD_LANE_MISC_MUX_ENA_SET(params->fx_100),
+ SD_LANE_MISC_MUX_ENA,
+ priv,
+ SD_LANE_MISC(lane_index));
+
+ usleep_range(3000, 6000);
+
+ value = readl(sdx5_addr(regs, SD_LANE_SD_LANE_STAT(lane_index)));
+ value = SD_LANE_SD_LANE_STAT_PMA_RST_DONE_GET(value);
+ if (value != 1) {
+ dev_err(dev, "10G PMA Reset failed: 0x%x\n", value);
+ return -EINVAL;
+ }
+
+ sdx5_rmw(SD_LANE_SD_SER_RST_SER_RST_SET(0x0),
+ SD_LANE_SD_SER_RST_SER_RST,
+ priv,
+ SD_LANE_SD_SER_RST(lane_index));
+
+ sdx5_rmw(SD_LANE_SD_DES_RST_DES_RST_SET(0x0),
+ SD_LANE_SD_DES_RST_DES_RST,
+ priv,
+ SD_LANE_SD_DES_RST(lane_index));
+
+ return 0;
+}
+
+static int sparx5_sd25g28_config(struct sparx5_serdes_macro *macro, bool reset)
+{
+ struct sparx5_sd25g28_media_preset media = media_presets_25g[macro->media];
+ struct sparx5_sd25g28_mode_preset mode;
+ struct sparx5_sd25g28_args args = {
+ .rxinvert = 1,
+ .txinvert = 0,
+ .txswing = 240,
+ .com_pll_reserve = 0xf,
+ .reg_rst = reset,
+ };
+ struct sparx5_sd25g28_params params;
+ int err;
+
+ err = sparx5_sd25g28_get_mode_preset(macro, &mode);
+ if (err)
+ return err;
+ sparx5_sd25g28_get_params(macro, &media, &mode, &args, &params);
+ sparx5_sd25g28_reset(macro->priv->regs, &params, macro->stpidx);
+ return sparx5_sd25g28_apply_params(macro, &params);
+}
+
+static int sparx5_sd10g28_config(struct sparx5_serdes_macro *macro, bool reset)
+{
+ struct sparx5_sd10g28_media_preset media = media_presets_10g[macro->media];
+ struct sparx5_sd10g28_mode_preset mode;
+ struct sparx5_sd10g28_params params;
+ struct sparx5_sd10g28_args args = {
+ .is_6g = (macro->serdestype == SPX5_SDT_6G),
+ .txinvert = 0,
+ .rxinvert = 1,
+ .txswing = 240,
+ .reg_rst = reset,
+ };
+ int err;
+
+ err = sparx5_sd10g28_get_mode_preset(macro, &mode, &args);
+ if (err)
+ return err;
+ sparx5_sd10g28_get_params(macro, &media, &mode, &args, &params);
+ sparx5_sd10g28_reset(macro->priv->regs, macro->sidx);
+ return sparx5_sd10g28_apply_params(macro, &params);
+}
+
+/* Power the serdes TX driver up or down (pwdn == true powers it down) */
+static int sparx5_serdes_power_save(struct sparx5_serdes_macro *macro, u32 pwdn)
+{
+ struct sparx5_serdes_private *priv = macro->priv;
+ void __iomem *sd_inst;
+
+ if (macro->serdestype == SPX5_SDT_6G)
+ sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, macro->stpidx);
+ else if (macro->serdestype == SPX5_SDT_10G)
+ sd_inst = sdx5_inst_get(priv, TARGET_SD10G_LANE, macro->stpidx);
+ else
+ sd_inst = sdx5_inst_get(priv, TARGET_SD25G_LANE, macro->stpidx);
+
+ if (macro->serdestype == SPX5_SDT_25G) {
+ sdx5_inst_rmw(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_SET(pwdn),
+ SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER,
+ sd_inst,
+ SD25G_LANE_LANE_04(0));
+ } else {
+ /* 6G and 10G */
+ sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(pwdn),
+ SD10G_LANE_LANE_06_CFG_PD_DRIVER,
+ sd_inst,
+ SD10G_LANE_LANE_06(0));
+ }
+ return 0;
+}
+
+static int sparx5_serdes_clock_config(struct sparx5_serdes_macro *macro)
+{
+ struct sparx5_serdes_private *priv = macro->priv;
+
+ if (macro->serdesmode == SPX5_SD_MODE_100FX) {
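+ /* Encode the core clock rate for the 100FX lane clock: 2 for 250 MHz,
+ * 1 for 500 MHz and 0 otherwise (assumed to be the 625 MHz core clock).
+ */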
+ u32 freq = priv->coreclock == 250000000 ? 2 :
+ priv->coreclock == 500000000 ? 1 : 0;
+
+ sdx5_rmw(SD_LANE_MISC_CORE_CLK_FREQ_SET(freq),
+ SD_LANE_MISC_CORE_CLK_FREQ,
+ priv,
+ SD_LANE_MISC(macro->sidx));
+ }
+ return 0;
+}
+
+static int sparx5_cmu_apply_cfg(struct sparx5_serdes_private *priv,
+ u32 cmu_idx,
+ void __iomem *cmu_tgt,
+ void __iomem *cmu_cfg_tgt,
+ u32 spd10g)
+{
+ void __iomem **regs = priv->regs;
+ struct device *dev = priv->dev;
+ int value;
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(1),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(1),
+ SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(0x0),
+ SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT |
+ SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_EN_RATECHG_CTRL,
+ cmu_tgt,
+ SD_CMU_CMU_45(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(0),
+ SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0,
+ cmu_tgt,
+ SD_CMU_CMU_47(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(0),
+ SD_CMU_CMU_1B_CFG_RESERVE_7_0,
+ cmu_tgt,
+ SD_CMU_CMU_1B(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_JC_BYP_SET(0x1),
+ SD_CMU_CMU_0D_CFG_JC_BYP,
+ cmu_tgt,
+ SD_CMU_CMU_0D(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(1),
+ SD_CMU_CMU_1F_CFG_VTUNE_SEL,
+ cmu_tgt,
+ SD_CMU_CMU_1F(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(3),
+ SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0,
+ cmu_tgt,
+ SD_CMU_CMU_00(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(3),
+ SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0,
+ cmu_tgt,
+ SD_CMU_CMU_05(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(1),
+ SD_CMU_CMU_30_R_PLL_DLOL_EN,
+ cmu_tgt,
+ SD_CMU_CMU_30(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_09_CFG_SW_10G_SET(spd10g),
+ SD_CMU_CMU_09_CFG_SW_10G,
+ cmu_tgt,
+ SD_CMU_CMU_09(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ msleep(20);
+
+ sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(0),
+ SD_CMU_CMU_44_R_PLL_RSTN,
+ cmu_tgt,
+ SD_CMU_CMU_44(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(1),
+ SD_CMU_CMU_44_R_PLL_RSTN,
+ cmu_tgt,
+ SD_CMU_CMU_44(cmu_idx));
+
+ msleep(20);
+
+ value = readl(sdx5_addr(regs, SD_CMU_CMU_E0(cmu_idx)));
+ value = SD_CMU_CMU_E0_PLL_LOL_UDL_GET(value);
+
+ if (value) {
+ dev_err(dev, "CMU PLL Loss of Lock: 0x%x\n", value);
+ return -EINVAL;
+ }
+
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(0),
+ SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD,
+ cmu_tgt,
+ SD_CMU_CMU_0D(cmu_idx));
+ return 0;
+}
+
+static int sparx5_cmu_cfg(struct sparx5_serdes_private *priv, u32 cmu_idx)
+{
+ void __iomem *cmu_tgt, *cmu_cfg_tgt;
+ u32 spd10g = 1;
+
+ if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
+ cmu_idx == 10 || cmu_idx == 13) {
+ spd10g = 0;
+ }
+
+ cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
+ cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
+
+ return sparx5_cmu_apply_cfg(priv, cmu_idx, cmu_tgt, cmu_cfg_tgt, spd10g);
+}
+
+static int sparx5_serdes_cmu_enable(struct sparx5_serdes_private *priv)
+{
+ int idx, err = 0;
+
+ if (!priv->cmu_enabled) {
+ for (idx = 0; idx < SPX5_CMU_MAX; idx++) {
+ err = sparx5_cmu_cfg(priv, idx);
+ if (err) {
+ dev_err(priv->dev, "CMU %u, error: %d\n", idx, err);
+ goto leave;
+ }
+ }
+ priv->cmu_enabled = true;
+ }
+leave:
+ return err;
+}
+
+static int sparx5_serdes_get_serdesmode(phy_interface_t portmode, int speed)
+{
+ switch (portmode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (speed == SPEED_2500)
+ return SPX5_SD_MODE_2G5;
+ if (speed == SPEED_100)
+ return SPX5_SD_MODE_100FX;
+ return SPX5_SD_MODE_1000BASEX;
+ case PHY_INTERFACE_MODE_SGMII:
+ /* The same Serdes mode is used for both SGMII and 1000BaseX */
+ return SPX5_SD_MODE_1000BASEX;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return SPX5_SD_MODE_QSGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return SPX5_SD_MODE_SFI;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sparx5_serdes_config(struct sparx5_serdes_macro *macro)
+{
+ struct device *dev = macro->priv->dev;
+ int serdesmode;
+ int err;
+
+ err = sparx5_serdes_cmu_enable(macro->priv);
+ if (err)
+ return err;
+
+ serdesmode = sparx5_serdes_get_serdesmode(macro->portmode, macro->speed);
+ if (serdesmode < 0) {
+ dev_err(dev, "SerDes %u, interface not supported: %s\n",
+ macro->sidx,
+ phy_modes(macro->portmode));
+ return serdesmode;
+ }
+ macro->serdesmode = serdesmode;
+
+ sparx5_serdes_clock_config(macro);
+
+ if (macro->serdestype == SPX5_SDT_25G)
+ err = sparx5_sd25g28_config(macro, false);
+ else
+ err = sparx5_sd10g28_config(macro, false);
+ if (err) {
+ dev_err(dev, "SerDes %u, config error: %d\n",
+ macro->sidx, err);
+ }
+ return err;
+}
+
+static int sparx5_serdes_power_on(struct phy *phy)
+{
+ struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+ return sparx5_serdes_power_save(macro, false);
+}
+
+static int sparx5_serdes_power_off(struct phy *phy)
+{
+ struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+ return sparx5_serdes_power_save(macro, true);
+}
+
+static int sparx5_serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct sparx5_serdes_macro *macro;
+
+ if (mode != PHY_MODE_ETHERNET)
+ return -EINVAL;
+
+ switch (submode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ macro = phy_get_drvdata(phy);
+ macro->portmode = submode;
+ sparx5_serdes_config(macro);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sparx5_serdes_set_media(struct phy *phy, enum phy_media media)
+{
+ struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+ if (media != macro->media) {
+ macro->media = media;
+ if (macro->serdesmode != SPX5_SD_MODE_NONE)
+ sparx5_serdes_config(macro);
+ }
+ return 0;
+}
+
+static int sparx5_serdes_set_speed(struct phy *phy, int speed)
+{
+ struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+ if (macro->sidx < SPX5_SERDES_10G_START && speed > SPEED_5000)
+ return -EINVAL;
+ if (macro->sidx < SPX5_SERDES_25G_START && speed > SPEED_10000)
+ return -EINVAL;
+ if (speed != macro->speed) {
+ macro->speed = speed;
+ if (macro->serdesmode != SPX5_SD_MODE_NONE)
+ sparx5_serdes_config(macro);
+ }
+ return 0;
+}
+
+static int sparx5_serdes_reset(struct phy *phy)
+{
+ struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+ int err;
+
+ err = sparx5_serdes_cmu_enable(macro->priv);
+ if (err)
+ return err;
+ if (macro->serdestype == SPX5_SDT_25G)
+ err = sparx5_sd25g28_config(macro, true);
+ else
+ err = sparx5_sd10g28_config(macro, true);
+ if (err) {
+ dev_err(&phy->dev, "SerDes %u, reset error: %d\n",
+ macro->sidx, err);
+ }
+ return err;
+}
+
+static int sparx5_serdes_validate(struct phy *phy, enum phy_mode mode,
+ int submode,
+ union phy_configure_opts *opts)
+{
+ struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+ if (mode != PHY_MODE_ETHERNET)
+ return -EINVAL;
+
+ if (macro->speed == 0)
+ return -EINVAL;
+
+ if (macro->sidx < SPX5_SERDES_10G_START && macro->speed > SPEED_5000)
+ return -EINVAL;
+ if (macro->sidx < SPX5_SERDES_25G_START && macro->speed > SPEED_10000)
+ return -EINVAL;
+
+ switch (submode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (macro->speed != SPEED_100 && /* This is for 100BASE-FX */
+ macro->speed != SPEED_1000)
+ return -EINVAL;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_QSGMII:
+ if (macro->speed >= SPEED_5000)
+ return -EINVAL;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ if (macro->speed < SPEED_5000)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct phy_ops sparx5_serdes_ops = {
+ .power_on = sparx5_serdes_power_on,
+ .power_off = sparx5_serdes_power_off,
+ .set_mode = sparx5_serdes_set_mode,
+ .set_media = sparx5_serdes_set_media,
+ .set_speed = sparx5_serdes_set_speed,
+ .reset = sparx5_serdes_reset,
+ .validate = sparx5_serdes_validate,
+ .owner = THIS_MODULE,
+};
+
+static int sparx5_phy_create(struct sparx5_serdes_private *priv,
+ int idx, struct phy **phy)
+{
+ struct sparx5_serdes_macro *macro;
+
+ *phy = devm_phy_create(priv->dev, NULL, &sparx5_serdes_ops);
+ if (IS_ERR(*phy))
+ return PTR_ERR(*phy);
+
+ macro = devm_kzalloc(priv->dev, sizeof(*macro), GFP_KERNEL);
+ if (!macro)
+ return -ENOMEM;
+
+ macro->sidx = idx;
+ macro->priv = priv;
+ macro->speed = SPEED_UNKNOWN;
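+ /* Map the flat serdes index onto a type-relative index: with the
+ * iomap below, and assuming SPX5_SERDES_10G_START is 13 and
+ * SPX5_SERDES_25G_START is 25, macros 0-12 are 6G, 13-24 are 10G
+ * and 25-32 are 25G instances.
+ */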
+ if (idx < SPX5_SERDES_10G_START) {
+ macro->serdestype = SPX5_SDT_6G;
+ macro->stpidx = macro->sidx;
+ } else if (idx < SPX5_SERDES_25G_START) {
+ macro->serdestype = SPX5_SDT_10G;
+ macro->stpidx = macro->sidx - SPX5_SERDES_10G_START;
+ } else {
+ macro->serdestype = SPX5_SDT_25G;
+ macro->stpidx = macro->sidx - SPX5_SERDES_25G_START;
+ }
+
+ phy_set_drvdata(*phy, macro);
+
+ return 0;
+}
+
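+/* Map of register target ids to offsets within the single "serdes" IO
+ * range; the absolute addresses in the comments assume the range starts
+ * at 0x610808000.
+ */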
+static struct sparx5_serdes_io_resource sparx5_serdes_iomap[] = {
+ { TARGET_SD_CMU, 0x0 }, /* 0x610808000: sd_cmu_0 */
+ { TARGET_SD_CMU + 1, 0x8000 }, /* 0x610810000: sd_cmu_1 */
+ { TARGET_SD_CMU + 2, 0x10000 }, /* 0x610818000: sd_cmu_2 */
+ { TARGET_SD_CMU + 3, 0x18000 }, /* 0x610820000: sd_cmu_3 */
+ { TARGET_SD_CMU + 4, 0x20000 }, /* 0x610828000: sd_cmu_4 */
+ { TARGET_SD_CMU + 5, 0x28000 }, /* 0x610830000: sd_cmu_5 */
+ { TARGET_SD_CMU + 6, 0x30000 }, /* 0x610838000: sd_cmu_6 */
+ { TARGET_SD_CMU + 7, 0x38000 }, /* 0x610840000: sd_cmu_7 */
+ { TARGET_SD_CMU + 8, 0x40000 }, /* 0x610848000: sd_cmu_8 */
+ { TARGET_SD_CMU_CFG, 0x48000 }, /* 0x610850000: sd_cmu_cfg_0 */
+ { TARGET_SD_CMU_CFG + 1, 0x50000 }, /* 0x610858000: sd_cmu_cfg_1 */
+ { TARGET_SD_CMU_CFG + 2, 0x58000 }, /* 0x610860000: sd_cmu_cfg_2 */
+ { TARGET_SD_CMU_CFG + 3, 0x60000 }, /* 0x610868000: sd_cmu_cfg_3 */
+ { TARGET_SD_CMU_CFG + 4, 0x68000 }, /* 0x610870000: sd_cmu_cfg_4 */
+ { TARGET_SD_CMU_CFG + 5, 0x70000 }, /* 0x610878000: sd_cmu_cfg_5 */
+ { TARGET_SD_CMU_CFG + 6, 0x78000 }, /* 0x610880000: sd_cmu_cfg_6 */
+ { TARGET_SD_CMU_CFG + 7, 0x80000 }, /* 0x610888000: sd_cmu_cfg_7 */
+ { TARGET_SD_CMU_CFG + 8, 0x88000 }, /* 0x610890000: sd_cmu_cfg_8 */
+ { TARGET_SD6G_LANE, 0x90000 }, /* 0x610898000: sd6g_lane_0 */
+ { TARGET_SD6G_LANE + 1, 0x98000 }, /* 0x6108a0000: sd6g_lane_1 */
+ { TARGET_SD6G_LANE + 2, 0xa0000 }, /* 0x6108a8000: sd6g_lane_2 */
+ { TARGET_SD6G_LANE + 3, 0xa8000 }, /* 0x6108b0000: sd6g_lane_3 */
+ { TARGET_SD6G_LANE + 4, 0xb0000 }, /* 0x6108b8000: sd6g_lane_4 */
+ { TARGET_SD6G_LANE + 5, 0xb8000 }, /* 0x6108c0000: sd6g_lane_5 */
+ { TARGET_SD6G_LANE + 6, 0xc0000 }, /* 0x6108c8000: sd6g_lane_6 */
+ { TARGET_SD6G_LANE + 7, 0xc8000 }, /* 0x6108d0000: sd6g_lane_7 */
+ { TARGET_SD6G_LANE + 8, 0xd0000 }, /* 0x6108d8000: sd6g_lane_8 */
+ { TARGET_SD6G_LANE + 9, 0xd8000 }, /* 0x6108e0000: sd6g_lane_9 */
+ { TARGET_SD6G_LANE + 10, 0xe0000 }, /* 0x6108e8000: sd6g_lane_10 */
+ { TARGET_SD6G_LANE + 11, 0xe8000 }, /* 0x6108f0000: sd6g_lane_11 */
+ { TARGET_SD6G_LANE + 12, 0xf0000 }, /* 0x6108f8000: sd6g_lane_12 */
+ { TARGET_SD10G_LANE, 0xf8000 }, /* 0x610900000: sd10g_lane_0 */
+ { TARGET_SD10G_LANE + 1, 0x100000 }, /* 0x610908000: sd10g_lane_1 */
+ { TARGET_SD10G_LANE + 2, 0x108000 }, /* 0x610910000: sd10g_lane_2 */
+ { TARGET_SD10G_LANE + 3, 0x110000 }, /* 0x610918000: sd10g_lane_3 */
+ { TARGET_SD_LANE, 0x1a0000 }, /* 0x6109a8000: sd_lane_0 */
+ { TARGET_SD_LANE + 1, 0x1a8000 }, /* 0x6109b0000: sd_lane_1 */
+ { TARGET_SD_LANE + 2, 0x1b0000 }, /* 0x6109b8000: sd_lane_2 */
+ { TARGET_SD_LANE + 3, 0x1b8000 }, /* 0x6109c0000: sd_lane_3 */
+ { TARGET_SD_LANE + 4, 0x1c0000 }, /* 0x6109c8000: sd_lane_4 */
+ { TARGET_SD_LANE + 5, 0x1c8000 }, /* 0x6109d0000: sd_lane_5 */
+ { TARGET_SD_LANE + 6, 0x1d0000 }, /* 0x6109d8000: sd_lane_6 */
+ { TARGET_SD_LANE + 7, 0x1d8000 }, /* 0x6109e0000: sd_lane_7 */
+ { TARGET_SD_LANE + 8, 0x1e0000 }, /* 0x6109e8000: sd_lane_8 */
+ { TARGET_SD_LANE + 9, 0x1e8000 }, /* 0x6109f0000: sd_lane_9 */
+ { TARGET_SD_LANE + 10, 0x1f0000 }, /* 0x6109f8000: sd_lane_10 */
+ { TARGET_SD_LANE + 11, 0x1f8000 }, /* 0x610a00000: sd_lane_11 */
+ { TARGET_SD_LANE + 12, 0x200000 }, /* 0x610a08000: sd_lane_12 */
+ { TARGET_SD_LANE + 13, 0x208000 }, /* 0x610a10000: sd_lane_13 */
+ { TARGET_SD_LANE + 14, 0x210000 }, /* 0x610a18000: sd_lane_14 */
+ { TARGET_SD_LANE + 15, 0x218000 }, /* 0x610a20000: sd_lane_15 */
+ { TARGET_SD_LANE + 16, 0x220000 }, /* 0x610a28000: sd_lane_16 */
+ { TARGET_SD_CMU + 9, 0x400000 }, /* 0x610c08000: sd_cmu_9 */
+ { TARGET_SD_CMU + 10, 0x408000 }, /* 0x610c10000: sd_cmu_10 */
+ { TARGET_SD_CMU + 11, 0x410000 }, /* 0x610c18000: sd_cmu_11 */
+ { TARGET_SD_CMU + 12, 0x418000 }, /* 0x610c20000: sd_cmu_12 */
+ { TARGET_SD_CMU + 13, 0x420000 }, /* 0x610c28000: sd_cmu_13 */
+ { TARGET_SD_CMU_CFG + 9, 0x428000 }, /* 0x610c30000: sd_cmu_cfg_9 */
+ { TARGET_SD_CMU_CFG + 10, 0x430000 }, /* 0x610c38000: sd_cmu_cfg_10 */
+ { TARGET_SD_CMU_CFG + 11, 0x438000 }, /* 0x610c40000: sd_cmu_cfg_11 */
+ { TARGET_SD_CMU_CFG + 12, 0x440000 }, /* 0x610c48000: sd_cmu_cfg_12 */
+ { TARGET_SD_CMU_CFG + 13, 0x448000 }, /* 0x610c50000: sd_cmu_cfg_13 */
+ { TARGET_SD10G_LANE + 4, 0x450000 }, /* 0x610c58000: sd10g_lane_4 */
+ { TARGET_SD10G_LANE + 5, 0x458000 }, /* 0x610c60000: sd10g_lane_5 */
+ { TARGET_SD10G_LANE + 6, 0x460000 }, /* 0x610c68000: sd10g_lane_6 */
+ { TARGET_SD10G_LANE + 7, 0x468000 }, /* 0x610c70000: sd10g_lane_7 */
+ { TARGET_SD10G_LANE + 8, 0x470000 }, /* 0x610c78000: sd10g_lane_8 */
+ { TARGET_SD10G_LANE + 9, 0x478000 }, /* 0x610c80000: sd10g_lane_9 */
+ { TARGET_SD10G_LANE + 10, 0x480000 }, /* 0x610c88000: sd10g_lane_10 */
+ { TARGET_SD10G_LANE + 11, 0x488000 }, /* 0x610c90000: sd10g_lane_11 */
+ { TARGET_SD25G_LANE, 0x490000 }, /* 0x610c98000: sd25g_lane_0 */
+ { TARGET_SD25G_LANE + 1, 0x498000 }, /* 0x610ca0000: sd25g_lane_1 */
+ { TARGET_SD25G_LANE + 2, 0x4a0000 }, /* 0x610ca8000: sd25g_lane_2 */
+ { TARGET_SD25G_LANE + 3, 0x4a8000 }, /* 0x610cb0000: sd25g_lane_3 */
+ { TARGET_SD25G_LANE + 4, 0x4b0000 }, /* 0x610cb8000: sd25g_lane_4 */
+ { TARGET_SD25G_LANE + 5, 0x4b8000 }, /* 0x610cc0000: sd25g_lane_5 */
+ { TARGET_SD25G_LANE + 6, 0x4c0000 }, /* 0x610cc8000: sd25g_lane_6 */
+ { TARGET_SD25G_LANE + 7, 0x4c8000 }, /* 0x610cd0000: sd25g_lane_7 */
+ { TARGET_SD_LANE + 17, 0x550000 }, /* 0x610d58000: sd_lane_17 */
+ { TARGET_SD_LANE + 18, 0x558000 }, /* 0x610d60000: sd_lane_18 */
+ { TARGET_SD_LANE + 19, 0x560000 }, /* 0x610d68000: sd_lane_19 */
+ { TARGET_SD_LANE + 20, 0x568000 }, /* 0x610d70000: sd_lane_20 */
+ { TARGET_SD_LANE + 21, 0x570000 }, /* 0x610d78000: sd_lane_21 */
+ { TARGET_SD_LANE + 22, 0x578000 }, /* 0x610d80000: sd_lane_22 */
+ { TARGET_SD_LANE + 23, 0x580000 }, /* 0x610d88000: sd_lane_23 */
+ { TARGET_SD_LANE + 24, 0x588000 }, /* 0x610d90000: sd_lane_24 */
+ { TARGET_SD_LANE_25G, 0x590000 }, /* 0x610d98000: sd_lane_25g_25 */
+ { TARGET_SD_LANE_25G + 1, 0x598000 }, /* 0x610da0000: sd_lane_25g_26 */
+ { TARGET_SD_LANE_25G + 2, 0x5a0000 }, /* 0x610da8000: sd_lane_25g_27 */
+ { TARGET_SD_LANE_25G + 3, 0x5a8000 }, /* 0x610db0000: sd_lane_25g_28 */
+ { TARGET_SD_LANE_25G + 4, 0x5b0000 }, /* 0x610db8000: sd_lane_25g_29 */
+ { TARGET_SD_LANE_25G + 5, 0x5b8000 }, /* 0x610dc0000: sd_lane_25g_30 */
+ { TARGET_SD_LANE_25G + 6, 0x5c0000 }, /* 0x610dc8000: sd_lane_25g_31 */
+ { TARGET_SD_LANE_25G + 7, 0x5c8000 }, /* 0x610dd0000: sd_lane_25g_32 */
+};
+
+/* Client lookup function, uses serdes index */
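+/* An illustrative (not normative) consumer would reference a serdes by
+ * index in the device tree, e.g. "phys = <&serdes 5>;"; the index
+ * arrives here as args->args[0] and is matched against macro->sidx.
+ */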
+static struct phy *sparx5_serdes_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct sparx5_serdes_private *priv = dev_get_drvdata(dev);
+ int idx;
+ unsigned int sidx;
+
+ if (args->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ sidx = args->args[0];
+
+ /* Check validity: ERR_PTR(-ENODEV) if not valid */
+ for (idx = 0; idx < SPX5_SERDES_MAX; idx++) {
+ struct sparx5_serdes_macro *macro =
+ phy_get_drvdata(priv->phys[idx]);
+
+ if (sidx != macro->sidx)
+ continue;
+
+ return priv->phys[idx];
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static int sparx5_serdes_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sparx5_serdes_private *priv;
+ struct phy_provider *provider;
+ struct resource *iores;
+ void __iomem *iomem;
+ unsigned long clock;
+ struct clk *clk;
+ int idx;
+ int err;
+
+ if (!np && !pdev->dev.platform_data)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = &pdev->dev;
+
+ /* Get coreclock */
+ clk = devm_clk_get(priv->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(priv->dev, "Failed to get coreclock\n");
+ return PTR_ERR(clk);
+ }
+ clock = clk_get_rate(clk);
+ if (clock == 0) {
+ dev_err(priv->dev, "Invalid coreclock %lu\n", clock);
+ return -EINVAL;
+ }
+ priv->coreclock = clock;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ dev_err(priv->dev, "Unable to get serdes registers\n");
+ return -EINVAL;
+ }
+ iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
+ if (!iomem) {
+ dev_err(priv->dev, "Unable to map serdes registers: %s\n",
+ iores->name);
+ return -ENOMEM;
+ }
+ for (idx = 0; idx < ARRAY_SIZE(sparx5_serdes_iomap); idx++) {
+ struct sparx5_serdes_io_resource *iomap = &sparx5_serdes_iomap[idx];
+
+ priv->regs[iomap->id] = iomem + iomap->offset;
+ }
+ for (idx = 0; idx < SPX5_SERDES_MAX; idx++) {
+ err = sparx5_phy_create(priv, idx, &priv->phys[idx]);
+ if (err)
+ return err;
+ }
+
+ provider = devm_of_phy_provider_register(priv->dev, sparx5_serdes_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id sparx5_serdes_match[] = {
+ { .compatible = "microchip,sparx5-serdes" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sparx5_serdes_match);
+
+static struct platform_driver sparx5_serdes_driver = {
+ .probe = sparx5_serdes_probe,
+ .driver = {
+ .name = "sparx5-serdes",
+ .of_match_table = sparx5_serdes_match,
+ },
+};
+
+module_platform_driver(sparx5_serdes_driver);
+
+MODULE_DESCRIPTION("Microchip Sparx5 switch serdes driver");
+MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/microchip/sparx5_serdes.h b/drivers/phy/microchip/sparx5_serdes.h
new file mode 100644
index 000000000000..0a3e496e6210
--- /dev/null
+++ b/drivers/phy/microchip/sparx5_serdes.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Microchip Sparx5 SerDes driver
+ *
+ * Copyright (c) 2020 Microchip Technology Inc.
+ */
+
+#ifndef _SPARX5_SERDES_H_
+#define _SPARX5_SERDES_H_
+
+#include "sparx5_serdes_regs.h"
+
+#define SPX5_SERDES_MAX 33
+
+enum sparx5_serdes_type {
+ SPX5_SDT_6G = 6,
+ SPX5_SDT_10G = 10,
+ SPX5_SDT_25G = 25,
+};
+
+enum sparx5_serdes_mode {
+ SPX5_SD_MODE_NONE,
+ SPX5_SD_MODE_2G5,
+ SPX5_SD_MODE_QSGMII,
+ SPX5_SD_MODE_100FX,
+ SPX5_SD_MODE_1000BASEX,
+ SPX5_SD_MODE_SFI,
+};
+
+struct sparx5_serdes_private {
+ struct device *dev;
+ void __iomem *regs[NUM_TARGETS];
+ struct phy *phys[SPX5_SERDES_MAX];
+ bool cmu_enabled;
+ unsigned long coreclock;
+};
+
+struct sparx5_serdes_macro {
+ struct sparx5_serdes_private *priv;
+ u32 sidx;
+ u32 stpidx;
+ enum sparx5_serdes_type serdestype;
+ enum sparx5_serdes_mode serdesmode;
+ phy_interface_t portmode;
+ int speed;
+ enum phy_media media;
+};
+
+/* Helpers to read, write and modify register contents.
+ * The register definition macros supply the geometry arguments,
+ * starting with the target id.
+ */
+static inline void __iomem *sdx5_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline void __iomem *sdx5_inst_baseaddr(void __iomem *base,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline void sdx5_rmw(u32 val, u32 mask, struct sparx5_serdes_private *priv,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+ void __iomem *addr =
+ sdx5_addr(priv->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth);
+ nval = readl(addr);
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, addr);
+}
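+
+/* Example (illustrative): setting one field of 10G lane 3's LANE_01
+ * register:
+ *
+ *   sdx5_rmw(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(3),
+ *            SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0,
+ *            priv, SD10G_LANE_LANE_01(3));
+ *
+ * where SD10G_LANE_LANE_01(3) expands via __REG() to the target id,
+ * instance and group/register geometry arguments that follow "priv".
+ */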
+
+static inline void sdx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+ void __iomem *addr =
+ sdx5_inst_baseaddr(iomem,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth);
+ nval = readl(addr);
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, addr);
+}
+
+static inline void sdx5_rmw_addr(u32 val, u32 mask, void __iomem *addr)
+{
+ u32 nval;
+
+ nval = readl(addr);
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, addr);
+}
+
+static inline void __iomem *sdx5_inst_get(struct sparx5_serdes_private *priv,
+ int id, int tinst)
+{
+ return priv->regs[id + tinst];
+}
+
+static inline void __iomem *sdx5_inst_addr(void __iomem *iomem,
+ int id, int tinst, int tcnt,
+ int gbase,
+ int ginst, int gcnt, int gwidth,
+ int raddr,
+ int rinst, int rcnt, int rwidth)
+{
+ return sdx5_inst_baseaddr(iomem, gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth);
+}
+
+#endif /* _SPARX5_SERDES_H_ */
diff --git a/drivers/phy/microchip/sparx5_serdes_regs.h b/drivers/phy/microchip/sparx5_serdes_regs.h
new file mode 100644
index 000000000000..b96386a4df5a
--- /dev/null
+++ b/drivers/phy/microchip/sparx5_serdes_regs.h
@@ -0,0 +1,2695 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Microchip Sparx5 SerDes driver
+ *
+ * Copyright (c) 2020 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2020-11-16 13:11:27 +0100.
+ * Commit ID: 13bdf073131d8bf40c54901df6988ae4e9c8f29f
+ */
+
+#ifndef _SPARX5_SERDES_REGS_H_
+#define _SPARX5_SERDES_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum sparx5_serdes_target {
+ TARGET_SD10G_LANE = 200,
+ TARGET_SD25G_LANE = 212,
+ TARGET_SD6G_LANE = 233,
+ TARGET_SD_CMU = 248,
+ TARGET_SD_CMU_CFG = 262,
+ TARGET_SD_LANE = 276,
+ TARGET_SD_LANE_25G = 301,
+ NUM_TARGETS = 332
+};
+
+#define __REG(...) __VA_ARGS__
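+/* __REG() forwards the register geometry as a plain argument list:
+ * (id, tinst, tcnt, gbase, ginst, gcnt, gwidth, raddr, rinst, rcnt,
+ * rwidth), matching the trailing parameters of sdx5_addr() and the
+ * rmw helpers in sparx5_serdes.h.
+ */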
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_01 */
+#define SD10G_LANE_LANE_01(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 4, 0, 1, 4)
+
+#define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+#define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+
+#define SD10G_LANE_LANE_01_CFG_RXDET_EN BIT(4)
+#define SD10G_LANE_LANE_01_CFG_RXDET_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_01_CFG_RXDET_EN, x)
+#define SD10G_LANE_LANE_01_CFG_RXDET_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_01_CFG_RXDET_EN, x)
+
+#define SD10G_LANE_LANE_01_CFG_RXDET_STR BIT(5)
+#define SD10G_LANE_LANE_01_CFG_RXDET_STR_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_01_CFG_RXDET_STR, x)
+#define SD10G_LANE_LANE_01_CFG_RXDET_STR_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_01_CFG_RXDET_STR, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_02 */
+#define SD10G_LANE_LANE_02(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 8, 0, 1, 4)
+
+#define SD10G_LANE_LANE_02_CFG_EN_ADV BIT(0)
+#define SD10G_LANE_LANE_02_CFG_EN_ADV_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_ADV, x)
+#define SD10G_LANE_LANE_02_CFG_EN_ADV_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_ADV, x)
+
+#define SD10G_LANE_LANE_02_CFG_EN_MAIN BIT(1)
+#define SD10G_LANE_LANE_02_CFG_EN_MAIN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_MAIN, x)
+#define SD10G_LANE_LANE_02_CFG_EN_MAIN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_MAIN, x)
+
+#define SD10G_LANE_LANE_02_CFG_EN_DLY BIT(2)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_DLY, x)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_DLY, x)
+
+#define SD10G_LANE_LANE_02_CFG_EN_DLY2 BIT(3)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY2_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_DLY2, x)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY2_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_DLY2, x)
+
+#define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0 GENMASK(7, 4)
+#define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0, x)
+#define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_03 */
+#define SD10G_LANE_LANE_03(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 12, 0, 1, 4)
+
+#define SD10G_LANE_LANE_03_CFG_TAP_MAIN BIT(0)
+#define SD10G_LANE_LANE_03_CFG_TAP_MAIN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_03_CFG_TAP_MAIN, x)
+#define SD10G_LANE_LANE_03_CFG_TAP_MAIN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_03_CFG_TAP_MAIN, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_04 */
+#define SD10G_LANE_LANE_04(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 16, 0, 1, 4)
+
+#define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0 GENMASK(4, 0)
+#define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0, x)
+#define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_06 */
+#define SD10G_LANE_LANE_06(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 24, 0, 1, 4)
+
+#define SD10G_LANE_LANE_06_CFG_PD_DRIVER BIT(0)
+#define SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_06_CFG_PD_DRIVER, x)
+#define SD10G_LANE_LANE_06_CFG_PD_DRIVER_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_06_CFG_PD_DRIVER, x)
+
+#define SD10G_LANE_LANE_06_CFG_PD_CLK BIT(1)
+#define SD10G_LANE_LANE_06_CFG_PD_CLK_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_06_CFG_PD_CLK, x)
+#define SD10G_LANE_LANE_06_CFG_PD_CLK_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_06_CFG_PD_CLK, x)
+
+#define SD10G_LANE_LANE_06_CFG_PD_CML BIT(2)
+#define SD10G_LANE_LANE_06_CFG_PD_CML_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_06_CFG_PD_CML, x)
+#define SD10G_LANE_LANE_06_CFG_PD_CML_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_06_CFG_PD_CML, x)
+
+#define SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN BIT(3)
+#define SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN, x)
+#define SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN, x)
+
+#define SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN BIT(4)
+#define SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN, x)
+#define SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN, x)
+
+#define SD10G_LANE_LANE_06_CFG_EN_PREEMPH BIT(5)
+#define SD10G_LANE_LANE_06_CFG_EN_PREEMPH_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_06_CFG_EN_PREEMPH, x)
+#define SD10G_LANE_LANE_06_CFG_EN_PREEMPH_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_06_CFG_EN_PREEMPH, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0B */
+#define SD10G_LANE_LANE_0B(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 44, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0 GENMASK(3, 0)
+#define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0, x)
+#define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0, x)
+
+#define SD10G_LANE_LANE_0B_CFG_PD_CTLE BIT(4)
+#define SD10G_LANE_LANE_0B_CFG_PD_CTLE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0B_CFG_PD_CTLE, x)
+#define SD10G_LANE_LANE_0B_CFG_PD_CTLE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0B_CFG_PD_CTLE, x)
+
+#define SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN BIT(5)
+#define SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN, x)
+#define SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN, x)
+
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE BIT(6)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE, x)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE, x)
+
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ BIT(7)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ, x)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0C */
+#define SD10G_LANE_LANE_0C(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 48, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE BIT(0)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSCAL_AFE, x)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSCAL_AFE, x)
+
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_SQ BIT(1)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_SQ_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSCAL_SQ, x)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_SQ_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSCAL_SQ, x)
+
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE BIT(2)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE, x)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE, x)
+
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ BIT(3)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ, x)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ, x)
+
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE BIT(4)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE, x)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE, x)
+
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ BIT(5)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ, x)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ, x)
+
+#define SD10G_LANE_LANE_0C_CFG_PD_RX_LS BIT(6)
+#define SD10G_LANE_LANE_0C_CFG_PD_RX_LS_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_PD_RX_LS, x)
+#define SD10G_LANE_LANE_0C_CFG_PD_RX_LS_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_PD_RX_LS, x)
+
+#define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12 BIT(7)
+#define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12, x)
+#define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0D */
+#define SD10G_LANE_LANE_0D(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 52, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0, x)
+#define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0, x)
+
+#define SD10G_LANE_LANE_0D_CFG_EQR_BYP BIT(4)
+#define SD10G_LANE_LANE_0D_CFG_EQR_BYP_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0D_CFG_EQR_BYP, x)
+#define SD10G_LANE_LANE_0D_CFG_EQR_BYP_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0D_CFG_EQR_BYP, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0E */
+#define SD10G_LANE_LANE_0E(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 56, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0 GENMASK(3, 0)
+#define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0, x)
+#define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0, x)
+
+#define SD10G_LANE_LANE_0E_CFG_RXLB_EN BIT(4)
+#define SD10G_LANE_LANE_0E_CFG_RXLB_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0E_CFG_RXLB_EN, x)
+#define SD10G_LANE_LANE_0E_CFG_RXLB_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0E_CFG_RXLB_EN, x)
+
+#define SD10G_LANE_LANE_0E_CFG_TXLB_EN BIT(5)
+#define SD10G_LANE_LANE_0E_CFG_TXLB_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0E_CFG_TXLB_EN, x)
+#define SD10G_LANE_LANE_0E_CFG_TXLB_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0E_CFG_TXLB_EN, x)
+
+#define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN BIT(6)
+#define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN, x)
+#define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0F */
+#define SD10G_LANE_LANE_0F(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 60, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0 GENMASK(7, 0)
+#define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0, x)
+#define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_13 */
+#define SD10G_LANE_LANE_13(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 76, 0, 1, 4)
+
+#define SD10G_LANE_LANE_13_CFG_DCDR_PD BIT(0)
+#define SD10G_LANE_LANE_13_CFG_DCDR_PD_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_13_CFG_DCDR_PD, x)
+#define SD10G_LANE_LANE_13_CFG_DCDR_PD_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_13_CFG_DCDR_PD, x)
+
+#define SD10G_LANE_LANE_13_CFG_PHID_1T BIT(1)
+#define SD10G_LANE_LANE_13_CFG_PHID_1T_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_13_CFG_PHID_1T, x)
+#define SD10G_LANE_LANE_13_CFG_PHID_1T_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_13_CFG_PHID_1T, x)
+
+#define SD10G_LANE_LANE_13_CFG_CDRCK_EN BIT(2)
+#define SD10G_LANE_LANE_13_CFG_CDRCK_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_13_CFG_CDRCK_EN, x)
+#define SD10G_LANE_LANE_13_CFG_CDRCK_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_13_CFG_CDRCK_EN, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_14 */
+#define SD10G_LANE_LANE_14(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 80, 0, 1, 4)
+
+#define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0 GENMASK(7, 0)
+#define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0, x)
+#define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_15 */
+#define SD10G_LANE_LANE_15(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 84, 0, 1, 4)
+
+#define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8 GENMASK(7, 0)
+#define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8, x)
+#define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_16 */
+#define SD10G_LANE_LANE_16(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 88, 0, 1, 4)
+
+#define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16 GENMASK(7, 0)
+#define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16, x)
+#define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_1A */
+#define SD10G_LANE_LANE_1A(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 104, 0, 1, 4)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN BIT(0)
+#define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_EN BIT(1)
+#define SD10G_LANE_LANE_1A_CFG_PI_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_EN, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_EN, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_DFE_EN BIT(2)
+#define SD10G_LANE_LANE_1A_CFG_PI_DFE_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_DFE_EN, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_DFE_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_DFE_EN, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_STEPS BIT(3)
+#define SD10G_LANE_LANE_1A_CFG_PI_STEPS_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_STEPS, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_STEPS_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_STEPS, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_22 */
+#define SD10G_LANE_LANE_22(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 136, 0, 1, 4)
+
+#define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1 GENMASK(4, 0)
+#define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1, x)
+#define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_23 */
+#define SD10G_LANE_LANE_23(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 140, 0, 1, 4)
+
+#define SD10G_LANE_LANE_23_CFG_DFE_PD BIT(0)
+#define SD10G_LANE_LANE_23_CFG_DFE_PD_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_23_CFG_DFE_PD, x)
+#define SD10G_LANE_LANE_23_CFG_DFE_PD_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_23_CFG_DFE_PD, x)
+
+#define SD10G_LANE_LANE_23_CFG_EN_DFEDIG BIT(1)
+#define SD10G_LANE_LANE_23_CFG_EN_DFEDIG_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_23_CFG_EN_DFEDIG, x)
+#define SD10G_LANE_LANE_23_CFG_EN_DFEDIG_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_23_CFG_EN_DFEDIG, x)
+
+#define SD10G_LANE_LANE_23_CFG_DFECK_EN BIT(2)
+#define SD10G_LANE_LANE_23_CFG_DFECK_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_23_CFG_DFECK_EN, x)
+#define SD10G_LANE_LANE_23_CFG_DFECK_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_23_CFG_DFECK_EN, x)
+
+#define SD10G_LANE_LANE_23_CFG_ERRAMP_PD BIT(3)
+#define SD10G_LANE_LANE_23_CFG_ERRAMP_PD_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_23_CFG_ERRAMP_PD, x)
+#define SD10G_LANE_LANE_23_CFG_ERRAMP_PD_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_23_CFG_ERRAMP_PD, x)
+
+#define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0 GENMASK(6, 4)
+#define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0, x)
+#define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_24 */
+#define SD10G_LANE_LANE_24(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 144, 0, 1, 4)
+
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0 GENMASK(3, 0)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0, x)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0, x)
+
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0 GENMASK(7, 4)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0, x)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_26 */
+#define SD10G_LANE_LANE_26(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 152, 0, 1, 4)
+
+#define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0 GENMASK(7, 0)
+#define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0, x)
+#define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_2F */
+#define SD10G_LANE_LANE_2F(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 188, 0, 1, 4)
+
+#define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0, x)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0, x)
+
+#define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0 GENMASK(7, 4)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0, x)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_30 */
+#define SD10G_LANE_LANE_30(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 192, 0, 1, 4)
+
+#define SD10G_LANE_LANE_30_CFG_SUMMER_EN BIT(0)
+#define SD10G_LANE_LANE_30_CFG_SUMMER_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_30_CFG_SUMMER_EN, x)
+#define SD10G_LANE_LANE_30_CFG_SUMMER_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_30_CFG_SUMMER_EN, x)
+
+#define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0 GENMASK(6, 4)
+#define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0, x)
+#define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_31 */
+#define SD10G_LANE_LANE_31(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 196, 0, 1, 4)
+
+#define SD10G_LANE_LANE_31_CFG_PI_RSTN BIT(0)
+#define SD10G_LANE_LANE_31_CFG_PI_RSTN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_31_CFG_PI_RSTN, x)
+#define SD10G_LANE_LANE_31_CFG_PI_RSTN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_31_CFG_PI_RSTN, x)
+
+#define SD10G_LANE_LANE_31_CFG_CDR_RSTN BIT(1)
+#define SD10G_LANE_LANE_31_CFG_CDR_RSTN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_31_CFG_CDR_RSTN, x)
+#define SD10G_LANE_LANE_31_CFG_CDR_RSTN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_31_CFG_CDR_RSTN, x)
+
+#define SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG BIT(2)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG, x)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG, x)
+
+#define SD10G_LANE_LANE_31_CFG_CTLE_RSTN BIT(3)
+#define SD10G_LANE_LANE_31_CFG_CTLE_RSTN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_31_CFG_CTLE_RSTN, x)
+#define SD10G_LANE_LANE_31_CFG_CTLE_RSTN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_31_CFG_CTLE_RSTN, x)
+
+#define SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8 BIT(4)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8, x)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8, x)
+
+#define SD10G_LANE_LANE_31_CFG_R50_EN BIT(5)
+#define SD10G_LANE_LANE_31_CFG_R50_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_31_CFG_R50_EN, x)
+#define SD10G_LANE_LANE_31_CFG_R50_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_31_CFG_R50_EN, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_32 */
+#define SD10G_LANE_LANE_32(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 200, 0, 1, 4)
+
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0, x)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0, x)
+
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0, x)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_33 */
+#define SD10G_LANE_LANE_33(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 204, 0, 1, 4)
+
+#define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0, x)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0, x)
+
+#define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0, x)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_35 */
+#define SD10G_LANE_LANE_35(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 212, 0, 1, 4)
+
+#define SD10G_LANE_LANE_35_CFG_TXRATE_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_35_CFG_TXRATE_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_35_CFG_TXRATE_1_0, x)
+#define SD10G_LANE_LANE_35_CFG_TXRATE_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_35_CFG_TXRATE_1_0, x)
+
+#define SD10G_LANE_LANE_35_CFG_RXRATE_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_35_CFG_RXRATE_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_35_CFG_RXRATE_1_0, x)
+#define SD10G_LANE_LANE_35_CFG_RXRATE_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_35_CFG_RXRATE_1_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_36 */
+#define SD10G_LANE_LANE_36(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 216, 0, 1, 4)
+
+#define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0, x)
+#define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0, x)
+
+#define SD10G_LANE_LANE_36_CFG_EID_LP BIT(4)
+#define SD10G_LANE_LANE_36_CFG_EID_LP_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_36_CFG_EID_LP, x)
+#define SD10G_LANE_LANE_36_CFG_EID_LP_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_36_CFG_EID_LP, x)
+
+#define SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH BIT(5)
+#define SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH, x)
+#define SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH, x)
+
+#define SD10G_LANE_LANE_36_CFG_PRBS_SEL BIT(6)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SEL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_36_CFG_PRBS_SEL, x)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SEL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_36_CFG_PRBS_SEL, x)
+
+#define SD10G_LANE_LANE_36_CFG_PRBS_SETB BIT(7)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SETB_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_36_CFG_PRBS_SETB, x)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SETB_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_36_CFG_PRBS_SETB, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_37 */
+#define SD10G_LANE_LANE_37(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 220, 0, 1, 4)
+
+#define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD BIT(0)
+#define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD, x)
+#define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD, x)
+
+#define SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE BIT(1)
+#define SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE, x)
+#define SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE, x)
+
+#define SD10G_LANE_LANE_37_CFG_TXSWING_HALF BIT(2)
+#define SD10G_LANE_LANE_37_CFG_TXSWING_HALF_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_37_CFG_TXSWING_HALF, x)
+#define SD10G_LANE_LANE_37_CFG_TXSWING_HALF_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_37_CFG_TXSWING_HALF, x)
+
+#define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0, x)
+#define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_39 */
+#define SD10G_LANE_LANE_39(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 228, 0, 1, 4)
+
+#define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0, x)
+#define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0, x)
+
+#define SD10G_LANE_LANE_39_CFG_RX_SSC_LH BIT(4)
+#define SD10G_LANE_LANE_39_CFG_RX_SSC_LH_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_39_CFG_RX_SSC_LH, x)
+#define SD10G_LANE_LANE_39_CFG_RX_SSC_LH_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_39_CFG_RX_SSC_LH, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3A */
+#define SD10G_LANE_LANE_3A(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 232, 0, 1, 4)
+
+#define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0 GENMASK(3, 0)
+#define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0, x)
+#define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0, x)
+
+#define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0 GENMASK(7, 4)
+#define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0, x)
+#define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3C */
+#define SD10G_LANE_LANE_3C(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 240, 0, 1, 4)
+
+#define SD10G_LANE_LANE_3C_CFG_DIS_ACC BIT(0)
+#define SD10G_LANE_LANE_3C_CFG_DIS_ACC_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_3C_CFG_DIS_ACC, x)
+#define SD10G_LANE_LANE_3C_CFG_DIS_ACC_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_3C_CFG_DIS_ACC, x)
+
+#define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER BIT(1)
+#define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER, x)
+#define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_40 */
+#define SD10G_LANE_LANE_40(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 256, 0, 1, 4)
+
+#define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0 GENMASK(7, 0)
+#define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0, x)
+#define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_41 */
+#define SD10G_LANE_LANE_41(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 260, 0, 1, 4)
+
+#define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8 GENMASK(7, 0)
+#define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8, x)
+#define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_42 */
+#define SD10G_LANE_LANE_42(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 264, 0, 1, 4)
+
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0, x)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0, x)
+
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0 GENMASK(6, 4)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0, x)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_48 */
+#define SD10G_LANE_LANE_48(t) __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 0, 0, 1, 4)
+
+#define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0 GENMASK(3, 0)
+#define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0, x)
+#define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0, x)
+
+#define SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL BIT(4)
+#define SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL, x)
+#define SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL, x)
+
+#define SD10G_LANE_LANE_48_CFG_CLK_ENQ BIT(5)
+#define SD10G_LANE_LANE_48_CFG_CLK_ENQ_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_48_CFG_CLK_ENQ, x)
+#define SD10G_LANE_LANE_48_CFG_CLK_ENQ_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_48_CFG_CLK_ENQ, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_50 */
+#define SD10G_LANE_LANE_50(t) __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 32, 0, 1, 4)
+
+#define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0, x)
+#define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0, x)
+
+#define SD10G_LANE_LANE_50_CFG_SSC_RESETB BIT(4)
+#define SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_50_CFG_SSC_RESETB, x)
+#define SD10G_LANE_LANE_50_CFG_SSC_RESETB_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_50_CFG_SSC_RESETB, x)
+
+#define SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL BIT(5)
+#define SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL, x)
+#define SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL, x)
+
+#define SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL BIT(6)
+#define SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL, x)
+#define SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL, x)
+
+#define SD10G_LANE_LANE_50_CFG_JT_EN BIT(7)
+#define SD10G_LANE_LANE_50_CFG_JT_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_50_CFG_JT_EN, x)
+#define SD10G_LANE_LANE_50_CFG_JT_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_50_CFG_JT_EN, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_2:LANE_52 */
+#define SD10G_LANE_LANE_52(t) __REG(TARGET_SD10G_LANE, t, 12, 328, 0, 1, 24, 0, 0, 1, 4)
+
+#define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0 GENMASK(5, 0)
+#define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0, x)
+#define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_4:LANE_83 */
+#define SD10G_LANE_LANE_83(t) __REG(TARGET_SD10G_LANE, t, 12, 464, 0, 1, 112, 60, 0, 1, 4)
+
+#define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE BIT(0)
+#define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_83_R_TX_BIT_REVERSE, x)
+#define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_83_R_TX_BIT_REVERSE, x)
+
+#define SD10G_LANE_LANE_83_R_TX_POL_INV BIT(1)
+#define SD10G_LANE_LANE_83_R_TX_POL_INV_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_83_R_TX_POL_INV, x)
+#define SD10G_LANE_LANE_83_R_TX_POL_INV_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_83_R_TX_POL_INV, x)
+
+#define SD10G_LANE_LANE_83_R_RX_BIT_REVERSE BIT(2)
+#define SD10G_LANE_LANE_83_R_RX_BIT_REVERSE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_83_R_RX_BIT_REVERSE, x)
+#define SD10G_LANE_LANE_83_R_RX_BIT_REVERSE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_83_R_RX_BIT_REVERSE, x)
+
+#define SD10G_LANE_LANE_83_R_RX_POL_INV BIT(3)
+#define SD10G_LANE_LANE_83_R_RX_POL_INV_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_83_R_RX_POL_INV, x)
+#define SD10G_LANE_LANE_83_R_RX_POL_INV_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_83_R_RX_POL_INV, x)
+
+#define SD10G_LANE_LANE_83_R_DFE_RSTN BIT(4)
+#define SD10G_LANE_LANE_83_R_DFE_RSTN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_83_R_DFE_RSTN, x)
+#define SD10G_LANE_LANE_83_R_DFE_RSTN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_83_R_DFE_RSTN, x)
+
+#define SD10G_LANE_LANE_83_R_CDR_RSTN BIT(5)
+#define SD10G_LANE_LANE_83_R_CDR_RSTN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_83_R_CDR_RSTN, x)
+#define SD10G_LANE_LANE_83_R_CDR_RSTN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_83_R_CDR_RSTN, x)
+
+#define SD10G_LANE_LANE_83_R_CTLE_RSTN BIT(6)
+#define SD10G_LANE_LANE_83_R_CTLE_RSTN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_83_R_CTLE_RSTN, x)
+#define SD10G_LANE_LANE_83_R_CTLE_RSTN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_83_R_CTLE_RSTN, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_93 */
+#define SD10G_LANE_LANE_93(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 12, 0, 1, 4)
+
+#define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN BIT(0)
+#define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN, x)
+#define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN, x)
+
+#define SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT BIT(1)
+#define SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE BIT(2)
+#define SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE, x)
+#define SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE, x)
+
+#define SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL BIT(3)
+#define SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL, x)
+#define SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL, x)
+
+#define SD10G_LANE_LANE_93_R_REG_MANUAL BIT(4)
+#define SD10G_LANE_LANE_93_R_REG_MANUAL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_REG_MANUAL, x)
+#define SD10G_LANE_LANE_93_R_REG_MANUAL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_REG_MANUAL, x)
+
+#define SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT BIT(5)
+#define SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT BIT(6)
+#define SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT BIT(7)
+#define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_94 */
+#define SD10G_LANE_LANE_94(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 16, 0, 1, 4)
+
+#define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0, x)
+#define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0, x)
+
+#define SD10G_LANE_LANE_94_R_ISCAN_REG BIT(4)
+#define SD10G_LANE_LANE_94_R_ISCAN_REG_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_94_R_ISCAN_REG, x)
+#define SD10G_LANE_LANE_94_R_ISCAN_REG_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_94_R_ISCAN_REG, x)
+
+#define SD10G_LANE_LANE_94_R_TXEQ_REG BIT(5)
+#define SD10G_LANE_LANE_94_R_TXEQ_REG_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_94_R_TXEQ_REG, x)
+#define SD10G_LANE_LANE_94_R_TXEQ_REG_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_94_R_TXEQ_REG, x)
+
+#define SD10G_LANE_LANE_94_R_MISC_REG BIT(6)
+#define SD10G_LANE_LANE_94_R_MISC_REG_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_94_R_MISC_REG, x)
+#define SD10G_LANE_LANE_94_R_MISC_REG_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_94_R_MISC_REG, x)
+
+#define SD10G_LANE_LANE_94_R_SWING_REG BIT(7)
+#define SD10G_LANE_LANE_94_R_SWING_REG_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_94_R_SWING_REG, x)
+#define SD10G_LANE_LANE_94_R_SWING_REG_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_94_R_SWING_REG, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_9E */
+#define SD10G_LANE_LANE_9E(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 56, 0, 1, 4)
+
+#define SD10G_LANE_LANE_9E_R_RXEQ_REG BIT(0)
+#define SD10G_LANE_LANE_9E_R_RXEQ_REG_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_9E_R_RXEQ_REG, x)
+#define SD10G_LANE_LANE_9E_R_RXEQ_REG_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_9E_R_RXEQ_REG, x)
+
+#define SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN BIT(1)
+#define SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN, x)
+#define SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN, x)
+
+#define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN BIT(2)
+#define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN, x)
+#define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A1 */
+#define SD10G_LANE_LANE_A1(t) __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 4, 0, 1, 4)
+
+#define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0, x)
+#define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0, x)
+
+#define SD10G_LANE_LANE_A1_R_SSC_FROM_HWT BIT(4)
+#define SD10G_LANE_LANE_A1_R_SSC_FROM_HWT_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_A1_R_SSC_FROM_HWT, x)
+#define SD10G_LANE_LANE_A1_R_SSC_FROM_HWT_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_A1_R_SSC_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_A1_R_CDR_FROM_HWT BIT(5)
+#define SD10G_LANE_LANE_A1_R_CDR_FROM_HWT_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_A1_R_CDR_FROM_HWT, x)
+#define SD10G_LANE_LANE_A1_R_CDR_FROM_HWT_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_A1_R_CDR_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT BIT(6)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT, x)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING BIT(7)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_A1_R_PCLK_GATING, x)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_A1_R_PCLK_GATING, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A2 */
+#define SD10G_LANE_LANE_A2(t) __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 8, 0, 1, 4)
+
+#define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0 GENMASK(4, 0)
+#define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0, x)
+#define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */
+#define SD10G_LANE_LANE_DF(t) __REG(TARGET_SD10G_LANE, t, 12, 832, 0, 1, 84, 60, 0, 1, 4)
+
+#define SD10G_LANE_LANE_DF_LOL_UDL BIT(0)
+#define SD10G_LANE_LANE_DF_LOL_UDL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_DF_LOL_UDL, x)
+#define SD10G_LANE_LANE_DF_LOL_UDL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_DF_LOL_UDL, x)
+
+#define SD10G_LANE_LANE_DF_LOL BIT(1)
+#define SD10G_LANE_LANE_DF_LOL_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_DF_LOL, x)
+#define SD10G_LANE_LANE_DF_LOL_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_DF_LOL, x)
+
+#define SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED BIT(2)
+#define SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+#define SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+
+#define SD10G_LANE_LANE_DF_SQUELCH BIT(3)
+#define SD10G_LANE_LANE_DF_SQUELCH_SET(x)\
+ FIELD_PREP(SD10G_LANE_LANE_DF_SQUELCH, x)
+#define SD10G_LANE_LANE_DF_SQUELCH_GET(x)\
+ FIELD_GET(SD10G_LANE_LANE_DF_SQUELCH, x)
+
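[editor's note, not patch content] A minimal usage sketch before the SD25G block starts: each generated _SET()/_GET() pair is a thin wrapper over FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, so lane register values can be composed and decoded field by field. The helper functions below are hypothetical; only the macro names come from the definitions above.

#include <linux/bitfield.h>
#include <linux/types.h>

/* Compose two LANE_0C fields into one raw register value. */
static u32 sd10g_lane_0c_compose(bool oscal_afe, bool pd_rx_ls)
{
	/* Each _SET() FIELD_PREPs its argument into the field's bit position. */
	return SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET(oscal_afe) |
	       SD10G_LANE_LANE_0C_CFG_PD_RX_LS_SET(pd_rx_ls);
}

/* Decode one field back out of a raw register value. */
static bool sd10g_lane_0c_oscal_afe(u32 val)
{
	/* _GET() masks the field and shifts it down to bit 0. */
	return SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_GET(val);
}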
+/* SD25G_TARGET:CMU_GRP_0:CMU_09 */
+#define SD25G_LANE_CMU_09(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 36, 0, 1, 4)
+
+#define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN BIT(0)
+#define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN, x)
+#define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN, x)
+
+#define SD25G_LANE_CMU_09_CFG_EN_DUMMY BIT(1)
+#define SD25G_LANE_CMU_09_CFG_EN_DUMMY_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_09_CFG_EN_DUMMY, x)
+#define SD25G_LANE_CMU_09_CFG_EN_DUMMY_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_09_CFG_EN_DUMMY, x)
+
+#define SD25G_LANE_CMU_09_CFG_PLL_LOS_SET BIT(2)
+#define SD25G_LANE_CMU_09_CFG_PLL_LOS_SET_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_09_CFG_PLL_LOS_SET, x)
+#define SD25G_LANE_CMU_09_CFG_PLL_LOS_SET_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_09_CFG_PLL_LOS_SET, x)
+
+#define SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD BIT(3)
+#define SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD, x)
+#define SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD, x)
+
+#define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0 GENMASK(5, 4)
+#define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0, x)
+#define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_0B */
+#define SD25G_LANE_CMU_0B(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 44, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT BIT(0)
+#define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT, x)
+#define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT, x)
+
+#define SD25G_LANE_CMU_0B_CFG_DISLOL BIT(1)
+#define SD25G_LANE_CMU_0B_CFG_DISLOL_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_DISLOL, x)
+#define SD25G_LANE_CMU_0B_CFG_DISLOL_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_DISLOL, x)
+
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN BIT(2)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN, x)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN, x)
+
+#define SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN BIT(3)
+#define SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN, x)
+#define SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN, x)
+
+#define SD25G_LANE_CMU_0B_CFG_VFILT2PAD BIT(4)
+#define SD25G_LANE_CMU_0B_CFG_VFILT2PAD_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_VFILT2PAD, x)
+#define SD25G_LANE_CMU_0B_CFG_VFILT2PAD_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_VFILT2PAD, x)
+
+#define SD25G_LANE_CMU_0B_CFG_DISLOS BIT(5)
+#define SD25G_LANE_CMU_0B_CFG_DISLOS_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_DISLOS, x)
+#define SD25G_LANE_CMU_0B_CFG_DISLOS_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_DISLOS, x)
+
+#define SD25G_LANE_CMU_0B_CFG_DCLOL BIT(6)
+#define SD25G_LANE_CMU_0B_CFG_DCLOL_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_DCLOL, x)
+#define SD25G_LANE_CMU_0B_CFG_DCLOL_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_DCLOL, x)
+
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN BIT(7)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN, x)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_0C */
+#define SD25G_LANE_CMU_0C(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 48, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET BIT(0)
+#define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET, x)
+#define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET, x)
+
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN BIT(1)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN, x)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN, x)
+
+#define SD25G_LANE_CMU_0C_CFG_VCO_PD BIT(2)
+#define SD25G_LANE_CMU_0C_CFG_VCO_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0C_CFG_VCO_PD, x)
+#define SD25G_LANE_CMU_0C_CFG_VCO_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0C_CFG_VCO_PD, x)
+
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP BIT(3)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP, x)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP, x)
+
+#define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0 GENMASK(5, 4)
+#define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0, x)
+#define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_0D */
+#define SD25G_LANE_CMU_0D(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 52, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD BIT(0)
+#define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0D_CFG_CK_TREE_PD, x)
+#define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0D_CFG_CK_TREE_PD, x)
+
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN BIT(1)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN, x)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN, x)
+
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP BIT(2)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP, x)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP, x)
+
+#define SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP BIT(3)
+#define SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP, x)
+#define SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP, x)
+
+#define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0 GENMASK(5, 4)
+#define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0, x)
+#define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_0E */
+#define SD25G_LANE_CMU_0E(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 56, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0 GENMASK(3, 0)
+#define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0, x)
+#define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0, x)
+
+#define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD BIT(4)
+#define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD, x)
+#define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_13 */
+#define SD25G_LANE_CMU_13(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 76, 0, 1, 4)
+
+#define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0 GENMASK(3, 0)
+#define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0, x)
+#define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0, x)
+
+#define SD25G_LANE_CMU_13_CFG_JT_EN BIT(4)
+#define SD25G_LANE_CMU_13_CFG_JT_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_13_CFG_JT_EN, x)
+#define SD25G_LANE_CMU_13_CFG_JT_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_13_CFG_JT_EN, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_18 */
+#define SD25G_LANE_CMU_18(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 96, 0, 1, 4)
+
+#define SD25G_LANE_CMU_18_R_PLL_RSTN BIT(0)
+#define SD25G_LANE_CMU_18_R_PLL_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_RSTN, x)
+#define SD25G_LANE_CMU_18_R_PLL_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_18_R_PLL_RSTN, x)
+
+#define SD25G_LANE_CMU_18_R_PLL_LOL_SET BIT(1)
+#define SD25G_LANE_CMU_18_R_PLL_LOL_SET_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_LOL_SET, x)
+#define SD25G_LANE_CMU_18_R_PLL_LOL_SET_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_18_R_PLL_LOL_SET, x)
+
+#define SD25G_LANE_CMU_18_R_PLL_LOS_SET BIT(2)
+#define SD25G_LANE_CMU_18_R_PLL_LOS_SET_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_LOS_SET, x)
+#define SD25G_LANE_CMU_18_R_PLL_LOS_SET_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_18_R_PLL_LOS_SET, x)
+
+#define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0 GENMASK(5, 4)
+#define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0, x)
+#define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_19 */
+#define SD25G_LANE_CMU_19(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 100, 0, 1, 4)
+
+#define SD25G_LANE_CMU_19_R_CK_RESETB BIT(0)
+#define SD25G_LANE_CMU_19_R_CK_RESETB_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_19_R_CK_RESETB, x)
+#define SD25G_LANE_CMU_19_R_CK_RESETB_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_19_R_CK_RESETB, x)
+
+#define SD25G_LANE_CMU_19_R_PLL_DLOL_EN BIT(1)
+#define SD25G_LANE_CMU_19_R_PLL_DLOL_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_19_R_PLL_DLOL_EN, x)
+#define SD25G_LANE_CMU_19_R_PLL_DLOL_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_19_R_PLL_DLOL_EN, x)
+
+/* SD25G_TARGET:CMU_GRP_0:CMU_1A */
+#define SD25G_LANE_CMU_1A(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 104, 0, 1, 4)
+
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0 GENMASK(2, 0)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0, x)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0, x)
+
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT BIT(4)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT, x)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT, x)
+
+#define SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE BIT(5)
+#define SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE, x)
+#define SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE, x)
+
+#define SD25G_LANE_CMU_1A_R_REG_MANUAL BIT(6)
+#define SD25G_LANE_CMU_1A_R_REG_MANUAL_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_1A_R_REG_MANUAL, x)
+#define SD25G_LANE_CMU_1A_R_REG_MANUAL_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_1A_R_REG_MANUAL, x)
+
+/* SD25G_TARGET:CMU_GRP_1:CMU_2A */
+#define SD25G_LANE_CMU_2A(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 36, 0, 1, 4)
+
+#define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0 GENMASK(1, 0)
+#define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_2A_R_DBG_SEL_1_0, x)
+#define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_SEL_1_0, x)
+
+#define SD25G_LANE_CMU_2A_R_DBG_LINK_LANE BIT(4)
+#define SD25G_LANE_CMU_2A_R_DBG_LINK_LANE_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_2A_R_DBG_LINK_LANE, x)
+#define SD25G_LANE_CMU_2A_R_DBG_LINK_LANE_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_LINK_LANE, x)
+
+#define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS BIT(5)
+#define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS, x)
+#define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS, x)
+
+/* SD25G_TARGET:CMU_GRP_1:CMU_30 */
+#define SD25G_LANE_CMU_30(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 60, 0, 1, 4)
+
+#define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0 GENMASK(2, 0)
+#define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0, x)
+#define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0, x)
+
+#define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0 GENMASK(6, 4)
+#define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0, x)
+#define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0, x)
+
+/* SD25G_TARGET:CMU_GRP_1:CMU_31 */
+#define SD25G_LANE_CMU_31(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 64, 0, 1, 4)
+
+#define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0 GENMASK(7, 0)
+#define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0, x)
+#define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0, x)
+
+/* SD25G_TARGET:CMU_GRP_2:CMU_40 */
+#define SD25G_LANE_CMU_40(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 0, 0, 1, 4)
+
+#define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL BIT(0)
+#define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL, x)
+#define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD BIT(1)
+#define SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD, x)
+#define SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_PD_CLK BIT(2)
+#define SD25G_LANE_CMU_40_L0_CFG_PD_CLK_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_PD_CLK, x)
+#define SD25G_LANE_CMU_40_L0_CFG_PD_CLK_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_PD_CLK, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN BIT(3)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN, x)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN BIT(4)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN, x)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST BIT(5)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST, x)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST, x)
+
+/* SD25G_TARGET:CMU_GRP_2:CMU_45 */
+#define SD25G_LANE_CMU_45(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 20, 0, 1, 4)
+
+#define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0 GENMASK(7, 0)
+#define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0, x)
+#define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0, x)
+
+/* SD25G_TARGET:CMU_GRP_2:CMU_46 */
+#define SD25G_LANE_CMU_46(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 24, 0, 1, 4)
+
+#define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8 GENMASK(7, 0)
+#define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8, x)
+#define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8, x)
+
+/* SD25G_TARGET:CMU_GRP_3:CMU_C0 */
+#define SD25G_LANE_CMU_C0(t) __REG(TARGET_SD25G_LANE, t, 8, 768, 0, 1, 252, 0, 0, 1, 4)
+
+#define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0 GENMASK(3, 0)
+#define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0, x)
+#define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0, x)
+
+#define SD25G_LANE_CMU_C0_PLL_LOL_UDL BIT(4)
+#define SD25G_LANE_CMU_C0_PLL_LOL_UDL_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_C0_PLL_LOL_UDL, x)
+#define SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_C0_PLL_LOL_UDL, x)
+
+/* SD25G_TARGET:CMU_GRP_4:CMU_FF */
+#define SD25G_LANE_CMU_FF(t) __REG(TARGET_SD25G_LANE, t, 8, 1020, 0, 1, 4, 0, 0, 1, 4)
+
+#define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX GENMASK(7, 0)
+#define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(x)\
+ FIELD_PREP(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX, x)
+#define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_GET(x)\
+ FIELD_GET(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX, x)
+
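[editor's note, not patch content] A second hedged sketch: the CMU_C0 fields above look like hardware read-backs (a VCO coarse-tune value and a PLL loss-of-lock indication), so a driver would plausibly use only the _GET() accessors on a value read from the register. "regval" stands in for whatever the driver's register access layer returns; the function name is made up.

#include <linux/printk.h>
#include <linux/types.h>

static void sd25g_dump_cmu_c0(u32 regval)
{
	u32 ctune = SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0_GET(regval);
	u32 lol_udl = SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(regval);

	pr_debug("vco ctune=%u pll lol_udl=%u\n", ctune, lol_udl);
}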
+/* SD25G_TARGET:LANE_GRP_0:LANE_00 */
+#define SD25G_LANE_LANE_00(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 0, 0, 1, 4)
+
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0 GENMASK(3, 0)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0, x)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0, x)
+
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0 GENMASK(5, 4)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0, x)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_01 */
+#define SD25G_LANE_LANE_01(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 4, 0, 1, 4)
+
+#define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0, x)
+#define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0, x)
+
+#define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0 GENMASK(5, 4)
+#define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0, x)
+#define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_03 */
+#define SD25G_LANE_LANE_03(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 12, 0, 1, 4)
+
+#define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0 GENMASK(4, 0)
+#define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0, x)
+#define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_04 */
+#define SD25G_LANE_LANE_04(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 16, 0, 1, 4)
+
+#define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN BIT(0)
+#define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN, x)
+#define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN BIT(1)
+#define SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN, x)
+#define SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CML BIT(2)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CML_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_PD_CML, x)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CML_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_PD_CML, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CLK BIT(3)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CLK_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_PD_CLK, x)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CLK_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_PD_CLK, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER BIT(4)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER, x)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN BIT(5)
+#define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN, x)
+#define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_05 */
+#define SD25G_LANE_LANE_05(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 20, 0, 1, 4)
+
+#define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0 GENMASK(3, 0)
+#define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0, x)
+#define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0, x)
+
+#define SD25G_LANE_LANE_05_LN_CFG_BW_1_0 GENMASK(5, 4)
+#define SD25G_LANE_LANE_05_LN_CFG_BW_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_05_LN_CFG_BW_1_0, x)
+#define SD25G_LANE_LANE_05_LN_CFG_BW_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_05_LN_CFG_BW_1_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_06 */
+#define SD25G_LANE_LANE_06(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 24, 0, 1, 4)
+
+#define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN BIT(0)
+#define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_06_LN_CFG_EN_MAIN, x)
+#define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_06_LN_CFG_EN_MAIN, x)
+
+#define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0 GENMASK(7, 4)
+#define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0, x)
+#define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_07 */
+#define SD25G_LANE_LANE_07(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 28, 0, 1, 4)
+
+#define SD25G_LANE_LANE_07_LN_CFG_EN_ADV BIT(0)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_ADV_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_07_LN_CFG_EN_ADV, x)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_ADV_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_ADV, x)
+
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY2 BIT(1)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY2_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_07_LN_CFG_EN_DLY2, x)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY2_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_DLY2, x)
+
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY BIT(2)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_07_LN_CFG_EN_DLY, x)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_DLY, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_09 */
+#define SD25G_LANE_LANE_09(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 36, 0, 1, 4)
+
+#define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0 GENMASK(3, 0)
+#define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0, x)
+#define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_0A */
+#define SD25G_LANE_LANE_0A(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 40, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0 GENMASK(5, 0)
+#define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0, x)
+#define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_0B */
+#define SD25G_LANE_LANE_0B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 44, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN BIT(0)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN, x)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN, x)
+
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST BIT(1)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST, x)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST, x)
+
+#define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0 GENMASK(5, 4)
+#define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0, x)
+#define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_0C */
+#define SD25G_LANE_LANE_0C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 48, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+#define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+
+#define SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN BIT(4)
+#define SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN, x)
+#define SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN, x)
+
+#define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD BIT(5)
+#define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD, x)
+#define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_0D */
+#define SD25G_LANE_LANE_0D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 52, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8 BIT(4)
+#define SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN BIT(5)
+#define SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD BIT(6)
+#define SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN BIT(7)
+#define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_0E */
+#define SD25G_LANE_LANE_0E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 56, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN BIT(0)
+#define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN, x)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD BIT(1)
+#define SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD, x)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG BIT(2)
+#define SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG, x)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0 GENMASK(6, 4)
+#define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_0F */
+#define SD25G_LANE_LANE_0F(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 60, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1 GENMASK(4, 0)
+#define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1, x)
+#define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_18 */
+#define SD25G_LANE_LANE_18(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 96, 0, 1, 4)
+
+#define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN BIT(0)
+#define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN, x)
+#define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT BIT(1)
+#define SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT, x)
+#define SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN BIT(2)
+#define SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN, x)
+#define SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD BIT(3)
+#define SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD, x)
+#define SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0 GENMASK(6, 4)
+#define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0, x)
+#define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_19 */
+#define SD25G_LANE_LANE_19(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 100, 0, 1, 4)
+
+#define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD BIT(0)
+#define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_DCDR_PD, x)
+#define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_DCDR_PD, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_ECDR_PD BIT(1)
+#define SD25G_LANE_LANE_19_LN_CFG_ECDR_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_ECDR_PD, x)
+#define SD25G_LANE_LANE_19_LN_CFG_ECDR_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_ECDR_PD, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL BIT(2)
+#define SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL, x)
+#define SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_TXLB_EN BIT(3)
+#define SD25G_LANE_LANE_19_LN_CFG_TXLB_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_TXLB_EN, x)
+#define SD25G_LANE_LANE_19_LN_CFG_TXLB_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_TXLB_EN, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU BIT(4)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU, x)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP BIT(5)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP, x)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET BIT(6)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET, x)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE BIT(7)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_PD_CTLE, x)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_PD_CTLE, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_1A */
+#define SD25G_LANE_LANE_1A(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 104, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN BIT(0)
+#define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN, x)
+#define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN, x)
+
+#define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0 GENMASK(6, 4)
+#define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0, x)
+#define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_1B */
+#define SD25G_LANE_LANE_1B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 108, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0 GENMASK(7, 0)
+#define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0, x)
+#define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_1C */
+#define SD25G_LANE_LANE_1C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 112, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN BIT(0)
+#define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN, x)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_DFE_PD BIT(1)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFE_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_DFE_PD, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFE_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_DFE_PD, x)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD BIT(2)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD, x)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0 GENMASK(7, 4)
+#define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_1D */
+#define SD25G_LANE_LANE_1D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 116, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR BIT(0)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD BIT(1)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN BIT(2)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP BIT(3)
+#define SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PHID_1T BIT(4)
+#define SD25G_LANE_LANE_1D_LN_CFG_PHID_1T_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PHID_1T, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PHID_1T_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PHID_1T, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN BIT(5)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR BIT(6)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD BIT(7)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_1E */
+#define SD25G_LANE_LANE_1E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 120, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0 GENMASK(1, 0)
+#define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN BIT(4)
+#define SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN BIT(5)
+#define SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR BIT(6)
+#define SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD BIT(7)
+#define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_21 */
+#define SD25G_LANE_LANE_21(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 132, 0, 1, 4)
+
+#define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0 GENMASK(4, 0)
+#define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0, x)
+#define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_22 */
+#define SD25G_LANE_LANE_22(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 136, 0, 1, 4)
+
+#define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0 GENMASK(3, 0)
+#define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0, x)
+#define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_25 */
+#define SD25G_LANE_LANE_25(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 148, 0, 1, 4)
+
+#define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0 GENMASK(6, 0)
+#define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0, x)
+#define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_26 */
+#define SD25G_LANE_LANE_26(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 152, 0, 1, 4)
+
+#define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0 GENMASK(6, 0)
+#define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0, x)
+#define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_28 */
+#define SD25G_LANE_LANE_28(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 160, 0, 1, 4)
+
+#define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN BIT(0)
+#define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN, x)
+#define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN, x)
+
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH BIT(1)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH, x)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH, x)
+
+#define SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL BIT(2)
+#define SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL, x)
+#define SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL, x)
+
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0 GENMASK(6, 4)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0, x)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_2B */
+#define SD25G_LANE_LANE_2B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 172, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0 GENMASK(3, 0)
+#define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0, x)
+#define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0, x)
+
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR BIT(4)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR, x)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR, x)
+
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU BIT(5)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU, x)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_2C */
+#define SD25G_LANE_LANE_2C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 176, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0, x)
+#define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0, x)
+
+#define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER BIT(4)
+#define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER, x)
+#define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_2D */
+#define SD25G_LANE_LANE_2D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 180, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0, x)
+#define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0, x)
+
+#define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0 GENMASK(6, 4)
+#define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0, x)
+#define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_2E */
+#define SD25G_LANE_LANE_2E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 184, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN BIT(0)
+#define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ BIT(1)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_PD_SQ BIT(2)
+#define SD25G_LANE_LANE_2E_LN_CFG_PD_SQ_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_PD_SQ, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_PD_SQ_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_PD_SQ, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS BIT(3)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC BIT(4)
+#define SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG BIT(5)
+#define SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN BIT(6)
+#define SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN BIT(7)
+#define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_40 */
+#define SD25G_LANE_LANE_40(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 256, 0, 1, 4)
+
+#define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE BIT(0)
+#define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE, x)
+#define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE, x)
+
+#define SD25G_LANE_LANE_40_LN_R_TX_POL_INV BIT(1)
+#define SD25G_LANE_LANE_40_LN_R_TX_POL_INV_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_40_LN_R_TX_POL_INV, x)
+#define SD25G_LANE_LANE_40_LN_R_TX_POL_INV_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_40_LN_R_TX_POL_INV, x)
+
+#define SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE BIT(2)
+#define SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE, x)
+#define SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE, x)
+
+#define SD25G_LANE_LANE_40_LN_R_RX_POL_INV BIT(3)
+#define SD25G_LANE_LANE_40_LN_R_RX_POL_INV_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_40_LN_R_RX_POL_INV, x)
+#define SD25G_LANE_LANE_40_LN_R_RX_POL_INV_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_40_LN_R_RX_POL_INV, x)
+
+#define SD25G_LANE_LANE_40_LN_R_CDR_RSTN BIT(4)
+#define SD25G_LANE_LANE_40_LN_R_CDR_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_40_LN_R_CDR_RSTN, x)
+#define SD25G_LANE_LANE_40_LN_R_CDR_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_40_LN_R_CDR_RSTN, x)
+
+#define SD25G_LANE_LANE_40_LN_R_DFE_RSTN BIT(5)
+#define SD25G_LANE_LANE_40_LN_R_DFE_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_40_LN_R_DFE_RSTN, x)
+#define SD25G_LANE_LANE_40_LN_R_DFE_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_40_LN_R_DFE_RSTN, x)
+
+#define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN BIT(6)
+#define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_40_LN_R_CTLE_RSTN, x)
+#define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_40_LN_R_CTLE_RSTN, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_42 */
+#define SD25G_LANE_LANE_42(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 264, 0, 1, 4)
+
+#define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0 GENMASK(7, 0)
+#define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0, x)
+#define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_43 */
+#define SD25G_LANE_LANE_43(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 268, 0, 1, 4)
+
+#define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8 GENMASK(7, 0)
+#define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8, x)
+#define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_44 */
+#define SD25G_LANE_LANE_44(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 272, 0, 1, 4)
+
+#define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0 GENMASK(7, 0)
+#define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0, x)
+#define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0, x)
+
+/* SD25G_TARGET:LANE_GRP_0:LANE_45 */
+#define SD25G_LANE_LANE_45(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 276, 0, 1, 4)
+
+#define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8 GENMASK(7, 0)
+#define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8, x)
+#define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8, x)
+
+/* SD25G_TARGET:LANE_GRP_1:LANE_DE */
+#define SD25G_LANE_LANE_DE(t) __REG(TARGET_SD25G_LANE, t, 8, 1792, 0, 1, 128, 120, 0, 1, 4)
+
+#define SD25G_LANE_LANE_DE_LN_LOL_UDL BIT(0)
+#define SD25G_LANE_LANE_DE_LN_LOL_UDL_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_DE_LN_LOL_UDL, x)
+#define SD25G_LANE_LANE_DE_LN_LOL_UDL_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_DE_LN_LOL_UDL, x)
+
+#define SD25G_LANE_LANE_DE_LN_LOL BIT(1)
+#define SD25G_LANE_LANE_DE_LN_LOL_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_DE_LN_LOL, x)
+#define SD25G_LANE_LANE_DE_LN_LOL_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_DE_LN_LOL, x)
+
+#define SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED BIT(2)
+#define SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED, x)
+#define SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED, x)
+
+#define SD25G_LANE_LANE_DE_LN_PMA_RXEI BIT(3)
+#define SD25G_LANE_LANE_DE_LN_PMA_RXEI_SET(x)\
+ FIELD_PREP(SD25G_LANE_LANE_DE_LN_PMA_RXEI, x)
+#define SD25G_LANE_LANE_DE_LN_PMA_RXEI_GET(x)\
+ FIELD_GET(SD25G_LANE_LANE_DE_LN_PMA_RXEI, x)
+
+/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */
+#define SD6G_LANE_LANE_DF(t) __REG(TARGET_SD6G_LANE, t, 13, 832, 0, 1, 84, 60, 0, 1, 4)
+
+#define SD6G_LANE_LANE_DF_LOL_UDL BIT(0)
+#define SD6G_LANE_LANE_DF_LOL_UDL_SET(x)\
+ FIELD_PREP(SD6G_LANE_LANE_DF_LOL_UDL, x)
+#define SD6G_LANE_LANE_DF_LOL_UDL_GET(x)\
+ FIELD_GET(SD6G_LANE_LANE_DF_LOL_UDL, x)
+
+#define SD6G_LANE_LANE_DF_LOL BIT(1)
+#define SD6G_LANE_LANE_DF_LOL_SET(x)\
+ FIELD_PREP(SD6G_LANE_LANE_DF_LOL, x)
+#define SD6G_LANE_LANE_DF_LOL_GET(x)\
+ FIELD_GET(SD6G_LANE_LANE_DF_LOL, x)
+
+#define SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED BIT(2)
+#define SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_SET(x)\
+ FIELD_PREP(SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+#define SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_GET(x)\
+ FIELD_GET(SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+
+#define SD6G_LANE_LANE_DF_SQUELCH BIT(3)
+#define SD6G_LANE_LANE_DF_SQUELCH_SET(x)\
+ FIELD_PREP(SD6G_LANE_LANE_DF_SQUELCH, x)
+#define SD6G_LANE_LANE_DF_SQUELCH_GET(x)\
+ FIELD_GET(SD6G_LANE_LANE_DF_SQUELCH, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_0:CMU_00 */
+#define SD_CMU_CMU_00(t) __REG(TARGET_SD_CMU, t, 14, 0, 0, 1, 20, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE BIT(0)
+#define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_00_R_HWT_SIMULATION_MODE, x)
+#define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE_GET(x)\
+ FIELD_GET(SD_CMU_CMU_00_R_HWT_SIMULATION_MODE, x)
+
+#define SD_CMU_CMU_00_CFG_PLL_LOL_SET BIT(1)
+#define SD_CMU_CMU_00_CFG_PLL_LOL_SET_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_00_CFG_PLL_LOL_SET, x)
+#define SD_CMU_CMU_00_CFG_PLL_LOL_SET_GET(x)\
+ FIELD_GET(SD_CMU_CMU_00_CFG_PLL_LOL_SET, x)
+
+#define SD_CMU_CMU_00_CFG_PLL_LOS_SET BIT(2)
+#define SD_CMU_CMU_00_CFG_PLL_LOS_SET_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_00_CFG_PLL_LOS_SET, x)
+#define SD_CMU_CMU_00_CFG_PLL_LOS_SET_GET(x)\
+ FIELD_GET(SD_CMU_CMU_00_CFG_PLL_LOS_SET, x)
+
+#define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0 GENMASK(5, 4)
+#define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0, x)
+#define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_GET(x)\
+ FIELD_GET(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_05 */
+#define SD_CMU_CMU_05(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_05_CFG_REFCK_TERM_EN BIT(0)
+#define SD_CMU_CMU_05_CFG_REFCK_TERM_EN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_05_CFG_REFCK_TERM_EN, x)
+#define SD_CMU_CMU_05_CFG_REFCK_TERM_EN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_05_CFG_REFCK_TERM_EN, x)
+
+#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0 GENMASK(5, 4)
+#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x)
+#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_GET(x)\
+ FIELD_GET(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */
+#define SD_CMU_CMU_09(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 16, 0, 1, 4)
+
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_UP BIT(0)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_UP_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_09_CFG_EN_TX_CK_UP, x)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_UP_GET(x)\
+ FIELD_GET(SD_CMU_CMU_09_CFG_EN_TX_CK_UP, x)
+
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_DN BIT(1)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_DN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_09_CFG_EN_TX_CK_DN, x)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_DN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_09_CFG_EN_TX_CK_DN, x)
+
+#define SD_CMU_CMU_09_CFG_SW_8G BIT(4)
+#define SD_CMU_CMU_09_CFG_SW_8G_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_09_CFG_SW_8G, x)
+#define SD_CMU_CMU_09_CFG_SW_8G_GET(x)\
+ FIELD_GET(SD_CMU_CMU_09_CFG_SW_8G, x)
+
+#define SD_CMU_CMU_09_CFG_SW_10G BIT(5)
+#define SD_CMU_CMU_09_CFG_SW_10G_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_09_CFG_SW_10G, x)
+#define SD_CMU_CMU_09_CFG_SW_10G_GET(x)\
+ FIELD_GET(SD_CMU_CMU_09_CFG_SW_10G, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_0D */
+#define SD_CMU_CMU_0D(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 32, 0, 1, 4)
+
+#define SD_CMU_CMU_0D_CFG_PD_DIV64 BIT(0)
+#define SD_CMU_CMU_0D_CFG_PD_DIV64_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_0D_CFG_PD_DIV64, x)
+#define SD_CMU_CMU_0D_CFG_PD_DIV64_GET(x)\
+ FIELD_GET(SD_CMU_CMU_0D_CFG_PD_DIV64, x)
+
+#define SD_CMU_CMU_0D_CFG_PD_DIV66 BIT(1)
+#define SD_CMU_CMU_0D_CFG_PD_DIV66_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_0D_CFG_PD_DIV66, x)
+#define SD_CMU_CMU_0D_CFG_PD_DIV66_GET(x)\
+ FIELD_GET(SD_CMU_CMU_0D_CFG_PD_DIV66, x)
+
+#define SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD BIT(2)
+#define SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD, x)
+#define SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD, x)
+
+#define SD_CMU_CMU_0D_CFG_JC_BYP BIT(3)
+#define SD_CMU_CMU_0D_CFG_JC_BYP_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_0D_CFG_JC_BYP, x)
+#define SD_CMU_CMU_0D_CFG_JC_BYP_GET(x)\
+ FIELD_GET(SD_CMU_CMU_0D_CFG_JC_BYP, x)
+
+#define SD_CMU_CMU_0D_CFG_REFCK_PD BIT(4)
+#define SD_CMU_CMU_0D_CFG_REFCK_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_0D_CFG_REFCK_PD, x)
+#define SD_CMU_CMU_0D_CFG_REFCK_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_0D_CFG_REFCK_PD, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_3:CMU_1B */
+#define SD_CMU_CMU_1B(t) __REG(TARGET_SD_CMU, t, 14, 104, 0, 1, 20, 4, 0, 1, 4)
+
+#define SD_CMU_CMU_1B_CFG_RESERVE_7_0 GENMASK(7, 0)
+#define SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_1B_CFG_RESERVE_7_0, x)
+#define SD_CMU_CMU_1B_CFG_RESERVE_7_0_GET(x)\
+ FIELD_GET(SD_CMU_CMU_1B_CFG_RESERVE_7_0, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_4:CMU_1F */
+#define SD_CMU_CMU_1F(t) __REG(TARGET_SD_CMU, t, 14, 124, 0, 1, 68, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_1F_CFG_BIAS_DN_EN BIT(0)
+#define SD_CMU_CMU_1F_CFG_BIAS_DN_EN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_1F_CFG_BIAS_DN_EN, x)
+#define SD_CMU_CMU_1F_CFG_BIAS_DN_EN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_1F_CFG_BIAS_DN_EN, x)
+
+#define SD_CMU_CMU_1F_CFG_BIAS_UP_EN BIT(1)
+#define SD_CMU_CMU_1F_CFG_BIAS_UP_EN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_1F_CFG_BIAS_UP_EN, x)
+#define SD_CMU_CMU_1F_CFG_BIAS_UP_EN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_1F_CFG_BIAS_UP_EN, x)
+
+#define SD_CMU_CMU_1F_CFG_IC2IP_N BIT(2)
+#define SD_CMU_CMU_1F_CFG_IC2IP_N_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_1F_CFG_IC2IP_N, x)
+#define SD_CMU_CMU_1F_CFG_IC2IP_N_GET(x)\
+ FIELD_GET(SD_CMU_CMU_1F_CFG_IC2IP_N, x)
+
+#define SD_CMU_CMU_1F_CFG_VTUNE_SEL BIT(3)
+#define SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_1F_CFG_VTUNE_SEL, x)
+#define SD_CMU_CMU_1F_CFG_VTUNE_SEL_GET(x)\
+ FIELD_GET(SD_CMU_CMU_1F_CFG_VTUNE_SEL, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_5:CMU_30 */
+#define SD_CMU_CMU_30(t) __REG(TARGET_SD_CMU, t, 14, 192, 0, 1, 72, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_30_R_PLL_DLOL_EN BIT(0)
+#define SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_30_R_PLL_DLOL_EN, x)
+#define SD_CMU_CMU_30_R_PLL_DLOL_EN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_30_R_PLL_DLOL_EN, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_44 */
+#define SD_CMU_CMU_44(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 8, 0, 1, 4)
+
+#define SD_CMU_CMU_44_R_PLL_RSTN BIT(0)
+#define SD_CMU_CMU_44_R_PLL_RSTN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_44_R_PLL_RSTN, x)
+#define SD_CMU_CMU_44_R_PLL_RSTN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_44_R_PLL_RSTN, x)
+
+#define SD_CMU_CMU_44_R_CK_RESETB BIT(1)
+#define SD_CMU_CMU_44_R_CK_RESETB_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_44_R_CK_RESETB, x)
+#define SD_CMU_CMU_44_R_CK_RESETB_GET(x)\
+ FIELD_GET(SD_CMU_CMU_44_R_CK_RESETB, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_45 */
+#define SD_CMU_CMU_45(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 12, 0, 1, 4)
+
+#define SD_CMU_CMU_45_R_EN_RATECHG_CTRL BIT(0)
+#define SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_R_EN_RATECHG_CTRL, x)
+#define SD_CMU_CMU_45_R_EN_RATECHG_CTRL_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_R_EN_RATECHG_CTRL, x)
+
+#define SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT BIT(1)
+#define SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_RESERVED BIT(2)
+#define SD_CMU_CMU_45_RESERVED_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_RESERVED, x)
+#define SD_CMU_CMU_45_RESERVED_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_RESERVED, x)
+
+#define SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT BIT(3)
+#define SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_RESERVED_2 BIT(4)
+#define SD_CMU_CMU_45_RESERVED_2_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_RESERVED_2, x)
+#define SD_CMU_CMU_45_RESERVED_2_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_RESERVED_2, x)
+
+#define SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT BIT(5)
+#define SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT BIT(6)
+#define SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN BIT(7)
+#define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN, x)
+#define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_47 */
+#define SD_CMU_CMU_47(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 20, 0, 1, 4)
+
+#define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0 GENMASK(4, 0)
+#define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0, x)
+#define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_GET(x)\
+ FIELD_GET(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_7:CMU_E0 */
+#define SD_CMU_CMU_E0(t) __REG(TARGET_SD_CMU, t, 14, 896, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0 GENMASK(3, 0)
+#define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0, x)
+#define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0_GET(x)\
+ FIELD_GET(SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0, x)
+
+#define SD_CMU_CMU_E0_PLL_LOL_UDL BIT(4)
+#define SD_CMU_CMU_E0_PLL_LOL_UDL_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_E0_PLL_LOL_UDL, x)
+#define SD_CMU_CMU_E0_PLL_LOL_UDL_GET(x)\
+ FIELD_GET(SD_CMU_CMU_E0_PLL_LOL_UDL, x)
+
+/* SD_CMU_TARGET:SD_CMU_CFG:SD_CMU_CFG */
+#define SD_CMU_CFG_SD_CMU_CFG(t) __REG(TARGET_SD_CMU_CFG, t, 14, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_CMU_CFG_SD_CMU_CFG_CMU_RST BIT(0)
+#define SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(x)\
+ FIELD_PREP(SD_CMU_CFG_SD_CMU_CFG_CMU_RST, x)
+#define SD_CMU_CFG_SD_CMU_CFG_CMU_RST_GET(x)\
+ FIELD_GET(SD_CMU_CFG_SD_CMU_CFG_CMU_RST, x)
+
+#define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST BIT(1)
+#define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(x)\
+ FIELD_PREP(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, x)
+#define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_GET(x)\
+ FIELD_GET(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, x)
+
+/* SD_LANE_TARGET:SD_RESET:SD_SER_RST */
+#define SD_LANE_SD_SER_RST(t) __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_LANE_SD_SER_RST_SER_RST BIT(0)
+#define SD_LANE_SD_SER_RST_SER_RST_SET(x)\
+ FIELD_PREP(SD_LANE_SD_SER_RST_SER_RST, x)
+#define SD_LANE_SD_SER_RST_SER_RST_GET(x)\
+ FIELD_GET(SD_LANE_SD_SER_RST_SER_RST, x)
+
+/* SD_LANE_TARGET:SD_RESET:SD_DES_RST */
+#define SD_LANE_SD_DES_RST(t) __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_SD_DES_RST_DES_RST BIT(0)
+#define SD_LANE_SD_DES_RST_DES_RST_SET(x)\
+ FIELD_PREP(SD_LANE_SD_DES_RST_DES_RST, x)
+#define SD_LANE_SD_DES_RST_DES_RST_GET(x)\
+ FIELD_GET(SD_LANE_SD_DES_RST_DES_RST, x)
+
+/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */
+#define SD_LANE_SD_LANE_CFG(t) __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_LANE_SD_LANE_CFG_MACRO_RST BIT(0)
+#define SD_LANE_SD_LANE_CFG_MACRO_RST_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_CFG_MACRO_RST, x)
+#define SD_LANE_SD_LANE_CFG_MACRO_RST_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_CFG_MACRO_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_EXT_CFG_RST BIT(1)
+#define SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_CFG_EXT_CFG_RST, x)
+#define SD_LANE_SD_LANE_CFG_EXT_CFG_RST_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_CFG_EXT_CFG_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_TX_REF_SEL GENMASK(5, 4)
+#define SD_LANE_SD_LANE_CFG_TX_REF_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_CFG_TX_REF_SEL, x)
+#define SD_LANE_SD_LANE_CFG_TX_REF_SEL_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_CFG_TX_REF_SEL, x)
+
+#define SD_LANE_SD_LANE_CFG_RX_REF_SEL GENMASK(7, 6)
+#define SD_LANE_SD_LANE_CFG_RX_REF_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_CFG_RX_REF_SEL, x)
+#define SD_LANE_SD_LANE_CFG_RX_REF_SEL_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_CFG_RX_REF_SEL, x)
+
+#define SD_LANE_SD_LANE_CFG_LANE_RST BIT(8)
+#define SD_LANE_SD_LANE_CFG_LANE_RST_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_CFG_LANE_RST, x)
+#define SD_LANE_SD_LANE_CFG_LANE_RST_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_LANE_TX_RST BIT(9)
+#define SD_LANE_SD_LANE_CFG_LANE_TX_RST_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_CFG_LANE_TX_RST, x)
+#define SD_LANE_SD_LANE_CFG_LANE_TX_RST_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_TX_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_LANE_RX_RST BIT(10)
+#define SD_LANE_SD_LANE_CFG_LANE_RX_RST_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_CFG_LANE_RX_RST, x)
+#define SD_LANE_SD_LANE_CFG_LANE_RX_RST_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_RX_RST, x)
+
+/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */
+#define SD_LANE_SD_LANE_STAT(t) __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_SD_LANE_STAT_PMA_RST_DONE BIT(0)
+#define SD_LANE_SD_LANE_STAT_PMA_RST_DONE_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_STAT_PMA_RST_DONE, x)
+#define SD_LANE_SD_LANE_STAT_PMA_RST_DONE_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_STAT_PMA_RST_DONE, x)
+
+#define SD_LANE_SD_LANE_STAT_DFE_RST_DONE BIT(1)
+#define SD_LANE_SD_LANE_STAT_DFE_RST_DONE_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_STAT_DFE_RST_DONE, x)
+#define SD_LANE_SD_LANE_STAT_DFE_RST_DONE_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_STAT_DFE_RST_DONE, x)
+
+#define SD_LANE_SD_LANE_STAT_DBG_OBS GENMASK(31, 16)
+#define SD_LANE_SD_LANE_STAT_DBG_OBS_SET(x)\
+ FIELD_PREP(SD_LANE_SD_LANE_STAT_DBG_OBS, x)
+#define SD_LANE_SD_LANE_STAT_DBG_OBS_GET(x)\
+ FIELD_GET(SD_LANE_SD_LANE_STAT_DBG_OBS, x)
+
+/* SD_LANE_TARGET:CFG_STAT_FX100:MISC */
+#define SD_LANE_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 0, 0, 1, 4)
+
+#define SD_LANE_MISC_SD_125_RST_DIS BIT(0)
+#define SD_LANE_MISC_SD_125_RST_DIS_SET(x)\
+ FIELD_PREP(SD_LANE_MISC_SD_125_RST_DIS, x)
+#define SD_LANE_MISC_SD_125_RST_DIS_GET(x)\
+ FIELD_GET(SD_LANE_MISC_SD_125_RST_DIS, x)
+
+#define SD_LANE_MISC_RX_ENA BIT(1)
+#define SD_LANE_MISC_RX_ENA_SET(x)\
+ FIELD_PREP(SD_LANE_MISC_RX_ENA, x)
+#define SD_LANE_MISC_RX_ENA_GET(x)\
+ FIELD_GET(SD_LANE_MISC_RX_ENA, x)
+
+#define SD_LANE_MISC_MUX_ENA BIT(2)
+#define SD_LANE_MISC_MUX_ENA_SET(x)\
+ FIELD_PREP(SD_LANE_MISC_MUX_ENA, x)
+#define SD_LANE_MISC_MUX_ENA_GET(x)\
+ FIELD_GET(SD_LANE_MISC_MUX_ENA, x)
+
+#define SD_LANE_MISC_CORE_CLK_FREQ GENMASK(5, 4)
+#define SD_LANE_MISC_CORE_CLK_FREQ_SET(x)\
+ FIELD_PREP(SD_LANE_MISC_CORE_CLK_FREQ, x)
+#define SD_LANE_MISC_CORE_CLK_FREQ_GET(x)\
+ FIELD_GET(SD_LANE_MISC_CORE_CLK_FREQ, x)
+
+/* SD_LANE_TARGET:CFG_STAT_FX100:M_STAT_MISC */
+#define SD_LANE_M_STAT_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 36, 0, 1, 4)
+
+#define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM GENMASK(21, 0)
+#define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM_SET(x)\
+ FIELD_PREP(SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM, x)
+#define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM_GET(x)\
+ FIELD_GET(SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM, x)
+
+#define SD_LANE_M_STAT_MISC_M_LOCK_CNT GENMASK(31, 24)
+#define SD_LANE_M_STAT_MISC_M_LOCK_CNT_SET(x)\
+ FIELD_PREP(SD_LANE_M_STAT_MISC_M_LOCK_CNT, x)
+#define SD_LANE_M_STAT_MISC_M_LOCK_CNT_GET(x)\
+ FIELD_GET(SD_LANE_M_STAT_MISC_M_LOCK_CNT, x)
+
+/* SD25G_CFG_TARGET:SD_RESET:SD_SER_RST */
+#define SD_LANE_25G_SD_SER_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_LANE_25G_SD_SER_RST_SER_RST BIT(0)
+#define SD_LANE_25G_SD_SER_RST_SER_RST_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_SER_RST_SER_RST, x)
+#define SD_LANE_25G_SD_SER_RST_SER_RST_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_SER_RST_SER_RST, x)
+
+/* SD25G_CFG_TARGET:SD_RESET:SD_DES_RST */
+#define SD_LANE_25G_SD_DES_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_25G_SD_DES_RST_DES_RST BIT(0)
+#define SD_LANE_25G_SD_DES_RST_DES_RST_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_DES_RST_DES_RST, x)
+#define SD_LANE_25G_SD_DES_RST_DES_RST_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_DES_RST_DES_RST, x)
+
+/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */
+#define SD_LANE_25G_SD_LANE_CFG(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 0, 0, 1, 4)
+
+#define SD_LANE_25G_SD_LANE_CFG_MACRO_RST BIT(0)
+#define SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_MACRO_RST, x)
+#define SD_LANE_25G_SD_LANE_CFG_MACRO_RST_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_MACRO_RST, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST BIT(1)
+#define SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, x)
+#define SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE BIT(4)
+#define SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE, x)
+#define SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE GENMASK(7, 5)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_LANE_RST BIT(8)
+#define SD_LANE_25G_SD_LANE_CFG_LANE_RST_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_LANE_RST, x)
+#define SD_LANE_25G_SD_LANE_CFG_LANE_RST_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_LANE_RST, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV BIT(9)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN BIT(10)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY BIT(11)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV GENMASK(15, 12)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN BIT(16)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY GENMASK(21, 17)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN BIT(22)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN BIT(23)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING BIT(24)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI BIT(25)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN GENMASK(28, 26)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN, x)
+
+/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG2 */
+#define SD_LANE_25G_SD_LANE_CFG2(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 4, 0, 1, 4)
+
+#define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL GENMASK(2, 0)
+#define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL GENMASK(5, 3)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL GENMASK(8, 6)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED GENMASK(10, 9)
+#define SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED, x)
+#define SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV GENMASK(13, 11)
+#define SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV, x)
+#define SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV GENMASK(16, 14)
+#define SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV, x)
+#define SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL GENMASK(19, 17)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV GENMASK(23, 20)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV, x)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL GENMASK(25, 24)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL GENMASK(28, 26)
+#define SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL GENMASK(31, 29)
+#define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL, x)
+
+/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */
+#define SD_LANE_25G_SD_LANE_STAT(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 8, 0, 1, 4)
+
+#define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE BIT(0)
+#define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE, x)
+#define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE, x)
+
+#define SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE BIT(1)
+#define SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE, x)
+#define SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE, x)
+
+#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS GENMASK(31, 16)
+#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_SET(x)\
+ FIELD_PREP(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x)
+#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_GET(x)\
+ FIELD_GET(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x)
+
+#endif /* _SPARX5_SERDES_REGS_H_ */
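
The _SET/_GET helpers above are uniform FIELD_PREP/FIELD_GET wrappers around
each field mask, and the per-register macros expand __REG() into the full
register geometry, so driver code can update individual fields without
open-coded shifts. A minimal sketch of the intended read-modify-write usage
(sdx5_rmw() and the priv/lane/ref variables are assumptions borrowed from
the accompanying driver, not part of this header):

	/* Clear the lane reset and select the TX reference clock,
	 * leaving every other SD_LANE_SD_LANE_CFG field untouched.
	 */
	sdx5_rmw(SD_LANE_SD_LANE_CFG_LANE_RST_SET(0) |
		 SD_LANE_SD_LANE_CFG_TX_REF_SEL_SET(ref),
		 SD_LANE_SD_LANE_CFG_LANE_RST |
		 SD_LANE_SD_LANE_CFG_TX_REF_SEL,
		 priv, SD_LANE_SD_LANE_CFG(lane));
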
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 71cb10826326..ccb575b13777 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -373,6 +373,36 @@ int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
}
EXPORT_SYMBOL_GPL(phy_set_mode_ext);
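+/**
+ * phy_set_media() - set the media type the phy operates on
+ * @phy: the phy returned by phy_get()
+ * @media: the media type to configure
+ *
+ * Returns 0 if @phy is NULL or its ops have no set_media callback;
+ * otherwise the callback's result, invoked under the phy mutex.
+ */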
+int phy_set_media(struct phy *phy, enum phy_media media)
+{
+ int ret;
+
+ if (!phy || !phy->ops->set_media)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->set_media(phy, media);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_set_media);
+
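+/**
+ * phy_set_speed() - set the link speed the phy operates at
+ * @phy: the phy returned by phy_get()
+ * @speed: the speed to configure
+ *
+ * Returns 0 if @phy is NULL or its ops have no set_speed callback;
+ * otherwise the callback's result, invoked under the phy mutex.
+ */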
+int phy_set_speed(struct phy *phy, int speed)
+{
+ int ret;
+
+ if (!phy || !phy->ops->set_speed)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->set_speed(phy, speed);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_set_speed);
+
int phy_reset(struct phy *phy)
{
int ret;
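
With both helpers exported, a consumer can program the serdes before
powering the phy on. A hedged sketch of a MAC driver's open path (the
port->serdes handle is hypothetical; PHY_MEDIA_SR is assumed from the
enum phy_media introduced by this series, and SPEED_10000 is the
ethtool speed constant in Mbps):

	err = phy_set_media(port->serdes, PHY_MEDIA_SR);
	if (err)
		return err;

	err = phy_set_speed(port->serdes, SPEED_10000);
	if (err)
		return err;

	err = phy_power_on(port->serdes);

Both helpers return 0 when the phy is absent or the operation is not
implemented, so a consumer needs no capability check before calling them.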
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 9061ece7ff6a..bfff0c8c9130 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -276,8 +276,8 @@ static int qcom_ipq806x_usb_hs_phy_init(struct phy *phy)
val = HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_DMSEHV_CLAMP |
HSUSB_CTRL_RETENABLEN | HSUSB_CTRL_COMMONONN |
HSUSB_CTRL_OTGSESSVLD_CLAMP | HSUSB_CTRL_ID_HV_CLAMP |
- HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_UTMI_OTG_VBUS_VALID |
- HSUSB_CTRL_UTMI_CLK_EN | HSUSB_CTRL_CLAMP_EN | 0x70;
+ HSUSB_CTRL_UTMI_OTG_VBUS_VALID | HSUSB_CTRL_UTMI_CLK_EN |
+ HSUSB_CTRL_CLAMP_EN | 0x70;
/* use core clock if external reference is not present */
if (!phy_dwc3->xo_clk)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 9cdebe7f26cb..7877f70cf86f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -1840,6 +1840,86 @@ static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
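+/*
+ * V4 DP PLL tables: qmp_v4_dp_serdes_tbl carries the rate-independent
+ * serdes setup, while the _rbr/_hbr/_hbr2/_hbr3 tables below overlay
+ * the HSCLK divider, decimation start and lock comparator values for
+ * the four DisplayPort link rates (1.62, 2.7, 5.4 and 8.1 Gbps).
+ */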
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x8c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x2a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_INTERFACE_SELECT, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_BAND, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_POL_INV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_DRV_LVL, 0x2a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20),
+};
+
static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
@@ -2268,6 +2348,8 @@ static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+struct qmp_phy;
+
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
/* phy-type - PCIE/UFS/USB */
@@ -2307,6 +2389,12 @@ struct qmp_phy_cfg {
const struct qmp_phy_init_tbl *serdes_tbl_hbr3;
int serdes_tbl_hbr3_num;
+ /* DP PHY callbacks */
+ int (*configure_dp_phy)(struct qmp_phy *qphy);
+ void (*configure_dp_tx)(struct qmp_phy *qphy);
+ int (*calibrate_dp_phy)(struct qmp_phy *qphy);
+ void (*dp_aux_init)(struct qmp_phy *qphy);
+
/* clock ids to be requested */
const char * const *clk_list;
int num_clks;
@@ -2423,6 +2511,16 @@ struct qcom_qmp {
struct reset_control *ufs_reset;
};
+static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy);
+static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy);
+static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy);
+static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy);
+
+static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy);
+static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy);
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy);
+static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy);
+
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -2871,6 +2969,11 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
.has_phy_dp_com_ctrl = true,
.is_dual_lane_phy = true,
+
+ .dp_aux_init = qcom_qmp_v3_phy_dp_aux_init,
+ .configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx,
+ .configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy,
+ .calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate,
};
static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = {
@@ -3123,6 +3226,46 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
+ .type = PHY_TYPE_DP,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v4_dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
+ .tx_tbl = qmp_v4_dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+
+ .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
+ .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
+ .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
+ .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
+};
+
+static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = {
+ .usb_cfg = &sm8250_usb3phy_cfg,
+ .dp_cfg = &sm8250_dpphy_cfg,
+};
+
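The combo entry above simply pairs the existing USB3 config with the new DP config; whichever per-PHY cfg applies can then be picked out by child type. A minimal sketch of that selection, with a hypothetical helper name not taken from this patch:

/* Hypothetical helper: split a combo cfg into the per-PHY cfg. */
static const struct qmp_phy_cfg *
combo_cfg_for_type(const struct qmp_phy_combo_cfg *combo, int type)
{
	return type == PHY_TYPE_DP ? combo->dp_cfg : combo->usb_cfg;
}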
static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -3332,24 +3475,24 @@ static int qcom_qmp_phy_serdes_init(struct qmp_phy *qphy)
return 0;
}
-static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy)
+static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy)
{
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
- qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+ qphy->pcs + QSERDES_DP_PHY_PD_CTL);
/* Turn on BIAS current for PHY/PLL */
writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX |
QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL,
qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
- writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_LANE_0_1_PWRDN |
DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN |
DP_PHY_PD_CTL_DP_CLAMP_EN,
- qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+ qphy->pcs + QSERDES_DP_PHY_PD_CTL);
writel(QSERDES_V3_COM_BIAS_EN |
QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN |
@@ -3357,16 +3500,16 @@ static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy)
QSERDES_V3_COM_CLKBUF_RX_DRIVE_L,
qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
- writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG0);
- writel(0x13, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1);
- writel(0x24, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2);
- writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG3);
- writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG4);
- writel(0x26, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG5);
- writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG6);
- writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG7);
- writel(0xbb, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG8);
- writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG9);
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
+ writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+ writel(0x24, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
+ writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
+ writel(0xbb, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
qphy->dp_aux_cfg = 0;
writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
@@ -3375,6 +3518,20 @@ static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy)
qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
}
+static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
+ { 0x00, 0x0c, 0x15, 0x1a },
+ { 0x02, 0x0e, 0x16, 0xff },
+ { 0x02, 0x11, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
+ { 0x02, 0x12, 0x16, 0x1a },
+ { 0x09, 0x19, 0x1f, 0xff },
+ { 0x10, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
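These 4x4 tables are indexed as [voltage swing][pre-emphasis level], and 0xff marks combinations that must be rejected. A minimal sketch of the lookup-and-validate pattern that qcom_qmp_phy_configure_dp_swing() applies below:

/* Illustrative lookup: 0xff flags an unsupported swing/pre-emphasis pair. */
static int dp_swing_lookup(const u8 table[4][4], unsigned int v_level,
			   unsigned int p_level, u8 *cfg)
{
	if (v_level > 3 || p_level > 3 || table[v_level][p_level] == 0xff)
		return -EINVAL;
	*cfg = table[v_level][p_level];
	return 0;
}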
static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
{ 0x00, 0x0c, 0x14, 0x19 },
{ 0x00, 0x0b, 0x12, 0xff },
@@ -3389,11 +3546,11 @@ static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
{ 0x1f, 0xff, 0xff, 0xff }
};
-static void qcom_qmp_phy_configure_dp_tx(struct qmp_phy *qphy)
+static int qcom_qmp_phy_configure_dp_swing(struct qmp_phy *qphy,
+ unsigned int drv_lvl_reg, unsigned int emp_post_reg)
{
const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
unsigned int v_level = 0, p_level = 0;
- u32 bias_en, drvr_en;
u8 voltage_swing_cfg, pre_emphasis_cfg;
int i;
@@ -3402,56 +3559,58 @@ static void qcom_qmp_phy_configure_dp_tx(struct qmp_phy *qphy)
p_level = max(p_level, dp_opts->pre[i]);
}
- if (dp_opts->lanes == 1) {
- bias_en = 0x3e;
- drvr_en = 0x13;
+ if (dp_opts->link_rate <= 2700) {
+ voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
+ pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
} else {
- bias_en = 0x3f;
- drvr_en = 0x10;
+ voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr3_hbr2[v_level][p_level];
+ pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr3_hbr2[v_level][p_level];
}
- voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
- pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
-
/* TODO: Move check to config check */
if (voltage_swing_cfg == 0xFF && pre_emphasis_cfg == 0xFF)
- return;
+ return -EINVAL;
/* Enable MUX to use Cursor values from these registers */
voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;
- writel(voltage_swing_cfg, qphy->tx + QSERDES_V3_TX_TX_DRV_LVL);
- writel(pre_emphasis_cfg, qphy->tx + QSERDES_V3_TX_TX_EMP_POST1_LVL);
- writel(voltage_swing_cfg, qphy->tx2 + QSERDES_V3_TX_TX_DRV_LVL);
- writel(pre_emphasis_cfg, qphy->tx2 + QSERDES_V3_TX_TX_EMP_POST1_LVL);
+ writel(voltage_swing_cfg, qphy->tx + drv_lvl_reg);
+ writel(pre_emphasis_cfg, qphy->tx + emp_post_reg);
+ writel(voltage_swing_cfg, qphy->tx2 + drv_lvl_reg);
+ writel(pre_emphasis_cfg, qphy->tx2 + emp_post_reg);
- writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN);
- writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN);
- writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+ return 0;
}
-static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
{
- const struct phy_configure_opts_dp *dp_opts = &opts->dp;
- struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 bias_en, drvr_en;
- memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts));
- if (qphy->dp_opts.set_voltages) {
- qcom_qmp_phy_configure_dp_tx(qphy);
- qphy->dp_opts.set_voltages = 0;
+ if (qcom_qmp_phy_configure_dp_swing(qphy,
+ QSERDES_V3_TX_TX_DRV_LVL,
+ QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
+ return;
+
+ if (dp_opts->lanes == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
}
- return 0;
+ writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+ writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+ writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
}
-static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
+static bool qcom_qmp_phy_configure_dp_mode(struct qmp_phy *qphy)
{
- const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
- const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
- u32 val, phy_vco_div, status;
- unsigned long pixel_freq;
+ u32 val;
+ bool reverse = false;
val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN;
@@ -3471,9 +3630,22 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
* writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
*/
val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
- writel(val, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+ writel(val, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+ writel(0x5c, qphy->pcs + QSERDES_DP_PHY_MODE);
+
+ return reverse;
+}
+
+static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 phy_vco_div, status;
+ unsigned long pixel_freq;
+
+ qcom_qmp_phy_configure_dp_mode(qphy);
- writel(0x5c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
@@ -3503,11 +3675,11 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
- writel(0x04, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2);
- writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
- writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
- writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
- writel(0x09, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ writel(0x04, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL);
@@ -3518,7 +3690,7 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
10000))
return -ETIMEDOUT;
- writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
status,
@@ -3527,9 +3699,9 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
10000))
return -ETIMEDOUT;
- writel(0x18, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
udelay(2000);
- writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
status,
@@ -3542,9 +3714,8 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
 * Cycle to the next AUX configuration value on every calibration
 * attempt, as many times as the caller retries.
*/
-static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
+static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy)
{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
static const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d };
u8 val;
@@ -3552,7 +3723,231 @@ static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
val = cfg1_settings[qphy->dp_aux_cfg];
- writel(val, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1);
+ writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+
+ return 0;
+}
+
+static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy)
+{
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+ /* Turn on BIAS current for PHY/PLL */
+ writel(0x17, qphy->serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
+ writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+ writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
+ writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
+ writel(0xb7, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
+ qphy->dp_aux_cfg = 0;
+
+ writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+ PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+ PHY_AUX_REQ_ERR_MASK,
+ qphy->pcs + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
+}
+
+static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy)
+{
+ /* Program default values before writing proper values */
+ writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
+ writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+
+ writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+
+ qcom_qmp_phy_configure_dp_swing(qphy,
+ QSERDES_V4_TX_TX_DRV_LVL,
+ QSERDES_V4_TX_TX_EMP_POST1_LVL);
+}
+
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 phy_vco_div, status;
+ unsigned long pixel_freq;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ bool reverse;
+
+ writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1);
+
+ reverse = qcom_qmp_phy_configure_dp_mode(qphy);
+
+ writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+ writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+
+ writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL);
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ phy_vco_div = 0x1;
+ pixel_freq = 1620000000UL / 2;
+ break;
+ case 2700:
+ phy_vco_div = 0x1;
+ pixel_freq = 2700000000UL / 2;
+ break;
+ case 5400:
+ phy_vco_div = 0x2;
+ pixel_freq = 5400000000UL / 4;
+ break;
+ case 8100:
+ phy_vco_div = 0x0;
+ pixel_freq = 8100000000UL / 6;
+ break;
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+ writel(phy_vco_div, qphy->pcs + QSERDES_V4_DP_PHY_VCO_DIV);
+
+ clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
+ clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
+
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ writel(0x20, qphy->serdes + QSERDES_V4_COM_RESETSM_CNTRL);
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_C_READY_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ /*
+ * At least for the 7nm DP PHY, this has to be done after the link
+ * clock has been enabled.
+ */
+
+ if (dp_opts->lanes == 1) {
+ bias0_en = reverse ? 0x3e : 0x15;
+ bias1_en = reverse ? 0x15 : 0x3e;
+ drvr0_en = reverse ? 0x13 : 0x10;
+ drvr1_en = reverse ? 0x10 : 0x13;
+ } else if (dp_opts->lanes == 2) {
+ bias0_en = reverse ? 0x3f : 0x15;
+ bias1_en = reverse ? 0x15 : 0x3f;
+ drvr0_en = 0x10;
+ drvr1_en = 0x10;
+ } else {
+ bias0_en = 0x3f;
+ bias1_en = 0x3f;
+ drvr0_en = 0x10;
+ drvr1_en = 0x10;
+ }
+
+ writel(drvr0_en, qphy->tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+ writel(bias0_en, qphy->tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr1_en, qphy->tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+ writel(bias1_en, qphy->tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+
+ writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
+ udelay(2000);
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x0a, qphy->tx + QSERDES_V4_TX_TX_POL_INV);
+ writel(0x0a, qphy->tx2 + QSERDES_V4_TX_TX_POL_INV);
+
+ writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
+ writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+
+ writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+
+ return 0;
+}
+
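The rate programming in the function above reduces to two pieces of arithmetic: the DP link clock is the symbol clock, i.e. the per-lane bit rate divided by 10 (hence link_rate in Mb/s times 100000 Hz), and dp_pixel is the VCO frequency divided by the divider selected through QSERDES_V4_DP_PHY_VCO_DIV. A worked example mirroring the switch statement:

/*
 * HBR2 (link_rate == 5400, i.e. 5.4 Gb/s per lane):
 *   link clock: 5400 * 100000    = 540 MHz  (bit rate / 10)
 *   dp_pixel:   5400000000UL / 4 = 1.35 GHz (VCO / 4, phy_vco_div = 0x2)
 * RBR/HBR use VCO / 2 and HBR3 uses VCO / 6, as in the switch above.
 */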
+/*
+ * Cycle to the next AUX configuration value on every calibration
+ * attempt, as many times as the caller retries.
+ */
+static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy)
+{
+ static const u8 cfg1_settings[] = { 0x20, 0x13, 0x23, 0x1d };
+ u8 val;
+
+ qphy->dp_aux_cfg++;
+ qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
+ val = cfg1_settings[qphy->dp_aux_cfg];
+
+ writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+
+ return 0;
+}
+
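Each calibrate call advances dp_aux_cfg modulo the table size, so repeated retries walk 0x20 -> 0x13 -> 0x23 -> 0x1d and then wrap around. The cycling, reduced to a standalone sketch:

/* Illustration: successive calibration attempts cycle through a table. */
static u8 next_aux_cfg(unsigned int *cursor, const u8 *settings, size_t n)
{
	*cursor = (*cursor + 1) % n;
	return settings[*cursor];
}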
+static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ const struct phy_configure_opts_dp *dp_opts = &opts->dp;
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts));
+ if (qphy->dp_opts.set_voltages) {
+ cfg->configure_dp_tx(qphy);
+ qphy->dp_opts.set_voltages = 0;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ if (cfg->calibrate_dp_phy)
+ return cfg->calibrate_dp_phy(qphy);
return 0;
}
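These two wrappers are the entry points the generic PHY framework invokes; they forward to whichever version-specific hook the matched qmp_phy_cfg installed. Presumably they are wired into the driver's DP phy_ops roughly as follows (the ops-struct name here is an assumption, not taken from this patch):

/* Sketch: how the wrappers would plug into a phy_ops table. */
static const struct phy_ops qmp_dp_phy_ops_sketch = {
	.configure = qcom_qmp_dp_phy_configure,
	.calibrate = qcom_qmp_dp_phy_calibrate,
	.owner = THIS_MODULE,
};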
@@ -3729,7 +4124,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
return ret;
if (cfg->type == PHY_TYPE_DP)
- qcom_qmp_phy_dp_aux_init(qphy);
+ cfg->dp_aux_init(qphy);
return 0;
}
@@ -3783,7 +4178,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
/* Configure special DP tx tunings */
if (cfg->type == PHY_TYPE_DP)
- qcom_qmp_phy_configure_dp_tx(qphy);
+ cfg->configure_dp_tx(qphy);
qcom_qmp_phy_configure_lane(rx, cfg->regs,
cfg->rx_tbl, cfg->rx_tbl_num, 1);
@@ -3802,7 +4197,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
/* Configure link rate, swing, etc. */
if (cfg->type == PHY_TYPE_DP) {
- qcom_qmp_phy_configure_dp_phy(qphy);
+ cfg->configure_dp_phy(qphy);
} else {
qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (cfg->pcs_tbl_sec)
@@ -3874,7 +4269,7 @@ static int qcom_qmp_phy_power_off(struct phy *phy)
if (cfg->type == PHY_TYPE_DP) {
/* Assert DP PHY power down */
- writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
} else {
/* PHY reset */
if (!cfg->no_pcs_sw_reset)
@@ -4578,6 +4973,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm8250-qmp-usb3-phy",
.data = &sm8250_usb3phy_cfg,
}, {
+ .compatible = "qcom,sm8250-qmp-usb3-dp-phy",
+ /* It's a combo PHY; matched via qcom_qmp_combo_phy_of_match_table */
+ }, {
.compatible = "qcom,sm8250-qmp-usb3-uni-phy",
.data = &sm8250_usb3_uniphy_cfg,
}, {
@@ -4611,6 +5009,10 @@ static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
.compatible = "qcom,sc7180-qmp-usb3-dp-phy",
.data = &sc7180_usb3dpphy_cfg,
},
+ {
+ .compatible = "qcom,sm8250-qmp-usb3-dp-phy",
+ .data = &sm8250_usb3dpphy_cfg,
+ },
{ }
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 71ce3aa174ae..67bd2dd0d8c5 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -349,13 +349,13 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
-/* Only for QMP V3 PHY - DP PHY registers */
-#define QSERDES_V3_DP_PHY_REVISION_ID0 0x000
-#define QSERDES_V3_DP_PHY_REVISION_ID1 0x004
-#define QSERDES_V3_DP_PHY_REVISION_ID2 0x008
-#define QSERDES_V3_DP_PHY_REVISION_ID3 0x00c
-#define QSERDES_V3_DP_PHY_CFG 0x010
-#define QSERDES_V3_DP_PHY_PD_CTL 0x018
+/* QMP PHY - DP PHY registers */
+#define QSERDES_DP_PHY_REVISION_ID0 0x000
+#define QSERDES_DP_PHY_REVISION_ID1 0x004
+#define QSERDES_DP_PHY_REVISION_ID2 0x008
+#define QSERDES_DP_PHY_REVISION_ID3 0x00c
+#define QSERDES_DP_PHY_CFG 0x010
+#define QSERDES_DP_PHY_PD_CTL 0x018
# define DP_PHY_PD_CTL_PWRDN 0x001
# define DP_PHY_PD_CTL_PSR_PWRDN 0x002
# define DP_PHY_PD_CTL_AUX_PWRDN 0x004
@@ -363,18 +363,19 @@
# define DP_PHY_PD_CTL_LANE_2_3_PWRDN 0x010
# define DP_PHY_PD_CTL_PLL_PWRDN 0x020
# define DP_PHY_PD_CTL_DP_CLAMP_EN 0x040
-#define QSERDES_V3_DP_PHY_MODE 0x01c
-#define QSERDES_V3_DP_PHY_AUX_CFG0 0x020
-#define QSERDES_V3_DP_PHY_AUX_CFG1 0x024
-#define QSERDES_V3_DP_PHY_AUX_CFG2 0x028
-#define QSERDES_V3_DP_PHY_AUX_CFG3 0x02c
-#define QSERDES_V3_DP_PHY_AUX_CFG4 0x030
-#define QSERDES_V3_DP_PHY_AUX_CFG5 0x034
-#define QSERDES_V3_DP_PHY_AUX_CFG6 0x038
-#define QSERDES_V3_DP_PHY_AUX_CFG7 0x03c
-#define QSERDES_V3_DP_PHY_AUX_CFG8 0x040
-#define QSERDES_V3_DP_PHY_AUX_CFG9 0x044
+#define QSERDES_DP_PHY_MODE 0x01c
+#define QSERDES_DP_PHY_AUX_CFG0 0x020
+#define QSERDES_DP_PHY_AUX_CFG1 0x024
+#define QSERDES_DP_PHY_AUX_CFG2 0x028
+#define QSERDES_DP_PHY_AUX_CFG3 0x02c
+#define QSERDES_DP_PHY_AUX_CFG4 0x030
+#define QSERDES_DP_PHY_AUX_CFG5 0x034
+#define QSERDES_DP_PHY_AUX_CFG6 0x038
+#define QSERDES_DP_PHY_AUX_CFG7 0x03c
+#define QSERDES_DP_PHY_AUX_CFG8 0x040
+#define QSERDES_DP_PHY_AUX_CFG9 0x044
+/* Only for QMP V3 PHY - DP PHY registers */
#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK 0x048
# define PHY_AUX_STOP_ERR_MASK 0x01
# define PHY_AUX_DEC_ERR_MASK 0x02
@@ -396,6 +397,7 @@
#define QSERDES_V3_DP_PHY_STATUS 0x0c0
/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_BG_TIMER 0x00c
#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
#define QSERDES_V4_COM_SSC_PER1 0x01c
#define QSERDES_V4_COM_SSC_PER2 0x020
@@ -403,7 +405,9 @@
#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028
#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030
#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN 0x044
#define QSERDES_V4_COM_CLK_ENABLE1 0x048
+#define QSERDES_V4_COM_SYS_CLK_CTRL 0x04c
#define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050
#define QSERDES_V4_COM_PLL_IVCO 0x058
#define QSERDES_V4_COM_CMN_IPTRIM 0x060
@@ -414,6 +418,7 @@
#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084
#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V4_COM_RESETSM_CNTRL 0x09c
#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
@@ -427,16 +432,24 @@
#define QSERDES_V4_COM_DIV_FRAC_START1_MODE1 0x0d8
#define QSERDES_V4_COM_DIV_FRAC_START2_MODE1 0x0dc
#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0
+#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0 0x0ec
+#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0 0x0f0
+#define QSERDES_V4_COM_VCO_TUNE_CTRL 0x108
#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110
#define QSERDES_V4_COM_VCO_TUNE2_MODE0 0x114
#define QSERDES_V4_COM_VCO_TUNE1_MODE1 0x118
#define QSERDES_V4_COM_VCO_TUNE2_MODE1 0x11c
#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V4_COM_CMN_STATUS 0x140
#define QSERDES_V4_COM_CLK_SELECT 0x154
#define QSERDES_V4_COM_HSCLK_SEL 0x158
#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_CORECLK_DIV_MODE0 0x168
#define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V4_COM_CORE_CLK_EN 0x174
+#define QSERDES_V4_COM_C_READY_STATUS 0x178
+#define QSERDES_V4_COM_CMN_CONFIG 0x17c
#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
@@ -445,19 +458,32 @@
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V4_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V4_TX_TX_DRV_LVL 0x14
+#define QSERDES_V4_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN 0x20
+#define QSERDES_V4_TX_TX_BAND 0x24
+#define QSERDES_V4_TX_INTERFACE_SELECT 0x2c
#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x34
#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x38
#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX 0x3c
#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX 0x40
+#define QSERDES_V4_TX_TRANSCEIVER_BIAS_EN 0x54
+#define QSERDES_V4_TX_HIGHZ_DRVR_EN 0x58
+#define QSERDES_V4_TX_TX_POL_INV 0x5c
+#define QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN 0x60
#define QSERDES_V4_TX_LANE_MODE_1 0x84
#define QSERDES_V4_TX_LANE_MODE_2 0x88
#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x9c
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
+#define QSERDES_V4_TX_TX_INTERFACE_MODE 0xbc
#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdc
#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
-#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
-#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
+#define QSERDES_V4_TX_VMODE_CTRL1 0xe8
+#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
/* Only for QMP V4 PHY - RX registers */
#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
@@ -514,6 +540,17 @@
#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
#define QSERDES_V4_RX_VTH_CODE 0x1c4
+/* Only for QMP V4 PHY - DP PHY registers */
+#define QSERDES_V4_DP_PHY_CFG_1 0x014
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK 0x054
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_CLEAR 0x058
+#define QSERDES_V4_DP_PHY_VCO_DIV 0x070
+#define QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL 0x078
+#define QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL 0x09c
+#define QSERDES_V4_DP_PHY_SPARE0 0x0c8
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
+#define QSERDES_V4_DP_PHY_STATUS 0x0dc
+
/* Only for QMP V4 PHY - UFS PCS registers */
#define QPHY_V4_PCS_UFS_PHY_START 0x000
#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 327df1a99f77..5c6c17673396 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -56,6 +56,7 @@ static int qcom_usb_hs_phy_set_mode(struct phy *phy,
fallthrough;
case PHY_MODE_USB_DEVICE:
val |= ULPI_INT_SESS_VALID;
+ break;
default:
break;
}
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 9a610b414b1f..753cb5bab930 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -62,7 +62,7 @@
#define RG_PE1_FRC_MSTCKDIV BIT(5)
-#define XTAL_MASK GENMASK(7, 6)
+#define XTAL_MASK GENMASK(8, 6)
#define MAX_PHYS 2
@@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
return PTR_ERR(phy->regmap);
phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
- if (IS_ERR(phy)) {
+ if (IS_ERR(phy->phy)) {
dev_err(dev, "failed to create phy\n");
- return PTR_ERR(phy);
+ return PTR_ERR(phy->phy);
}
phy_set_drvdata(phy->phy, phy);
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 70a31251b202..d2bbdc96a167 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1180,6 +1180,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to create phy: %pOFn\n",
child_np);
pm_runtime_disable(dev);
+ of_node_put(child_np);
return PTR_ERR(phy);
}
diff --git a/drivers/phy/st/Kconfig b/drivers/phy/st/Kconfig
index b32f44ff9033..3fc3d0781fb8 100644
--- a/drivers/phy/st/Kconfig
+++ b/drivers/phy/st/Kconfig
@@ -36,6 +36,7 @@ config PHY_STIH407_USB
config PHY_STM32_USBPHYC
tristate "STMicroelectronics STM32 USB HS PHY Controller driver"
depends on ARCH_STM32 || COMPILE_TEST
+ depends on COMMON_CLK
select GENERIC_PHY
help
Enable this to support the High-Speed USB transceivers that are part
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index d08fbb180e43..c184f4e34584 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -70,6 +71,7 @@ struct stm32_usbphyc {
struct regulator *vdda1v1;
struct regulator *vdda1v8;
atomic_t n_pll_cons;
+ struct clk_hw clk48_hw;
int switch_setup;
};
@@ -295,6 +297,61 @@ static const struct phy_ops stm32_usbphyc_phy_ops = {
.owner = THIS_MODULE,
};
+static int stm32_usbphyc_clk48_prepare(struct clk_hw *hw)
+{
+ struct stm32_usbphyc *usbphyc = container_of(hw, struct stm32_usbphyc, clk48_hw);
+
+ return stm32_usbphyc_pll_enable(usbphyc);
+}
+
+static void stm32_usbphyc_clk48_unprepare(struct clk_hw *hw)
+{
+ struct stm32_usbphyc *usbphyc = container_of(hw, struct stm32_usbphyc, clk48_hw);
+
+ stm32_usbphyc_pll_disable(usbphyc);
+}
+
+static unsigned long stm32_usbphyc_clk48_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ return 48000000;
+}
+
+static const struct clk_ops usbphyc_clk48_ops = {
+ .prepare = stm32_usbphyc_clk48_prepare,
+ .unprepare = stm32_usbphyc_clk48_unprepare,
+ .recalc_rate = stm32_usbphyc_clk48_recalc_rate,
+};
+
+static void stm32_usbphyc_clk48_unregister(void *data)
+{
+ struct stm32_usbphyc *usbphyc = data;
+
+ of_clk_del_provider(usbphyc->dev->of_node);
+ clk_hw_unregister(&usbphyc->clk48_hw);
+}
+
+static int stm32_usbphyc_clk48_register(struct stm32_usbphyc *usbphyc)
+{
+ struct device_node *node = usbphyc->dev->of_node;
+ struct clk_init_data init = { };
+ int ret = 0;
+
+ init.name = "ck_usbo_48m";
+ init.ops = &usbphyc_clk48_ops;
+
+ usbphyc->clk48_hw.init = &init;
+
+ ret = clk_hw_register(usbphyc->dev, &usbphyc->clk48_hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &usbphyc->clk48_hw);
+ if (ret)
+ clk_hw_unregister(&usbphyc->clk48_hw);
+
+ return ret;
+}
+
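Because PLL power is hooked into .prepare/.unprepare, any consumer of ck_usbo_48m transparently powers the PLL up and down. A hypothetical consumer-side sketch (the connection id depends on the consumer's clock-names, so it is an assumption here):

/* Hypothetical consumer: preparing the clock enables the USBPHYC PLL. */
struct clk *clk48 = devm_clk_get(dev, "ck_usbo_48m");

if (!IS_ERR(clk48))
	clk_prepare_enable(clk48); /* ends up in stm32_usbphyc_pll_enable() */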
static void stm32_usbphyc_switch_setup(struct stm32_usbphyc *usbphyc,
u32 utmi_switch)
{
@@ -473,6 +530,12 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
goto clk_disable;
}
+ ret = stm32_usbphyc_clk48_register(usbphyc);
+ if (ret) {
+ dev_err(dev, "failed to register ck_usbo_48m clock: %d\n", ret);
+ goto clk_disable;
+ }
+
version = readl_relaxed(usbphyc->base + STM32_USBPHYC_VERSION);
dev_info(dev, "registered rev:%lu.%lu\n",
FIELD_GET(MAJREV, version), FIELD_GET(MINREV, version));
@@ -497,6 +560,8 @@ static int stm32_usbphyc_remove(struct platform_device *pdev)
if (usbphyc->phys[port]->active)
stm32_usbphyc_phy_exit(usbphyc->phys[port]->phy);
+ stm32_usbphyc_clk48_unregister(usbphyc);
+
clk_disable_unprepare(usbphyc->clk);
return 0;
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index c9cfafe89cbf..9eb6d37c907e 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -7,6 +7,8 @@
*/
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-ti.h>
+#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/gpio.h>
@@ -26,6 +28,11 @@
#define WIZ_SERDES_RST 0x40c
#define WIZ_SERDES_TYPEC 0x410
#define WIZ_LANECTL(n) (0x480 + (0x40 * (n)))
+#define WIZ_LANEDIV(n) (0x484 + (0x40 * (n)))
+
+#define WIZ_MAX_INPUT_CLOCKS 4
+/* Covers mux clocks, divider clocks and gate clocks */
+#define WIZ_MAX_OUTPUT_CLOCKS 32
#define WIZ_MAX_LANES 4
#define WIZ_MUX_NUM_CLOCKS 3
@@ -52,8 +59,16 @@ enum wiz_refclk_div_sel {
CMN_REFCLK1_DIG_DIV,
};
+enum wiz_clock_input {
+ WIZ_CORE_REFCLK,
+ WIZ_EXT_REFCLK,
+ WIZ_CORE_REFCLK1,
+ WIZ_EXT_REFCLK1,
+};
+
static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
+static const struct reg_field phy_en_refclk = REG_FIELD(WIZ_SERDES_RST, 30, 30);
static const struct reg_field pll1_refclk_mux_sel =
REG_FIELD(WIZ_SERDES_RST, 29, 29);
static const struct reg_field pll0_refclk_mux_sel =
@@ -70,6 +85,12 @@ static const struct reg_field pma_cmn_refclk_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
static const struct reg_field pma_cmn_refclk1_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+static const char * const output_clk_names[] = {
+ [TI_WIZ_PLL0_REFCLK] = "pll0-refclk",
+ [TI_WIZ_PLL1_REFCLK] = "pll1-refclk",
+ [TI_WIZ_REFCLK_DIG] = "refclk-dig",
+ [TI_WIZ_PHY_EN_REFCLK] = "phy-en-refclk",
+};
static const struct reg_field p_enable[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 30, 31),
@@ -101,13 +122,34 @@ static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(3), 24, 25),
};
+static const struct reg_field p0_fullrt_div[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 22, 23),
+ REG_FIELD(WIZ_LANECTL(1), 22, 23),
+ REG_FIELD(WIZ_LANECTL(2), 22, 23),
+ REG_FIELD(WIZ_LANECTL(3), 22, 23),
+};
+
+static const struct reg_field p_mac_div_sel0[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANEDIV(0), 16, 22),
+ REG_FIELD(WIZ_LANEDIV(1), 16, 22),
+ REG_FIELD(WIZ_LANEDIV(2), 16, 22),
+ REG_FIELD(WIZ_LANEDIV(3), 16, 22),
+};
+
+static const struct reg_field p_mac_div_sel1[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANEDIV(0), 0, 8),
+ REG_FIELD(WIZ_LANEDIV(1), 0, 8),
+ REG_FIELD(WIZ_LANEDIV(2), 0, 8),
+ REG_FIELD(WIZ_LANEDIV(3), 0, 8),
+};
+
static const struct reg_field typec_ln10_swap =
REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
struct wiz_clk_mux {
struct clk_hw hw;
struct regmap_field *field;
- u32 *table;
+ const u32 *table;
struct clk_init_data clk_data;
};
@@ -123,18 +165,26 @@ struct wiz_clk_divider {
#define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
struct wiz_clk_mux_sel {
- struct regmap_field *field;
- u32 table[4];
+ u32 table[WIZ_MAX_INPUT_CLOCKS];
const char *node_name;
+ u32 num_parents;
+ u32 parents[WIZ_MAX_INPUT_CLOCKS];
};
struct wiz_clk_div_sel {
- struct regmap_field *field;
- const struct clk_div_table *table;
+ const struct clk_div_table *table;
const char *node_name;
};
-static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
+struct wiz_phy_en_refclk {
+ struct clk_hw hw;
+ struct regmap_field *phy_en_refclk;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_phy_en_refclk(_hw) container_of(_hw, struct wiz_phy_en_refclk, hw)
+
+static const struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
{
/*
* Mux value to be configured for each of the input clocks
@@ -153,20 +203,26 @@ static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
},
};
-static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+static const struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
{
/*
* Mux value to be configured for each of the input clocks
* in the order populated in device tree
*/
+ .num_parents = 2,
+ .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "pll0-refclk",
},
{
+ .num_parents = 2,
+ .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "pll1-refclk",
},
{
+ .num_parents = 2,
+ .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "refclk-dig",
},
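The table entries map parent index (the order of .parents, matching the clocks listed in the device tree) to the register value programmed into the mux field, so for these 10G muxes selecting WIZ_CORE_REFCLK writes 1 and WIZ_EXT_REFCLK writes 0:

/*
 * Illustration of the .parents/.table pairing:
 *   parent index 0 (WIZ_CORE_REFCLK) -> register value table[0] == 1
 *   parent index 1 (WIZ_EXT_REFCLK)  -> register value table[1] == 0
 * wiz_clk_mux_set_parent() writes table[index] and
 * wiz_clk_mux_get_parent() inverts it via clk_mux_val_to_index().
 */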
@@ -179,7 +235,7 @@ static const struct clk_div_table clk_div_table[] = {
{ .val = 3, .div = 8, },
};
-static struct wiz_clk_div_sel clk_div_sel[] = {
+static const struct wiz_clk_div_sel clk_div_sel[] = {
{
.table = clk_div_table,
.node_name = "cmn-refclk-dig-div",
@@ -193,6 +249,7 @@ static struct wiz_clk_div_sel clk_div_sel[] = {
enum wiz_type {
J721E_WIZ_16G,
J721E_WIZ_10G,
+ AM64_WIZ_10G,
};
#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */
@@ -201,19 +258,25 @@ enum wiz_type {
struct wiz {
struct regmap *regmap;
enum wiz_type type;
- struct wiz_clk_mux_sel *clk_mux_sel;
- struct wiz_clk_div_sel *clk_div_sel;
+ const struct wiz_clk_mux_sel *clk_mux_sel;
+ const struct wiz_clk_div_sel *clk_div_sel;
unsigned int clk_div_sel_num;
struct regmap_field *por_en;
struct regmap_field *phy_reset_n;
+ struct regmap_field *phy_en_refclk;
struct regmap_field *p_enable[WIZ_MAX_LANES];
struct regmap_field *p_align[WIZ_MAX_LANES];
struct regmap_field *p_raw_auto_start[WIZ_MAX_LANES];
struct regmap_field *p_standard_mode[WIZ_MAX_LANES];
+ struct regmap_field *p_mac_div_sel0[WIZ_MAX_LANES];
+ struct regmap_field *p_mac_div_sel1[WIZ_MAX_LANES];
+ struct regmap_field *p0_fullrt_div[WIZ_MAX_LANES];
struct regmap_field *pma_cmn_refclk_int_mode;
struct regmap_field *pma_cmn_refclk_mode;
struct regmap_field *pma_cmn_refclk_dig_div;
struct regmap_field *pma_cmn_refclk1_dig_div;
+ struct regmap_field *mux_sel_field[WIZ_MUX_NUM_CLOCKS];
+ struct regmap_field *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G];
struct regmap_field *typec_ln10_swap;
struct device *dev;
@@ -223,6 +286,9 @@ struct wiz {
struct gpio_desc *gpio_typec_dir;
int typec_dir_delay;
u32 lane_phy_type[WIZ_MAX_LANES];
+ struct clk *input_clks[WIZ_MAX_INPUT_CLOCKS];
+ struct clk *output_clks[WIZ_MAX_OUTPUT_CLOCKS];
+ struct clk_onecell_data clk_data;
};
static int wiz_reset(struct wiz *wiz)
@@ -242,6 +308,27 @@ static int wiz_reset(struct wiz *wiz)
return 0;
}
+static int wiz_p_mac_div_sel(struct wiz *wiz)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int ret;
+ int i;
+
+ for (i = 0; i < num_lanes; i++) {
+ if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
+ ret = regmap_field_write(wiz->p_mac_div_sel0[i], 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(wiz->p_mac_div_sel1[i], 2);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int wiz_mode_select(struct wiz *wiz)
{
u32 num_lanes = wiz->num_lanes;
@@ -252,8 +339,10 @@ static int wiz_mode_select(struct wiz *wiz)
for (i = 0; i < num_lanes; i++) {
if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
mode = LANE_MODE_GEN1;
+ else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII)
+ mode = LANE_MODE_GEN2;
else
- mode = LANE_MODE_GEN4;
+ continue;
ret = regmap_field_write(wiz->p_standard_mode[i], mode);
if (ret)
@@ -299,6 +388,12 @@ static int wiz_init(struct wiz *wiz)
return ret;
}
+ ret = wiz_p_mac_div_sel(wiz);
+ if (ret) {
+ dev_err(dev, "Configuring P0 MAC DIV SEL failed\n");
+ return ret;
+ }
+
ret = wiz_init_raw_interface(wiz, true);
if (ret) {
dev_err(dev, "WIZ interface initialization failed\n");
@@ -310,8 +405,6 @@ static int wiz_init(struct wiz *wiz)
static int wiz_regfield_init(struct wiz *wiz)
{
- struct wiz_clk_mux_sel *clk_mux_sel;
- struct wiz_clk_div_sel *clk_div_sel;
struct regmap *regmap = wiz->regmap;
int num_lanes = wiz->num_lanes;
struct device *dev = wiz->dev;
@@ -344,54 +437,49 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->pma_cmn_refclk_mode);
}
- clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK_DIG_DIV];
- clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
- pma_cmn_refclk_dig_div);
- if (IS_ERR(clk_div_sel->field)) {
+ wiz->div_sel_field[CMN_REFCLK_DIG_DIV] =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_dig_div);
+ if (IS_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV])) {
dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
- return PTR_ERR(clk_div_sel->field);
+ return PTR_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV]);
}
if (wiz->type == J721E_WIZ_16G) {
- clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1_DIG_DIV];
- clk_div_sel->field =
+ wiz->div_sel_field[CMN_REFCLK1_DIG_DIV] =
devm_regmap_field_alloc(dev, regmap,
pma_cmn_refclk1_dig_div);
- if (IS_ERR(clk_div_sel->field)) {
+ if (IS_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV])) {
dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
- return PTR_ERR(clk_div_sel->field);
+ return PTR_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV]);
}
}
- clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
- clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
- pll0_refclk_mux_sel);
- if (IS_ERR(clk_mux_sel->field)) {
+ wiz->mux_sel_field[PLL0_REFCLK] =
+ devm_regmap_field_alloc(dev, regmap, pll0_refclk_mux_sel);
+ if (IS_ERR(wiz->mux_sel_field[PLL0_REFCLK])) {
dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
- return PTR_ERR(clk_mux_sel->field);
+ return PTR_ERR(wiz->mux_sel_field[PLL0_REFCLK]);
}
- clk_mux_sel = &wiz->clk_mux_sel[PLL1_REFCLK];
- clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
- pll1_refclk_mux_sel);
- if (IS_ERR(clk_mux_sel->field)) {
+ wiz->mux_sel_field[PLL1_REFCLK] =
+ devm_regmap_field_alloc(dev, regmap, pll1_refclk_mux_sel);
+ if (IS_ERR(wiz->mux_sel_field[PLL1_REFCLK])) {
dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
- return PTR_ERR(clk_mux_sel->field);
+ return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]);
}
- clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
- if (wiz->type == J721E_WIZ_10G)
- clk_mux_sel->field =
+ if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
+ wiz->mux_sel_field[REFCLK_DIG] =
devm_regmap_field_alloc(dev, regmap,
refclk_dig_sel_10g);
else
- clk_mux_sel->field =
+ wiz->mux_sel_field[REFCLK_DIG] =
devm_regmap_field_alloc(dev, regmap,
refclk_dig_sel_16g);
- if (IS_ERR(clk_mux_sel->field)) {
+ if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) {
dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
- return PTR_ERR(clk_mux_sel->field);
+ return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]);
}
for (i = 0; i < num_lanes; i++) {
@@ -424,6 +512,28 @@ static int wiz_regfield_init(struct wiz *wiz)
i);
return PTR_ERR(wiz->p_standard_mode[i]);
}
+
+ wiz->p0_fullrt_div[i] = devm_regmap_field_alloc(dev, regmap, p0_fullrt_div[i]);
+ if (IS_ERR(wiz->p0_fullrt_div[i])) {
+ dev_err(dev, "P%d_FULLRT_DIV reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_fullrt_div[i]);
+ }
+
+ wiz->p_mac_div_sel0[i] =
+ devm_regmap_field_alloc(dev, regmap, p_mac_div_sel0[i]);
+ if (IS_ERR(wiz->p_mac_div_sel0[i])) {
+ dev_err(dev, "P%d_MAC_DIV_SEL0 reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_mac_div_sel0[i]);
+ }
+
+ wiz->p_mac_div_sel1[i] =
+ devm_regmap_field_alloc(dev, regmap, p_mac_div_sel1[i]);
+ if (IS_ERR(wiz->p_mac_div_sel1[i])) {
+ dev_err(dev, "P%d_MAC_DIV_SEL1 reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_mac_div_sel1[i]);
+ }
}
wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
@@ -433,6 +543,76 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->typec_ln10_swap);
}
+ wiz->phy_en_refclk = devm_regmap_field_alloc(dev, regmap, phy_en_refclk);
+ if (IS_ERR(wiz->phy_en_refclk)) {
+ dev_err(dev, "PHY_EN_REFCLK reg field init failed\n");
+ return PTR_ERR(wiz->phy_en_refclk);
+ }
+
+ return 0;
+}
+
+static int wiz_phy_en_refclk_enable(struct clk_hw *hw)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+ struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+
+ regmap_field_write(phy_en_refclk, 1);
+
+ return 0;
+}
+
+static void wiz_phy_en_refclk_disable(struct clk_hw *hw)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+ struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+
+ regmap_field_write(phy_en_refclk, 0);
+}
+
+static int wiz_phy_en_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+ struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+ int val;
+
+ regmap_field_read(phy_en_refclk, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops wiz_phy_en_refclk_ops = {
+ .enable = wiz_phy_en_refclk_enable,
+ .disable = wiz_phy_en_refclk_disable,
+ .is_enabled = wiz_phy_en_refclk_is_enabled,
+};
+
+static int wiz_phy_en_refclk_register(struct wiz *wiz)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk;
+ struct device *dev = wiz->dev;
+ struct clk_init_data *init;
+ struct clk *clk;
+
+ wiz_phy_en_refclk = devm_kzalloc(dev, sizeof(*wiz_phy_en_refclk), GFP_KERNEL);
+ if (!wiz_phy_en_refclk)
+ return -ENOMEM;
+
+ init = &wiz_phy_en_refclk->clk_data;
+
+ init->ops = &wiz_phy_en_refclk_ops;
+ init->flags = 0;
+ init->name = output_clk_names[TI_WIZ_PHY_EN_REFCLK];
+
+ wiz_phy_en_refclk->phy_en_refclk = wiz->phy_en_refclk;
+ wiz_phy_en_refclk->hw.init = init;
+
+ clk = devm_clk_register(dev, &wiz_phy_en_refclk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ wiz->output_clks[TI_WIZ_PHY_EN_REFCLK] = clk;
+
return 0;
}
@@ -443,7 +623,7 @@ static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
unsigned int val;
regmap_field_read(field, &val);
- return clk_mux_val_to_index(hw, mux->table, 0, val);
+ return clk_mux_val_to_index(hw, (u32 *)mux->table, 0, val);
}
static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
@@ -461,8 +641,69 @@ static const struct clk_ops wiz_clk_mux_ops = {
.get_parent = wiz_clk_mux_get_parent,
};
-static int wiz_mux_clk_register(struct wiz *wiz, struct device_node *node,
- struct regmap_field *field, u32 *table)
+static int wiz_mux_clk_register(struct wiz *wiz, struct regmap_field *field,
+ const struct wiz_clk_mux_sel *mux_sel, int clk_index)
+{
+ struct device *dev = wiz->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct wiz_clk_mux *mux;
+ char clk_name[100];
+ struct clk *clk;
+ int ret = 0, i;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = mux_sel->num_parents;
+
+ parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++) {
+ clk = wiz->input_clks[mux_sel->parents[i]];
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(dev, "Failed to get parent clk for %s\n",
+ output_clk_names[clk_index]);
+ ret = -EINVAL;
+ goto err;
+ }
+ parent_names[i] = __clk_get_name(clk);
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), output_clk_names[clk_index]);
+
+ init = &mux->clk_data;
+
+ init->ops = &wiz_clk_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->field = field;
+ mux->table = mux_sel->table;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err;
+ }
+
+ wiz->output_clks[clk_index] = clk;
+
+err:
+ kfree(parent_names);
+
+ return ret;
+}
+
+static int wiz_mux_of_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field, const u32 *table)
{
struct device *dev = wiz->dev;
struct clk_init_data *init;
@@ -606,20 +847,70 @@ static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
{
- struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
struct device_node *clk_node;
int i;
+ if (wiz->type == AM64_WIZ_10G) {
+ of_clk_del_provider(dev->of_node);
+ return;
+ }
+
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
of_clk_del_provider(clk_node);
of_node_put(clk_node);
}
+
+ for (i = 0; i < wiz->clk_div_sel_num; i++) {
+ clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
+ of_clk_del_provider(clk_node);
+ of_node_put(clk_node);
+ }
+
+ of_clk_del_provider(wiz->dev->of_node);
+}
+
+static int wiz_clock_register(struct wiz *wiz)
+{
+ const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
+ struct device_node *node = dev->of_node;
+ int clk_index;
+ int ret;
+ int i;
+
+ if (wiz->type != AM64_WIZ_10G)
+ return 0;
+
+ clk_index = TI_WIZ_PLL0_REFCLK;
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++, clk_index++) {
+ ret = wiz_mux_clk_register(wiz, wiz->mux_sel_field[i], &clk_mux_sel[i], clk_index);
+ if (ret) {
+ dev_err(dev, "Failed to register clk: %s\n", output_clk_names[clk_index]);
+ return ret;
+ }
+ }
+
+ ret = wiz_phy_en_refclk_register(wiz);
+ if (ret) {
+ dev_err(dev, "Failed to add phy-en-refclk\n");
+ return ret;
+ }
+
+ wiz->clk_data.clks = wiz->output_clks;
+ wiz->clk_data.clk_num = WIZ_MAX_OUTPUT_CLOCKS;
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &wiz->clk_data);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+
+ return ret;
}
static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
{
- struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
struct device *dev = wiz->dev;
struct device_node *clk_node;
const char *node_name;
@@ -634,6 +925,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
ret = PTR_ERR(clk);
return ret;
}
+ wiz->input_clks[WIZ_CORE_REFCLK] = clk;
rate = clk_get_rate(clk);
if (rate >= 100000000)
@@ -647,6 +939,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
ret = PTR_ERR(clk);
return ret;
}
+ wiz->input_clks[WIZ_EXT_REFCLK] = clk;
rate = clk_get_rate(clk);
if (rate >= 100000000)
@@ -654,6 +947,13 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
else
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
+ if (wiz->type == AM64_WIZ_10G) {
+ ret = wiz_clock_register(wiz);
+ if (ret)
+ dev_err(dev, "Failed to register wiz clocks\n");
+ return ret;
+ }
+
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
node_name = clk_mux_sel[i].node_name;
clk_node = of_get_child_by_name(node, node_name);
@@ -663,8 +963,8 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
goto err;
}
- ret = wiz_mux_clk_register(wiz, clk_node, clk_mux_sel[i].field,
- clk_mux_sel[i].table);
+ ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i],
+ clk_mux_sel[i].table);
if (ret) {
dev_err(dev, "Failed to register %s clock\n",
node_name);
@@ -684,7 +984,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
goto err;
}
- ret = wiz_div_clk_register(wiz, clk_node, clk_div_sel[i].field,
+ ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i],
clk_div_sel[i].table);
if (ret) {
dev_err(dev, "Failed to register %s clock\n",
@@ -719,6 +1019,17 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
return ret;
}
+static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
+{
+ if (wiz->type != AM64_WIZ_10G)
+ return 0;
+
+ if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
+ return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
+
+ return 0;
+}
+
static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -742,6 +1053,10 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
return ret;
}
+ ret = wiz_phy_fullrt_div(wiz, id - 1);
+ if (ret)
+ return ret;
+
if (wiz->lane_phy_type[id - 1] == PHY_TYPE_DP)
ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
else
@@ -769,6 +1084,9 @@ static const struct of_device_id wiz_id_table[] = {
{
.compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
},
+ {
+ .compatible = "ti,am64-wiz-10g", .data = (void *)AM64_WIZ_10G
+ },
{}
};
MODULE_DEVICE_TABLE(of, wiz_id_table);
@@ -787,8 +1105,13 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
u32 reg, num_lanes = 1, phy_type = PHY_NONE;
int ret, i;
+ if (!(of_node_name_eq(subnode, "phy") ||
+ of_node_name_eq(subnode, "link")))
+ continue;
+
ret = of_property_read_u32(subnode, "reg", &reg);
if (ret) {
+ of_node_put(subnode);
dev_err(dev,
"%s: Reading \"reg\" from \"%s\" failed: %d\n",
__func__, subnode->name, ret);
@@ -813,13 +1136,14 @@ static int wiz_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct platform_device *serdes_pdev;
+ bool already_configured = false;
struct device_node *child_node;
struct regmap *regmap;
struct resource res;
void __iomem *base;
struct wiz *wiz;
+ int ret, val, i;
u32 num_lanes;
- int ret;
wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
if (!wiz)
@@ -900,14 +1224,14 @@ static int wiz_probe(struct platform_device *pdev)
wiz->dev = dev;
wiz->regmap = regmap;
wiz->num_lanes = num_lanes;
- if (wiz->type == J721E_WIZ_10G)
+ if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
wiz->clk_mux_sel = clk_mux_sel_10g;
else
wiz->clk_mux_sel = clk_mux_sel_16g;
wiz->clk_div_sel = clk_div_sel;
- if (wiz->type == J721E_WIZ_10G)
+ if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
else
wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
@@ -947,27 +1271,34 @@ static int wiz_probe(struct platform_device *pdev)
goto err_get_sync;
}
+ for (i = 0; i < wiz->num_lanes; i++) {
+ regmap_field_read(wiz->p_enable[i], &val);
+ if (val & (P_ENABLE | P_ENABLE_FORCE)) {
+ already_configured = true;
+ break;
+ }
+ }
+
+ if (!already_configured) {
+ ret = wiz_init(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ initialization failed\n");
+ goto err_wiz_init;
+ }
+ }
+
serdes_pdev = of_platform_device_create(child_node, NULL, dev);
if (!serdes_pdev) {
dev_WARN(dev, "Unable to create SERDES platform device\n");
ret = -ENOMEM;
- goto err_pdev_create;
- }
- wiz->serdes_pdev = serdes_pdev;
-
- ret = wiz_init(wiz);
- if (ret) {
- dev_err(dev, "WIZ initialization failed\n");
goto err_wiz_init;
}
+ wiz->serdes_pdev = serdes_pdev;
of_node_put(child_node);
return 0;
err_wiz_init:
- of_platform_device_destroy(&serdes_pdev->dev, NULL);
-
-err_pdev_create:
wiz_clock_cleanup(wiz, node);
err_get_sync:
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index d8d0cc11d187..a63213f5972a 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -7,15 +7,16 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
#include <linux/module.h>
+#include <linux/bitfield.h>
#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>
#include <linux/gpio/consumer.h>
#include <linux/phy/ulpi_phy.h>
#define TUSB1210_VENDOR_SPECIFIC2 0x80
-#define TUSB1210_VENDOR_SPECIFIC2_IHSTX_SHIFT 0
-#define TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_SHIFT 4
-#define TUSB1210_VENDOR_SPECIFIC2_DP_SHIFT 6
+#define TUSB1210_VENDOR_SPECIFIC2_IHSTX_MASK GENMASK(3, 0)
+#define TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_MASK GENMASK(5, 4)
+#define TUSB1210_VENDOR_SPECIFIC2_DP_MASK BIT(6)
struct tusb1210 {
struct ulpi *ulpi;
@@ -118,22 +119,22 @@ static int tusb1210_probe(struct ulpi *ulpi)
* diagram optimization and DP/DM swap.
*/
+ reg = ulpi_read(ulpi, TUSB1210_VENDOR_SPECIFIC2);
+
/* High speed output drive strength configuration */
- device_property_read_u8(&ulpi->dev, "ihstx", &val);
- reg = val << TUSB1210_VENDOR_SPECIFIC2_IHSTX_SHIFT;
+ if (!device_property_read_u8(&ulpi->dev, "ihstx", &val))
+ u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_IHSTX_MASK);
/* High speed output impedance configuration */
- device_property_read_u8(&ulpi->dev, "zhsdrv", &val);
- reg |= val << TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_SHIFT;
+ if (!device_property_read_u8(&ulpi->dev, "zhsdrv", &val))
+ u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_MASK);
/* DP/DM swap control */
- device_property_read_u8(&ulpi->dev, "datapolarity", &val);
- reg |= val << TUSB1210_VENDOR_SPECIFIC2_DP_SHIFT;
+ if (!device_property_read_u8(&ulpi->dev, "datapolarity", &val))
+ u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_DP_MASK);
- if (reg) {
- ulpi_write(ulpi, TUSB1210_VENDOR_SPECIFIC2, reg);
- tusb->vendor_specific2 = reg;
- }
+ ulpi_write(ulpi, TUSB1210_VENDOR_SPECIFIC2, reg);
+ tusb->vendor_specific2 = reg;
tusb->phy = ulpi_phy_create(ulpi, &phy_ops);
if (IS_ERR(tusb->phy))
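The hunk above composes TUSB1210_VENDOR_SPECIFIC2 with the bitfield helpers from <linux/bitfield.h>: u8p_replace_bits(&reg, val, mask) shifts val into the bit positions selected by mask and overwrites only those bits, so a field configured via a device property no longer clobbers the previously read register contents. A minimal user-space sketch of these semantics, with a hypothetical stand-in for the kernel helper:

#include <stdio.h>
#include <stdint.h>

/* Field masks matching the TUSB1210_VENDOR_SPECIFIC2 layout above. */
#define IHSTX_MASK   0x0fu	/* GENMASK(3, 0) */
#define ZHSDRV_MASK  0x30u	/* GENMASK(5, 4) */
#define DP_MASK      0x40u	/* BIT(6) */

/* Hypothetical stand-in for the kernel's u8p_replace_bits(). */
static void u8p_replace_bits(uint8_t *p, uint8_t val, uint8_t mask)
{
	*p = (*p & ~mask) | ((uint8_t)(val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint8_t reg = 0xff;	/* pretend this came from ulpi_read() */

	u8p_replace_bits(&reg, 0x2, ZHSDRV_MASK);

	/* Bits 5:4 are now 0b10; all other bits are untouched: 0xef. */
	printf("reg = %#04x\n", reg);
	return 0;
}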
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index 9887f908f540..812e5409d359 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
usb_remove_phy(&twl->phy);
pm_runtime_get_sync(twl->dev);
- cancel_delayed_work(&twl->id_workaround_work);
+ cancel_delayed_work_sync(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
/* set transceiver mode to power on defaults */
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index 2b65f84a5f89..35652152ce5d 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -208,6 +208,7 @@ struct xpsgtr_phy {
* @gtr_mutex: mutex for locking
* @phys: PHY lanes
* @refclk_sscs: spread spectrum settings for the reference clocks
+ * @clk: reference clocks
* @tx_term_fix: fix for GT issue
* @saved_icm_cfg0: stored value of ICM CFG0 register
* @saved_icm_cfg1: stored value of ICM CFG1 register
@@ -219,6 +220,7 @@ struct xpsgtr_dev {
struct mutex gtr_mutex; /* mutex for locking */
struct xpsgtr_phy phys[NUM_LANES];
const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
+ struct clk *clk[NUM_LANES];
bool tx_term_fix;
unsigned int saved_icm_cfg0;
unsigned int saved_icm_cfg1;
@@ -818,11 +820,15 @@ static struct phy *xpsgtr_xlate(struct device *dev,
static int __maybe_unused xpsgtr_suspend(struct device *dev)
{
struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ unsigned int i;
/* Save the snapshot ICM_CFG registers. */
gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+ for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+ clk_disable_unprepare(gtr_dev->clk[i]);
+
return 0;
}
@@ -832,6 +838,13 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
unsigned int icm_cfg0, icm_cfg1;
unsigned int i;
bool skip_phy_init;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++) {
+ err = clk_prepare_enable(gtr_dev->clk[i]);
+ if (err)
+ goto err_clk_put;
+ }
icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
@@ -852,6 +865,12 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
gtr_dev->phys[i].skip_phy_init = skip_phy_init;
return 0;
+
+err_clk_put:
+ while (i--)
+ clk_disable_unprepare(gtr_dev->clk[i]);
+
+ return err;
}
static const struct dev_pm_ops xpsgtr_pm_ops = {
@@ -865,6 +884,7 @@ static const struct dev_pm_ops xpsgtr_pm_ops = {
static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
{
unsigned int refclk;
+ int ret;
for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
unsigned long rate;
@@ -874,14 +894,22 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
snprintf(name, sizeof(name), "ref%u", refclk);
clk = devm_clk_get_optional(gtr_dev->dev, name);
- if (IS_ERR(clk))
- return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
- "Failed to get reference clock %u\n",
- refclk);
+ if (IS_ERR(clk)) {
+ ret = dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
+ "Failed to get reference clock %u\n",
+ refclk);
+ goto err_clk_put;
+ }
if (!clk)
continue;
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_clk_put;
+
+ gtr_dev->clk[refclk] = clk;
+
/*
* Get the spread spectrum (SSC) settings for the reference
* clock rate.
@@ -899,11 +927,18 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
dev_err(gtr_dev->dev,
"Invalid rate %lu for reference clock %u\n",
rate, refclk);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_clk_put;
}
}
return 0;
+
+err_clk_put:
+ while (refclk--)
+ clk_disable_unprepare(gtr_dev->clk[refclk]);
+
+ return ret;
}
static int xpsgtr_probe(struct platform_device *pdev)
@@ -912,6 +947,7 @@ static int xpsgtr_probe(struct platform_device *pdev)
struct xpsgtr_dev *gtr_dev;
struct phy_provider *provider;
unsigned int port;
+ unsigned int i;
int ret;
gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
@@ -951,7 +987,8 @@ static int xpsgtr_probe(struct platform_device *pdev)
phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
- return PTR_ERR(phy);
+ ret = PTR_ERR(phy);
+ goto err_clk_put;
}
gtr_phy->phy = phy;
@@ -962,9 +999,16 @@ static int xpsgtr_probe(struct platform_device *pdev)
provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
if (IS_ERR(provider)) {
dev_err(&pdev->dev, "registering provider failed\n");
- return PTR_ERR(provider);
+ ret = PTR_ERR(provider);
+ goto err_clk_put;
}
return 0;
+
+err_clk_put:
+ for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+ clk_disable_unprepare(gtr_dev->clk[i]);
+
+ return ret;
}
static const struct of_device_id xpsgtr_of_match[] = {
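The error paths added to xpsgtr_resume() and xpsgtr_get_ref_clocks() above both rely on the standard partial-unwind idiom: when enabling clock i fails, `while (i--)` walks back over exactly the clocks that were enabled successfully. A self-contained sketch of the idiom, using hypothetical enable()/disable() helpers in place of clk_prepare_enable()/clk_disable_unprepare():

#include <stdio.h>

/* Hypothetical stand-ins for clk_prepare_enable()/clk_disable_unprepare(). */
static int enable(int i)
{
	printf("enable %d\n", i);
	return i == 3 ? -1 : 0;	/* simulate a failure at index 3 */
}

static void disable(int i)
{
	printf("disable %d\n", i);
}

static int enable_all(int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = enable(i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)	/* steps back over indices i-1 .. 0 only */
		disable(i);
	return err;
}

int main(void)
{
	return enable_all(5) ? 1 : 0;
}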
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 7d3370289938..6e6825d17a1d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
unsigned i, pin;
#ifdef CONFIG_GPIOLIB
struct pinctrl_gpio_range *range;
- unsigned int gpio_num;
struct gpio_chip *chip;
+ int gpio_num;
#endif
seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
seq_printf(s, "pin %d (%s) ", pin, desc->name);
#ifdef CONFIG_GPIOLIB
- gpio_num = 0;
+ gpio_num = -1;
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
if ((pin >= range->pin_base) &&
(pin < (range->pin_base + range->npins))) {
@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
break;
}
}
- chip = gpio_to_chip(gpio_num);
- if (chip && chip->gpiodev && chip->gpiodev->base)
- seq_printf(s, "%u:%s ", gpio_num -
- chip->gpiodev->base, chip->label);
+ if (gpio_num >= 0)
+ chip = gpio_to_chip(gpio_num);
+ else
+ chip = NULL;
+ if (chip)
+ seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
else
seq_puts(s, "0:? ");
#endif
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 7fdf4257df1e..ad4b446d588e 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
static const struct intel_community lbg_communities[] = {
LBG_COMMUNITY(0, 0, 71),
LBG_COMMUNITY(1, 72, 132),
- LBG_COMMUNITY(3, 133, 144),
- LBG_COMMUNITY(4, 145, 180),
- LBG_COMMUNITY(5, 181, 246),
+ LBG_COMMUNITY(3, 133, 143),
+ LBG_COMMUNITY(4, 144, 178),
+ LBG_COMMUNITY(5, 179, 246),
};
static const struct intel_pinctrl_soc_data lbg_soc_data = {
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index 5d21c6adf1ab..1c7a288b59a5 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -208,7 +208,7 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev,
* 0011 = version 1, 0111 = version 2, 1111 = version 3). Upper 4 bits
* are a thermometer code indicating key programming has completed for
* key n (same encodings as the start bits). This allows for detection
- * of an interruption in the progamming process which has left the key
+ * of an interruption in the programming process which has left the key
* partially programmed (and thus invalid). The process is to burn the
* eFuse for the new key start bit, burn the key eFuses, then burn the
* eFuse for the new key complete bit.
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index b013445147dd..a9db2f32658f 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
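+	/* IRQF_NO_AUTOEN leaves the interrupt line disabled after request. */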
err = devm_request_irq(&pdev->dev, priv->irq,
mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
- | IRQF_SHARED, "mlxreg-hotplug", priv);
+ | IRQF_SHARED | IRQF_NO_AUTOEN,
+ "mlxreg-hotplug", priv);
if (err) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
return err;
}
- disable_irq(priv->irq);
spin_lock_init(&priv->lock);
INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
dev_set_drvdata(&pdev->dev, priv);
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 0847b2dc97bf..3105f651614f 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -77,6 +77,53 @@ config SURFACE_AGGREGATOR_CDEV
The provided interface is intended for debugging and development only,
and should not be used otherwise.
+config SURFACE_AGGREGATOR_REGISTRY
+ tristate "Surface System Aggregator Module Device Registry"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Device-registry and device-hubs for Surface System Aggregator Module
+ (SSAM) devices.
+
+ Provides a module and driver which act as a device-registry for SSAM
+ client devices that cannot be detected automatically, e.g. via ACPI.
+ Such devices are instead provided via this registry and attached via
+ device hubs, also provided in this module.
+
+ Devices provided via this registry are:
+ - Platform profile (performance-/cooling-mode) device (5th- and later
+ generations).
+ - Battery/AC devices (7th-generation).
+ - HID input devices (7th-generation).
+
+	  Select M (recommended) or Y here if you want support for the
+	  above-mentioned devices on the corresponding Surface models. Without this
+ module, the respective devices will not be instantiated and thus any
+ functionality provided by them will be missing, even when drivers for
+ these devices are present. In other words, this module only provides
+ the respective client devices. Drivers for these devices still need to
+ be selected via the other options.
+
+config SURFACE_DTX
+ tristate "Surface DTX (Detachment System) Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on INPUT
+ help
+ Driver for the Surface Book clipboard detachment system (DTX).
+
+ On the Surface Book series devices, the display part containing the
+ CPU (called the clipboard) can be detached from the base (containing a
+	  battery, the keyboard, and, optionally, a discrete GPU) by unlocking
+	  (if necessary) and opening the latch connecting both parts.
+
+ This driver provides a user-space interface that can influence the
+ behavior of this process, which includes the option to abort it in
+	  case the base is still in use, or to speed it up in case it is not.
+
+ Note that this module can be built without support for the Surface
+ Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
+ some devices, specifically the Surface Book 3, will not be supported.
+
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
depends on DMI
@@ -105,6 +152,28 @@ config SURFACE_HOTPLUG
Select M or Y here, if you want to (fully) support hot-plugging of
dGPU devices on the Surface Book 2 and/or 3 during D3cold.
+config SURFACE_PLATFORM_PROFILE
+ tristate "Surface Platform Profile Driver"
+ depends on SURFACE_AGGREGATOR_REGISTRY
+ select ACPI_PLATFORM_PROFILE
+ help
+ Provides support for the ACPI platform profile on 5th- and later
+ generation Microsoft Surface devices.
+
+ More specifically, this driver provides ACPI platform profile support
+ on Microsoft Surface devices with a Surface System Aggregator Module
+ (SSAM) connected via the Surface Serial Hub (SSH / SAM-over-SSH). In
+ other words, this driver provides platform profile support on the
+ Surface Pro 5, Surface Book 2, Surface Laptop, Surface Laptop Go and
+ later. On those devices, the platform profile can significantly
+	  influence cooling behavior: setting it to 'quiet' (default) or
+	  'low-power', for example, limits performance of the discrete GPU on
+ Surface Books, while in turn leading to lower power consumption and/or
+ less fan noise.
+
+ Select M or Y here, if you want to include ACPI platform profile
+	  support on the above-mentioned devices.
+
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
depends on INPUT
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
index 990424c5f0c9..32889482de55 100644
--- a/drivers/platform/surface/Makefile
+++ b/drivers/platform/surface/Makefile
@@ -10,6 +10,9 @@ obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
obj-$(CONFIG_SURFACE_AGGREGATOR) += aggregator/
obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
+obj-$(CONFIG_SURFACE_PLATFORM_PROFILE) += surface_platform_profile.o
obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 5bcb59ed579d..69e86cd599d3 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
union acpi_object *obj;
u64 val;
- if (!(funcs & BIT(func)))
+ if (!(funcs & BIT_ULL(func)))
return 0; /* Not supported, leave *ret at its default value */
obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
@@ -1750,35 +1750,35 @@ EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
/* -- Internal SAM requests. ------------------------------------------------ */
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x13,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x15,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x16,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x33,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x34,
@@ -2483,7 +2483,8 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
* interrupt, and let the SAM resume callback during the controller
* resume process clear it.
*/
- const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+ const int irqf = IRQF_SHARED | IRQF_ONESHOT |
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
if (IS_ERR(gpiod))
@@ -2501,7 +2502,6 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
return status;
ctrl->irq.num = irq;
- disable_irq(ctrl->irq.num);
return 0;
}
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
new file mode 100644
index 000000000000..685d37a7add1
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) client device registry.
+ *
+ * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
+ * cannot be auto-detected. Provides device-hubs and performs instantiation
+ * for these devices.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- Device registry. ------------------------------------------------------ */
+
+/*
+ * SSAM device names follow the SSAM module alias, meaning they are prefixed
+ * with 'ssam:', followed by domain, category, target ID, instance ID, and
+ * function, each encoded as two-digit hexadecimal, separated by ':'. In other
+ * words, it follows the scheme
+ *
+ * ssam:dd:cc:tt:ii:ff
+ *
+ * where 'dd', 'cc', 'tt', 'ii', and 'ff' are the two-digit hexadecimal
+ * values mentioned above, respectively.
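+ *
+ * For example, "ssam:01:15:02:01:00" (used below for the HID keyboard
+ * nodes) encodes domain 0x01, category 0x15, target ID 0x02, instance
+ * ID 0x01, and function 0x00.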
+ */
+
+/* Root node. */
+static const struct software_node ssam_node_root = {
+ .name = "ssam_platform_hub",
+};
+
+/* Base device hub (devices attached to Surface Book 3 base). */
+static const struct software_node ssam_node_hub_base = {
+ .name = "ssam:00:00:02:00:00",
+ .parent = &ssam_node_root,
+};
+
+/* AC adapter. */
+static const struct software_node ssam_node_bat_ac = {
+ .name = "ssam:01:02:01:01:01",
+ .parent = &ssam_node_root,
+};
+
+/* Primary battery. */
+static const struct software_node ssam_node_bat_main = {
+ .name = "ssam:01:02:01:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* Secondary battery (Surface Book 3). */
+static const struct software_node ssam_node_bat_sb3base = {
+ .name = "ssam:01:02:02:01:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* Platform profile / performance-mode device. */
+static const struct software_node ssam_node_tmp_pprof = {
+ .name = "ssam:01:03:01:00:01",
+ .parent = &ssam_node_root,
+};
+
+/* DTX / detachment-system device (Surface Book 3). */
+static const struct software_node ssam_node_bas_dtx = {
+ .name = "ssam:01:11:01:00:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID keyboard. */
+static const struct software_node ssam_node_hid_main_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID touchpad. */
+static const struct software_node ssam_node_hid_main_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID device instance 5 (unknown HID device). */
+static const struct software_node ssam_node_hid_main_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID keyboard (base hub). */
+static const struct software_node ssam_node_hid_base_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID touchpad (base hub). */
+static const struct software_node ssam_node_hid_base_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 5 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 6 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid6 = {
+ .name = "ssam:01:15:02:06:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* Devices for Surface Book 2. */
+static const struct software_node *ssam_node_group_sb2[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Book 3. */
+static const struct software_node *ssam_node_group_sb3[] = {
+ &ssam_node_root,
+ &ssam_node_hub_base,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_bat_sb3base,
+ &ssam_node_tmp_pprof,
+ &ssam_node_bas_dtx,
+ &ssam_node_hid_base_keyboard,
+ &ssam_node_hid_base_touchpad,
+ &ssam_node_hid_base_iid5,
+ &ssam_node_hid_base_iid6,
+ NULL,
+};
+
+/* Devices for Surface Laptop 1. */
+static const struct software_node *ssam_node_group_sl1[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Laptop 2. */
+static const struct software_node *ssam_node_group_sl2[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Laptop 3. */
+static const struct software_node *ssam_node_group_sl3[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_hid_main_keyboard,
+ &ssam_node_hid_main_touchpad,
+ &ssam_node_hid_main_iid5,
+ NULL,
+};
+
+/* Devices for Surface Laptop Go. */
+static const struct software_node *ssam_node_group_slg1[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 5. */
+static const struct software_node *ssam_node_group_sp5[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 6. */
+static const struct software_node *ssam_node_group_sp6[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 7 and Surface Pro 7+. */
+static const struct software_node *ssam_node_group_sp7[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+
+/* -- Device registry helper functions. ------------------------------------- */
+
+static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+ u8 d, tc, tid, iid, fn;
+ int n;
+
+ n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+ if (n != 5)
+ return -EINVAL;
+
+ uid->domain = d;
+ uid->category = tc;
+ uid->target = tid;
+ uid->instance = iid;
+ uid->function = fn;
+
+ return 0;
+}
+
+static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
+{
+ if (!is_ssam_device(dev))
+ return 0;
+
+ ssam_device_remove(to_ssam_device(dev));
+ return 0;
+}
+
+static void ssam_hub_remove_devices(struct device *parent)
+{
+ device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
+}
+
+static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct ssam_device_uid uid;
+ struct ssam_device *sdev;
+ int status;
+
+ status = ssam_uid_from_string(fwnode_get_name(node), &uid);
+ if (status)
+ return status;
+
+ sdev = ssam_device_alloc(ctrl, uid);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->dev.parent = parent;
+ sdev->dev.fwnode = node;
+
+ status = ssam_device_add(sdev);
+ if (status)
+ ssam_device_put(sdev);
+
+ return status;
+}
+
+static int ssam_hub_add_devices(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct fwnode_handle *child;
+ int status;
+
+ fwnode_for_each_child_node(node, child) {
+ /*
+ * Try to add the device specified in the firmware node. If
+ * this fails with -EINVAL, the node does not specify any SSAM
+ * device, so ignore it and continue with the next one.
+ */
+
+ status = ssam_hub_add_device(parent, ctrl, child);
+ if (status && status != -EINVAL)
+ goto err;
+ }
+
+ return 0;
+err:
+ ssam_hub_remove_devices(parent);
+ return status;
+}
+
+
+/* -- SSAM base-hub driver. ------------------------------------------------- */
+
+/*
+ * Some devices (especially the battery) may need a bit of time to be fully usable
+ * after being (re-)connected. This delay has been determined via
+ * experimentation.
+ */
+#define SSAM_BASE_UPDATE_CONNECT_DELAY msecs_to_jiffies(2500)
+
+enum ssam_base_hub_state {
+ SSAM_BASE_HUB_UNINITIALIZED,
+ SSAM_BASE_HUB_CONNECTED,
+ SSAM_BASE_HUB_DISCONNECTED,
+};
+
+struct ssam_base_hub {
+ struct ssam_device *sdev;
+
+ enum ssam_base_hub_state state;
+ struct delayed_work update_work;
+
+ struct ssam_event_notifier notif;
+};
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+#define SSAM_BAS_OPMODE_TABLET 0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
+
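+/* Treat any operation mode other than "tablet" as "base connected". */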
+static int ssam_base_hub_query_state(struct ssam_base_hub *hub, enum ssam_base_hub_state *state)
+{
+ u8 opmode;
+ int status;
+
+ status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
+ return status;
+ }
+
+ if (opmode != SSAM_BAS_OPMODE_TABLET)
+ *state = SSAM_BASE_HUB_CONNECTED;
+ else
+ *state = SSAM_BASE_HUB_DISCONNECTED;
+
+ return 0;
+}
+
+static ssize_t ssam_base_hub_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ssam_base_hub *hub = dev_get_drvdata(dev);
+ bool connected = hub->state == SSAM_BASE_HUB_CONNECTED;
+
+ return sysfs_emit(buf, "%d\n", connected);
+}
+
+static struct device_attribute ssam_base_hub_attr_state =
+ __ATTR(state, 0444, ssam_base_hub_state_show, NULL);
+
+static struct attribute *ssam_base_hub_attrs[] = {
+ &ssam_base_hub_attr_state.attr,
+ NULL,
+};
+
+static const struct attribute_group ssam_base_hub_group = {
+ .attrs = ssam_base_hub_attrs,
+};
+
+static void ssam_base_hub_update_workfn(struct work_struct *work)
+{
+ struct ssam_base_hub *hub = container_of(work, struct ssam_base_hub, update_work.work);
+ struct fwnode_handle *node = dev_fwnode(&hub->sdev->dev);
+ enum ssam_base_hub_state state;
+ int status = 0;
+
+ status = ssam_base_hub_query_state(hub, &state);
+ if (status)
+ return;
+
+ if (hub->state == state)
+ return;
+ hub->state = state;
+
+ if (hub->state == SSAM_BASE_HUB_CONNECTED)
+ status = ssam_hub_add_devices(&hub->sdev->dev, hub->sdev->ctrl, node);
+ else
+ ssam_hub_remove_devices(&hub->sdev->dev);
+
+ if (status)
+ dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
+}
+
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_base_hub *hub = container_of(nf, struct ssam_base_hub, notif);
+ unsigned long delay;
+
+ if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+ return 0;
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ /*
+ * Delay update when the base is being connected to give devices/EC
+ * some time to set up.
+ */
+ delay = event->data[0] ? SSAM_BASE_UPDATE_CONNECT_DELAY : 0;
+
+ schedule_delayed_work(&hub->update_work, delay);
+
+ /*
+ * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+ * consumed by the detachment system driver. We're just a (more or less)
+ * silent observer.
+ */
+ return 0;
+}
+
+static int __maybe_unused ssam_base_hub_resume(struct device *dev)
+{
+ struct ssam_base_hub *hub = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
+
+static int ssam_base_hub_probe(struct ssam_device *sdev)
+{
+ struct ssam_base_hub *hub;
+ int status;
+
+ hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->sdev = sdev;
+ hub->state = SSAM_BASE_HUB_UNINITIALIZED;
+
+ hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
+ hub->notif.base.fn = ssam_base_hub_notif;
+ hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+	hub->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+	hub->notif.event.id.instance = 0;
+ hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
+ hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ INIT_DELAYED_WORK(&hub->update_work, ssam_base_hub_update_workfn);
+
+ ssam_device_set_drvdata(sdev, hub);
+
+ status = ssam_notifier_register(sdev->ctrl, &hub->notif);
+ if (status)
+ return status;
+
+ status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
+ if (status)
+ goto err;
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+
+err:
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_hub_remove_devices(&sdev->dev);
+ return status;
+}
+
+static void ssam_base_hub_remove(struct ssam_device *sdev)
+{
+ struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
+
+ sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
+
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_hub_remove_devices(&sdev->dev);
+}
+
+static const struct ssam_device_id ssam_base_hub_match[] = {
+ { SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) },
+ { },
+};
+
+static struct ssam_device_driver ssam_base_hub_driver = {
+ .probe = ssam_base_hub_probe,
+ .remove = ssam_base_hub_remove,
+ .match_table = ssam_base_hub_match,
+ .driver = {
+ .name = "surface_aggregator_base_hub",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_base_hub_pm_ops,
+ },
+};
+
+
+/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
+
+static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
+ { "MSHW0081", (unsigned long)ssam_node_group_sp5 },
+
+ /* Surface Pro 6 (OMBR >= 0x10) */
+ { "MSHW0111", (unsigned long)ssam_node_group_sp6 },
+
+ /* Surface Pro 7 */
+ { "MSHW0116", (unsigned long)ssam_node_group_sp7 },
+
+ /* Surface Pro 7+ */
+ { "MSHW0119", (unsigned long)ssam_node_group_sp7 },
+
+ /* Surface Book 2 */
+ { "MSHW0107", (unsigned long)ssam_node_group_sb2 },
+
+ /* Surface Book 3 */
+ { "MSHW0117", (unsigned long)ssam_node_group_sb3 },
+
+ /* Surface Laptop 1 */
+ { "MSHW0086", (unsigned long)ssam_node_group_sl1 },
+
+ /* Surface Laptop 2 */
+ { "MSHW0112", (unsigned long)ssam_node_group_sl2 },
+
+ /* Surface Laptop 3 (13", Intel) */
+ { "MSHW0114", (unsigned long)ssam_node_group_sl3 },
+
+ /* Surface Laptop 3 (15", AMD) */
+ { "MSHW0110", (unsigned long)ssam_node_group_sl3 },
+
+ /* Surface Laptop Go 1 */
+ { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
+
+static int ssam_platform_hub_probe(struct platform_device *pdev)
+{
+ const struct software_node **nodes;
+ struct ssam_controller *ctrl;
+ struct fwnode_handle *root;
+ int status;
+
+ nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
+ if (!nodes)
+ return -ENODEV;
+
+ /*
+ * As we're adding the SSAM client devices as children under this device
+ * and not the SSAM controller, we need to add a device link to the
+ * controller to ensure that we remove all of our devices before the
+ * controller is removed. This also guarantees proper ordering for
+ * suspend/resume of the devices on this hub.
+ */
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ status = software_node_register_node_group(nodes);
+ if (status)
+ return status;
+
+ root = software_node_fwnode(&ssam_node_root);
+ if (!root) {
+ software_node_unregister_node_group(nodes);
+ return -ENOENT;
+ }
+
+ set_secondary_fwnode(&pdev->dev, root);
+
+ status = ssam_hub_add_devices(&pdev->dev, ctrl, root);
+ if (status) {
+ set_secondary_fwnode(&pdev->dev, NULL);
+ software_node_unregister_node_group(nodes);
+ }
+
+ platform_set_drvdata(pdev, nodes);
+ return status;
+}
+
+static int ssam_platform_hub_remove(struct platform_device *pdev)
+{
+ const struct software_node **nodes = platform_get_drvdata(pdev);
+
+ ssam_hub_remove_devices(&pdev->dev);
+ set_secondary_fwnode(&pdev->dev, NULL);
+ software_node_unregister_node_group(nodes);
+ return 0;
+}
+
+static struct platform_driver ssam_platform_hub_driver = {
+ .probe = ssam_platform_hub_probe,
+ .remove = ssam_platform_hub_remove,
+ .driver = {
+ .name = "surface_aggregator_platform_hub",
+ .acpi_match_table = ssam_platform_hub_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- Module initialization. ------------------------------------------------ */
+
+static int __init ssam_device_hub_init(void)
+{
+ int status;
+
+ status = platform_driver_register(&ssam_platform_hub_driver);
+ if (status)
+ return status;
+
+ status = ssam_device_driver_register(&ssam_base_hub_driver);
+ if (status)
+ platform_driver_unregister(&ssam_platform_hub_driver);
+
+ return status;
+}
+module_init(ssam_device_hub_init);
+
+static void __exit ssam_device_hub_exit(void)
+{
+ ssam_device_driver_unregister(&ssam_base_hub_driver);
+ platform_driver_unregister(&ssam_platform_hub_driver);
+}
+module_exit(ssam_device_hub_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
new file mode 100644
index 000000000000..63ce587e79e3
--- /dev/null
+++ b/drivers/platform/surface/surface_dtx.c
@@ -0,0 +1,1289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Book (gen. 2 and later) detachment system (DTX) driver.
+ *
+ * Provides a user-space interface to properly handle clipboard/tablet
+ * (containing screen and processor) detachment from the base of the device
+ * (containing the keyboard and optionally a discrete GPU). Allows to
+ * acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
+ * use), or request detachment via user-space.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+#include <linux/surface_aggregator/dtx.h>
+
+
+/* -- SSAM interface. ------------------------------------------------------- */
+
+enum sam_event_cid_bas {
+ SAM_EVENT_CID_DTX_CONNECTION = 0x0c,
+ SAM_EVENT_CID_DTX_REQUEST = 0x0e,
+ SAM_EVENT_CID_DTX_CANCEL = 0x0f,
+ SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11,
+};
+
+enum ssam_bas_base_state {
+ SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00,
+ SSAM_BAS_BASE_STATE_ATTACHED = 0x01,
+ SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02,
+};
+
+enum ssam_bas_latch_status {
+ SSAM_BAS_LATCH_STATUS_CLOSED = 0x00,
+ SSAM_BAS_LATCH_STATUS_OPENED = 0x01,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04,
+};
+
+enum ssam_bas_cancel_reason {
+ SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */
+ SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05,
+};
+
+struct ssam_bas_base_info {
+ u8 state;
+ u8 base_id;
+} __packed;
+
+static_assert(sizeof(struct ssam_bas_base_info) == 2);
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x06,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x07,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x08,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x09,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0a,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0b,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0c,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x11,
+ .instance_id = 0x00,
+});
+
+
+/* -- Main structures. ------------------------------------------------------ */
+
+enum sdtx_device_state {
+ SDTX_DEVICE_SHUTDOWN_BIT = BIT(0),
+ SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1),
+ SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2),
+ SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
+};
+
+struct sdtx_device {
+ struct kref kref;
+ struct rw_semaphore lock; /* Guards device and controller reference. */
+
+ struct device *dev;
+ struct ssam_controller *ctrl;
+ unsigned long flags;
+
+ struct miscdevice mdev;
+ wait_queue_head_t waitq;
+ struct mutex write_lock; /* Guards order of events/notifications. */
+ struct rw_semaphore client_lock; /* Guards client list. */
+ struct list_head client_list;
+
+ struct delayed_work state_work;
+ struct {
+ struct ssam_bas_base_info base;
+ u8 device_mode;
+ u8 latch_status;
+ } state;
+
+ struct delayed_work mode_work;
+ struct input_dev *mode_switch;
+
+ struct ssam_event_notifier notif;
+};
+
+enum sdtx_client_state {
+ SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
+};
+
+struct sdtx_client {
+ struct sdtx_device *ddev;
+ struct list_head node;
+ unsigned long flags;
+
+ struct fasync_struct *fasync;
+
+ struct mutex read_lock; /* Guards FIFO buffer read access. */
+ DECLARE_KFIFO(buffer, u8, 512);
+};
+
+static void __sdtx_device_release(struct kref *kref)
+{
+ struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);
+
+ mutex_destroy(&ddev->write_lock);
+ kfree(ddev);
+}
+
+static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
+{
+ if (ddev)
+ kref_get(&ddev->kref);
+
+ return ddev;
+}
+
+static void sdtx_device_put(struct sdtx_device *ddev)
+{
+ if (ddev)
+ kref_put(&ddev->kref, __sdtx_device_release);
+}
+
+
+/* -- Firmware value translations. ------------------------------------------ */
+
+static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
+{
+ switch (state) {
+ case SSAM_BAS_BASE_STATE_ATTACHED:
+ return SDTX_BASE_ATTACHED;
+
+ case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
+ return SDTX_BASE_DETACHED;
+
+ case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
+ return SDTX_DETACH_NOT_FEASIBLE;
+
+ default:
+ dev_err(ddev->dev, "unknown base state: %#04x\n", state);
+ return SDTX_UNKNOWN(state);
+ }
+}
+
+static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
+{
+ switch (status) {
+ case SSAM_BAS_LATCH_STATUS_CLOSED:
+ return SDTX_LATCH_CLOSED;
+
+ case SSAM_BAS_LATCH_STATUS_OPENED:
+ return SDTX_LATCH_OPENED;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
+ return SDTX_ERR_FAILED_TO_OPEN;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
+ return SDTX_ERR_FAILED_TO_CLOSE;
+
+ default:
+ dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
+ return SDTX_UNKNOWN(status);
+ }
+}
+
+static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
+{
+ switch (reason) {
+ case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
+ return SDTX_DETACH_NOT_FEASIBLE;
+
+ case SSAM_BAS_CANCEL_REASON_TIMEOUT:
+ return SDTX_DETACH_TIMEDOUT;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
+ return SDTX_ERR_FAILED_TO_OPEN;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
+ return SDTX_ERR_FAILED_TO_CLOSE;
+
+ default:
+ dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
+ return SDTX_UNKNOWN(reason);
+ }
+}
+
+
+/* -- IOCTLs. --------------------------------------------------------------- */
+
+static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
+ struct sdtx_base_info __user *buf)
+{
+ struct ssam_bas_base_info raw;
+ struct sdtx_base_info info;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
+ if (status < 0)
+ return status;
+
+ info.state = sdtx_translate_base_state(ddev, raw.state);
+ info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
+
+ if (copy_to_user(buf, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
+{
+ u8 mode;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status < 0)
+ return status;
+
+ return put_user(mode, buf);
+}
+
+static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
+{
+ u8 latch;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+ if (status < 0)
+ return status;
+
+ return put_user(sdtx_translate_latch_status(ddev, latch), buf);
+}
+
+static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
+{
+ struct sdtx_device *ddev = client->ddev;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ switch (cmd) {
+ case SDTX_IOCTL_EVENTS_ENABLE:
+ set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+ return 0;
+
+ case SDTX_IOCTL_EVENTS_DISABLE:
+ clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+ return 0;
+
+ case SDTX_IOCTL_LATCH_LOCK:
+ return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_UNLOCK:
+ return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_REQUEST:
+ return ssam_retry(ssam_bas_latch_request, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_CONFIRM:
+ return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_HEARTBEAT:
+ return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_CANCEL:
+ return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);
+
+ case SDTX_IOCTL_GET_BASE_INFO:
+ return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);
+
+ case SDTX_IOCTL_GET_DEVICE_MODE:
+ return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
+
+ case SDTX_IOCTL_GET_LATCH_STATUS:
+ return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct sdtx_client *client = file->private_data;
+ long status;
+
+ if (down_read_killable(&client->ddev->lock))
+ return -ERESTARTSYS;
+
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
+ up_read(&client->ddev->lock);
+ return -ENODEV;
+ }
+
+ status = __surface_dtx_ioctl(client, cmd, arg);
+
+ up_read(&client->ddev->lock);
+ return status;
+}
+
+
+/* -- File operations. ------------------------------------------------------ */
+
+static int surface_dtx_open(struct inode *inode, struct file *file)
+{
+ struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
+ struct sdtx_client *client;
+
+ /* Initialize client. */
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->ddev = sdtx_device_get(ddev);
+
+ INIT_LIST_HEAD(&client->node);
+
+ mutex_init(&client->read_lock);
+ INIT_KFIFO(client->buffer);
+
+ file->private_data = client;
+
+ /* Attach client. */
+ down_write(&ddev->client_lock);
+
+ /*
+ * Do not add a new client if the device has been shut down. Note that
+ * it's enough to hold the client_lock here as, during shutdown, we
+ * only acquire that lock and remove clients after marking the device
+ * as shut down.
+ */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_write(&ddev->client_lock);
+ sdtx_device_put(client->ddev);
+ kfree(client);
+ return -ENODEV;
+ }
+
+ list_add_tail(&client->node, &ddev->client_list);
+ up_write(&ddev->client_lock);
+
+ stream_open(inode, file);
+ return 0;
+}
+
+static int surface_dtx_release(struct inode *inode, struct file *file)
+{
+ struct sdtx_client *client = file->private_data;
+
+ /* Detach client. */
+ down_write(&client->ddev->client_lock);
+ list_del(&client->node);
+ up_write(&client->ddev->client_lock);
+
+ /* Free client. */
+ sdtx_device_put(client->ddev);
+ mutex_destroy(&client->read_lock);
+ kfree(client);
+
+ return 0;
+}
+
+static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
+{
+ struct sdtx_client *client = file->private_data;
+ struct sdtx_device *ddev = client->ddev;
+ unsigned int copied;
+ int status = 0;
+
+ if (down_read_killable(&ddev->lock))
+ return -ERESTARTSYS;
+
+ /* Make sure we're not shut down. */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_read(&ddev->lock);
+ return -ENODEV;
+ }
+
+ do {
+ /* Check availability, wait if necessary. */
+ if (kfifo_is_empty(&client->buffer)) {
+ up_read(&ddev->lock);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ status = wait_event_interruptible(ddev->waitq,
+ !kfifo_is_empty(&client->buffer) ||
+ test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
+ &ddev->flags));
+ if (status < 0)
+ return status;
+
+ if (down_read_killable(&ddev->lock))
+ return -ERESTARTSYS;
+
+ /* Need to check that we're not shut down again. */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_read(&ddev->lock);
+ return -ENODEV;
+ }
+ }
+
+ /* Try to read from FIFO. */
+ if (mutex_lock_interruptible(&client->read_lock)) {
+ up_read(&ddev->lock);
+ return -ERESTARTSYS;
+ }
+
+ status = kfifo_to_user(&client->buffer, buf, count, &copied);
+ mutex_unlock(&client->read_lock);
+
+ if (status < 0) {
+ up_read(&ddev->lock);
+ return status;
+ }
+
+ /* We might not have gotten anything, check this here. */
+ if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
+ up_read(&ddev->lock);
+ return -EAGAIN;
+ }
+ } while (copied == 0);
+
+ up_read(&ddev->lock);
+ return copied;
+}
+
+static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
+{
+ struct sdtx_client *client = file->private_data;
+ __poll_t events = 0;
+
+ if (down_read_killable(&client->ddev->lock))
+		return EPOLLERR;
+
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
+ up_read(&client->ddev->lock);
+ return EPOLLHUP | EPOLLERR;
+ }
+
+ poll_wait(file, &client->ddev->waitq, pt);
+
+ if (!kfifo_is_empty(&client->buffer))
+ events |= EPOLLIN | EPOLLRDNORM;
+
+ up_read(&client->ddev->lock);
+ return events;
+}
+
+static int surface_dtx_fasync(int fd, struct file *file, int on)
+{
+ struct sdtx_client *client = file->private_data;
+
+ return fasync_helper(fd, file, on, &client->fasync);
+}
+
+static const struct file_operations surface_dtx_fops = {
+ .owner = THIS_MODULE,
+ .open = surface_dtx_open,
+ .release = surface_dtx_release,
+ .read = surface_dtx_read,
+ .poll = surface_dtx_poll,
+ .fasync = surface_dtx_fasync,
+ .unlocked_ioctl = surface_dtx_ioctl,
+ .compat_ioctl = surface_dtx_ioctl,
+ .llseek = no_llseek,
+};
+
+
+/* -- Event handling/forwarding. -------------------------------------------- */
+
+/*
+ * The device operation mode is not immediately updated on the EC when the
+ * base has been connected, i.e. querying the device mode inside the
+ * connection event callback yields an outdated value. Thus, we can only
+ * determine the new tablet-mode switch and device mode values after some
+ * time.
+ *
+ * These delays have been chosen experimentally. We first delay on connect
+ * events, then check and validate the device mode against the base state
+ * and, if it is invalid, delay again by the "recheck" delay.
+ */
+#define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100)
+#define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100)
+
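+/*
+ * Events are forwarded to user space as a packed struct sdtx_event header
+ * immediately followed by e.length bytes of payload; the structs below
+ * overlay header and payload for the fixed-size event types.
+ */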
+struct sdtx_status_event {
+ struct sdtx_event e;
+ __u16 v;
+} __packed;
+
+struct sdtx_base_info_event {
+ struct sdtx_event e;
+ struct sdtx_base_info v;
+} __packed;
+
+union sdtx_generic_event {
+ struct sdtx_event common;
+ struct sdtx_status_event status;
+ struct sdtx_base_info_event base;
+};
+
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
+
+/* Must be executed with ddev->write_lock held. */
+static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
+{
+ const size_t len = sizeof(struct sdtx_event) + evt->length;
+ struct sdtx_client *client;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ down_read(&ddev->client_lock);
+ list_for_each_entry(client, &ddev->client_list, node) {
+ if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
+ continue;
+
+ if (likely(kfifo_avail(&client->buffer) >= len))
+ kfifo_in(&client->buffer, (const u8 *)evt, len);
+ else
+ dev_warn(ddev->dev, "event buffer overrun\n");
+
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
+ }
+ up_read(&ddev->client_lock);
+
+ wake_up_interruptible(&ddev->waitq);
+}
+
+static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
+{
+ struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
+ union sdtx_generic_event event;
+ size_t len;
+
+ /* Validate event payload length. */
+ switch (in->command_id) {
+ case SAM_EVENT_CID_DTX_CONNECTION:
+ len = 2 * sizeof(u8);
+ break;
+
+ case SAM_EVENT_CID_DTX_REQUEST:
+ len = 0;
+ break;
+
+ case SAM_EVENT_CID_DTX_CANCEL:
+ len = sizeof(u8);
+ break;
+
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
+ len = sizeof(u8);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (in->length != len) {
+ dev_err(ddev->dev,
+ "unexpected payload size for event %#04x: got %u, expected %zu\n",
+ in->command_id, in->length, len);
+ return 0;
+ }
+
+ mutex_lock(&ddev->write_lock);
+
+ /* Translate event. */
+ switch (in->command_id) {
+ case SAM_EVENT_CID_DTX_CONNECTION:
+ clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+
+ /* If state has not changed: do not send new event. */
+ if (ddev->state.base.state == in->data[0] &&
+ ddev->state.base.base_id == in->data[1])
+ goto out;
+
+ ddev->state.base.state = in->data[0];
+ ddev->state.base.base_id = in->data[1];
+
+ event.base.e.length = sizeof(struct sdtx_base_info);
+ event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
+ event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
+ event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
+ break;
+
+ case SAM_EVENT_CID_DTX_REQUEST:
+ event.common.code = SDTX_EVENT_REQUEST;
+ event.common.length = 0;
+ break;
+
+ case SAM_EVENT_CID_DTX_CANCEL:
+ event.status.e.length = sizeof(u16);
+ event.status.e.code = SDTX_EVENT_CANCEL;
+ event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
+ break;
+
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
+ clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+ /* If state has not changed: do not send new event. */
+ if (ddev->state.latch_status == in->data[0])
+ goto out;
+
+ ddev->state.latch_status = in->data[0];
+
+ event.status.e.length = sizeof(u16);
+ event.status.e.code = SDTX_EVENT_LATCH_STATUS;
+ event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
+ break;
+ }
+
+ sdtx_push_event(ddev, &event.common);
+
+ /* Update device mode on base connection change. */
+ if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
+ unsigned long delay;
+
+ delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
+ sdtx_update_device_mode(ddev, delay);
+ }
+
+out:
+ mutex_unlock(&ddev->write_lock);
+ return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- State update functions. ----------------------------------------------- */
+
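+/*
+ * A queried device mode contradicts the base state if the EC reports
+ * tablet mode while a base is attached, or a non-tablet mode right after
+ * a successful detach. Both mean the mode has not been updated yet.
+ */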
+static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
+{
+ return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
+ (mode == SDTX_DEVICE_MODE_TABLET)) ||
+ ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
+ (mode != SDTX_DEVICE_MODE_TABLET));
+}
+
+static void sdtx_device_mode_workfn(struct work_struct *work)
+{
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
+ struct sdtx_status_event event;
+ struct ssam_bas_base_info base;
+ int status, tablet;
+ u8 mode;
+
+ /* Get operation mode. */
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status) {
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+ return;
+ }
+
+ /* Get base info. */
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+ if (status) {
+ dev_err(ddev->dev, "failed to get base info: %d\n", status);
+ return;
+ }
+
+ /*
+ * In some cases (specifically when attaching the base), the device
+ * mode isn't updated right away. Thus we check if the device mode
+ * makes sense for the given base state and try again later if it
+ * doesn't.
+ */
+ if (sdtx_device_mode_invalid(mode, base.state)) {
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+ return;
+ }
+
+ mutex_lock(&ddev->write_lock);
+ clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+
+ /* Avoid sending duplicate device-mode events. */
+ if (ddev->state.device_mode == mode) {
+ mutex_unlock(&ddev->write_lock);
+ return;
+ }
+
+ ddev->state.device_mode = mode;
+
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
+ event.v = mode;
+
+ sdtx_push_event(ddev, &event.e);
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(ddev->mode_switch);
+
+ mutex_unlock(&ddev->write_lock);
+}
+
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
+{
+ schedule_delayed_work(&ddev->mode_work, delay);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
+ struct ssam_bas_base_info info)
+{
+ struct sdtx_base_info_event event;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ /* Prevent duplicate events. */
+ if (ddev->state.base.state == info.state &&
+ ddev->state.base.base_id == info.base_id)
+ return;
+
+ ddev->state.base = info;
+
+ event.e.length = sizeof(struct sdtx_base_info);
+ event.e.code = SDTX_EVENT_BASE_CONNECTION;
+ event.v.state = sdtx_translate_base_state(ddev, info.state);
+ event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
+
+ sdtx_push_event(ddev, &event.e);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
+{
+ struct sdtx_status_event event;
+ int tablet;
+
+ /*
+ * Note: This function must be called after updating the base state
+ * via __sdtx_device_state_update_base(), as we rely on the updated
+ * base state value in the validity check below.
+ */
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+ return;
+ }
+
+ /* Prevent duplicate events. */
+ if (ddev->state.device_mode == mode)
+ return;
+
+ ddev->state.device_mode = mode;
+
+ /* Send event. */
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
+ event.v = mode;
+
+ sdtx_push_event(ddev, &event.e);
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(ddev->mode_switch);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
+{
+ struct sdtx_status_event event;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ /* Prevent duplicate events. */
+ if (ddev->state.latch_status == status)
+ return;
+
+ ddev->state.latch_status = status;
+
+	event.e.length = sizeof(u16);
+	event.e.code = SDTX_EVENT_LATCH_STATUS;
+ event.v = sdtx_translate_latch_status(ddev, status);
+
+ sdtx_push_event(ddev, &event.e);
+}
+
+static void sdtx_device_state_workfn(struct work_struct *work)
+{
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
+ struct ssam_bas_base_info base;
+ u8 mode, latch;
+ int status;
+
+ /* Mark everything as dirty. */
+ set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+ set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+ set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+ /*
+ * Ensure that the state gets marked as dirty before continuing to
+ * query it. Necessary to ensure that clear_bit() calls in
+ * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
+ * bits if an event is received while updating the state here.
+ */
+ smp_mb__after_atomic();
+
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+ if (status) {
+ dev_err(ddev->dev, "failed to get base state: %d\n", status);
+ return;
+ }
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status) {
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+ return;
+ }
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+ if (status) {
+ dev_err(ddev->dev, "failed to get latch status: %d\n", status);
+ return;
+ }
+
+ mutex_lock(&ddev->write_lock);
+
+ /*
+ * If the respective dirty-bit has been cleared, an event has been
+ * received, updating this state. The queried state may thus be out of
+ * date. At this point, we can safely assume that the state provided
+ * by the event is either up to date, or we're about to receive
+ * another event updating it.
+ */
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
+ __sdtx_device_state_update_base(ddev, base);
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
+ __sdtx_device_state_update_mode(ddev, mode);
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
+ __sdtx_device_state_update_latch(ddev, latch);
+
+ mutex_unlock(&ddev->write_lock);
+}
+
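+/* Re-query the full (base, mode, latch) device state after the given delay. */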
+static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
+{
+ schedule_delayed_work(&ddev->state_work, delay);
+}
+
+
+/* -- Common device initialization. ----------------------------------------- */
+
+static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
+ struct ssam_controller *ctrl)
+{
+ int status, tablet_mode;
+
+ /* Basic initialization. */
+ kref_init(&ddev->kref);
+ init_rwsem(&ddev->lock);
+ ddev->dev = dev;
+ ddev->ctrl = ctrl;
+
+ ddev->mdev.minor = MISC_DYNAMIC_MINOR;
+ ddev->mdev.name = "surface_dtx";
+ ddev->mdev.nodename = "surface/dtx";
+ ddev->mdev.fops = &surface_dtx_fops;
+
+ ddev->notif.base.priority = 1;
+ ddev->notif.base.fn = sdtx_notifier;
+ ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+ ddev->notif.event.id.instance = 0;
+ ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
+ ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ init_waitqueue_head(&ddev->waitq);
+ mutex_init(&ddev->write_lock);
+ init_rwsem(&ddev->client_lock);
+ INIT_LIST_HEAD(&ddev->client_list);
+
+ INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
+ INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
+
+ /*
+ * Get current device state. We want to guarantee that events are only
+ * sent when state actually changes. Thus we cannot use special
+ * "uninitialized" values, as that would cause problems when manually
+ * querying the state in surface_dtx_pm_complete(). I.e. we would not
+ * be able to detect state changes there if no change event has been
+ * received between driver initialization and first device suspension.
+ *
+ * Note that we also need to do this before registering the event
+ * notifier, as that may access the state values.
+ */
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
+ if (status)
+ return status;
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
+ if (status)
+ return status;
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
+ if (status)
+ return status;
+
+ /* Set up tablet mode switch. */
+ ddev->mode_switch = input_allocate_device();
+ if (!ddev->mode_switch)
+ return -ENOMEM;
+
+ ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
+ ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
+ ddev->mode_switch->id.bustype = BUS_HOST;
+ ddev->mode_switch->dev.parent = ddev->dev;
+
+ tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
+ input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
+
+ status = input_register_device(ddev->mode_switch);
+ if (status) {
+ input_free_device(ddev->mode_switch);
+ return status;
+ }
+
+ /* Set up event notifier. */
+ status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
+ if (status)
+ goto err_notif;
+
+ /* Register miscdevice. */
+ status = misc_register(&ddev->mdev);
+ if (status)
+ goto err_mdev;
+
+ /*
+ * Update device state in case it has changed between getting the
+ * initial mode and registering the event notifier.
+ */
+ sdtx_update_device_state(ddev, 0);
+ return 0;
+
+err_mdev:
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+ cancel_delayed_work_sync(&ddev->mode_work);
+err_notif:
+ input_unregister_device(ddev->mode_switch);
+ return status;
+}
+
+static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
+{
+ struct sdtx_device *ddev;
+ int status;
+
+ ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return ERR_PTR(-ENOMEM);
+
+ status = sdtx_device_init(ddev, dev, ctrl);
+ if (status) {
+ sdtx_device_put(ddev);
+ return ERR_PTR(status);
+ }
+
+ return ddev;
+}
+
+static void sdtx_device_destroy(struct sdtx_device *ddev)
+{
+ struct sdtx_client *client;
+
+ /*
+ * Mark device as shut-down. Prevent new clients from being added and
+ * new operations from being executed.
+ */
+ set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
+
+ /* Disable notifiers, prevent new events from arriving. */
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+
+ /* Stop mode_work, prevent access to mode_switch. */
+ cancel_delayed_work_sync(&ddev->mode_work);
+
+ /* Stop state_work. */
+ cancel_delayed_work_sync(&ddev->state_work);
+
+ /* With mode_work canceled, we can unregister the mode_switch. */
+ input_unregister_device(ddev->mode_switch);
+
+ /* Wake up async clients. */
+ down_write(&ddev->client_lock);
+ list_for_each_entry(client, &ddev->client_list, node) {
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+ }
+ up_write(&ddev->client_lock);
+
+ /* Wake up blocking clients. */
+ wake_up_interruptible(&ddev->waitq);
+
+ /*
+ * Wait for clients to finish their current operation. After this, the
+ * controller and device references are guaranteed to be no longer in
+ * use.
+ */
+ down_write(&ddev->lock);
+ ddev->dev = NULL;
+ ddev->ctrl = NULL;
+ up_write(&ddev->lock);
+
+ /* Finally remove the misc-device. */
+ misc_deregister(&ddev->mdev);
+
+ /*
+ * We're now guaranteed that sdtx_device_open() won't be called any
+ * more, so we can drop our reference.
+ */
+ sdtx_device_put(ddev);
+}
+
+
+/* -- PM ops. --------------------------------------------------------------- */
+
+#ifdef CONFIG_PM_SLEEP
+
+static void surface_dtx_pm_complete(struct device *dev)
+{
+ struct sdtx_device *ddev = dev_get_drvdata(dev);
+
+ /*
+ * Normally, the EC will store events while suspended (i.e. in
+ * display-off state) and release them when resumed (i.e. transitioned
+ * to display-on state). During hibernation, however, the EC will be
+ * shut down and does not store events. Furthermore, events might be
+ * dropped during prolonged suspension (it is currently unknown how
+ * big this event buffer is and how it behaves on overruns).
+ *
+ * To prevent any problems, we update the device state here. We do
+ * this delayed to ensure that any events sent by the EC directly
+ * after resuming will be handled first. The delay below has been
+ * chosen (experimentally), so that there should be ample time for
+ * these events to be handled, before we check and, if necessary,
+ * update the state.
+ */
+ sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
+}
+
+static const struct dev_pm_ops surface_dtx_pm_ops = {
+ .complete = surface_dtx_pm_complete,
+};
+
+#else /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops surface_dtx_pm_ops = {};
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+/* -- Platform driver. ------------------------------------------------------ */
+
+static int surface_dtx_platform_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct sdtx_device *ddev;
+
+ /* Link to EC. */
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ ddev = sdtx_device_create(&pdev->dev, ctrl);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ platform_set_drvdata(pdev, ddev);
+ return 0;
+}
+
+static int surface_dtx_platform_remove(struct platform_device *pdev)
+{
+ sdtx_device_destroy(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct acpi_device_id surface_dtx_acpi_match[] = {
+ { "MSHW0133", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
+
+static struct platform_driver surface_dtx_platform_driver = {
+ .probe = surface_dtx_platform_probe,
+ .remove = surface_dtx_platform_remove,
+ .driver = {
+ .name = "surface_dtx_pltf",
+ .acpi_match_table = surface_dtx_acpi_match,
+ .pm = &surface_dtx_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- SSAM device driver. --------------------------------------------------- */
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+static int surface_dtx_ssam_probe(struct ssam_device *sdev)
+{
+ struct sdtx_device *ddev;
+
+ ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ssam_device_set_drvdata(sdev, ddev);
+ return 0;
+}
+
+static void surface_dtx_ssam_remove(struct ssam_device *sdev)
+{
+ sdtx_device_destroy(ssam_device_get_drvdata(sdev));
+}
+
+static const struct ssam_device_id surface_dtx_ssam_match[] = {
+ { SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
+
+static struct ssam_device_driver surface_dtx_ssam_driver = {
+ .probe = surface_dtx_ssam_probe,
+ .remove = surface_dtx_ssam_remove,
+ .match_table = surface_dtx_ssam_match,
+ .driver = {
+ .name = "surface_dtx",
+ .pm = &surface_dtx_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int ssam_dtx_driver_register(void)
+{
+ return ssam_device_driver_register(&surface_dtx_ssam_driver);
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+ ssam_device_driver_unregister(&surface_dtx_ssam_driver);
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static int ssam_dtx_driver_register(void)
+{
+ return 0;
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+
+/* -- Module setup. --------------------------------------------------------- */
+
+static int __init surface_dtx_init(void)
+{
+ int status;
+
+ status = ssam_dtx_driver_register();
+ if (status)
+ return status;
+
+ status = platform_driver_register(&surface_dtx_platform_driver);
+ if (status)
+ ssam_dtx_driver_unregister();
+
+ return status;
+}
+module_init(surface_dtx_init);
+
+static void __exit surface_dtx_exit(void)
+{
+ platform_driver_unregister(&surface_dtx_platform_driver);
+ ssam_dtx_driver_unregister();
+}
+module_exit(surface_dtx_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
new file mode 100644
index 000000000000..6373d3b5eb7f
--- /dev/null
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Platform Profile / Performance Mode driver for Surface System
+ * Aggregator Module (thermal subsystem).
+ *
+ * Copyright (C) 2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_profile.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/device.h>
+
+enum ssam_tmp_profile {
+ SSAM_TMP_PROFILE_NORMAL = 1,
+ SSAM_TMP_PROFILE_BATTERY_SAVER = 2,
+ SSAM_TMP_PROFILE_BETTER_PERFORMANCE = 3,
+ SSAM_TMP_PROFILE_BEST_PERFORMANCE = 4,
+};
+
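+/* Raw TMP profile response; the two trailing fields are not yet understood. */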
+struct ssam_tmp_profile_info {
+ __le32 profile;
+ __le16 unknown1;
+ __le16 unknown2;
+} __packed;
+
+struct ssam_tmp_profile_device {
+ struct ssam_device *sdev;
+ struct platform_profile_handler handler;
+};
+
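+/*
+ * Synchronous SAM client requests on the thermal subsystem: command 0x02
+ * reads the profile info block, command 0x03 writes the selected profile.
+ */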
+SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_profile_get, struct ssam_tmp_profile_info, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x02,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_profile_set, __le32, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x03,
+});
+
+static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile *p)
+{
+ struct ssam_tmp_profile_info info;
+ int status;
+
+ status = ssam_retry(__ssam_tmp_profile_get, sdev, &info);
+ if (status < 0)
+ return status;
+
+ *p = le32_to_cpu(info.profile);
+ return 0;
+}
+
+static int ssam_tmp_profile_set(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+ __le32 profile_le = cpu_to_le32(p);
+
+ return ssam_retry(__ssam_tmp_profile_set, sdev, &profile_le);
+}
+
+static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+ switch (p) {
+ case SSAM_TMP_PROFILE_NORMAL:
+ return PLATFORM_PROFILE_BALANCED;
+
+ case SSAM_TMP_PROFILE_BATTERY_SAVER:
+ return PLATFORM_PROFILE_LOW_POWER;
+
+ case SSAM_TMP_PROFILE_BETTER_PERFORMANCE:
+ return PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+
+ case SSAM_TMP_PROFILE_BEST_PERFORMANCE:
+ return PLATFORM_PROFILE_PERFORMANCE;
+
+ default:
+ dev_err(&sdev->dev, "invalid performance profile: %d\n", p);
+ return -EINVAL;
+ }
+}
+
+static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p)
+{
+ switch (p) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ return SSAM_TMP_PROFILE_BATTERY_SAVER;
+
+ case PLATFORM_PROFILE_BALANCED:
+ return SSAM_TMP_PROFILE_NORMAL;
+
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+ return SSAM_TMP_PROFILE_BETTER_PERFORMANCE;
+
+ case PLATFORM_PROFILE_PERFORMANCE:
+ return SSAM_TMP_PROFILE_BEST_PERFORMANCE;
+
+ default:
+ /* This should have already been caught by platform_profile_store(). */
+ WARN(true, "unsupported platform profile");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct ssam_tmp_profile_device *tpd;
+ enum ssam_tmp_profile tp;
+ int status;
+
+ tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+ status = ssam_tmp_profile_get(tpd->sdev, &tp);
+ if (status)
+ return status;
+
+ status = convert_ssam_to_profile(tpd->sdev, tp);
+ if (status < 0)
+ return status;
+
+ *profile = status;
+ return 0;
+}
+
+static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct ssam_tmp_profile_device *tpd;
+ int tp;
+
+ tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+ tp = convert_profile_to_ssam(tpd->sdev, profile);
+ if (tp < 0)
+ return tp;
+
+ return ssam_tmp_profile_set(tpd->sdev, tp);
+}
+
+static int surface_platform_profile_probe(struct ssam_device *sdev)
+{
+ struct ssam_tmp_profile_device *tpd;
+
+ tpd = devm_kzalloc(&sdev->dev, sizeof(*tpd), GFP_KERNEL);
+ if (!tpd)
+ return -ENOMEM;
+
+ tpd->sdev = sdev;
+
+ tpd->handler.profile_get = ssam_platform_profile_get;
+ tpd->handler.profile_set = ssam_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+
+ platform_profile_register(&tpd->handler);
+ return 0;
+}
+
+static void surface_platform_profile_remove(struct ssam_device *sdev)
+{
+ platform_profile_remove();
+}
+
+static const struct ssam_device_id ssam_platform_profile_match[] = {
+ { SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
+
+static struct ssam_device_driver surface_platform_profile = {
+ .probe = surface_platform_profile_probe,
+ .remove = surface_platform_profile_remove,
+ .match_table = ssam_platform_profile_match,
+ .driver = {
+ .name = "surface_platform_profile",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_platform_profile);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Platform Profile Support for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surfacepro3_button.c b/drivers/platform/surface/surfacepro3_button.c
index d8afed5db94c..242fb690dcaf 100644
--- a/drivers/platform/surface/surfacepro3_button.c
+++ b/drivers/platform/surface/surfacepro3_button.c
@@ -40,8 +40,6 @@ static const guid_t MSHW0040_DSM_UUID =
#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
-ACPI_MODULE_NAME("surface pro 3 button");
-
MODULE_AUTHOR("Chen Yu");
MODULE_DESCRIPTION("Surface Pro3 Button Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 461ec61530eb..2714f7c3843e 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -123,6 +123,17 @@ config XIAOMI_WMI
To compile this driver as a module, choose M here: the module will
be called xiaomi-wmi.
+config GIGABYTE_WMI
+ tristate "Gigabyte WMI temperature driver"
+ depends on ACPI_WMI
+ depends on HWMON
+ help
+ Say Y here if you want to support WMI-based temperature reporting on
+ Gigabyte mainboards.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gigabyte-wmi.
+
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
depends on ACPI && THERMAL
@@ -193,6 +204,17 @@ config AMD_PMC
If you choose to compile this driver as a module the module will be
called amd-pmc.
+config ADV_SWBUTTON
+ tristate "Advantech ACPI Software Button Driver"
+ depends on ACPI && INPUT
+ help
+ Say Y here to enable support for Advantech software defined
+ button feature. More information can be found at
+ <http://www.advantech.com.tw/products/>
+
+ To compile this driver as a module, choose M here. The module will
+ be called adv_swbutton.
+
config APPLE_GMUX
tristate "Apple Gmux Driver"
depends on ACPI && PCI
@@ -410,6 +432,7 @@ config HP_WMI
depends on INPUT
depends on RFKILL || RFKILL = n
select INPUT_SPARSEKMAP
+ select ACPI_PLATFORM_PROFILE
help
Say Y here if you want to support WMI-based hotkeys on HP laptops and
to read data from WMI such as docking or ambient light sensor state.
@@ -1171,6 +1194,7 @@ config INTEL_MRFLD_PWRBTN
config INTEL_PMC_CORE
tristate "Intel PMC Core driver"
depends on PCI
+ depends on ACPI
help
The Intel Platform Controller Hub for Intel Core SoCs provides access
to Power Management Controller registers via various interfaces. This
@@ -1192,7 +1216,7 @@ config INTEL_PMT_CLASS
tristate
help
The Intel Platform Monitoring Technology (PMT) class driver provides
- the basic sysfs interface and file hierarchy uses by PMT devices.
+ the basic sysfs interface and file hierarchy used by PMT devices.
For more information, see:
<file:Documentation/ABI/testing/sysfs-class-intel_pmt>
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 60d554073749..dcc8cdb95b4d 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
+obj-$(CONFIG_GIGABYTE_WMI) += gigabyte-wmi.o
# Acer
obj-$(CONFIG_ACERHDF) += acerhdf.o
@@ -24,6 +25,9 @@ obj-$(CONFIG_ACER_WMI) += acer-wmi.o
# AMD
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+# Advantech
+obj-$(CONFIG_ADV_SWBUTTON) += adv_swbutton.o
+
# Apple
obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
diff --git a/drivers/platform/x86/adv_swbutton.c b/drivers/platform/x86/adv_swbutton.c
new file mode 100644
index 000000000000..38693b735c87
--- /dev/null
+++ b/drivers/platform/x86/adv_swbutton.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * adv_swbutton.c - Software Button Interface Driver.
+ *
+ * (C) Copyright 2020 Advantech Corporation, Inc
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+#define ACPI_BUTTON_HID_SWBTN "AHC0310"
+
+#define ACPI_BUTTON_NOTIFY_SWBTN_RELEASE 0x86
+#define ACPI_BUTTON_NOTIFY_SWBTN_PRESSED 0x85
+
+struct adv_swbutton {
+ struct input_dev *input;
+ char phys[32];
+};
+
+/*-------------------------------------------------------------------------
+ * Driver Interface
+ *--------------------------------------------------------------------------
+ */
+static void adv_swbutton_notify(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct adv_swbutton *button = dev_get_drvdata(&device->dev);
+
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_SWBTN_RELEASE:
+ input_report_key(button->input, KEY_PROG1, 0);
+ input_sync(button->input);
+ break;
+ case ACPI_BUTTON_NOTIFY_SWBTN_PRESSED:
+ input_report_key(button->input, KEY_PROG1, 1);
+ input_sync(button->input);
+ break;
+ default:
+ dev_dbg(&device->dev, "Unsupported event [0x%x]\n", event);
+ }
+}
+
+static int adv_swbutton_probe(struct platform_device *device)
+{
+ struct adv_swbutton *button;
+ struct input_dev *input;
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ acpi_status status;
+ int error;
+
+ button = devm_kzalloc(&device->dev, sizeof(*button), GFP_KERNEL);
+ if (!button)
+ return -ENOMEM;
+
+ dev_set_drvdata(&device->dev, button);
+
+ input = devm_input_allocate_device(&device->dev);
+ if (!input)
+ return -ENOMEM;
+
+ button->input = input;
+ snprintf(button->phys, sizeof(button->phys), "%s/button/input0", ACPI_BUTTON_HID_SWBTN);
+
+ input->name = "Advantech Software Button";
+ input->phys = button->phys;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &device->dev;
+ set_bit(EV_REP, input->evbit);
+ input_set_capability(input, EV_KEY, KEY_PROG1);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ device_init_wakeup(&device->dev, true);
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ adv_swbutton_notify,
+ device);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&device->dev, "Error installing notify handler\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adv_swbutton_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY,
+ adv_swbutton_notify);
+
+ return 0;
+}
+
+static const struct acpi_device_id button_device_ids[] = {
+ {ACPI_BUTTON_HID_SWBTN, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, button_device_ids);
+
+static struct platform_driver adv_swbutton_driver = {
+ .driver = {
+ .name = "adv_swbutton",
+ .acpi_match_table = button_device_ids,
+ },
+ .probe = adv_swbutton_probe,
+ .remove = adv_swbutton_remove,
+};
+module_platform_driver(adv_swbutton_driver);
+
+MODULE_AUTHOR("Andrea Ho");
+MODULE_DESCRIPTION("Advantech ACPI SW Button Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index bfea656e910c..4d2d32bfbe2a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1569,7 +1569,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct asus_laptop *asus = dev_get_drvdata(dev);
acpi_handle handle = asus->handle;
bool supported;
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 9ca15f724343..ebaeb7bb80f5 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -47,6 +47,9 @@ MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>, "
MODULE_DESCRIPTION("Asus Generic WMI Driver");
MODULE_LICENSE("GPL");
+static bool fnlock_default = true;
+module_param(fnlock_default, bool, 0444);
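+MODULE_PARM_DESC(fnlock_default, "Enable Fn-lock mode by default");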
+
#define to_asus_wmi_driver(pdrv) \
(container_of((pdrv), struct asus_wmi_driver, platform_driver))
@@ -2673,7 +2676,7 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
if (asus_wmi_has_fnlock_key(asus)) {
- asus->fnlock_locked = true;
+ asus->fnlock_locked = fnlock_default;
asus_wmi_fnlock_update(asus);
}
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 3e03e8d3a07f..9309ab5792cb 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -956,7 +956,7 @@ static int cmpc_ipml_add(struct acpi_device *acpi)
/*
* If RFKILL is disabled, rfkill_alloc will return ERR_PTR(-ENODEV).
* This is OK, however, since all other uses of the device will not
- * derefence it.
+ * dereference it.
*/
if (ipml->rf) {
retval = rfkill_register(ipml->rf);
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 5bb2859c8285..f21248255529 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -2,7 +2,7 @@
/*
* Alienware AlienFX control
*
- * Copyright (C) 2014 Dell Inc <mario_limonciello@dell.com>
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -26,7 +26,7 @@
#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
-MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_DESCRIPTION("Alienware special feature control");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index 3a1dbf199441..fc086b66f70b 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -647,6 +647,6 @@ module_exit(dell_smbios_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index 27a298b7c541..a1753485159c 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -205,7 +205,7 @@ fail_register:
return ret;
}
-static int dell_smbios_wmi_remove(struct wmi_device *wdev)
+static void dell_smbios_wmi_remove(struct wmi_device *wdev)
{
struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
int count;
@@ -218,7 +218,6 @@ static int dell_smbios_wmi_remove(struct wmi_device *wdev)
count = get_order(priv->req_buf_size);
free_pages((unsigned long)priv->buf, count);
mutex_unlock(&call_mutex);
- return 0;
}
static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
diff --git a/drivers/platform/x86/dell/dell-wmi-descriptor.c b/drivers/platform/x86/dell/dell-wmi-descriptor.c
index a068900ae8a1..c2a180202719 100644
--- a/drivers/platform/x86/dell/dell-wmi-descriptor.c
+++ b/drivers/platform/x86/dell/dell-wmi-descriptor.c
@@ -174,14 +174,13 @@ out:
return ret;
}
-static int dell_wmi_descriptor_remove(struct wmi_device *wdev)
+static void dell_wmi_descriptor_remove(struct wmi_device *wdev)
{
struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev);
mutex_lock(&list_mutex);
list_del(&priv->list);
mutex_unlock(&list_mutex);
- return 0;
}
static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
@@ -201,6 +200,6 @@ static struct wmi_driver dell_wmi_descriptor_driver = {
module_wmi_driver(dell_wmi_descriptor_driver);
MODULE_DEVICE_TABLE(wmi, dell_wmi_descriptor_id_table);
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_DESCRIPTION("Dell WMI descriptor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
index f95d8ddace5a..c2dd2de6bc20 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
@@ -152,12 +152,11 @@ static int bios_attr_set_interface_probe(struct wmi_device *wdev, const void *co
return 0;
}
-static int bios_attr_set_interface_remove(struct wmi_device *wdev)
+static void bios_attr_set_interface_remove(struct wmi_device *wdev)
{
mutex_lock(&wmi_priv.mutex);
wmi_priv.bios_attr_wdev = NULL;
mutex_unlock(&wmi_priv.mutex);
- return 0;
}
static const struct wmi_device_id bios_attr_set_interface_id_table[] = {
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
index 5780b4d94759..339a082d6c18 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
@@ -119,12 +119,11 @@ static int bios_attr_pass_interface_probe(struct wmi_device *wdev, const void *c
return 0;
}
-static int bios_attr_pass_interface_remove(struct wmi_device *wdev)
+static void bios_attr_pass_interface_remove(struct wmi_device *wdev)
{
mutex_lock(&wmi_priv.mutex);
wmi_priv.password_attr_wdev = NULL;
mutex_unlock(&wmi_priv.mutex);
- return 0;
}
static const struct wmi_device_id bios_attr_pass_interface_id_table[] = {
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 7410ccae650c..c8d276d78e92 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
union acpi_object *obj = NULL;
union acpi_object *elements;
struct kset *tmp_set;
+ int min_elements;
/* instance_id needs to be reset for each type GUID
* also, instance IDs are unique within GUID but not across
@@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
retval = alloc_attributes_data(attr_type);
if (retval)
return retval;
+
+ switch (attr_type) {
+ case ENUM: min_elements = 8; break;
+ case INT: min_elements = 9; break;
+ case STR: min_elements = 8; break;
+ case PO: min_elements = 4; break;
+ default:
+ pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ return -EINVAL;
+ }
+
/* need to use specific instance_id and guid combination to get right data */
obj = get_wmiobj_pointer(instance_id, guid);
- if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+ if (!obj)
return -ENODEV;
- elements = obj->package.elements;
mutex_lock(&wmi_priv.mutex);
- while (elements) {
+ while (obj) {
+ if (obj->type != ACPI_TYPE_PACKAGE) {
+ pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
+ retval = -EIO;
+ goto err_attr_init;
+ }
+
+ if (obj->package.count < min_elements) {
+ pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
+ obj->package.count, min_elements);
+ goto nextobj;
+ }
+
+ elements = obj->package.elements;
+
/* sanity checking */
if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
pr_debug("incorrect element type\n");
@@ -481,7 +506,6 @@ nextobj:
kfree(obj);
instance_id++;
obj = get_wmiobj_pointer(instance_id, guid);
- elements = obj ? obj->package.elements : NULL;
}
mutex_unlock(&wmi_priv.mutex);
@@ -604,7 +628,7 @@ static void __exit sysman_exit(void)
module_init(sysman_init);
module_exit(sysman_exit);
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_AUTHOR("Prasanth Ksr <prasanth.ksr@dell.com>");
MODULE_AUTHOR("Divya Bharathi <divya.bharathi@dell.com>");
MODULE_DESCRIPTION("Dell platform setting control interface");
diff --git a/drivers/platform/x86/dell/dell-wmi.c b/drivers/platform/x86/dell/dell-wmi.c
index bbdb3e860892..5e1b7f897df5 100644
--- a/drivers/platform/x86/dell/dell-wmi.c
+++ b/drivers/platform/x86/dell/dell-wmi.c
@@ -714,10 +714,9 @@ static int dell_wmi_probe(struct wmi_device *wdev, const void *context)
return dell_wmi_input_setup(wdev);
}
-static int dell_wmi_remove(struct wmi_device *wdev)
+static void dell_wmi_remove(struct wmi_device *wdev)
{
dell_wmi_input_destroy(wdev);
- return 0;
}
static const struct wmi_device_id dell_wmi_id_table[] = {
{ .guid_string = DELL_EVENT_GUID },
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
new file mode 100644
index 000000000000..13d57434e60f
--- /dev/null
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Thomas Weißschuh <thomas@weissschuh.net>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#define GIGABYTE_WMI_GUID "DEADBEEF-2001-0000-00A0-C90629100000"
+#define NUM_TEMPERATURE_SENSORS 6
+
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force loading on unknown platform");
+
+static u8 usable_sensors_mask;
+
+enum gigabyte_wmi_commandtype {
+ GIGABYTE_WMI_BUILD_DATE_QUERY = 0x1,
+ GIGABYTE_WMI_MAINBOARD_TYPE_QUERY = 0x2,
+ GIGABYTE_WMI_FIRMWARE_VERSION_QUERY = 0x4,
+ GIGABYTE_WMI_MAINBOARD_NAME_QUERY = 0x5,
+ GIGABYTE_WMI_TEMPERATURE_QUERY = 0x125,
+};
+
+struct gigabyte_wmi_args {
+ u32 arg1;
+};
+
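+/*
+ * Evaluate the WMI method on instance 0 with a single 32-bit argument and
+ * hand the raw result buffer back to the caller for interpretation.
+ */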
+static int gigabyte_wmi_perform_query(struct wmi_device *wdev,
+ enum gigabyte_wmi_commandtype command,
+ struct gigabyte_wmi_args *args, struct acpi_buffer *out)
+{
+ const struct acpi_buffer in = {
+ .length = sizeof(*args),
+ .pointer = args,
+ };
+
+ acpi_status ret = wmidev_evaluate_method(wdev, 0x0, command, &in, out);
+
+ if (ACPI_FAILURE(ret))
+ return -EIO;
+
+ return 0;
+}
+
+static int gigabyte_wmi_query_integer(struct wmi_device *wdev,
+ enum gigabyte_wmi_commandtype command,
+ struct gigabyte_wmi_args *args, u64 *res)
+{
+ union acpi_object *obj;
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ int ret;
+
+ ret = gigabyte_wmi_perform_query(wdev, command, args, &result);
+ if (ret)
+ return ret;
+ obj = result.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *res = obj->integer.value;
+ else
+ ret = -EIO;
+ kfree(result.pointer);
+ return ret;
+}
+
+static int gigabyte_wmi_temperature(struct wmi_device *wdev, u8 sensor, long *res)
+{
+ struct gigabyte_wmi_args args = {
+ .arg1 = sensor,
+ };
+ u64 temp;
+ int ret;
+
+ ret = gigabyte_wmi_query_integer(wdev, GIGABYTE_WMI_TEMPERATURE_QUERY, &args, &temp);
+ if (ret == 0) {
+ if (temp == 0)
+ return -ENODEV;
+ *res = (s8)temp * 1000; // value is a signed 8-bit integer
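+ // e.g. a raw reading of 0xE7 (231) becomes -25, reported as -25000 millidegrees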
+ }
+ return ret;
+}
+
+static int gigabyte_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct wmi_device *wdev = dev_get_drvdata(dev);
+
+ return gigabyte_wmi_temperature(wdev, channel, val);
+}
+
+static umode_t gigabyte_wmi_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return usable_sensors_mask & BIT(channel) ? 0444 : 0;
+}
+
+static const struct hwmon_channel_info *gigabyte_wmi_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops gigabyte_wmi_hwmon_ops = {
+ .read = gigabyte_wmi_hwmon_read,
+ .is_visible = gigabyte_wmi_hwmon_is_visible,
+};
+
+static const struct hwmon_chip_info gigabyte_wmi_hwmon_chip_info = {
+ .ops = &gigabyte_wmi_hwmon_ops,
+ .info = gigabyte_wmi_hwmon_info,
+};
+
+static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+{
+ int i;
+ long temp;
+ u8 r = 0;
+
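+ /* e.g. if only sensors 0, 1 and 4 respond, this yields r = 0x13 */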
+ for (i = 0; i < NUM_TEMPERATURE_SENSORS; i++) {
+ if (!gigabyte_wmi_temperature(wdev, i, &temp))
+ r |= BIT(i);
+ }
+ return r;
+}
+
+static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550 GAMING X V2"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M AORUS PRO-P"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M DS3H"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Z390 I AORUS PRO WIFI-CF"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 AORUS ELITE"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 I AORUS PRO WIFI"),
+ }},
+ { }
+};
+
+static int gigabyte_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct device *hwmon_dev;
+
+ if (!dmi_check_system(gigabyte_wmi_known_working_platforms)) {
+ if (!force_load)
+ return -ENODEV;
+ dev_warn(&wdev->dev, "Forcing load on unknown platform\n");
+ }
+
+ usable_sensors_mask = gigabyte_wmi_detect_sensor_usability(wdev);
+ if (!usable_sensors_mask) {
+ dev_info(&wdev->dev, "No temperature sensors usable\n");
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&wdev->dev, "gigabyte_wmi", wdev,
+ &gigabyte_wmi_hwmon_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct wmi_device_id gigabyte_wmi_id_table[] = {
+ { GIGABYTE_WMI_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver gigabyte_wmi_driver = {
+ .driver = {
+ .name = "gigabyte-wmi",
+ },
+ .id_table = gigabyte_wmi_id_table,
+ .probe = gigabyte_wmi_probe,
+};
+module_wmi_driver(gigabyte_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, gigabyte_wmi_id_table);
+MODULE_AUTHOR("Thomas Weißschuh <thomas@weissschuh.net>");
+MODULE_DESCRIPTION("Gigabyte WMI temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e94e59283ecb..027a1467d009 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -21,6 +21,7 @@
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/string.h>
@@ -85,7 +86,7 @@ enum hp_wmi_commandtype {
HPWMI_FEATURE2_QUERY = 0x0d,
HPWMI_WIRELESS2_QUERY = 0x1b,
HPWMI_POSTCODEERROR_QUERY = 0x2a,
- HPWMI_THERMAL_POLICY_QUERY = 0x4c,
+ HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
};
enum hp_wmi_command {
@@ -119,6 +120,12 @@ enum hp_wireless2_bits {
HPWMI_POWER_FW_OR_HW = HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
};
+enum hp_thermal_profile {
+ HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
+ HP_THERMAL_PROFILE_DEFAULT = 0x01,
+ HP_THERMAL_PROFILE_COOL = 0x02
+};
+
#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
@@ -159,6 +166,8 @@ static const struct key_entry hp_wmi_keymap[] = {
static struct input_dev *hp_wmi_input_dev;
static struct platform_device *hp_wmi_platform_dev;
+static struct platform_profile_handler platform_profile_handler;
+static bool platform_profile_support;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
@@ -869,23 +878,98 @@ fail:
return err;
}
-static int thermal_policy_setup(struct platform_device *device)
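+/* Raw accessors for the HP thermal profile selector (WMI query 0x4c). */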
+static int thermal_profile_get(void)
+{
+ return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
+}
+
+static int thermal_profile_set(int thermal_profile)
+{
+ return hp_wmi_perform_query(HPWMI_THERMAL_PROFILE_QUERY, HPWMI_WRITE, &thermal_profile,
+ sizeof(thermal_profile), 0);
+}
+
+static int platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ int tp;
+
+ tp = thermal_profile_get();
+ if (tp < 0)
+ return tp;
+
+ switch (tp) {
+ case HP_THERMAL_PROFILE_PERFORMANCE:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case HP_THERMAL_PROFILE_DEFAULT:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case HP_THERMAL_PROFILE_COOL:
+ *profile = PLATFORM_PROFILE_COOL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
{
int err, tp;
- tp = hp_wmi_read_int(HPWMI_THERMAL_POLICY_QUERY);
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = HP_THERMAL_PROFILE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = HP_THERMAL_PROFILE_DEFAULT;
+ break;
+ case PLATFORM_PROFILE_COOL:
+ tp = HP_THERMAL_PROFILE_COOL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = thermal_profile_set(tp);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int thermal_profile_setup(void)
+{
+ int err, tp;
+
+ tp = thermal_profile_get();
if (tp < 0)
return tp;
/*
- * call thermal policy write command to ensure that the firmware correctly
+ * call thermal profile write command to ensure that the firmware correctly
* sets the OEM variables for the DPTF
*/
- err = hp_wmi_perform_query(HPWMI_THERMAL_POLICY_QUERY, HPWMI_WRITE, &tp,
- sizeof(tp), 0);
+ err = thermal_profile_set(tp);
if (err)
return err;
+ platform_profile_handler.profile_get = platform_profile_get;
+ platform_profile_handler.profile_set = platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
+
+ err = platform_profile_register(&platform_profile_handler);
+ if (err)
+ return err;
+
+ platform_profile_support = true;
+
return 0;
}
@@ -900,7 +984,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
- thermal_policy_setup(device);
+ thermal_profile_setup();
return 0;
}
@@ -927,6 +1011,9 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_destroy(wwan_rfkill);
}
+ if (platform_profile_support)
+ platform_profile_remove();
+
return 0;
}
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 3fdf4cbec9ad..888a764efad1 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -63,9 +63,6 @@ static const struct key_entry intel_vbtn_switchmap[] = {
{ KE_END }
};
-#define KEYMAP_LEN \
- (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1)
-
struct intel_vbtn_priv {
struct input_dev *buttons_dev;
struct input_dev *switches_dev;
diff --git a/drivers/platform/x86/intel-wmi-sbl-fw-update.c b/drivers/platform/x86/intel-wmi-sbl-fw-update.c
index ea87fa0786e8..3c86e0108a24 100644
--- a/drivers/platform/x86/intel-wmi-sbl-fw-update.c
+++ b/drivers/platform/x86/intel-wmi-sbl-fw-update.c
@@ -117,10 +117,9 @@ static int intel_wmi_sbl_fw_update_probe(struct wmi_device *wdev,
return 0;
}
-static int intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
+static void intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
{
dev_info(&wdev->dev, "Slim Bootloader signaling driver removed\n");
- return 0;
}
static const struct wmi_device_id intel_wmi_sbl_id_table[] = {
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c
index 974c22a7ff61..4ae87060d18b 100644
--- a/drivers/platform/x86/intel-wmi-thunderbolt.c
+++ b/drivers/platform/x86/intel-wmi-thunderbolt.c
@@ -66,11 +66,10 @@ static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev,
return ret;
}
-static int intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
+static void intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
{
sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
- return 0;
}
static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
diff --git a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
index 0df2e82dd249..9606a994af22 100644
--- a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
+++ b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
@@ -58,7 +58,7 @@ static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev)
err = devm_request_threaded_irq(dev, irq, NULL,
chtdc_ti_pwrbtn_interrupt,
- 0, KBUILD_MODNAME, input);
+ IRQF_ONESHOT, KBUILD_MODNAME, input);
if (err)
return err;
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index b5888aeb4bcf..b0e486a6bdfb 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -23,7 +23,9 @@
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>
+#include <linux/uuid.h>
+#include <acpi/acpi_bus.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
@@ -31,7 +33,8 @@
#include "intel_pmc_core.h"
-static struct pmc_dev pmc;
+#define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972"
+#define ACPI_GET_LOW_MODE_REGISTERS 1
/* PKGC MSRs are common across Intel Core SoCs */
static const struct pmc_bit_map msr_map[] = {
@@ -380,6 +383,8 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
* a list of core SoCs using this.
*/
{"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
/* Below two cannot be used for LTR_IGNORE */
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
{"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
@@ -401,6 +406,7 @@ static const struct pmc_reg_map cnp_reg_map = {
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
};
static const struct pmc_reg_map icl_reg_map = {
@@ -418,6 +424,7 @@ static const struct pmc_reg_map icl_reg_map = {
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
};
static const struct pmc_bit_map tgl_clocksource_status_map[] = {
@@ -579,14 +586,65 @@ static const struct pmc_reg_map tgl_reg_map = {
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
- .lpm_modes = tgl_lpm_modes,
+ .lpm_num_maps = TGL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
.lpm_en_offset = TGL_LPM_EN_OFFSET,
+ .lpm_priority_offset = TGL_LPM_PRI_OFFSET,
.lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
.lpm_sts = tgl_lpm_maps,
.lpm_status_offset = TGL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
+ .etr3_offset = ETR3_OFFSET,
};
+static void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ const int num_maps = pmcdev->map->lpm_num_maps;
+ u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4;
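+ /* i.e. one 32-bit requirement register per (substate mode, status map) pair */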
+ union acpi_object *out_obj;
+ struct acpi_device *adev;
+ guid_t s0ix_dsm_guid;
+ u32 *lpm_req_regs, *addr;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return;
+
+ guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
+
+ out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0,
+ ACPI_GET_LOW_MODE_REGISTERS, NULL);
+ if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
+ u32 size = out_obj->buffer.length;
+
+ if (size != lpm_size) {
+ acpi_handle_debug(adev->handle,
+ "_DSM returned unexpected buffer size, have %u, expect %u\n",
+ size, lpm_size);
+ goto free_acpi_obj;
+ }
+ } else {
+ acpi_handle_debug(adev->handle,
+ "_DSM function 0 evaluation failed\n");
+ goto free_acpi_obj;
+ }
+
+ addr = (u32 *)out_obj->buffer.pointer;
+
+ lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32),
+ GFP_KERNEL);
+ if (!lpm_req_regs)
+ goto free_acpi_obj;
+
+ memcpy(lpm_req_regs, addr, lpm_size);
+ pmcdev->lpm_req_regs = lpm_req_regs;
+
+free_acpi_obj:
+ ACPI_FREE(out_obj);
+}
+
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
@@ -603,6 +661,115 @@ static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
return (u64)value * pmcdev->map->slp_s0_res_counter_step;
}
+static int set_etr3(struct pmc_dev *pmcdev)
+{
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+ int err;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ /* check if CF9 is locked */
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ if (reg & ETR3_CF9LOCK) {
+ err = -EACCES;
+ goto out_unlock;
+ }
+
+ /* write CF9 global reset bit */
+ reg |= ETR3_CF9GR;
+ pmc_core_reg_write(pmcdev, map->etr3_offset, reg);
+
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ if (!(reg & ETR3_CF9GR)) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ err = 0;
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+
+static umode_t etr3_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+
+ mutex_lock(&pmcdev->lock);
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ mutex_unlock(&pmcdev->lock);
+
+ return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
+}
+
+static ssize_t etr3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+
+ mutex_unlock(&pmcdev->lock);
+
+ return sysfs_emit(buf, "0x%08x", reg);
+}
+
+static ssize_t etr3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ int err;
+ u32 reg;
+
+ err = kstrtouint(buf, 16, &reg);
+ if (err)
+ return err;
+
+ /* allow only CF9 writes */
+ if (reg != ETR3_CF9GR)
+ return -EINVAL;
+
+ err = set_etr3(pmcdev);
+ if (err)
+ return err;
+
+ return len;
+}
+
+static DEVICE_ATTR_RW(etr3);
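+/*
+ * Example: writing the hex value of ETR3_CF9GR to this sysfs attribute sets
+ * the CF9 global reset bit (rejected with -EACCES while CF9 is locked).
+ */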
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_etr3.attr,
+ NULL
+};
+
+static const struct attribute_group pmc_attr_group = {
+ .attrs = pmc_attrs,
+ .is_visible = etr3_is_visible,
+};
+
+static const struct attribute_group *pmc_dev_groups[] = {
+ &pmc_attr_group,
+ NULL
+};
+
static int pmc_core_dev_state_get(void *data, u64 *val)
{
struct pmc_dev *pmcdev = data;
@@ -617,9 +784,8 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
-static int pmc_core_check_read_lock_bit(void)
+static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
{
- struct pmc_dev *pmcdev = &pmc;
u32 value;
value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
@@ -744,28 +910,26 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
/* This function should return link status, 0 means ready */
-static int pmc_core_mtpmc_link_status(void)
+static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev)
{
- struct pmc_dev *pmcdev = &pmc;
u32 value;
value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
}
-static int pmc_core_send_msg(u32 *addr_xram)
+static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram)
{
- struct pmc_dev *pmcdev = &pmc;
u32 dest;
int timeout;
for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
- if (pmc_core_mtpmc_link_status() == 0)
+ if (pmc_core_mtpmc_link_status(pmcdev) == 0)
break;
msleep(5);
}
- if (timeout <= 0 && pmc_core_mtpmc_link_status())
+ if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev))
return -EBUSY;
dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
@@ -791,7 +955,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
mutex_lock(&pmcdev->lock);
- if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
+ if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) {
err = -EBUSY;
goto out_unlock;
}
@@ -799,7 +963,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
msleep(10);
val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
- if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
+ if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) {
err = -EBUSY;
goto out_unlock;
}
@@ -842,7 +1006,7 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
mutex_lock(&pmcdev->lock);
- if (pmc_core_send_msg(&mphy_common_reg) != 0) {
+ if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) {
err = -EBUSY;
goto out_unlock;
}
@@ -863,9 +1027,8 @@ out_unlock:
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static int pmc_core_send_ltr_ignore(u32 value)
+static int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
{
- struct pmc_dev *pmcdev = &pmc;
const struct pmc_reg_map *map = pmcdev->map;
u32 reg;
int err = 0;
@@ -891,6 +1054,8 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
u32 buf_size, value;
int err;
@@ -900,7 +1065,7 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file,
if (err)
return err;
- err = pmc_core_send_ltr_ignore(value);
+ err = pmc_core_send_ltr_ignore(pmcdev, value);
return err == 0 ? count : err;
}
@@ -1029,21 +1194,26 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
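+/*
+ * The residency counter step is stored doubled (lpm_res_counter_step_x2) so
+ * that half-step granularities stay integral; GET_X2_COUNTER() folds the
+ * factor of two back out after scaling.
+ */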
+static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
+ const int lpm_adj_x2)
+{
+ u64 lpm_res = pmc_core_reg_read(pmcdev, offset);
+
+ return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
+}
+
static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const char **lpm_modes = pmcdev->map->lpm_modes;
+ const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
u32 offset = pmcdev->map->lpm_residency_offset;
- u32 lpm_en;
- int index;
+ int i, mode;
- lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
- seq_printf(s, "status substate residency\n");
- for (index = 0; lpm_modes[index]; index++) {
- seq_printf(s, "%7s %7s %-15u\n",
- BIT(index) & lpm_en ? "Enabled" : " ",
- lpm_modes[index], pmc_core_reg_read(pmcdev, offset));
- offset += 4;
+ seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
+
+ pmc_for_each_mode(i, mode, pmcdev) {
+ seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
+ adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
}
return 0;
@@ -1074,6 +1244,190 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
+static void pmc_core_substate_req_header_show(struct seq_file *s)
+{
+ struct pmc_dev *pmcdev = s->private;
+ int i, mode;
+
+ seq_printf(s, "%30s |", "Element");
+ pmc_for_each_mode(i, mode, pmcdev)
+ seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
+
+ seq_printf(s, " %9s |\n", "Status");
+}
+
+static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ const struct pmc_bit_map *map;
+ const int num_maps = pmcdev->map->lpm_num_maps;
+ u32 sts_offset = pmcdev->map->lpm_status_offset;
+ u32 *lpm_req_regs = pmcdev->lpm_req_regs;
+ int mp;
+
+ /* Display the header */
+ pmc_core_substate_req_header_show(s);
+
+ /* Loop over maps */
+ for (mp = 0; mp < num_maps; mp++) {
+ u32 req_mask = 0;
+ u32 lpm_status;
+ int mode, idx, i, len = 32;
+
+ /*
+ * Capture the requirements and create a mask so that we only
+ * show an element if it's required for at least one of the
+ * enabled low power modes
+ */
+ pmc_for_each_mode(idx, mode, pmcdev)
+ req_mask |= lpm_req_regs[mp + (mode * num_maps)];
+
+ /* Get the last latched status for this map */
+ lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4));
+
+ /* Loop over elements in this map */
+ map = maps[mp];
+ for (i = 0; i < len && map[i].name; i++) {
+ u32 bit_mask = map[i].bit_mask;
+
+ if (!(bit_mask & req_mask))
+ /*
+ * Not required for any enabled states
+ * so don't display
+ */
+ continue;
+
+ /* Display the element name in the first column */
+ seq_printf(s, "%30s |", map[i].name);
+
+ /* Loop over the enabled states and display if required */
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
+ seq_printf(s, " %9s |",
+ "Required");
+ else
+ seq_printf(s, " %9s |", " ");
+ }
+
+ /* In Status column, show the last captured state of this agent */
+ if (lpm_status & bit_mask)
+ seq_printf(s, " %9s |", "Yes");
+ else
+ seq_printf(s, " %9s |", " ");
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
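
A standalone model of the lpm_req_regs indexing used above: one u32 per status map, repeated per substate, so the register for map mp in mode mode sits at index mp + mode * num_maps. The values below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	enum { NUM_MAPS = 6, NUM_MODES = 2 };
	/* flattened [mode][map] layout, as read from the PMC */
	uint32_t lpm_req_regs[NUM_MODES * NUM_MAPS] = {
		[0] = 0x1,		/* mode 0, map 0: bit 0 required */
		[NUM_MAPS] = 0x3,	/* mode 1, map 0: bits 0-1 required */
	};
	const uint32_t bit_mask = 0x2;	/* hypothetical element bit */
	const int mp = 0;
	int mode;

	for (mode = 0; mode < NUM_MODES; mode++)
		printf("mode %d: %s\n", mode,
		       (lpm_req_regs[mp + mode * NUM_MAPS] & bit_mask) ?
		       "Required" : "not required");
	return 0;
}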
+
+static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ bool c10;
+ u32 reg;
+ int idx, mode;
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+ if (reg & LPM_STS_LATCH_MODE) {
+ seq_puts(s, "c10");
+ c10 = false;
+ } else {
+ seq_puts(s, "[c10]");
+ c10 = true;
+ }
+
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if ((BIT(mode) & reg) && !c10)
+ seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
+ else
+ seq_printf(s, " %s", pmc_lpm_modes[mode]);
+ }
+
+ seq_puts(s, " clear\n");
+
+ return 0;
+}
+
+static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+ bool clear = false, c10 = false;
+ unsigned char buf[8];
+ int idx, m, mode;
+ u32 reg;
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ /*
+ * Allowed strings are:
+ * Any enabled substate, e.g. 'S0i2.0'
+ * 'c10'
+ * 'clear'
+ */
+ mode = sysfs_match_string(pmc_lpm_modes, buf);
+
+ /* Check if the string matches an enabled mode */
+ pmc_for_each_mode(idx, m, pmcdev)
+ if (mode == m)
+ break;
+
+ if (mode != m || mode < 0) {
+ if (sysfs_streq(buf, "clear"))
+ clear = true;
+ else if (sysfs_streq(buf, "c10"))
+ c10 = true;
+ else
+ return -EINVAL;
+ }
+
+ if (clear) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset);
+ reg |= ETR3_CLEAR_LPM_EVENTS;
+ pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ if (c10) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+ reg &= ~LPM_STS_LATCH_MODE;
+ pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ /*
+ * For LPM mode latching we set the latch enable bit and selected mode
+ * and clear everything else.
+ */
+ reg = LPM_STS_LATCH_MODE | BIT(mode);
+ mutex_lock(&pmcdev->lock);
+ pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+}
+DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
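
The write handler above programs one of three register values; a small sketch of the mode-latch composition, using the bit position defined in intel_pmc_core.h further down:

#include <stdint.h>
#include <stdio.h>

#define LPM_STS_LATCH_MODE (1u << 31)	/* matches BIT(31) in the header */

/* Latch on the selected substate only; all other select bits cleared. */
static uint32_t latch_reg_for_mode(int mode)
{
	return LPM_STS_LATCH_MODE | (1u << mode);
}

int main(void)
{
	printf("mode 0: 0x%08x\n", latch_reg_for_mode(0));	/* 0x80000001 */
	printf("mode 4: 0x%08x\n", latch_reg_for_mode(4));	/* 0x80000010 */
	return 0;
}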
+
static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
@@ -1095,6 +1449,45 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
+static void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
+{
+ u8 lpm_priority[LPM_MAX_NUM_MODES];
+ u32 lpm_en;
+ int mode, i, p;
+
+ /* Use LPM Maps to indicate support for substates */
+ if (!pmcdev->map->lpm_num_maps)
+ return;
+
+ lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
+ pmcdev->num_lpm_modes = hweight32(lpm_en);
+
+ /* Each byte contains information for 2 modes (7:4 and 3:0) */
+ for (mode = 0; mode < LPM_MAX_NUM_MODES; mode += 2) {
+ u8 priority = pmc_core_reg_read_byte(pmcdev,
+ pmcdev->map->lpm_priority_offset + (mode / 2));
+ int pri0 = GENMASK(3, 0) & priority;
+ int pri1 = (GENMASK(7, 4) & priority) >> 4;
+
+ lpm_priority[pri0] = mode;
+ lpm_priority[pri1] = mode + 1;
+ }
+
+ /*
+ * Loop through all modes from lowest to highest priority,
+ * and capture all enabled modes in order
+ */
+ i = 0;
+ for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
+ int mode = lpm_priority[p];
+
+ if (!(BIT(mode) & lpm_en))
+ continue;
+
+ pmcdev->lpm_en_modes[i++] = mode;
+ }
+}
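
The priority decode above packs two modes per register byte, low nibble first; a compilable model with invented priority bytes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	enum { LPM_MAX_NUM_MODES = 8 };
	/* hypothetical priority register bytes: low nibble first */
	const uint8_t prio_bytes[LPM_MAX_NUM_MODES / 2] = {
		0x10, 0x32, 0x54, 0x76
	};
	uint8_t lpm_priority[LPM_MAX_NUM_MODES];
	int mode, p;

	for (mode = 0; mode < LPM_MAX_NUM_MODES; mode += 2) {
		uint8_t priority = prio_bytes[mode / 2];

		lpm_priority[priority & 0xf] = mode;	/* bits 3:0 */
		lpm_priority[priority >> 4] = mode + 1;	/* bits 7:4 */
	}

	/* walk priority slots from lowest to highest, as the driver does */
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--)
		printf("slot %d -> mode %d\n", p, lpm_priority[p]);
	return 0;
}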
+
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -1153,6 +1546,15 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_file("substate_live_status_registers", 0444,
pmcdev->dbgfs_dir, pmcdev,
&pmc_core_substate_l_sts_regs_fops);
+ debugfs_create_file("lpm_latch_mode", 0644,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_lpm_latch_mode_fops);
+ }
+
+ if (pmcdev->lpm_req_regs) {
+ debugfs_create_file("substate_requirements", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_req_regs_fops);
}
}
@@ -1171,6 +1573,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map),
{}
};
@@ -1186,9 +1589,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
* the platform BIOS enforces the 24MHz crystal to shut down
* before PMC can assert SLP_S0#.
*/
+static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
- struct pmc_dev *pmcdev = &pmc;
+ xtal_ignore = true;
+ return 0;
+}
+
+static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
+{
u32 value;
value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
@@ -1197,7 +1606,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
/* Low Voltage Mode Enable */
value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
- return 0;
}
static const struct dmi_system_id pmc_core_dmi_table[] = {
@@ -1212,16 +1620,30 @@ static const struct dmi_system_id pmc_core_dmi_table[] = {
{}
};
+static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
+{
+ dmi_check_system(pmc_core_dmi_table);
+
+ if (xtal_ignore)
+ pmc_core_xtal_ignore(pmcdev);
+}
+
static int pmc_core_probe(struct platform_device *pdev)
{
static bool device_initialized;
- struct pmc_dev *pmcdev = &pmc;
+ struct pmc_dev *pmcdev;
const struct x86_cpu_id *cpu_id;
u64 slp_s0_addr;
if (device_initialized)
return -ENODEV;
+ pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
+ if (!pmcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pmcdev);
+
cpu_id = x86_match_cpu(intel_pmc_core_ids);
if (!cpu_id)
return -ENODEV;
@@ -1251,9 +1673,13 @@ static int pmc_core_probe(struct platform_device *pdev)
return -ENOMEM;
mutex_init(&pmcdev->lock);
- platform_set_drvdata(pdev, pmcdev);
- pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
- dmi_check_system(pmc_core_dmi_table);
+
+ pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
+ pmc_core_get_low_power_modes(pmcdev);
+ pmc_core_do_dmi_quirks(pmcdev);
+
+ if (pmcdev->map == &tgl_reg_map)
+ pmc_core_get_tgl_lpm_reqs(pdev);
/*
* On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
@@ -1261,7 +1687,7 @@ static int pmc_core_probe(struct platform_device *pdev)
*/
if (pmcdev->map == &tgl_reg_map) {
dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
- pmc_core_send_ltr_ignore(3);
+ pmc_core_send_ltr_ignore(pmcdev, 3);
}
pmc_core_dbgfs_register(pmcdev);
@@ -1384,6 +1810,7 @@ static struct platform_driver pmc_core_driver = {
.name = "intel_pmc_core",
.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
.pm = &pmc_core_pm_ops,
+ .dev_groups = pmc_dev_groups,
},
.probe = pmc_core_probe,
.remove = pmc_core_remove,
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index f33cd2c34835..e8dae9c6c45f 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -187,20 +187,38 @@ enum ppfear_regs {
#define ICL_PMC_LTR_WIGIG 0x1BFC
#define ICL_PMC_SLP_S0_RES_COUNTER_STEP 0x64
-#define TGL_NUM_IP_IGN_ALLOWED 22
+#define LPM_MAX_NUM_MODES 8
+#define GET_X2_COUNTER(v) ((v) >> 1)
+#define LPM_STS_LATCH_MODE BIT(31)
+
#define TGL_PMC_SLP_S0_RES_COUNTER_STEP 0x7A
+#define TGL_PMC_LTR_THC0 0x1C04
+#define TGL_PMC_LTR_THC1 0x1C08
+#define TGL_NUM_IP_IGN_ALLOWED 23
+#define TGL_PMC_LPM_RES_COUNTER_STEP_X2 61 /* 30.5us * 2 */
/*
* Tigerlake Power Management Controller register offsets
*/
+#define TGL_LPM_STS_LATCH_EN_OFFSET 0x1C34
#define TGL_LPM_EN_OFFSET 0x1C78
#define TGL_LPM_RESIDENCY_OFFSET 0x1C80
/* Tigerlake Low Power Mode debug registers */
#define TGL_LPM_STATUS_OFFSET 0x1C3C
#define TGL_LPM_LIVE_STATUS_OFFSET 0x1C5C
+#define TGL_LPM_PRI_OFFSET 0x1C7C
+#define TGL_LPM_NUM_MAPS 6
+
+/* Extended Test Mode Register 3 (CNL and later) */
+#define ETR3_OFFSET 0x1048
+#define ETR3_CF9GR BIT(20)
+#define ETR3_CF9LOCK BIT(31)
+
+/* Extended Test Mode Register LPM bits (TGL and later) */
+#define ETR3_CLEAR_LPM_EVENTS BIT(28)
-const char *tgl_lpm_modes[] = {
+const char *pmc_lpm_modes[] = {
"S0i2.0",
"S0i2.1",
"S0i2.2",
@@ -258,11 +276,15 @@ struct pmc_reg_map {
const u32 ltr_ignore_max;
const u32 pm_vric1_offset;
/* Low Power Mode registers */
- const char **lpm_modes;
+ const int lpm_num_maps;
+ const int lpm_res_counter_step_x2;
+ const u32 lpm_sts_latch_en_offset;
const u32 lpm_en_offset;
+ const u32 lpm_priority_offset;
const u32 lpm_residency_offset;
const u32 lpm_status_offset;
const u32 lpm_live_status_offset;
+ const u32 etr3_offset;
};
/**
@@ -278,6 +300,9 @@ struct pmc_reg_map {
* @check_counters: On resume, check if counters are getting incremented
* @pc10_counter: PC10 residency counter
* @s0ix_counter: S0ix residency (step adjusted)
+ * @num_lpm_modes: Count of enabled modes
+ * @lpm_en_modes: Array of enabled modes from lowest to highest priority
+ * @lpm_req_regs: List of substate requirements
*
* pmc_dev contains info about power management controller device.
*/
@@ -292,6 +317,28 @@ struct pmc_dev {
bool check_counters; /* Check for counter increments on resume */
u64 pc10_counter;
u64 s0ix_counter;
+ int num_lpm_modes;
+ int lpm_en_modes[LPM_MAX_NUM_MODES];
+ u32 *lpm_req_regs;
};
+#define pmc_for_each_mode(i, mode, pmcdev) \
+ for (i = 0, mode = pmcdev->lpm_en_modes[i]; \
+ i < pmcdev->num_lpm_modes; \
+ i++, mode = pmcdev->lpm_en_modes[i])
+
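
A self-contained model of the pmc_for_each_mode() iterator just defined, assuming a hypothetical three-mode enabled set:

#include <stdio.h>

struct pmc_dev_model {
	int num_lpm_modes;
	int lpm_en_modes[8];
};

#define pmc_for_each_mode(i, mode, pmcdev) \
	for (i = 0, mode = (pmcdev)->lpm_en_modes[i]; \
	     i < (pmcdev)->num_lpm_modes; \
	     i++, mode = (pmcdev)->lpm_en_modes[i])

int main(void)
{
	struct pmc_dev_model pmc = {
		.num_lpm_modes = 3,
		.lpm_en_modes = { 0, 4, 7 },	/* hypothetical enabled set */
	};
	int i, mode;

	pmc_for_each_mode(i, mode, &pmc)
		printf("slot %d -> mode %d\n", i, mode);
	return 0;
}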
+#define DEFINE_PMC_CORE_ATTR_WRITE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .release = single_release, \
+}
+
#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel_pmt_class.c
index ee2b3bbeb83d..c86ff15b1ed5 100644
--- a/drivers/platform/x86/intel_pmt_class.c
+++ b/drivers/platform/x86/intel_pmt_class.c
@@ -20,6 +20,28 @@
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
/*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module, OOBMSM). This list tracks those
+ * platforms as needed to handle those differences. Newer client
+ * platforms are expected to be fully compatible with server.
+ */
+static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
+ { PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */
+ { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
+ { }
+};
+
+bool intel_pmt_is_early_client_hw(struct device *dev)
+{
+ struct pci_dev *parent = to_pci_dev(dev->parent);
+
+ return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
+}
+EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
+
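
The ACCESS_LOCAL fixup added to intel_pmt_populate_entry() below searches the parent device's BARs for the one containing the discovery header; a standalone model of that search, with invented resource ranges and offsets:

#include <stdint.h>
#include <stdio.h>

struct bar_model { uint64_t start, end; };

int main(void)
{
	/* hypothetical BAR ranges and discovery-header address */
	struct bar_model bars[6] = {
		{ 0xfe000000, 0xfe0fffff },
		{ 0xfd000000, 0xfdffffff },
	};
	const uint64_t disc_start = 0xfd004000, base_offset = 0x1000;
	uint64_t base_addr = 0;
	int i;

	for (i = 0; i < 6; i++) {
		if (disc_start >= bars[i].start && disc_start <= bars[i].end) {
			/* base = start of the BAR holding the header + offset */
			base_addr = bars[i].start + base_offset;
			break;
		}
	}
	printf("base_addr = 0x%llx\n", (unsigned long long)base_addr);
	return 0;
}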
+/*
* sysfs
*/
static ssize_t
@@ -147,6 +169,30 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
* base address = end of discovery region + base offset
*/
entry->base_addr = disc_res->end + 1 + header->base_offset;
+
+ /*
+ * Some hardware uses a different calculation for the base address
+ * when access_type == ACCESS_LOCAL. On these systems
+ * ACCESS_LOCAL refers to an address in the same BAR as the
+ * header but at a fixed offset. But as the header address was
+ * supplied to the driver, we don't know which BAR it was in.
+ * So search for the BAR whose range includes the header address.
+ */
+ if (intel_pmt_is_early_client_hw(dev)) {
+ int i;
+
+ entry->base_addr = 0;
+ for (i = 0; i < 6; i++)
+ if (disc_res->start >= pci_resource_start(pci_dev, i) &&
+ (disc_res->start <= pci_resource_end(pci_dev, i))) {
+ entry->base_addr = pci_resource_start(pci_dev, i) +
+ header->base_offset;
+ break;
+ }
+ if (!entry->base_addr)
+ return -EINVAL;
+ }
+
break;
case ACCESS_BARID:
/*
diff --git a/drivers/platform/x86/intel_pmt_class.h b/drivers/platform/x86/intel_pmt_class.h
index de8f8139ba31..1337019c2873 100644
--- a/drivers/platform/x86/intel_pmt_class.h
+++ b/drivers/platform/x86/intel_pmt_class.h
@@ -44,6 +44,7 @@ struct intel_pmt_namespace {
struct device *dev);
};
+bool intel_pmt_is_early_client_hw(struct device *dev);
int intel_pmt_dev_create(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns,
struct platform_device *pdev, int idx);
diff --git a/drivers/platform/x86/intel_pmt_telemetry.c b/drivers/platform/x86/intel_pmt_telemetry.c
index f8a87614efa4..9b95ef050457 100644
--- a/drivers/platform/x86/intel_pmt_telemetry.c
+++ b/drivers/platform/x86/intel_pmt_telemetry.c
@@ -34,26 +34,6 @@ struct pmt_telem_priv {
struct intel_pmt_entry entry[];
};
-/*
- * Early implementations of PMT on client platforms have some
- * differences from the server platforms (which use the Out Of Band
- * Management Services Module OOBMSM). This list tracks those
- * platforms as needed to handle those differences. Newer client
- * platforms are expected to be fully compatible with server.
- */
-static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
- { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
- { }
-};
-
-static bool intel_pmt_is_early_client_hw(struct device *dev)
-{
- struct pci_dev *parent = to_pci_dev(dev->parent);
-
- return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
-}
-
static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
struct device *dev)
{
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
index a2a2d923e60c..df1fc6c719f3 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
@@ -21,12 +21,16 @@
#define PUNIT_MAILBOX_BUSY_BIT 31
/*
- * The average time to complete some commands is about 40us. The current
- * count is enough to satisfy 40us. But when the firmware is very busy, this
- * causes timeout occasionally. So increase to deal with some worst case
- * scenarios. Most of the command still complete in few us.
+ * The average time to complete mailbox commands is less than 40us. Most of
+ * the commands complete in a few microseconds. But the same firmware handles
+ * requests from all power management features, so if we flood the firmware
+ * with requests the mailbox response can be delayed by hundreds of
+ * microseconds. So define two timeouts: one for the average case and one
+ * for the worst case.
+ * If the firmware is taking longer than average, just call cond_resched().
*/
-#define OS_MAILBOX_RETRY_COUNT 100
+#define OS_MAILBOX_TIMEOUT_AVG_US 40
+#define OS_MAILBOX_TIMEOUT_MAX_US 1000
struct isst_if_device {
struct mutex mutex;
@@ -35,11 +39,13 @@ struct isst_if_device {
static int isst_if_mbox_cmd(struct pci_dev *pdev,
struct isst_if_mbox_cmd *mbox_cmd)
{
- u32 retries, data;
+ s64 tm_delta = 0;
+ ktime_t tm;
+ u32 data;
int ret;
/* Poll for rb bit == 0 */
- retries = OS_MAILBOX_RETRY_COUNT;
+ tm = ktime_get();
do {
ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
&data);
@@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
continue;
}
ret = 0;
break;
- } while (--retries);
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
if (ret)
return ret;
@@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
return ret;
/* Poll for rb bit == 0 */
- retries = OS_MAILBOX_RETRY_COUNT;
+ tm_delta = 0;
+ tm = ktime_get();
do {
ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
&data);
@@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
continue;
}
@@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
mbox_cmd->resp_data = data;
ret = 0;
break;
- } while (--retries);
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
return ret;
}
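
The comment above describes the two-timeout scheme; a userspace model of the poll loop, with sched_yield() standing in for cond_resched() and a simulated busy bit:

#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TIMEOUT_AVG_US 40
#define TIMEOUT_MAX_US 1000

static int64_t us_since(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000000LL +
	       (now.tv_nsec - start->tv_nsec) / 1000;
}

static int hw_busy(void)
{
	return 1;	/* simulated: the mailbox never becomes ready */
}

int main(void)
{
	struct timespec start;
	int64_t tm_delta = 0;
	int ret = -1;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (!hw_busy()) {
			ret = 0;
			break;
		}
		tm_delta = us_since(&start);
		/* past the average case: yield instead of spinning hard */
		if (tm_delta > TIMEOUT_AVG_US)
			sched_yield();
	} while (tm_delta < TIMEOUT_MAX_US);

	printf("poll result: %d after %lld us\n", ret, (long long)tm_delta);
	return 0;
}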
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index dd900a76d8de..20145b539335 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -678,7 +678,7 @@ static int __init acpi_init(void)
result = acpi_bus_register_driver(&acpi_driver);
if (result < 0) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error registering driver\n"));
+ pr_debug("Error registering driver\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 6388c3c705a6..d4f444401496 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -973,7 +973,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pcc->mute = pcc->sinf[SINF_MUTE];
pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
- result = pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
/* add sysfs attributes */
result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index ca684ed760d1..a9d2a4b98e57 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
},
{
/* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB3163",
+ .ident = "Beckhoff Baytrail",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB4063",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB6263",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB6363",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
},
},
{
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0d9e2ddbf904..dd60c9397d35 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -175,6 +175,12 @@ enum tpacpi_hkey_event_t {
or port replicator */
TP_HKEY_EV_HOTPLUG_UNDOCK = 0x4011, /* undocked from hotplug
dock or port replicator */
+ /*
+ * ThinkPad X1 Tablet series devices emit 0x4012 and 0x4013
+ * when the keyboard cover is attached, detached or folded onto the back
+ */
+ TP_HKEY_EV_KBD_COVER_ATTACH = 0x4012, /* keyboard cover attached */
+ TP_HKEY_EV_KBD_COVER_DETACH = 0x4013, /* keyboard cover detached or folded back */
/* User-interface events */
TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
@@ -3991,6 +3997,23 @@ static bool hotkey_notify_dockevent(const u32 hkey,
pr_info("undocked from hotplug port replicator\n");
return true;
+ /*
+ * Deliberately ignore attaching and detaching the keyboard cover to avoid
+ * duplicates from intel-vbtn, which already emits SW_TABLET_MODE events
+ * to userspace.
+ *
+ * Please refer to the following thread for more information and a preliminary
+ * implementation using the GTOP ("Get Tablet OPtions") interface that could be
+ * extended to other attachment options of the ThinkPad X1 Tablet series, such as
+ * the Pico cartridge dock module:
+ * https://lore.kernel.org/platform-driver-x86/38cb8265-1e30-d547-9e12-b4ae290be737@a-kobel.de/
+ */
+ case TP_HKEY_EV_KBD_COVER_ATTACH:
+ case TP_HKEY_EV_KBD_COVER_DETACH:
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
default:
return false;
}
@@ -4088,7 +4111,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
return true;
case TP_HKEY_EV_KEY_FN_ESC:
- /* Get the media key status to foce the status LED to update */
+ /* Get the media key status to force the status LED to update */
acpi_evalf(hkey_handle, NULL, "GMKS", "v");
*send_acpi_ev = false;
*ignore_acpi_ev = true;
@@ -6260,6 +6283,7 @@ enum thermal_access_mode {
enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
@@ -6272,6 +6296,8 @@ struct ibm_thermal_sensors_struct {
};
static enum thermal_access_mode thermal_read_mode;
+static const struct attribute_group *thermal_attr_group;
+static bool thermal_use_labels;
/* idx is zero-based */
static int thermal_get_sensor(int idx, s32 *value)
@@ -6454,11 +6480,33 @@ static const struct attribute_group thermal_temp_input8_group = {
#undef THERMAL_SENSOR_ATTR_TEMP
#undef THERMAL_ATTRS
+static ssize_t temp1_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "CPU\n");
+}
+static DEVICE_ATTR_RO(temp1_label);
+
+static ssize_t temp2_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "GPU\n");
+}
+static DEVICE_ATTR_RO(temp2_label);
+
+static struct attribute *temp_label_attributes[] = {
+ &dev_attr_temp1_label.attr,
+ &dev_attr_temp2_label.attr,
+ NULL
+};
+
+static const struct attribute_group temp_label_attr_group = {
+ .attrs = temp_label_attributes,
+};
+
/* --------------------------------------------------------------------- */
static int __init thermal_init(struct ibm_init_struct *iibm)
{
- u8 t, ta1, ta2;
+ u8 t, ta1, ta2, ver = 0;
int i;
int acpi_tmp7;
int res;
@@ -6473,7 +6521,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
* 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for
* non-implemented, thermal sensors return 0x80 when
* not available
+ * The above rule is unfortunately flawed. This has been seen with
+ * 0xC2 (power supply ID) causing thermal control problems.
+ * The EC version can be determined from offset 0xEF, and at least for
+ * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
+ * are not thermal registers.
*/
+ if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
+ pr_warn("Thinkpad ACPI EC unable to access EC version\n");
ta1 = ta2 = 0;
for (i = 0; i < 8; i++) {
@@ -6483,11 +6538,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
ta1 = 0;
break;
}
- if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
- ta2 |= t;
- } else {
- ta1 = 0;
- break;
+ if (ver < 3) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+ ta2 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
}
}
if (ta1 == 0) {
@@ -6500,9 +6557,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
thermal_read_mode = TPACPI_THERMAL_NONE;
}
} else {
- thermal_read_mode =
- (ta2 != 0) ?
- TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ if (ver >= 3) {
+ thermal_read_mode = TPACPI_THERMAL_TPEC_8;
+ thermal_use_labels = true;
+ } else {
+ thermal_read_mode =
+ (ta2 != 0) ?
+ TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ }
}
} else if (acpi_tmp7) {
if (tpacpi_is_ibm() &&
@@ -6524,44 +6586,40 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
switch (thermal_read_mode) {
case TPACPI_THERMAL_TPEC_16:
- res = sysfs_create_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input16_group);
- if (res)
- return res;
+ thermal_attr_group = &thermal_temp_input16_group;
break;
case TPACPI_THERMAL_TPEC_8:
case TPACPI_THERMAL_ACPI_TMP07:
case TPACPI_THERMAL_ACPI_UPDT:
- res = sysfs_create_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input8_group);
- if (res)
- return res;
+ thermal_attr_group = &thermal_temp_input8_group;
break;
case TPACPI_THERMAL_NONE:
default:
return 1;
}
+ res = sysfs_create_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+ if (res)
+ return res;
+
+ if (thermal_use_labels) {
+ res = sysfs_create_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
+ if (res) {
+ sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+ return res;
+ }
+ }
+
return 0;
}
static void thermal_exit(void)
{
- switch (thermal_read_mode) {
- case TPACPI_THERMAL_TPEC_16:
- sysfs_remove_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input16_group);
- break;
- case TPACPI_THERMAL_TPEC_8:
- case TPACPI_THERMAL_ACPI_TMP07:
- case TPACPI_THERMAL_ACPI_UPDT:
- sysfs_remove_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input8_group);
- break;
- case TPACPI_THERMAL_NONE:
- default:
- break;
- }
+ if (thermal_attr_group)
+ sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+
+ if (thermal_use_labels)
+ sysfs_remove_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
}
static int thermal_read(struct seq_file *m)
@@ -10050,6 +10108,7 @@ static struct ibm_struct proxsensor_driver_data = {
*/
#define DYTC_CMD_SET 1 /* To enable/disable IC function mode */
+#define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */
#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
@@ -10066,6 +10125,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_MODE_PERFORM 2 /* High power mode aka performance */
#define DYTC_MODE_LOWPOWER 3 /* Low power mode */
#define DYTC_MODE_BALANCE 0xF /* Default mode aka balanced */
+#define DYTC_MODE_MMC_BALANCE 0 /* Default mode from MMC_GET, aka balanced */
+
+#define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result are the error result */
+#define DYTC_ERR_SUCCESS 1 /* CMD completed successfully */
#define DYTC_SET_COMMAND(function, mode, on) \
(DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
@@ -10080,6 +10143,7 @@ static bool dytc_profile_available;
static enum platform_profile_option dytc_current_profile;
static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
+static bool dytc_mmc_get_available;
static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
{
@@ -10088,6 +10152,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
*profile = PLATFORM_PROFILE_LOW_POWER;
break;
case DYTC_MODE_BALANCE:
+ case DYTC_MODE_MMC_BALANCE:
*profile = PLATFORM_PROFILE_BALANCED;
break;
case DYTC_MODE_PERFORM:
@@ -10165,7 +10230,6 @@ static int dytc_cql_command(int command, int *output)
if (err)
return err;
}
-
return cmd_err;
}
@@ -10222,7 +10286,10 @@ static void dytc_profile_refresh(void)
int perfmode;
mutex_lock(&dytc_mutex);
- err = dytc_cql_command(DYTC_CMD_GET, &output);
+ if (dytc_mmc_get_available)
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ else
+ err = dytc_cql_command(DYTC_CMD_GET, &output);
mutex_unlock(&dytc_mutex);
if (err)
return;
@@ -10271,6 +10338,16 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (dytc_version >= 5) {
dbg_printk(TPACPI_DBG_INIT,
"DYTC version %d: thermal mode available\n", dytc_version);
+ /*
+ * Check if MMC_GET functionality is available:
+ * version >= 6 and success returned from the MMC_GET command
+ */
+ dytc_mmc_get_available = false;
+ if (dytc_version >= 6) {
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
+ dytc_mmc_get_available = true;
+ }
/* Create platform_profile structure and register */
err = platform_profile_register(&dytc_profile);
/*
@@ -10473,6 +10550,111 @@ static struct ibm_struct kbdlang_driver_data = {
.exit = kbdlang_exit,
};
+/*************************************************************************
+ * DPRC (Dynamic Power Reduction Control) subdriver, for the Lenovo WWAN
+ * and WLAN feature.
+ */
+#define DPRC_GET_WWAN_ANTENNA_TYPE 0x40000
+#define DPRC_WWAN_ANTENNA_TYPE_A_BIT BIT(4)
+#define DPRC_WWAN_ANTENNA_TYPE_B_BIT BIT(8)
+static bool has_antennatype;
+static int wwan_antennatype;
+
+static int dprc_command(int command, int *output)
+{
+ acpi_handle dprc_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DPRC", &dprc_handle))) {
+ /* Platform doesn't support DPRC */
+ return -ENODEV;
+ }
+
+ if (!acpi_evalf(dprc_handle, output, NULL, "dd", command))
+ return -EIO;
+
+ /*
+ * METHOD_ERR is returned on devices where some commands are not
+ * supported; for example, the command to get the WWAN antenna type is
+ * not supported on some devices.
+ */
+ if (*output & METHOD_ERR)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int get_wwan_antenna(int *wwan_antennatype)
+{
+ int output, err;
+
+ /* Get current Antenna type */
+ err = dprc_command(DPRC_GET_WWAN_ANTENNA_TYPE, &output);
+ if (err)
+ return err;
+
+ if (output & DPRC_WWAN_ANTENNA_TYPE_A_BIT)
+ *wwan_antennatype = 1;
+ else if (output & DPRC_WWAN_ANTENNA_TYPE_B_BIT)
+ *wwan_antennatype = 2;
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
+/* sysfs wwan antenna type entry */
+static ssize_t wwan_antenna_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ switch (wwan_antennatype) {
+ case 1:
+ return sysfs_emit(buf, "type a\n");
+ case 2:
+ return sysfs_emit(buf, "type b\n");
+ default:
+ return -ENODATA;
+ }
+}
+static DEVICE_ATTR_RO(wwan_antenna_type);
+
+static int tpacpi_dprc_init(struct ibm_init_struct *iibm)
+{
+ int wwanantenna_err, err;
+
+ wwanantenna_err = get_wwan_antenna(&wwan_antennatype);
+ /*
+ * If support isn't available (ENODEV) then quit, but don't
+ * return an error.
+ */
+ if (wwanantenna_err == -ENODEV)
+ return 0;
+
+ /* if there was an error return it */
+ if (wwanantenna_err && (wwanantenna_err != -ENODEV))
+ return wwanantenna_err;
+ else if (!wwanantenna_err)
+ has_antennatype = true;
+
+ if (has_antennatype) {
+ err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void dprc_exit(void)
+{
+ if (has_antennatype)
+ sysfs_remove_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
+}
+
+static struct ibm_struct dprc_driver_data = {
+ .name = "dprc",
+ .exit = dprc_exit,
+};
+
/****************************************************************************
****************************************************************************
*
@@ -10977,6 +11159,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.init = tpacpi_kbdlang_init,
.data = &kbdlang_driver_data,
},
+ {
+ .init = tpacpi_dprc_init,
+ .data = &dprc_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index c44a6e8dceb8..90fe4f8f3c2c 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -715,6 +715,32 @@ static const struct ts_dmi_data techbite_arc_11_6_data = {
.properties = techbite_arc_11_6_props,
};
+static const struct property_entry teclast_tbook11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data teclast_tbook11_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3692-teclast-tbook11.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 43560,
+ .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+ 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+ 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+ 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = teclast_tbook11_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1244,6 +1270,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Teclast Tbook 11 */
+ .driver_data = (void *)&teclast_tbook11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TbooK 11"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
@@ -1355,7 +1390,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
if (has_acpi_companion(dev) &&
!strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
- error = device_add_properties(dev, ts_data->properties);
+ error = device_create_managed_software_node(dev, ts_data->properties, NULL);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
}
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index 66b434d6307f..80137afb9753 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -86,13 +86,12 @@ static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
return ret;
}
-static int wmi_bmof_remove(struct wmi_device *wdev)
+static void wmi_bmof_remove(struct wmi_device *wdev)
{
struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
sysfs_remove_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
kfree(priv->bmofdata);
- return 0;
}
static const struct wmi_device_id wmi_bmof_id_table[] = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index c669676ea8e8..62e0d56a3332 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -32,7 +32,6 @@
#include <linux/fs.h>
#include <uapi/linux/wmi.h>
-ACPI_MODULE_NAME("wmi");
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
MODULE_LICENSE("GPL");
@@ -986,7 +985,6 @@ static int wmi_dev_remove(struct device *dev)
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver =
container_of(dev->driver, struct wmi_driver, driver);
- int ret = 0;
if (wdriver->filter_callback) {
misc_deregister(&wblock->char_dev);
@@ -995,12 +993,12 @@ static int wmi_dev_remove(struct device *dev)
}
if (wdriver->remove)
- ret = wdriver->remove(dev_to_wdev(dev));
+ wdriver->remove(dev_to_wdev(dev));
if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
dev_warn(dev, "failed to disable device\n");
- return ret;
+ return 0;
}
static struct class wmi_bus_class = {
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 8337c99d2ce2..97440462aa25 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -26,8 +26,6 @@
#define XO15_EBOOK_HID "XO15EBK"
#define XO15_EBOOK_DEVICE_NAME "EBook Switch"
-ACPI_MODULE_NAME(MODULE_NAME);
-
MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver");
MODULE_LICENSE("GPL");
@@ -66,8 +64,8 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
ebook_send_state(device);
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle,
+ "Unsupported event [0x%x]\n", event);
break;
}
}
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index e0de1df2ede0..35799e6401c9 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -12,15 +12,14 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pps_kernel.h>
-#include <linux/pps-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/list.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -100,51 +99,42 @@ static void pps_gpio_echo_timer_callback(struct timer_list *t)
gpiod_set_value(info->echo_pin, 0);
}
-static int pps_gpio_setup(struct platform_device *pdev)
+static int pps_gpio_setup(struct device *dev)
{
- struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node;
+ struct pps_gpio_device_data *data = dev_get_drvdata(dev);
int ret;
u32 value;
- data->gpio_pin = devm_gpiod_get(&pdev->dev,
- NULL, /* request "gpios" */
- GPIOD_IN);
- if (IS_ERR(data->gpio_pin)) {
- dev_err(&pdev->dev,
- "failed to request PPS GPIO\n");
- return PTR_ERR(data->gpio_pin);
+ data->gpio_pin = devm_gpiod_get(dev, NULL, GPIOD_IN);
+ if (IS_ERR(data->gpio_pin))
+ return dev_err_probe(dev, PTR_ERR(data->gpio_pin),
+ "failed to request PPS GPIO\n");
+
+ data->assert_falling_edge =
+ device_property_read_bool(dev, "assert-falling-edge");
+
+ data->echo_pin = devm_gpiod_get_optional(dev, "echo", GPIOD_OUT_LOW);
+ if (IS_ERR(data->echo_pin))
+ return dev_err_probe(dev, PTR_ERR(data->echo_pin),
+ "failed to request ECHO GPIO\n");
+
+ if (!data->echo_pin)
+ return 0;
+
+ ret = device_property_read_u32(dev, "echo-active-ms", &value);
+ if (ret) {
+ dev_err(dev, "failed to get echo-active-ms from FW\n");
+ return ret;
}
- data->echo_pin = devm_gpiod_get_optional(&pdev->dev,
- "echo",
- GPIOD_OUT_LOW);
- if (data->echo_pin) {
- if (IS_ERR(data->echo_pin)) {
- dev_err(&pdev->dev, "failed to request ECHO GPIO\n");
- return PTR_ERR(data->echo_pin);
- }
-
- ret = of_property_read_u32(np,
- "echo-active-ms",
- &value);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to get echo-active-ms from OF\n");
- return ret;
- }
- data->echo_active_ms = value;
- /* sanity check on echo_active_ms */
- if (!data->echo_active_ms || data->echo_active_ms > 999) {
- dev_err(&pdev->dev,
- "echo-active-ms: %u - bad value from OF\n",
- data->echo_active_ms);
- return -EINVAL;
- }
+ /* sanity check on echo_active_ms */
+ if (!value || value > 999) {
+ dev_err(dev, "echo-active-ms: %u - bad value from FW\n", value);
+ return -EINVAL;
}
- if (of_property_read_bool(np, "assert-falling-edge"))
- data->assert_falling_edge = true;
+ data->echo_active_ms = value;
+
return 0;
}
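
A trivial standalone model of the echo-active-ms sanity check above (the helper name is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* the echo timer works in milliseconds and must stay below one second */
static int validate_echo_active_ms(uint32_t value)
{
	if (!value || value > 999)
		return -1;
	return 0;
}

int main(void)
{
	const uint32_t samples[] = { 0, 10, 999, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("echo-active-ms=%u -> %s\n", samples[i],
		       validate_echo_active_ms(samples[i]) ? "rejected" : "ok");
	return 0;
}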
@@ -165,34 +155,26 @@ get_irqf_trigger_flags(const struct pps_gpio_device_data *data)
static int pps_gpio_probe(struct platform_device *pdev)
{
struct pps_gpio_device_data *data;
+ struct device *dev = &pdev->dev;
int ret;
int pps_default_params;
- const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data;
/* allocate space for device info */
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- platform_set_drvdata(pdev, data);
+
+ dev_set_drvdata(dev, data);
/* GPIO setup */
- if (pdata) {
- data->gpio_pin = pdata->gpio_pin;
- data->echo_pin = pdata->echo_pin;
-
- data->assert_falling_edge = pdata->assert_falling_edge;
- data->capture_clear = pdata->capture_clear;
- data->echo_active_ms = pdata->echo_active_ms;
- } else {
- ret = pps_gpio_setup(pdev);
- if (ret)
- return -EINVAL;
- }
+ ret = pps_gpio_setup(dev);
+ if (ret)
+ return -EINVAL;
/* IRQ setup */
ret = gpiod_to_irq(data->gpio_pin);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
+ dev_err(dev, "failed to map GPIO to IRQ: %d\n", ret);
return -EINVAL;
}
data->irq = ret;
@@ -218,17 +200,17 @@ static int pps_gpio_probe(struct platform_device *pdev)
pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
data->pps = pps_register_source(&data->info, pps_default_params);
if (IS_ERR(data->pps)) {
- dev_err(&pdev->dev, "failed to register IRQ %d as PPS source\n",
+ dev_err(dev, "failed to register IRQ %d as PPS source\n",
data->irq);
return PTR_ERR(data->pps);
}
/* register IRQ interrupt handler */
- ret = devm_request_irq(&pdev->dev, data->irq, pps_gpio_irq_handler,
+ ret = devm_request_irq(dev, data->irq, pps_gpio_irq_handler,
get_irqf_trigger_flags(data), data->info.name, data);
if (ret) {
pps_unregister_source(data->pps);
- dev_err(&pdev->dev, "failed to acquire IRQ %d\n", data->irq);
+ dev_err(dev, "failed to acquire IRQ %d\n", data->irq);
return -EINVAL;
}
@@ -243,11 +225,9 @@ static int pps_gpio_remove(struct platform_device *pdev)
struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
pps_unregister_source(data->pps);
- if (data->echo_pin) {
- del_timer_sync(&data->echo_timer);
- /* reset echo pin in any case */
- gpiod_set_value(data->echo_pin, 0);
- }
+ del_timer_sync(&data->echo_timer);
+ /* reset echo pin in any case */
+ gpiod_set_value(data->echo_pin, 0);
dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
return 0;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 04633e5157e9..4834219497ee 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3179,9 +3179,10 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
}
}
-static void iscsi_start_session_recovery(struct iscsi_session *session,
- struct iscsi_conn *conn, int flag)
+void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
int old_stop_stage;
mutex_lock(&session->eh_mutex);
@@ -3239,27 +3240,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
-
-void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
-
- switch (flag) {
- case STOP_CONN_RECOVER:
- cls_conn->state = ISCSI_CONN_FAILED;
- break;
- case STOP_CONN_TERM:
- cls_conn->state = ISCSI_CONN_DOWN;
- break;
- default:
- iscsi_conn_printk(KERN_ERR, conn,
- "invalid stop flag %d\n", flag);
- return;
- }
-
- iscsi_start_session_recovery(session, conn, flag);
-}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);
int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 024e5a550759..8b9a39077dba 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
+ task->data_dir = qc->dma_dir;
+ } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+ task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
- }
-
- if (qc->tf.protocol == ATA_PROT_NODATA)
- task->data_dir = DMA_NONE;
- else
task->data_dir = qc->dma_dir;
+ }
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
task->task_state_flags = SAS_TASK_STATE_PENDING;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4bf62b007a0..441f0152193f 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2474,10 +2474,22 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
* it works.
*/
mutex_lock(&conn_mutex);
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ conn->state = ISCSI_CONN_FAILED;
+ break;
+ case STOP_CONN_TERM:
+ conn->state = ISCSI_CONN_DOWN;
+ break;
+ default:
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "invalid stop flag %d\n", flag);
+ goto unlock;
+ }
+
conn->transport->stop_conn(conn, flag);
- conn->state = ISCSI_CONN_DOWN;
+unlock:
mutex_unlock(&conn_mutex);
-
}
static void stop_conn_work_fn(struct work_struct *work)
@@ -2968,7 +2980,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
mutex_lock(&conn->ep_mutex);
conn->ep = NULL;
mutex_unlock(&conn->ep_mutex);
- conn->state = ISCSI_CONN_DOWN;
+ conn->state = ISCSI_CONN_FAILED;
}
transport->ep_disconnect(ep);
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index a14684ffe4c1..ca4f4ca413f1 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -179,6 +179,21 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
return 0;
}
+static bool __init intc_map(struct irq_domain *domain, int irq)
+{
+ if (!irq_to_desc(irq) && irq_alloc_desc_at(irq, NUMA_NO_NODE) != irq) {
+ pr_err("uname to allocate IRQ %d\n", irq);
+ return false;
+ }
+
+ if (irq_domain_associate(domain, irq, irq)) {
+ pr_err("domain association failure\n");
+ return false;
+ }
+
+ return true;
+}
+
int __init register_intc_controller(struct intc_desc *desc)
{
unsigned int i, k, smp;
@@ -311,24 +326,12 @@ int __init register_intc_controller(struct intc_desc *desc)
for (i = 0; i < hw->nr_vectors; i++) {
struct intc_vect *vect = hw->vectors + i;
unsigned int irq = evt2irq(vect->vect);
- int res;
if (!vect->enum_id)
continue;
- res = irq_create_identity_mapping(d->domain, irq);
- if (unlikely(res)) {
- if (res == -EEXIST) {
- res = irq_domain_associate(d->domain, irq, irq);
- if (unlikely(res)) {
- pr_err("domain association failure\n");
- continue;
- }
- } else {
- pr_err("can't identity map IRQ %d\n", irq);
- continue;
- }
- }
+ if (!intc_map(d->domain, irq))
+ continue;
intc_irq_xlate_set(irq, vect->enum_id, d);
intc_register_irq(desc, d, vect->enum_id, irq);
@@ -345,22 +348,8 @@ int __init register_intc_controller(struct intc_desc *desc)
* IRQ support, each vector still needs to have
* its own backing irq_desc.
*/
- res = irq_create_identity_mapping(d->domain, irq2);
- if (unlikely(res)) {
- if (res == -EEXIST) {
- res = irq_domain_associate(d->domain,
- irq2, irq2);
- if (unlikely(res)) {
- pr_err("domain association "
- "failure\n");
- continue;
- }
- } else {
- pr_err("can't identity map IRQ %d\n",
- irq);
- continue;
- }
- }
+ if (!intc_map(d->domain, irq2))
+ continue;
vect2->enum_id = 0;
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 1fd29f93ff6d..5bdfb1565c14 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -756,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
int i, err;
const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+ if (has_acpi_companion(se->dev))
+ return 0;
+
for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
if (!icc_names[i])
continue;
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index bf1e250d50dd..986776787b9e 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -20,7 +20,7 @@ soundwire-cadence-y := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
#Intel driver
-soundwire-intel-y := intel.o intel_init.o
+soundwire-intel-y := intel.o intel_init.o dmi-quirks.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
#Qualcomm driver
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 46885429928a..a9e0aa72654d 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -44,13 +44,13 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
}
ret = sdw_get_id(bus);
- if (ret) {
+ if (ret < 0) {
dev_err(parent, "Failed to get bus id\n");
return ret;
}
ret = sdw_master_device_add(bus, parent, fwnode);
- if (ret) {
+ if (ret < 0) {
dev_err(parent, "Failed to add master device at link %d\n",
bus->link_id);
return ret;
@@ -121,7 +121,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
else
ret = -ENOTSUPP; /* No ACPI/DT so error out */
- if (ret) {
+ if (ret < 0) {
dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
return ret;
}
@@ -422,7 +422,7 @@ sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
SDW_MSG_FLAG_READ, &buf);
- if (ret)
+ if (ret < 0)
return ret;
ret = sdw_transfer(bus, &msg);
@@ -440,7 +440,7 @@ sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
SDW_MSG_FLAG_WRITE, &value);
- if (ret)
+ if (ret < 0)
return ret;
return sdw_transfer(bus, &msg);
@@ -454,7 +454,7 @@ int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
SDW_MSG_FLAG_READ, &buf);
- if (ret)
+ if (ret < 0)
return ret;
ret = sdw_transfer_unlocked(bus, &msg);
@@ -472,7 +472,7 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
SDW_MSG_FLAG_WRITE, &value);
- if (ret)
+ if (ret < 0)
return ret;
return sdw_transfer_unlocked(bus, &msg);
@@ -593,7 +593,7 @@ EXPORT_SYMBOL(sdw_write);
/* called with bus_lock held */
static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
{
- struct sdw_slave *slave = NULL;
+ struct sdw_slave *slave;
list_for_each_entry(slave, &bus->slaves, node) {
if (slave->dev_num == i)
@@ -603,7 +603,7 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
return NULL;
}
-static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
+int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
if (slave->id.mfg_id != id.mfg_id ||
slave->id.part_id != id.part_id ||
@@ -614,6 +614,7 @@ static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
return 0;
}
+EXPORT_SYMBOL(sdw_compare_devid);
/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
@@ -698,6 +699,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
}
+EXPORT_SYMBOL(sdw_extract_slave_id);
static int sdw_program_device_num(struct sdw_bus *bus)
{
@@ -705,7 +707,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
struct sdw_slave *slave, *_s;
struct sdw_slave_id id;
struct sdw_msg msg;
- bool found = false;
+ bool found;
int count = 0, ret;
u64 addr;
@@ -737,6 +739,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
sdw_extract_slave_id(bus, addr, &id);
+ found = false;
/* Now compare with entries */
list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
if (sdw_compare_devid(slave, id) == 0) {
@@ -749,7 +752,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
* dev_num
*/
ret = sdw_assign_device_num(slave);
- if (ret) {
+ if (ret < 0) {
dev_err(bus->dev,
"Assign dev_num failed:%d\n",
ret);
@@ -875,14 +878,18 @@ static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
if (wake_en)
val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
} else {
- val = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
-
+ ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
+ if (ret < 0) {
+ dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
+ return ret;
+ }
+ val = ret;
val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
}
ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
- if (ret != 0)
+ if (ret < 0)
dev_err(&slave->dev,
"Clock Stop prepare failed for slave: %d", ret);
@@ -895,11 +902,15 @@ static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
int val;
do {
- val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT) &
- SDW_SCP_STAT_CLK_STP_NF;
+ val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
+ if (val < 0) {
+ dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
+ return val;
+ }
+ val &= SDW_SCP_STAT_CLK_STP_NF;
if (!val) {
- dev_info(bus->dev, "clock stop prep/de-prep done slave:%d",
- dev_num);
+ dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d",
+ dev_num);
return 0;
}
@@ -1253,6 +1264,7 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
static int sdw_initialize_slave(struct sdw_slave *slave)
{
struct sdw_slave_prop *prop = &slave->prop;
+ int status;
int ret;
u8 val;
@@ -1260,6 +1272,44 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
if (ret < 0)
return ret;
+ if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
+ /* Clear bus clash interrupt before enabling interrupt mask */
+ status = sdw_read_no_pm(slave, SDW_SCP_INT1);
+ if (status < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
+ return status;
+ }
+ if (status & SDW_SCP_INT1_BUS_CLASH) {
+ dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
+ ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+ if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
+ !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
+ /* Clear parity interrupt before enabling interrupt mask */
+ status = sdw_read_no_pm(slave, SDW_SCP_INT1);
+ if (status < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
+ return status;
+ }
+ if (status & SDW_SCP_INT1_PARITY) {
+ dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
+ ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+
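
The two clear-before-mask sequences above follow a write-1-to-clear convention: stale BUS_CLASH/PARITY bits are cleared before the interrupt mask is enabled so they cannot fire spuriously. A userspace model of that sequence, with simulated register behaviour and hypothetical bit positions:

#include <stdint.h>
#include <stdio.h>

#define INT1_BUS_CLASH (1u << 3)	/* hypothetical bit positions */
#define INT1_PARITY    (1u << 5)

static uint32_t scp_int1 = INT1_BUS_CLASH;	/* stale status at probe */

static uint32_t reg_read(void) { return scp_int1; }
static void reg_write(uint32_t v) { scp_int1 &= ~v; }	/* W1C semantics */

int main(void)
{
	uint32_t status = reg_read();

	if (status & INT1_BUS_CLASH) {
		printf("stale bus clash, clearing before enabling mask\n");
		reg_write(INT1_BUS_CLASH);
	}
	printf("SCP_INT1 now 0x%x\n", reg_read());
	return 0;
}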
/*
* Set SCP_INT1_MASK register, typically bus clash and
* implementation-defined interrupt mask. The Parity detection
@@ -1589,7 +1639,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
if (ret < 0) {
dev_err(&slave->dev,
- "SDW_SCP_INT1 read failed:%d\n", ret);
+ "SDW_SCP_INT1 recheck read failed:%d\n", ret);
goto io_err;
}
_buf = ret;
@@ -1597,7 +1647,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, _buf2);
if (ret < 0) {
dev_err(&slave->dev,
- "SDW_SCP_INT2/3 read failed:%d\n", ret);
+ "SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
goto io_err;
}
@@ -1605,7 +1655,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
ret = sdw_read_no_pm(slave, SDW_DP0_INT);
if (ret < 0) {
dev_err(&slave->dev,
- "SDW_DP0_INT read failed:%d\n", ret);
+ "SDW_DP0_INT recheck read failed:%d\n", ret);
goto io_err;
}
sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
@@ -1701,7 +1751,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (status[0] == SDW_SLAVE_ATTACHED) {
dev_dbg(bus->dev, "Slave attached, programming device number\n");
ret = sdw_program_device_num(bus);
- if (ret)
+ if (ret < 0)
dev_err(bus->dev, "Slave attach failed: %d\n", ret);
/*
* programming a device number will have side effects,
@@ -1735,7 +1785,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
case SDW_SLAVE_ALERT:
ret = sdw_handle_slave_alerts(slave);
- if (ret)
+ if (ret < 0)
dev_err(&slave->dev,
"Slave %d alert handling failed: %d\n",
i, ret);
@@ -1754,7 +1804,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
attached_initializing = true;
ret = sdw_initialize_slave(slave);
- if (ret)
+ if (ret < 0)
dev_err(&slave->dev,
"Slave %d initialization failed: %d\n",
i, ret);
@@ -1768,7 +1818,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
}
ret = sdw_update_slave_status(slave, status[i]);
- if (ret)
+ if (ret < 0)
dev_err(&slave->dev,
"Update Slave status failed:%d\n", ret);
if (attached_initializing) {
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 2e049d39c6e5..40354469860a 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -7,6 +7,8 @@
#define DEFAULT_BANK_SWITCH_TIMEOUT 3000
#define DEFAULT_PROBE_TIMEOUT 2000
+u64 sdw_dmi_override_adr(struct sdw_bus *bus, u64 addr);
+
#if IS_ENABLED(CONFIG_ACPI)
int sdw_acpi_find_slaves(struct sdw_bus *bus);
#else
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 575b9bad99d5..893296f3fe39 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -82,6 +82,7 @@ static int sdw_drv_probe(struct device *dev)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
const struct sdw_device_id *id;
+ const char *name;
int ret;
/*
@@ -108,7 +109,10 @@ static int sdw_drv_probe(struct device *dev)
ret = drv->probe(slave, id);
if (ret) {
- dev_err(dev, "Probe of %s failed: %d\n", drv->name, ret);
+ name = drv->name;
+ if (!name)
+ name = drv->driver.name;
+ dev_err(dev, "Probe of %s failed: %d\n", name, ret);
dev_pm_domain_detach(dev, false);
return ret;
}
@@ -174,11 +178,16 @@ static void sdw_drv_shutdown(struct device *dev)
*/
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
{
+ const char *name;
+
drv->driver.bus = &sdw_bus_type;
if (!drv->probe) {
- pr_err("driver %s didn't provide SDW probe routine\n",
- drv->name);
+ name = drv->name;
+ if (!name)
+ name = drv->driver.name;
+
+ pr_err("driver %s didn't provide SDW probe routine\n", name);
return -EINVAL;
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index d05442e646a3..192dac10f0c2 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -905,7 +905,7 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
EXPORT_SYMBOL(sdw_cdns_irq);
/**
- * To update slave status in a work since we will need to handle
+ * cdns_update_slave_status_work - update slave status in a work since we will need to handle
* other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave
* process.
* @work: cdns worker thread
@@ -968,7 +968,7 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
EXPORT_SYMBOL(sdw_cdns_exit_reset);
/**
- * sdw_cdns_enable_slave_interrupt() - Enable SDW slave interrupts
+ * cdns_enable_slave_interrupts() - Enable SDW slave interrupts
* @cdns: Cadence instance
* @state: boolean for true/false
*/
@@ -1450,10 +1450,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
}
/* Prepare slaves for clock stop */
- ret = sdw_bus_prep_clk_stop(&cdns->bus);
- if (ret < 0) {
- dev_err(cdns->dev, "prepare clock stop failed %d", ret);
- return ret;
+ if (slave_present) {
+ ret = sdw_bus_prep_clk_stop(&cdns->bus);
+ if (ret < 0 && ret != -ENODATA) {
+ dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
+ return ret;
+ }
}
/*
@@ -1462,7 +1464,7 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
*/
ret = sdw_bus_clk_stop(&cdns->bus);
if (ret < 0 && slave_present && ret != -ENODATA) {
- dev_err(cdns->dev, "bus clock stop failed %d", ret);
+ dev_err(cdns->dev, "bus clock stop failed %d\n", ret);
return ret;
}
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
new file mode 100644
index 000000000000..82061c1d9835
--- /dev/null
+++ b/drivers/soundwire/dmi-quirks.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2021 Intel Corporation.
+
+/*
+ * Soundwire DMI quirks
+ */
+
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+
+struct adr_remap {
+ u64 adr;
+ u64 remapped_adr;
+};
+
+/*
+ * HP Spectre x360 Convertible devices do not expose the correct _ADR
+ * in the DSDT.
+ * Remap the bad _ADR values to the ones reported by hardware.
+ */
+static const struct adr_remap hp_spectre_360[] = {
+ {
+ 0x000010025D070100,
+ 0x000020025D071100
+ },
+ {
+ 0x000110025d070100,
+ 0x000120025D130800
+ },
+ {}
+};
+
+/*
+ * The initial version of the Dell SKU 0A3E did not expose the devices
+ * on the correct links.
+ */
+static const struct adr_remap dell_sku_0A3E[] = {
+ /* rt715 on link0 */
+ {
+ 0x00020025d071100,
+ 0x00021025d071500
+ },
+ /* rt711 on link1 */
+ {
+ 0x000120025d130800,
+ 0x000120025d071100,
+ },
+ /* rt1308 on link2 */
+ {
+ 0x000220025d071500,
+ 0x000220025d130800
+ },
+ {}
+};
+
+static const struct dmi_system_id adr_remap_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"),
+ },
+ .driver_data = (void *)hp_spectre_360,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
+ },
+ .driver_data = (void *)dell_sku_0A3E,
+ },
+ {}
+};
+
+u64 sdw_dmi_override_adr(struct sdw_bus *bus, u64 addr)
+{
+ const struct dmi_system_id *dmi_id;
+
+ /* check if any address remap quirk applies */
+ dmi_id = dmi_first_match(adr_remap_quirk_table);
+ if (dmi_id) {
+		struct adr_remap *map;
+
+		for (map = dmi_id->driver_data; map->adr; map++) {
+ if (map->adr == addr) {
+ dev_dbg(bus->dev, "remapped _ADR 0x%llx as 0x%llx\n",
+ addr, map->remapped_adr);
+ addr = map->remapped_adr;
+ break;
+ }
+ }
+ }
+
+ return addr;
+}
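
Supporting another broken platform follows the same two-step pattern: a new
adr_remap table, plus a dmi_system_id entry pointing at it. A hedged sketch;
the _ADR values and DMI strings below are placeholders, not real hardware
data:

	/*
	 * Hypothetical example only: remap one bad _ADR on some future
	 * platform.  Addresses and DMI strings are placeholders.
	 */
	static const struct adr_remap example_platform[] = {
		{
			0x000030025d071100,	/* _ADR in the DSDT (placeholder) */
			0x000030025d071101	/* _ADR seen on the wire (placeholder) */
		},
		{}
	};

	/* ...and the matching entry to append to adr_remap_quirk_table: */
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
		.driver_data = (void *)example_platform,
	},

Note that dmi_first_match() returns the first entry whose DMI strings all
match, so more specific entries should precede generic ones.
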
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index 0bdef38c9a30..84d129587084 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -62,7 +62,7 @@ static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
sample_int, port_bo, port_bo >> 8,
t_data->hstart,
t_data->hstop,
- (SDW_BLK_GRP_CNT_1 * ch), 0x0);
+ SDW_BLK_PKG_PER_PORT, 0x0);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -95,7 +95,7 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
struct sdw_bus *bus = m_rt->bus;
struct sdw_bus_params *b_params = &bus->params;
int sample_int, hstart = 0;
- unsigned int rate, bps, ch, no_ch;
+ unsigned int rate, bps, ch;
rate = m_rt->stream->params.rate;
bps = m_rt->stream->params.bps;
@@ -110,12 +110,11 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
t_data.hstart = hstart;
list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
- no_ch = sdw_ch_mask_to_ch(p_rt->ch_mask);
sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
false, SDW_BLK_GRP_CNT_1, sample_int,
port_bo, port_bo >> 8, hstart, hstop,
- (SDW_BLK_GRP_CNT_1 * no_ch), 0x0);
+ SDW_BLK_PKG_PER_PORT, 0x0);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -143,7 +142,7 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
static void _sdw_compute_port_params(struct sdw_bus *bus,
struct sdw_group_params *params, int count)
{
- struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_master_runtime *m_rt;
int hstop = bus->params.col - 1;
int block_offset, port_bo, i;
@@ -169,7 +168,7 @@ static int sdw_compute_group_params(struct sdw_bus *bus,
struct sdw_group_params *params,
int *rates, int count)
{
- struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_master_runtime *m_rt;
int sel_col = bus->params.col;
unsigned int rate, bps, ch;
int i, column_needed = 0;
@@ -406,14 +405,14 @@ int sdw_compute_params(struct sdw_bus *bus)
/* Computes clock frequency, frame shape and frame frequency */
ret = sdw_compute_bus_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Compute bus params failed: %d", ret);
+ dev_err(bus->dev, "Compute bus params failed: %d\n", ret);
return ret;
}
/* Compute transport and port params */
ret = sdw_compute_port_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Compute transport params failed: %d", ret);
+ dev_err(bus->dev, "Compute transport params failed: %d\n", ret);
return ret;
}
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index a2d5cdaa9998..fd95f94630b1 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -561,8 +561,6 @@ static int intel_link_power_down(struct sdw_intel *sdw)
ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
}
- link_control = intel_readl(shim, SDW_SHIM_LCTL);
-
mutex_unlock(sdw->link_res->shim_lock);
if (ret < 0) {
@@ -997,7 +995,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma) {
- dev_err(dai->dev, "failed to get dma data in %s",
+ dev_err(dai->dev, "failed to get dma data in %s\n",
__func__);
return -EIO;
}
@@ -1061,7 +1059,7 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
ret = intel_free_stream(sdw, substream, dai, sdw->instance);
if (ret < 0) {
- dev_err(dai->dev, "intel_free_stream: failed %d", ret);
+ dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
return ret;
}
@@ -1286,6 +1284,9 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
prop->hw_disabled = true;
+ prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
+ SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
+
return 0;
}
@@ -1302,6 +1303,7 @@ static int intel_prop_read(struct sdw_bus *bus)
static struct sdw_master_ops sdw_intel_ops = {
.read_prop = sdw_master_read_prop,
+ .override_adr = sdw_dmi_override_adr,
.xfer_msg = cdns_xfer_msg,
.xfer_msg_defer = cdns_xfer_msg_defer,
.reset_page_addr = cdns_reset_page_addr,
@@ -1630,7 +1632,7 @@ static int __maybe_unused intel_suspend(struct device *dev)
ret = intel_link_power_down(sdw);
if (ret) {
- dev_err(dev, "Link power down failed: %d", ret);
+ dev_err(dev, "Link power down failed: %d\n", ret);
return ret;
}
@@ -1665,7 +1667,7 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
ret = intel_link_power_down(sdw);
if (ret) {
- dev_err(dev, "Link power down failed: %d", ret);
+ dev_err(dev, "Link power down failed: %d\n", ret);
return ret;
}
@@ -1689,7 +1691,7 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
ret = intel_link_power_down(sdw);
if (ret) {
- dev_err(dev, "Link power down failed: %d", ret);
+ dev_err(dev, "Link power down failed: %d\n", ret);
return ret;
}
@@ -1738,7 +1740,7 @@ static int __maybe_unused intel_resume(struct device *dev)
ret = intel_init(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d", __func__, ret);
+ dev_err(dev, "%s failed: %d\n", __func__, ret);
return ret;
}
@@ -1822,7 +1824,7 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
ret = intel_init(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d", __func__, ret);
+ dev_err(dev, "%s failed: %d\n", __func__, ret);
return ret;
}
@@ -1867,7 +1869,7 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
ret = intel_init(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d", __func__, ret);
+ dev_err(dev, "%s failed: %d\n", __func__, ret);
return ret;
}
@@ -1945,7 +1947,7 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
ret = intel_init(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d", __func__, ret);
+ dev_err(dev, "%s failed: %d\n", __func__, ret);
return ret;
}
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 05b726cdfebc..30ce95ec2d70 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -178,6 +178,15 @@ static struct sdw_intel_ctx
link->pdev = pdev;
link->cdns = platform_get_drvdata(pdev);
+ if (!link->cdns) {
+ dev_err(&adev->dev, "failed to get link->cdns\n");
+ /*
+			 * i will be decremented in the err label, but
+			 * intel_link_dev_unregister must still run for this
+			 * ldev, so increment i now
+ */
+ i++;
+ goto err;
+ }
list_add_tail(&link->list, &ctx->link_list);
bus = &link->cdns->bus;
/* Calculate number of slaves */
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 6d22df01f354..2827085a323b 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -24,28 +24,50 @@
#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1)
#define SWRM_COMP_CFG_ENABLE_MSK BIT(0)
#define SWRM_COMP_PARAMS 0x100
+#define SWRM_COMP_PARAMS_WR_FIFO_DEPTH GENMASK(14, 10)
+#define SWRM_COMP_PARAMS_RD_FIFO_DEPTH GENMASK(19, 15)
#define SWRM_COMP_PARAMS_DOUT_PORTS_MASK GENMASK(4, 0)
#define SWRM_COMP_PARAMS_DIN_PORTS_MASK GENMASK(9, 5)
#define SWRM_INTERRUPT_STATUS 0x200
#define SWRM_INTERRUPT_STATUS_RMSK GENMASK(16, 0)
+#define SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ BIT(0)
#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED BIT(1)
#define SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS BIT(2)
+#define SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET BIT(3)
+#define SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW BIT(4)
+#define SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW BIT(5)
+#define SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW BIT(6)
#define SWRM_INTERRUPT_STATUS_CMD_ERROR BIT(7)
+#define SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION BIT(8)
+#define SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH BIT(9)
#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED BIT(10)
+#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2 BIT(13)
+#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2 BIT(14)
+#define SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP BIT(16)
+#define SWRM_INTERRUPT_MAX 17
#define SWRM_INTERRUPT_MASK_ADDR 0x204
#define SWRM_INTERRUPT_CLEAR 0x208
#define SWRM_INTERRUPT_CPU_EN 0x210
#define SWRM_CMD_FIFO_WR_CMD 0x300
#define SWRM_CMD_FIFO_RD_CMD 0x304
#define SWRM_CMD_FIFO_CMD 0x308
+#define SWRM_CMD_FIFO_FLUSH 0x1
#define SWRM_CMD_FIFO_STATUS 0x30C
+#define SWRM_RD_CMD_FIFO_CNT_MASK GENMASK(20, 16)
+#define SWRM_WR_CMD_FIFO_CNT_MASK GENMASK(12, 8)
#define SWRM_CMD_FIFO_CFG_ADDR 0x314
+#define SWRM_CONTINUE_EXEC_ON_CMD_IGNORE BIT(31)
#define SWRM_RD_WR_CMD_RETRIES 0x7
#define SWRM_CMD_FIFO_RD_FIFO_ADDR 0x318
+#define SWRM_RD_FIFO_CMD_ID_MASK GENMASK(11, 8)
#define SWRM_ENUMERATOR_CFG_ADDR 0x500
+#define SWRM_ENUMERATOR_SLAVE_DEV_ID_1(m) (0x530 + 0x8 * (m))
+#define SWRM_ENUMERATOR_SLAVE_DEV_ID_2(m) (0x534 + 0x8 * (m))
#define SWRM_MCP_FRAME_CTRL_BANK_ADDR(m) (0x101C + 0x40 * (m))
#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK GENMASK(2, 0)
#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK GENMASK(7, 3)
+#define SWRM_MCP_BUS_CTRL 0x1044
+#define SWRM_MCP_BUS_CLK_START BIT(1)
#define SWRM_MCP_CFG_ADDR 0x1048
#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK GENMASK(21, 17)
#define SWRM_DEF_CMD_NO_PINGS 0x1f
@@ -53,8 +75,15 @@
#define SWRM_MCP_STATUS_BANK_NUM_MASK BIT(0)
#define SWRM_MCP_SLV_STATUS 0x1090
#define SWRM_MCP_SLV_STATUS_MASK GENMASK(1, 0)
+#define SWRM_MCP_SLV_STATUS_SZ 2
#define SWRM_DP_PORT_CTRL_BANK(n, m) (0x1124 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_PORT_CTRL_2_BANK(n, m) (0x1128 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_BLOCK_CTRL_1(n) (0x112C + 0x100 * (n - 1))
+#define SWRM_DP_BLOCK_CTRL2_BANK(n, m) (0x1130 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_PORT_HCTRL_BANK(n, m) (0x1134 + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DP_BLOCK_CTRL3_BANK(n, m) (0x1138 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DIN_DPn_PCM_PORT_CTRL(n) (0x1054 + 0x100 * (n - 1))
+
#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT 0x18
#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT 0x10
#define SWRM_DP_PORT_CTRL_OFFSET1_SHFT 0x08
@@ -69,16 +98,28 @@
#define SWRM_SPECIAL_CMD_ID 0xF
#define MAX_FREQ_NUM 1
#define TIMEOUT_MS (2 * HZ)
-#define QCOM_SWRM_MAX_RD_LEN 0xf
+#define QCOM_SWRM_MAX_RD_LEN 0x1
#define QCOM_SDW_MAX_PORTS 14
#define DEFAULT_CLK_FREQ 9600000
#define SWRM_MAX_DAIS 0xF
+#define SWR_INVALID_PARAM 0xFF
+#define SWR_HSTOP_MAX_VAL 0xF
+#define SWR_HSTART_MIN_VAL 0x0
+#define SWR_BROADCAST_CMD_ID 0x0F
+#define SWR_MAX_CMD_ID 14
+#define MAX_FIFO_RD_RETRY 3
+#define SWR_OVERFLOW_RETRY_COUNT 30
struct qcom_swrm_port_config {
u8 si;
u8 off1;
u8 off2;
u8 bp_mode;
+ u8 hstart;
+ u8 hstop;
+ u8 word_length;
+ u8 blk_group_count;
+ u8 lane_control;
};
struct qcom_swrm_ctrl {
@@ -86,10 +127,9 @@ struct qcom_swrm_ctrl {
struct device *dev;
struct regmap *regmap;
void __iomem *mmio;
- struct completion *comp;
+ struct completion broadcast;
+ struct completion enumeration;
struct work_struct slave_work;
- /* read/write lock */
- spinlock_t comp_lock;
/* Port alloc/free lock */
struct mutex port_lock;
struct clk *hclk;
@@ -103,11 +143,17 @@ struct qcom_swrm_ctrl {
int rows_index;
unsigned long dout_port_mask;
unsigned long din_port_mask;
+ u32 intr_mask;
+ u8 rcmd_id;
+ u8 wcmd_id;
struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
enum sdw_slave_status status[SDW_MAX_DEVICES];
int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
+ u32 slave_status;
+ u32 wr_fifo_depth;
+ u32 rd_fifo_depth;
};
struct qcom_swrm_data {
@@ -181,77 +227,180 @@ static int qcom_swrm_cpu_reg_write(struct qcom_swrm_ctrl *ctrl, int reg,
return SDW_CMD_OK;
}
-static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
+static u32 swrm_get_packed_reg_val(u8 *cmd_id, u8 cmd_data,
+ u8 dev_addr, u16 reg_addr)
+{
+ u32 val;
+ u8 id = *cmd_id;
+
+ if (id != SWR_BROADCAST_CMD_ID) {
+ if (id < SWR_MAX_CMD_ID)
+ id += 1;
+ else
+ id = 0;
+ *cmd_id = id;
+ }
+ val = SWRM_REG_VAL_PACK(cmd_data, dev_addr, id, reg_addr);
+
+ return val;
+}
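
The helper above rotates the command id so each outstanding command carries a
distinct tag: non-broadcast ids advance through 1..SWR_MAX_CMD_ID and then
wrap to 0, while broadcasts always use SWR_BROADCAST_CMD_ID. A small sketch
of the rotation (SWRM_REG_VAL_PACK() is defined earlier in qcom.c and not
shown in this diff, so the packed bit layout is not repeated here):

	/* illustrative sequence, e.g. inside a bus transfer op */
	u8 id = 0;
	u32 v;

	v = swrm_get_packed_reg_val(&id, 0xAA, 1, 0x40);  /* id is now 1 */
	v = swrm_get_packed_reg_val(&id, 0xBB, 1, 0x44);  /* id is now 2 */
	/* ...after id reaches SWR_MAX_CMD_ID (14) it wraps back to 0 */

The read path later matches the id echoed in SWRM_CMD_FIFO_RD_FIFO_ADDR
(extracted via SWRM_RD_FIFO_CMD_ID_MASK) against rcmd_id.
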
+
+static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *swrm)
+{
+ u32 fifo_outstanding_data, value;
+ int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
+
+ do {
+ /* Check for fifo underflow during read */
+ swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ fifo_outstanding_data = FIELD_GET(SWRM_RD_CMD_FIFO_CNT_MASK, value);
+
+ /* Check if read data is available in read fifo */
+ if (fifo_outstanding_data > 0)
+ return 0;
+
+ usleep_range(500, 510);
+ } while (fifo_retry_count--);
+
+ if (fifo_outstanding_data == 0) {
+ dev_err_ratelimited(swrm->dev, "%s err read underflow\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int swrm_wait_for_wr_fifo_avail(struct qcom_swrm_ctrl *swrm)
+{
+ u32 fifo_outstanding_cmds, value;
+ int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
+
+ do {
+ /* Check for fifo overflow during write */
+ swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ fifo_outstanding_cmds = FIELD_GET(SWRM_WR_CMD_FIFO_CNT_MASK, value);
+
+ /* Check for space in write fifo before writing */
+ if (fifo_outstanding_cmds < swrm->wr_fifo_depth)
+ return 0;
+
+ usleep_range(500, 510);
+ } while (fifo_retry_count--);
+
+ if (fifo_outstanding_cmds == swrm->wr_fifo_depth) {
+ dev_err_ratelimited(swrm->dev, "%s err write overflow\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
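
Both helpers hand-roll the same bounded-poll idiom. A hedged alternative
sketch using read_poll_timeout() from <linux/iopoll.h>; swrm_wr_fifo_cnt() is
a hypothetical wrapper, needed because reg_read() returns a status code
rather than the polled value:

	/* hypothetical helper: return the outstanding write-FIFO count */
	static u32 swrm_wr_fifo_cnt(struct qcom_swrm_ctrl *swrm)
	{
		u32 val = 0;

		swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &val);
		return FIELD_GET(SWRM_WR_CMD_FIFO_CNT_MASK, val);
	}

	/* the poll loop then collapses to one macro invocation: */
	u32 cnt;
	int ret = read_poll_timeout(swrm_wr_fifo_cnt, cnt,
				    cnt < swrm->wr_fifo_depth,
				    500, 500 * SWR_OVERFLOW_RETRY_COUNT,
				    false, swrm);
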
+
+static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
u8 dev_addr, u16 reg_addr)
{
- DECLARE_COMPLETION_ONSTACK(comp);
- unsigned long flags;
u32 val;
- int ret;
+ int ret = 0;
+ u8 cmd_id = 0x0;
- spin_lock_irqsave(&ctrl->comp_lock, flags);
- ctrl->comp = &comp;
- spin_unlock_irqrestore(&ctrl->comp_lock, flags);
- val = SWRM_REG_VAL_PACK(cmd_data, dev_addr,
- SWRM_SPECIAL_CMD_ID, reg_addr);
- ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_WR_CMD, val);
- if (ret)
- goto err;
+ if (dev_addr == SDW_BROADCAST_DEV_NUM) {
+ cmd_id = SWR_BROADCAST_CMD_ID;
+ val = swrm_get_packed_reg_val(&cmd_id, cmd_data,
+ dev_addr, reg_addr);
+ } else {
+ val = swrm_get_packed_reg_val(&swrm->wcmd_id, cmd_data,
+ dev_addr, reg_addr);
+ }
- ret = wait_for_completion_timeout(ctrl->comp,
- msecs_to_jiffies(TIMEOUT_MS));
+ if (swrm_wait_for_wr_fifo_avail(swrm))
+ return SDW_CMD_FAIL_OTHER;
+
+	/* It's assumed the write succeeds, as we do not get any status back */
+ swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
+
+ /* version 1.3 or less */
+ if (swrm->version <= 0x01030000)
+ usleep_range(150, 155);
+
+ if (cmd_id == SWR_BROADCAST_CMD_ID) {
+		/*
+		 * Wait for the broadcast command to complete; the MSM
+		 * SoundWire variant signals completion through the
+		 * SPECIAL_CMD_ID_FINISHED interrupt.
+		 */
+ ret = wait_for_completion_timeout(&swrm->broadcast,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret)
+ ret = SDW_CMD_IGNORED;
+ else
+ ret = SDW_CMD_OK;
- if (!ret)
- ret = SDW_CMD_IGNORED;
- else
+ } else {
ret = SDW_CMD_OK;
-err:
- spin_lock_irqsave(&ctrl->comp_lock, flags);
- ctrl->comp = NULL;
- spin_unlock_irqrestore(&ctrl->comp_lock, flags);
-
+ }
return ret;
}
-static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *ctrl,
+static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
u8 dev_addr, u16 reg_addr,
u32 len, u8 *rval)
{
- int i, ret;
- u32 val;
- DECLARE_COMPLETION_ONSTACK(comp);
- unsigned long flags;
+ u32 cmd_data, cmd_id, val, retry_attempt = 0;
+
+ val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
+
+	/* let any previous FIFO read drain, to avoid overflowing the FIFO */
+	usleep_range(100, 105);
+	swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+	/* wait for the read command to complete, to avoid overflowing the FIFO */
+	usleep_range(250, 255);
+
+ if (swrm_wait_for_rd_fifo_avail(swrm))
+ return SDW_CMD_FAIL_OTHER;
+
+ do {
+ swrm->reg_read(swrm, SWRM_CMD_FIFO_RD_FIFO_ADDR, &cmd_data);
+ rval[0] = cmd_data & 0xFF;
+ cmd_id = FIELD_GET(SWRM_RD_FIFO_CMD_ID_MASK, cmd_data);
+
+ if (cmd_id != swrm->rcmd_id) {
+ if (retry_attempt < (MAX_FIFO_RD_RETRY - 1)) {
+ /* wait 500 us before retry on fifo read failure */
+ usleep_range(500, 505);
+ swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD,
+ SWRM_CMD_FIFO_FLUSH);
+ swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+ }
+ retry_attempt++;
+ } else {
+ return SDW_CMD_OK;
+ }
- spin_lock_irqsave(&ctrl->comp_lock, flags);
- ctrl->comp = &comp;
- spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+ } while (retry_attempt < MAX_FIFO_RD_RETRY);
- val = SWRM_REG_VAL_PACK(len, dev_addr, SWRM_SPECIAL_CMD_ID, reg_addr);
- ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_RD_CMD, val);
- if (ret)
- goto err;
+	dev_err(swrm->dev,
+		"failed to read fifo: reg: 0x%x, rcmd_id: 0x%x, dev_num: 0x%x, cmd_data: 0x%x\n",
+		reg_addr, swrm->rcmd_id, dev_addr, cmd_data);
- ret = wait_for_completion_timeout(ctrl->comp,
- msecs_to_jiffies(TIMEOUT_MS));
+ return SDW_CMD_IGNORED;
+}
- if (!ret) {
- ret = SDW_CMD_IGNORED;
- goto err;
- } else {
- ret = SDW_CMD_OK;
- }
+static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
+{
+ u32 val, status;
+ int dev_num;
- for (i = 0; i < len; i++) {
- ctrl->reg_read(ctrl, SWRM_CMD_FIFO_RD_FIFO_ADDR, &val);
- rval[i] = val & 0xFF;
- }
+ ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
-err:
- spin_lock_irqsave(&ctrl->comp_lock, flags);
- ctrl->comp = NULL;
- spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+ for (dev_num = 0; dev_num < SDW_MAX_DEVICES; dev_num++) {
+ status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
- return ret;
+ if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
+ ctrl->status[dev_num] = status;
+ return dev_num;
+ }
+ }
+
+ return -EINVAL;
}
static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
@@ -260,6 +409,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
int i;
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
+ ctrl->slave_status = val;
for (i = 0; i < SDW_MAX_DEVICES; i++) {
u32 s;
@@ -270,42 +420,188 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
}
}
-static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
+static void qcom_swrm_set_slave_dev_num(struct sdw_bus *bus,
+ struct sdw_slave *slave, int devnum)
{
- struct qcom_swrm_ctrl *ctrl = dev_id;
- u32 sts, value;
- unsigned long flags;
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ u32 status;
+
+ ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &status);
+ status = (status >> (devnum * SWRM_MCP_SLV_STATUS_SZ));
+ status &= SWRM_MCP_SLV_STATUS_MASK;
+
+ if (status == SDW_SLAVE_ATTACHED) {
+ if (slave)
+ slave->dev_num = devnum;
+ mutex_lock(&bus->bus_lock);
+ set_bit(devnum, bus->assigned);
+ mutex_unlock(&bus->bus_lock);
+ }
+}
- ctrl->reg_read(ctrl, SWRM_INTERRUPT_STATUS, &sts);
+static int qcom_swrm_enumerate(struct sdw_bus *bus)
+{
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ struct sdw_slave *slave, *_s;
+ struct sdw_slave_id id;
+ u32 val1, val2;
+ bool found;
+ u64 addr;
+ int i;
+	u8 *buf1 = (u8 *)&val1, *buf2 = (u8 *)&val2;
+
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+ /*SCP_Devid5 - Devid 4*/
+ ctrl->reg_read(ctrl, SWRM_ENUMERATOR_SLAVE_DEV_ID_1(i), &val1);
+
+ /*SCP_Devid3 - DevId 2 Devid 1 Devid 0*/
+ ctrl->reg_read(ctrl, SWRM_ENUMERATOR_SLAVE_DEV_ID_2(i), &val2);
+
+ if (!val1 && !val2)
+ break;
+
+ addr = buf2[1] | (buf2[0] << 8) | (buf1[3] << 16) |
+ ((u64)buf1[2] << 24) | ((u64)buf1[1] << 32) |
+ ((u64)buf1[0] << 40);
+
+ sdw_extract_slave_id(bus, addr, &id);
+ found = false;
+ /* Now compare with entries */
+ list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
+ if (sdw_compare_devid(slave, id) == 0) {
+ qcom_swrm_set_slave_dev_num(bus, slave, i);
+ found = true;
+ break;
+ }
+ }
- if (sts & SWRM_INTERRUPT_STATUS_CMD_ERROR) {
- ctrl->reg_read(ctrl, SWRM_CMD_FIFO_STATUS, &value);
- dev_err_ratelimited(ctrl->dev,
- "CMD error, fifo status 0x%x\n",
- value);
- ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
+ if (!found) {
+ qcom_swrm_set_slave_dev_num(bus, NULL, i);
+ sdw_slave_add(bus, &id, NULL);
+ }
}
- if ((sts & SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED) ||
- sts & SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS)
- schedule_work(&ctrl->slave_work);
-
- /**
- * clear the interrupt before complete() is called, as complete can
- * schedule new read/writes which require interrupts, clearing the
- * interrupt would avoid missing interrupts in such cases.
- */
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR, sts);
-
- if (sts & SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED) {
- spin_lock_irqsave(&ctrl->comp_lock, flags);
- if (ctrl->comp)
- complete(ctrl->comp);
- spin_unlock_irqrestore(&ctrl->comp_lock, flags);
- }
+ complete(&ctrl->enumeration);
+ return 0;
+}
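
The shifts above reassemble six bytes read from the two DEVID registers into
a 48-bit _ADR-style value. A standalone worked example with placeholder
register contents, on a little-endian host as the byte aliasing assumes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t val1 = 0x11223344, val2 = 0x00005566; /* placeholders */
		uint8_t *buf1 = (uint8_t *)&val1, *buf2 = (uint8_t *)&val2;
		uint64_t addr;

		/* same assembly as qcom_swrm_enumerate() above */
		addr = buf2[1] | (buf2[0] << 8) | (buf1[3] << 16) |
		       ((uint64_t)buf1[2] << 24) | ((uint64_t)buf1[1] << 32) |
		       ((uint64_t)buf1[0] << 40);

		/* prints addr = 0x443322116655 */
		printf("addr = 0x%012llx\n", (unsigned long long)addr);
		return 0;
	}
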
+
+static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
+{
+ struct qcom_swrm_ctrl *swrm = dev_id;
+ u32 value, intr_sts, intr_sts_masked, slave_status;
+ u32 i;
+ int devnum;
+ int ret = IRQ_HANDLED;
+
+ swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
+ intr_sts_masked = intr_sts & swrm->intr_mask;
+
+ do {
+ for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
+ value = intr_sts_masked & BIT(i);
+ if (!value)
+ continue;
+
+ switch (value) {
+ case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
+ devnum = qcom_swrm_get_alert_slave_dev_num(swrm);
+ if (devnum < 0) {
+ dev_err_ratelimited(swrm->dev,
+							    "no slave alert found, spurious interrupt\n");
+ } else {
+ sdw_handle_slave_status(&swrm->bus, swrm->status);
+ }
- return IRQ_HANDLED;
+ break;
+ case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
+ case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
+ dev_err_ratelimited(swrm->dev, "%s: SWR new slave attached\n",
+ __func__);
+ swrm->reg_read(swrm, SWRM_MCP_SLV_STATUS, &slave_status);
+ if (swrm->slave_status == slave_status) {
+ dev_err(swrm->dev, "Slave status not changed %x\n",
+ slave_status);
+ } else {
+ qcom_swrm_get_device_status(swrm);
+ qcom_swrm_enumerate(&swrm->bus);
+ sdw_handle_slave_status(&swrm->bus, swrm->status);
+ }
+ break;
+ case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
+ dev_err_ratelimited(swrm->dev,
+						    "%s: SWR bus clash detected\n",
+ __func__);
+ swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
+ swrm->reg_write(swrm, SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ break;
+ case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
+ swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ dev_err_ratelimited(swrm->dev,
+ "%s: SWR read FIFO overflow fifo status 0x%x\n",
+ __func__, value);
+ break;
+ case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
+ swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ dev_err_ratelimited(swrm->dev,
+ "%s: SWR read FIFO underflow fifo status 0x%x\n",
+ __func__, value);
+ break;
+ case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
+ swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ dev_err(swrm->dev,
+ "%s: SWR write FIFO overflow fifo status %x\n",
+ __func__, value);
+				swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, SWRM_CMD_FIFO_FLUSH);
+ break;
+ case SWRM_INTERRUPT_STATUS_CMD_ERROR:
+ swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ dev_err_ratelimited(swrm->dev,
+ "%s: SWR CMD error, fifo status 0x%x, flushing fifo\n",
+ __func__, value);
+				swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, SWRM_CMD_FIFO_FLUSH);
+ break;
+ case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
+ dev_err_ratelimited(swrm->dev,
+ "%s: SWR Port collision detected\n",
+ __func__);
+ swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
+ swrm->reg_write(swrm,
+ SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ break;
+ case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
+ dev_err_ratelimited(swrm->dev,
+ "%s: SWR read enable valid mismatch\n",
+ __func__);
+ swrm->intr_mask &=
+ ~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
+ swrm->reg_write(swrm,
+ SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ break;
+ case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
+ complete(&swrm->broadcast);
+ break;
+ case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2:
+ break;
+ case SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2:
+ break;
+ case SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP:
+ break;
+ default:
+ dev_err_ratelimited(swrm->dev,
+ "%s: SWR unknown interrupt value: %d\n",
+ __func__, value);
+ ret = IRQ_NONE;
+ break;
+ }
+ }
+ swrm->reg_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
+ swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
+ intr_sts_masked = intr_sts & swrm->intr_mask;
+ } while (intr_sts_masked);
+
+ return ret;
}
+
static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
{
u32 val;
@@ -316,9 +612,10 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_write(ctrl, SWRM_MCP_FRAME_CTRL_BANK_ADDR(0), val);
- /* Disable Auto enumeration */
- ctrl->reg_write(ctrl, SWRM_ENUMERATOR_CFG_ADDR, 0);
+ /* Enable Auto enumeration */
+ ctrl->reg_write(ctrl, SWRM_ENUMERATOR_CFG_ADDR, 1);
+ ctrl->intr_mask = SWRM_INTERRUPT_STATUS_RMSK;
/* Mask soundwire interrupts */
ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR,
SWRM_INTERRUPT_STATUS_RMSK);
@@ -328,8 +625,17 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK);
ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
+ ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
/* Configure number of retries of a read/write cmd */
- ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR, SWRM_RD_WR_CMD_RETRIES);
+ if (ctrl->version > 0x01050001) {
+		/* Only for versions newer than 1.5.1 */
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
+ SWRM_RD_WR_CMD_RETRIES |
+ SWRM_CONTINUE_EXEC_ON_CMD_IGNORE);
+ } else {
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
+ SWRM_RD_WR_CMD_RETRIES);
+ }
/* Set IRQ to PULSE */
ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
@@ -341,6 +647,11 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN,
SWRM_INTERRUPT_STATUS_RMSK);
}
+ ctrl->slave_status = 0;
+ ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
+ ctrl->rd_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_RD_FIFO_DEPTH, val);
+ ctrl->wr_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_WR_FIFO_DEPTH, val);
+
return 0;
}
@@ -396,8 +707,11 @@ static int qcom_swrm_port_params(struct sdw_bus *bus,
struct sdw_port_params *p_params,
unsigned int bank)
{
- /* TBD */
- return 0;
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+
+ return ctrl->reg_write(ctrl, SWRM_DP_BLOCK_CTRL_1(p_params->num),
+ p_params->bps - 1);
}
static int qcom_swrm_transport_params(struct sdw_bus *bus,
@@ -405,22 +719,57 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus,
enum sdw_reg_bank bank)
{
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ struct qcom_swrm_port_config *pcfg;
u32 value;
int reg = SWRM_DP_PORT_CTRL_BANK((params->port_num), bank);
int ret;
- value = params->offset1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
- value |= params->offset2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
- value |= params->sample_interval - 1;
+ pcfg = &ctrl->pconfig[params->port_num];
+
+ value = pcfg->off1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
+ value |= pcfg->off2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
+ value |= pcfg->si;
ret = ctrl->reg_write(ctrl, reg, value);
+ if (ret)
+ goto err;
- if (!ret && params->blk_pkg_mode) {
- reg = SWRM_DP_BLOCK_CTRL3_BANK(params->port_num, bank);
+ if (pcfg->lane_control != SWR_INVALID_PARAM) {
+ reg = SWRM_DP_PORT_CTRL_2_BANK(params->port_num, bank);
+ value = pcfg->lane_control;
+ ret = ctrl->reg_write(ctrl, reg, value);
+ if (ret)
+ goto err;
+ }
+
+ if (pcfg->blk_group_count != SWR_INVALID_PARAM) {
+ reg = SWRM_DP_BLOCK_CTRL2_BANK(params->port_num, bank);
+ value = pcfg->blk_group_count;
+ ret = ctrl->reg_write(ctrl, reg, value);
+ if (ret)
+ goto err;
+ }
+
+ if (pcfg->hstart != SWR_INVALID_PARAM
+ && pcfg->hstop != SWR_INVALID_PARAM) {
+ reg = SWRM_DP_PORT_HCTRL_BANK(params->port_num, bank);
+ value = (pcfg->hstop << 4) | pcfg->hstart;
+ ret = ctrl->reg_write(ctrl, reg, value);
+ } else {
+ reg = SWRM_DP_PORT_HCTRL_BANK(params->port_num, bank);
+ value = (SWR_HSTOP_MAX_VAL << 4) | SWR_HSTART_MIN_VAL;
+ ret = ctrl->reg_write(ctrl, reg, value);
+ }
+
+ if (ret)
+ goto err;
- ret = ctrl->reg_write(ctrl, reg, 1);
+ if (pcfg->bp_mode != SWR_INVALID_PARAM) {
+ reg = SWRM_DP_BLOCK_CTRL3_BANK(params->port_num, bank);
+ ret = ctrl->reg_write(ctrl, reg, pcfg->bp_mode);
}
+err:
return ret;
}
@@ -460,27 +809,50 @@ static int qcom_swrm_compute_params(struct sdw_bus *bus)
struct sdw_slave_runtime *s_rt;
struct sdw_port_runtime *p_rt;
struct qcom_swrm_port_config *pcfg;
- int i = 0;
+ struct sdw_slave *slave;
+ unsigned int m_port;
+ int i = 1;
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
- pcfg = &ctrl->pconfig[p_rt->num - 1];
+ pcfg = &ctrl->pconfig[p_rt->num];
p_rt->transport_params.port_num = p_rt->num;
- p_rt->transport_params.sample_interval = pcfg->si + 1;
- p_rt->transport_params.offset1 = pcfg->off1;
- p_rt->transport_params.offset2 = pcfg->off2;
- p_rt->transport_params.blk_pkg_mode = pcfg->bp_mode;
+ if (pcfg->word_length != SWR_INVALID_PARAM) {
+ sdw_fill_port_params(&p_rt->port_params,
+ p_rt->num, pcfg->word_length + 1,
+ SDW_PORT_FLOW_MODE_ISOCH,
+ SDW_PORT_DATA_MODE_NORMAL);
+ }
+
}
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ slave = s_rt->slave;
list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
- pcfg = &ctrl->pconfig[i];
+ m_port = slave->m_port_map[p_rt->num];
+				/*
+				 * pconfig is indexed by 1-based master port
+				 * number; fall back to the sequential index
+				 * when no port mapping is provided
+				 */
+ if (m_port)
+ pcfg = &ctrl->pconfig[m_port];
+ else
+ pcfg = &ctrl->pconfig[i];
p_rt->transport_params.port_num = p_rt->num;
p_rt->transport_params.sample_interval =
pcfg->si + 1;
p_rt->transport_params.offset1 = pcfg->off1;
p_rt->transport_params.offset2 = pcfg->off2;
p_rt->transport_params.blk_pkg_mode = pcfg->bp_mode;
+ p_rt->transport_params.blk_grp_ctrl = pcfg->blk_group_count;
+
+ p_rt->transport_params.hstart = pcfg->hstart;
+ p_rt->transport_params.hstop = pcfg->hstop;
+ p_rt->transport_params.lane_ctrl = pcfg->lane_control;
+ if (pcfg->word_length != SWR_INVALID_PARAM) {
+ sdw_fill_port_params(&p_rt->port_params,
+ p_rt->num,
+ pcfg->word_length + 1,
+ SDW_PORT_FLOW_MODE_ISOCH,
+ SDW_PORT_DATA_MODE_NORMAL);
+ }
i++;
}
}
@@ -493,16 +865,6 @@ static u32 qcom_swrm_freq_tbl[MAX_FREQ_NUM] = {
DEFAULT_CLK_FREQ,
};
-static void qcom_swrm_slave_wq(struct work_struct *work)
-{
- struct qcom_swrm_ctrl *ctrl =
- container_of(work, struct qcom_swrm_ctrl, slave_work);
-
- qcom_swrm_get_device_status(ctrl);
- sdw_handle_slave_status(&ctrl->bus, ctrl->status);
-}
-
-
static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
struct sdw_stream_runtime *stream)
{
@@ -519,7 +881,7 @@ static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
port_mask = &ctrl->din_port_mask;
list_for_each_entry(p_rt, &m_rt->port_list, port_node)
- clear_bit(p_rt->num - 1, port_mask);
+ clear_bit(p_rt->num, port_mask);
}
mutex_unlock(&ctrl->port_lock);
@@ -535,8 +897,10 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
struct sdw_master_runtime *m_rt;
struct sdw_slave_runtime *s_rt;
struct sdw_port_runtime *p_rt;
+ struct sdw_slave *slave;
unsigned long *port_mask;
int i, maxport, pn, nports = 0, ret = 0;
+ unsigned int m_port;
mutex_lock(&ctrl->port_lock);
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
@@ -549,16 +913,22 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
}
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ slave = s_rt->slave;
list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ m_port = slave->m_port_map[p_rt->num];
/* Port numbers start from 1 - 14*/
- pn = find_first_zero_bit(port_mask, maxport);
- if (pn > (maxport - 1)) {
+ if (m_port)
+ pn = m_port;
+ else
+ pn = find_first_zero_bit(port_mask, maxport);
+
+ if (pn > maxport) {
dev_err(ctrl->dev, "All ports busy\n");
ret = -EBUSY;
goto err;
}
set_bit(pn, port_mask);
- pconfig[nports].num = pn + 1;
+ pconfig[nports].num = pn;
pconfig[nports].ch_mask = p_rt->ch_mask;
nports++;
}
@@ -580,7 +950,7 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
err:
if (ret) {
for (i = 0; i < nports; i++)
- clear_bit(pconfig[i].num - 1, port_mask);
+ clear_bit(pconfig[i].num, port_mask);
}
mutex_unlock(&ctrl->port_lock);
@@ -652,7 +1022,7 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime,
substream->stream);
if (ret < 0 && ret != -ENOTSUPP) {
- dev_err(dai->dev, "Failed to set sdw stream on %s",
+ dev_err(dai->dev, "Failed to set sdw stream on %s\n",
codec_dai->name);
sdw_release_stream(sruntime);
return ret;
@@ -728,6 +1098,11 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
u8 off2[QCOM_SDW_MAX_PORTS];
u8 si[QCOM_SDW_MAX_PORTS];
u8 bp_mode[QCOM_SDW_MAX_PORTS] = { 0, };
+ u8 hstart[QCOM_SDW_MAX_PORTS];
+ u8 hstop[QCOM_SDW_MAX_PORTS];
+ u8 word_length[QCOM_SDW_MAX_PORTS];
+ u8 blk_group_count[QCOM_SDW_MAX_PORTS];
+ u8 lane_control[QCOM_SDW_MAX_PORTS];
int i, ret, nports, val;
ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
@@ -754,6 +1129,9 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ctrl->num_dout_ports = val;
nports = ctrl->num_dout_ports + ctrl->num_din_ports;
+ /* Valid port numbers are from 1-14, so mask out port 0 explicitly */
+ set_bit(0, &ctrl->dout_port_mask);
+ set_bit(0, &ctrl->din_port_mask);
ret = of_property_read_u8_array(np, "qcom,ports-offset1",
off1, nports);
@@ -772,11 +1150,35 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
+ if (ret)
+ return ret;
+
+ memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+ of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports);
+
+ memset(hstop, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+ of_property_read_u8_array(np, "qcom,ports-hstop", hstop, nports);
+
+ memset(word_length, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+ of_property_read_u8_array(np, "qcom,ports-word-length", word_length, nports);
+
+ memset(blk_group_count, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+ of_property_read_u8_array(np, "qcom,ports-block-group-count", blk_group_count, nports);
+
+ memset(lane_control, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+ of_property_read_u8_array(np, "qcom,ports-lane-control", lane_control, nports);
+
for (i = 0; i < nports; i++) {
- ctrl->pconfig[i].si = si[i];
- ctrl->pconfig[i].off1 = off1[i];
- ctrl->pconfig[i].off2 = off2[i];
- ctrl->pconfig[i].bp_mode = bp_mode[i];
+ /* Valid port number range is from 1-14 */
+ ctrl->pconfig[i + 1].si = si[i];
+ ctrl->pconfig[i + 1].off1 = off1[i];
+ ctrl->pconfig[i + 1].off2 = off2[i];
+ ctrl->pconfig[i + 1].bp_mode = bp_mode[i];
+ ctrl->pconfig[i + 1].hstart = hstart[i];
+ ctrl->pconfig[i + 1].hstop = hstop[i];
+ ctrl->pconfig[i + 1].word_length = word_length[i];
+ ctrl->pconfig[i + 1].blk_group_count = blk_group_count[i];
+ ctrl->pconfig[i + 1].lane_control = lane_control[i];
}
return 0;
@@ -833,9 +1235,9 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ctrl->dev = dev;
dev_set_drvdata(&pdev->dev, ctrl);
- spin_lock_init(&ctrl->comp_lock);
mutex_init(&ctrl->port_lock);
- INIT_WORK(&ctrl->slave_work, qcom_swrm_slave_wq);
+ init_completion(&ctrl->broadcast);
+ init_completion(&ctrl->enumeration);
ctrl->bus.ops = &qcom_swrm_ops;
ctrl->bus.port_ops = &qcom_swrm_port_ops;
@@ -882,6 +1284,8 @@ static int qcom_swrm_probe(struct platform_device *pdev)
}
qcom_swrm_init(ctrl);
+ wait_for_completion_timeout(&ctrl->enumeration,
+ msecs_to_jiffies(TIMEOUT_MS));
ret = qcom_swrm_register_dais(ctrl);
if (ret)
goto err_master_add;
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 180f38bd003b..0eed38a79c6d 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -88,6 +88,7 @@ int sdw_slave_add(struct sdw_bus *bus,
return ret;
}
+EXPORT_SYMBOL(sdw_slave_add);
#if IS_ENABLED(CONFIG_ACPI)
@@ -95,7 +96,7 @@ static bool find_slave(struct sdw_bus *bus,
struct acpi_device *adev,
struct sdw_slave_id *id)
{
- unsigned long long addr;
+ u64 addr;
unsigned int link_id;
acpi_status status;
@@ -108,6 +109,12 @@ static bool find_slave(struct sdw_bus *bus,
return false;
}
+ if (bus->ops->override_adr)
+ addr = bus->ops->override_adr(bus, addr);
+
+ if (!addr)
+ return false;
+
/* Extract link id from ADR, Bit 51 to 48 (included) */
link_id = SDW_DISCO_LINK_ID(addr);
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 1099b5d1262b..1eaedaaba094 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -261,7 +261,7 @@ static int sdw_program_master_port_params(struct sdw_bus *bus,
*/
static int sdw_program_port_params(struct sdw_master_runtime *m_rt)
{
- struct sdw_slave_runtime *s_rt = NULL;
+ struct sdw_slave_runtime *s_rt;
struct sdw_bus *bus = m_rt->bus;
struct sdw_port_runtime *p_rt;
int ret = 0;
@@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
}
ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
- if (ret)
+ if (ret) {
+ /*
+		 * In the stream_error case, sdw_release_master_stream() frees
+		 * every s_rt on slave_rt_list, but s_rt is only added to that
+		 * list once sdw_config_stream() succeeds, so free s_rt
+		 * explicitly when sdw_config_stream() fails.
+ */
+ kfree(s_rt);
goto stream_error;
+ }
list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
@@ -1449,7 +1457,7 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
static void sdw_acquire_bus_lock(struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt;
- struct sdw_bus *bus = NULL;
+ struct sdw_bus *bus;
/* Iterate for all Master(s) in Master list */
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
@@ -1470,8 +1478,8 @@ static void sdw_acquire_bus_lock(struct sdw_stream_runtime *stream)
*/
static void sdw_release_bus_lock(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = NULL;
- struct sdw_bus *bus = NULL;
+ struct sdw_master_runtime *m_rt;
+ struct sdw_bus *bus;
/* Iterate for all Master(s) in Master list */
list_for_each_entry_reverse(m_rt, &stream->master_list, stream_node) {
@@ -1513,7 +1521,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
if (bus->compute_params) {
ret = bus->compute_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Compute params failed: %d",
+ dev_err(bus->dev, "Compute params failed: %d\n",
ret);
return ret;
}
@@ -1791,7 +1799,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
if (bus->compute_params) {
ret = bus->compute_params(bus);
if (ret < 0) {
- dev_err(bus->dev, "Compute params failed: %d",
+ dev_err(bus->dev, "Compute params failed: %d\n",
ret);
return ret;
}
@@ -1855,7 +1863,7 @@ static int set_stream(struct snd_pcm_substream *substream,
for_each_rtd_dais(rtd, i, dai) {
ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
if (ret < 0) {
- dev_err(rtd->dev, "failed to set stream pointer on dai %s", dai->name);
+ dev_err(rtd->dev, "failed to set stream pointer on dai %s\n", dai->name);
break;
}
}
@@ -1888,7 +1896,7 @@ int sdw_startup_stream(void *sdw_substream)
sdw_stream = sdw_alloc_stream(name);
if (!sdw_stream) {
- dev_err(rtd->dev, "alloc stream failed for substream DAI %s", substream->name);
+ dev_err(rtd->dev, "alloc stream failed for substream DAI %s\n", substream->name);
ret = -ENOMEM;
goto error;
}
@@ -1927,7 +1935,7 @@ void sdw_shutdown_stream(void *sdw_substream)
sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
if (IS_ERR(sdw_stream)) {
- dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
+ dev_err(rtd->dev, "no stream found for DAI %s\n", dai->name);
return;
}
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 202ee81cfc2b..5531f3afeb21 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -165,4 +165,21 @@ config UIO_HV_GENERIC
to network and storage devices from userspace.
If you compile this as a module, it will be called uio_hv_generic.
+
+config UIO_DFL
+ tristate "Generic driver for DFL (Device Feature List) bus"
+ depends on FPGA_DFL
+ help
+ Generic DFL (Device Feature List) driver for Userspace I/O devices.
+ It is useful to provide direct access to DFL devices from userspace.
+ A sample userspace application using this driver is available for
+ download in a git repository:
+
+ git clone https://github.com/OPAE/opae-sdk.git
+
+	  It can be found at:
+
+ opae-sdk/tools/libopaeuio/
+
+ If you compile this as a module, it will be called uio_dfl.
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index c285dd2a4539..f2f416a14228 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
+obj-$(CONFIG_UIO_DFL) += uio_dfl.o
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
new file mode 100644
index 000000000000..89c0fc7b0cbc
--- /dev/null
+++ b/drivers/uio/uio_dfl.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic DFL driver for Userspace I/O devices
+ *
+ * Copyright (C) 2021 Intel Corporation, Inc.
+ */
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/uio_driver.h>
+
+#define DRIVER_NAME "uio_dfl"
+
+static int uio_dfl_probe(struct dfl_device *ddev)
+{
+ struct resource *r = &ddev->mmio_res;
+ struct device *dev = &ddev->dev;
+ struct uio_info *uioinfo;
+ struct uio_mem *uiomem;
+ int ret;
+
+ uioinfo = devm_kzalloc(dev, sizeof(struct uio_info), GFP_KERNEL);
+ if (!uioinfo)
+ return -ENOMEM;
+
+ uioinfo->name = DRIVER_NAME;
+ uioinfo->version = "0";
+
+ uiomem = &uioinfo->mem[0];
+ uiomem->memtype = UIO_MEM_PHYS;
+ uiomem->addr = r->start & PAGE_MASK;
+ uiomem->offs = r->start & ~PAGE_MASK;
+ uiomem->size = (uiomem->offs + resource_size(r)
+ + PAGE_SIZE - 1) & PAGE_MASK;
+ uiomem->name = r->name;
+
+ /* Irq is yet to be supported */
+ uioinfo->irq = UIO_IRQ_NONE;
+
+ ret = devm_uio_register_device(dev, uioinfo);
+ if (ret)
+ dev_err(dev, "unable to register uio device\n");
+
+ return ret;
+}
+
+#define FME_FEATURE_ID_ETH_GROUP 0x10
+
+static const struct dfl_device_id uio_dfl_ids[] = {
+ { FME_ID, FME_FEATURE_ID_ETH_GROUP },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
+
+static struct dfl_driver uio_dfl_driver = {
+ .drv = {
+ .name = DRIVER_NAME,
+ },
+ .id_table = uio_dfl_ids,
+ .probe = uio_dfl_probe,
+};
+module_dfl_driver(uio_dfl_driver);
+
+MODULE_DESCRIPTION("Generic DFL driver for Userspace I/O devices");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
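
For orientation, a minimal userspace sketch of consuming what this driver
exports; the uio0 index is hypothetical (it depends on probe order), and the
map size is read from sysfs, which the UIO core populates from
uioinfo->mem[0] above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t size;
		FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%zi", &size) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);

		int fd = open("/dev/uio0", O_RDWR | O_SYNC);
		if (fd < 0)
			return 1;

		/* offset N * page size selects mem[N]; map0 is at offset 0 */
		void *regs = mmap(NULL, size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, 0);
		if (regs == MAP_FAILED)
			return 1;

		/* MMIO accesses through 'regs' now hit the DFL feature */
		munmap(regs, size);
		close(fd);
		return 0;
	}

Since uioinfo->irq is UIO_IRQ_NONE, read() on the device node delivers no
interrupt counts here; this matches the "Irq is yet to be supported" note in
the probe function.
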
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 3908ff28eec0..800cfd1967ad 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -278,8 +278,10 @@ done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!mr->nent)
+ if (!mr->nent) {
+ err = -ENOMEM;
goto err_map;
+ }
err = create_direct_mr(mvdev, mr);
if (err)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 65e7e6b44578..5023e23db3bc 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1656,6 +1656,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
@@ -1664,7 +1666,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
int regnum = index - VFIO_PCI_NUM_REGIONS;
struct vfio_pci_region *region = vdev->region + regnum;
- if (region && region->ops && region->ops->mmap &&
+ if (region->ops && region->ops->mmap &&
(region->flags & VFIO_REGION_INFO_FLAG_MMAP))
return region->ops->mmap(vdev, region, vma);
return -EINVAL;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index e0a27e336293..bfa4c6ef554e 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -745,9 +745,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
+ mutex_lock(&dev->mutex);
+
r = vhost_dev_check_owner(dev);
if (r)
- return r;
+ goto unlock;
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
@@ -768,6 +770,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
r = -EINVAL;
break;
}
+unlock:
+ mutex_unlock(&dev->mutex);
return r;
}
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 757d5c3f620b..ff09e57f3c38 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
if (!len)
return 0;
- cmap->red = kmalloc(size, flags);
+ cmap->red = kzalloc(size, flags);
if (!cmap->red)
goto fail;
- cmap->green = kmalloc(size, flags);
+ cmap->green = kzalloc(size, flags);
if (!cmap->green)
goto fail;
- cmap->blue = kmalloc(size, flags);
+ cmap->blue = kzalloc(size, flags);
if (!cmap->blue)
goto fail;
if (transp) {
- cmap->transp = kmalloc(size, flags);
+ cmap->transp = kzalloc(size, flags);
if (!cmap->transp)
goto fail;
} else {
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 4dc9077dd2ac..a7e6eea2c4a1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -308,7 +308,7 @@ static inline int synthvid_send(struct hv_device *hdev,
VM_PKT_DATA_INBAND, 0);
if (ret)
- pr_err("Unable to send packet via vmbus\n");
+ pr_err_ratelimited("Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
diff --git a/drivers/virt/acrn/vm.c b/drivers/virt/acrn/vm.c
index 7804a2492ad7..0d002a355a93 100644
--- a/drivers/virt/acrn/vm.c
+++ b/drivers/virt/acrn/vm.c
@@ -94,7 +94,7 @@ int acrn_vm_destroy(struct acrn_vm *vm)
}
/**
- * acrn_inject_msi() - Inject a MSI interrupt into a User VM
+ * acrn_msi_inject() - Inject a MSI interrupt into a User VM
* @vm: User VM
* @msi_addr: The MSI address
* @msi_data: The MSI data
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index c281fe5ed688..9dcb5a54f7fc 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -90,7 +90,7 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
return w1_ds2780_io(dev, buf, off, count, 0);
}
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index f0d393ae070b..2cb7c020b607 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -87,7 +87,7 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
return w1_ds2781_io(dev, buf, off, count, 0);
}
diff --git a/drivers/w1/slaves/w1_ds2805.c b/drivers/w1/slaves/w1_ds2805.c
index 206186db727d..6b5d12ba1b65 100644
--- a/drivers/w1/slaves/w1_ds2805.c
+++ b/drivers/w1/slaves/w1_ds2805.c
@@ -291,20 +291,7 @@ static struct w1_family w1_family_0d = {
.fops = &w1_f0d_fops,
};
-static int __init w1_f0d_init(void)
-{
- pr_info("%s()\n", __func__);
- return w1_register_family(&w1_family_0d);
-}
-
-static void __exit w1_f0d_fini(void)
-{
- pr_info("%s()\n", __func__);
- w1_unregister_family(&w1_family_0d);
-}
-
-module_init(w1_f0d_init);
-module_exit(w1_f0d_fini);
+module_w1_family(w1_family_0d);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrew Worsley amworsley@gmail.com");
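
module_w1_family() is the w1 counterpart of module_driver(): it generates the
init/exit pair that the deleted lines spelled out by hand. Its definition in
include/linux/w1.h is essentially the following (paraphrased, so treat as a
sketch):

	#define module_w1_family(__w1_family) \
		module_driver(__w1_family, w1_register_family, \
				w1_unregister_family)

The same conversion is applied to w1_ds28e17.c below.
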
diff --git a/drivers/w1/slaves/w1_ds28e17.c b/drivers/w1/slaves/w1_ds28e17.c
index 6b00db7169ab..aed10b72fc99 100644
--- a/drivers/w1/slaves/w1_ds28e17.c
+++ b/drivers/w1/slaves/w1_ds28e17.c
@@ -752,18 +752,4 @@ static struct w1_family w1_family_19 = {
.fops = &w1_f19_fops,
};
-
-/* Module init and remove functions. */
-static int __init w1_f19_init(void)
-{
- return w1_register_family(&w1_family_19);
-}
-
-static void __exit w1_f19_fini(void)
-{
- w1_unregister_family(&w1_family_19);
-}
-
-module_init(w1_f19_init);
-module_exit(w1_f19_fini);
-
+module_w1_family(w1_family_19);
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 976eea28f268..9d08a1c9c445 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -63,8 +63,8 @@ static u16 bulk_read_device_counter; /* =0 as per C standard */
#define EEPROM_CMD_READ "restore" /* cmd for read eeprom sysfs */
#define BULK_TRIGGER_CMD "trigger" /* cmd to trigger a bulk read */
-#define MIN_TEMP -55 /* min temperature that can be mesured */
-#define MAX_TEMP 125 /* max temperature that can be mesured */
+#define MIN_TEMP -55 /* min temperature that can be measured */
+#define MAX_TEMP 125 /* max temperature that can be measured */
/* Allowed values for sysfs conv_time attribute */
#define CONV_TIME_DEFAULT 0
@@ -906,8 +906,7 @@ static inline int temperature_from_RAM(struct w1_slave *sl, u8 rom[9])
static inline s8 int_to_short(int i)
{
/* Prepare to cast to short by eliminating out of range values */
- i = i > MAX_TEMP ? MAX_TEMP : i;
- i = i < MIN_TEMP ? MIN_TEMP : i;
+ i = clamp(i, MIN_TEMP, MAX_TEMP);
return (s8) i;
}
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index ea0efd290c37..5f1ce59b44b9 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -238,37 +238,6 @@ config XEN_PRIVCMD
depends on XEN
default m
-config XEN_STUB
- bool "Xen stub drivers"
- depends on XEN && X86_64 && BROKEN
- help
- Allow kernel to install stub drivers, to reserve space for Xen drivers,
- i.e. memory hotplug and cpu hotplug, and to block native drivers loaded,
- so that real Xen drivers can be modular.
-
- To enable Xen features like cpu and memory hotplug, select Y here.
-
-config XEN_ACPI_HOTPLUG_MEMORY
- tristate "Xen ACPI memory hotplug"
- depends on XEN_DOM0 && XEN_STUB && ACPI
- help
- This is Xen ACPI memory hotplug.
-
- Currently Xen only support ACPI memory hot-add. If you want
- to hot-add memory at runtime (the hot-added memory cannot be
- removed until machine stop), select Y/M here, otherwise select N.
-
-config XEN_ACPI_HOTPLUG_CPU
- tristate "Xen ACPI cpu hotplug"
- depends on XEN_DOM0 && XEN_STUB && ACPI
- select ACPI_CONTAINER
- help
- Xen ACPI cpu enumerating and hotplugging
-
- For hotplugging, currently Xen only support ACPI cpu hotadd.
- If you want to hotadd cpu at runtime (the hotadded cpu cannot
- be removed until machine stop), select Y/M here.
-
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index c3621b9f4012..3434593455b2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -26,9 +26,6 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
-obj-$(CONFIG_XEN_STUB) += xen-stub.o
-obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o
-obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
obj-$(CONFIG_XEN_EFI) += efi.o
obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index cdc6daa7a9f6..1bcdd5227771 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -345,41 +345,6 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Sync with Xen hypervisor after cpu hotadded */
-void xen_pcpu_hotplug_sync(void)
-{
- schedule_work(&xen_pcpu_work);
-}
-EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);
-
-/*
- * For hypervisor presented cpu, return logic cpu id;
- * For hypervisor non-presented cpu, return -ENODEV.
- */
-int xen_pcpu_id(uint32_t acpi_id)
-{
- int cpu_id = 0, max_id = 0;
- struct xen_platform_op op;
-
- op.cmd = XENPF_get_cpuinfo;
- while (cpu_id <= max_id) {
- op.u.pcpu_info.xen_cpuid = cpu_id;
- if (HYPERVISOR_platform_op(&op)) {
- cpu_id++;
- continue;
- }
-
- if (acpi_id == op.u.pcpu_info.acpi_id)
- return cpu_id;
- if (op.u.pcpu_info.max_present > max_id)
- max_id = op.u.pcpu_info.max_present;
- cpu_id++;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(xen_pcpu_id);
-
static int __init xen_pcpu_init(void)
{
int irq, ret;
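
The deleted xen_pcpu_id() illustrates a walk over a sparse ID space whose upper bound is only learned along the way: every successful XENPF_get_cpuinfo query can raise max_present and extend the loop, so holes in the middle of the range do not end the search early. A hedged userspace sketch of that termination pattern, with query_cpu() as a made-up stand-in for the hypercall:

#include <stdio.h>

struct cpu_info { int acpi_id; int max_present; };

/* Hypothetical stand-in for HYPERVISOR_platform_op(XENPF_get_cpuinfo):
 * returns 0 and fills *out on success, nonzero for absent slots. */
static int query_cpu(int cpu_id, struct cpu_info *out)
{
	static const int acpi_ids[] = { 4, -1, 7, 9 };  /* slot 1 absent */

	if (cpu_id < 0 || cpu_id > 3 || acpi_ids[cpu_id] < 0)
		return -1;
	out->acpi_id = acpi_ids[cpu_id];
	out->max_present = 3;  /* highest slot the platform reports */
	return 0;
}

static int lookup_cpu(int acpi_id)
{
	int cpu_id = 0, max_id = 0;
	struct cpu_info info;

	/* The bound grows as successful queries report a larger
	 * max_present, so absent slots do not stop the walk early. */
	while (cpu_id <= max_id) {
		if (query_cpu(cpu_id, &info) == 0) {
			if (info.acpi_id == acpi_id)
				return cpu_id;
			if (info.max_present > max_id)
				max_id = info.max_present;
		}
		cpu_id++;
	}
	return -1;  /* -ENODEV in the original */
}

int main(void)
{
	printf("acpi_id 7 -> cpu %d\n", lookup_cpu(7));  /* cpu 2 */
	printf("acpi_id 5 -> cpu %d\n", lookup_cpu(5));  /* -1 */
	return 0;
}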
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index 108edbcbc040..152dd33bb223 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -7,6 +7,7 @@
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/static_call.h>
#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
@@ -175,7 +176,7 @@ void __init xen_time_setup_guest(void)
xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_runstate_update_flag);
- pv_ops.time.steal_clock = xen_steal_clock;
+ static_call_update(pv_steal_clock, xen_steal_clock);
static_key_slow_inc(&paravirt_steal_enabled);
if (xen_runstate_remote)
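
The time.c hunk converts the steal-clock hook from an indirect call through pv_ops to a static call; static_call_update() patches the call site so later calls are direct rather than loading a function pointer. pv_steal_clock is the real kernel key; the sketch below is only a userspace analogy of the before/after call shapes, not the kernel mechanism itself:

#include <stdio.h>

static unsigned long xen_steal_clock_demo(int cpu)
{
	return 42;  /* stand-in for the per-cpu runstate read */
}

/* "Before": an indirect call through a mutable ops table. Every caller
 * pays a pointer load plus an indirect branch. */
static struct {
	unsigned long (*steal_clock)(int cpu);
} pv_ops_demo = { .steal_clock = xen_steal_clock_demo };

/* "After": static_call_update() rewrites the call site itself, so the
 * call behaves like this direct call with no pointer load. */
static unsigned long steal_clock_direct(int cpu)
{
	return xen_steal_clock_demo(cpu);
}

int main(void)
{
	printf("indirect: %lu\n", pv_ops_demo.steal_clock(0));
	printf("direct:   %lu\n", steal_clock_direct(0));
	return 0;
}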
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
deleted file mode 100644
index 00ab1ece02e5..000000000000
--- a/drivers/xen/xen-acpi-cpuhotplug.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 Intel Corporation
- * Author: Liu Jinsong <jinsong.liu@intel.com>
- * Author: Jiang Yunhong <yunhong.jiang@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-#include <linux/acpi.h>
-#include <linux/uaccess.h>
-#include <acpi/processor.h>
-#include <xen/acpi.h>
-#include <xen/interface/platform.h>
-#include <asm/xen/hypercall.h>
-
-#define PREFIX "ACPI:xen_cpu_hotplug:"
-
-#define INSTALL_NOTIFY_HANDLER 0
-#define UNINSTALL_NOTIFY_HANDLER 1
-
-static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr);
-
-/* --------------------------------------------------------------------------
- Driver Interface
--------------------------------------------------------------------------- */
-
-static int xen_acpi_processor_enable(struct acpi_device *device)
-{
- acpi_status status = 0;
- unsigned long long value;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
- struct acpi_processor *pr = acpi_driver_data(device);
-
- if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
- /* Declared with "Processor" statement; match ProcessorID */
- status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- pr_err(PREFIX "Evaluating processor object\n");
- return -ENODEV;
- }
-
- pr->acpi_id = object.processor.proc_id;
- } else {
- /* Declared with "Device" statement; match _UID */
- status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
- NULL, &value);
- if (ACPI_FAILURE(status)) {
- pr_err(PREFIX "Evaluating processor _UID\n");
- return -ENODEV;
- }
-
- pr->acpi_id = value;
- }
-
- pr->id = xen_pcpu_id(pr->acpi_id);
-
- if (invalid_logical_cpuid(pr->id))
- /* This cpu is not present in the hypervisor; try to hot-add it */
- if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) {
- pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n",
- pr->acpi_id);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int xen_acpi_processor_add(struct acpi_device *device)
-{
- int ret;
- struct acpi_processor *pr;
-
- if (!device)
- return -EINVAL;
-
- pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
- if (!pr)
- return -ENOMEM;
-
- pr->handle = device->handle;
- strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
- device->driver_data = pr;
-
- ret = xen_acpi_processor_enable(device);
- if (ret)
- pr_err(PREFIX "Error when enabling Xen processor\n");
-
- return ret;
-}
-
-static int xen_acpi_processor_remove(struct acpi_device *device)
-{
- struct acpi_processor *pr;
-
- if (!device)
- return -EINVAL;
-
- pr = acpi_driver_data(device);
- if (!pr)
- return -EINVAL;
-
- kfree(pr);
- return 0;
-}
-
-/*--------------------------------------------------------------
- Acpi processor hotplug support
---------------------------------------------------------------*/
-
-static int is_processor_present(acpi_handle handle)
-{
- acpi_status status;
- unsigned long long sta = 0;
-
-
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-
- if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
- return 1;
-
- /*
- * _STA is mandatory for a processor that supports hot plug
- */
- if (status == AE_NOT_FOUND)
- pr_info(PREFIX "Processor does not support hot plug\n");
- else
- pr_info(PREFIX "Processor Device is not present\n");
- return 0;
-}
-
-static int xen_apic_id(acpi_handle handle)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_local_apic *lapic;
- int apic_id;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return -EINVAL;
-
- if (!buffer.length || !buffer.pointer)
- return -EINVAL;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER ||
- obj->buffer.length < sizeof(*lapic)) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
- if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
- !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- apic_id = (uint32_t)lapic->id;
- kfree(buffer.pointer);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
-
- return apic_id;
-}
-
-static int xen_hotadd_cpu(struct acpi_processor *pr)
-{
- int cpu_id, apic_id, pxm;
- struct xen_platform_op op;
-
- apic_id = xen_apic_id(pr->handle);
- if (apic_id < 0) {
- pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n",
- pr->acpi_id);
- return -ENODEV;
- }
-
- pxm = xen_acpi_get_pxm(pr->handle);
- if (pxm < 0) {
- pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n",
- pr->acpi_id);
- return pxm;
- }
-
- op.cmd = XENPF_cpu_hotadd;
- op.u.cpu_add.apic_id = apic_id;
- op.u.cpu_add.acpi_id = pr->acpi_id;
- op.u.cpu_add.pxm = pxm;
-
- cpu_id = HYPERVISOR_platform_op(&op);
- if (cpu_id < 0)
- pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n",
- pr->acpi_id);
-
- return cpu_id;
-}
-
-static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr)
-{
- if (!is_processor_present(pr->handle))
- return AE_ERROR;
-
- pr->id = xen_hotadd_cpu(pr);
- if (invalid_logical_cpuid(pr->id))
- return AE_ERROR;
-
- /*
- * Sync with the Xen hypervisor, providing the new /sys/.../xen_cpuX
- * interface after the cpu is hot-added.
- */
- xen_pcpu_hotplug_sync();
-
- return AE_OK;
-}
-
-static int acpi_processor_device_remove(struct acpi_device *device)
-{
- pr_debug(PREFIX "Xen does not support CPU hotremove\n");
-
- return -ENOSYS;
-}
-
-static void acpi_processor_hotplug_notify(acpi_handle handle,
- u32 event, void *data)
-{
- struct acpi_processor *pr;
- struct acpi_device *device = NULL;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
- int result;
-
- acpi_scan_lock_acquire();
-
- switch (event) {
- case ACPI_NOTIFY_BUS_CHECK:
- case ACPI_NOTIFY_DEVICE_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Processor driver received %s event\n",
- (event == ACPI_NOTIFY_BUS_CHECK) ?
- "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
-
- if (!is_processor_present(handle))
- break;
-
- acpi_bus_get_device(handle, &device);
- if (acpi_device_enumerated(device))
- break;
-
- result = acpi_bus_scan(handle);
- if (result) {
- pr_err(PREFIX "Unable to add the device\n");
- break;
- }
- device = NULL;
- acpi_bus_get_device(handle, &device);
- if (!acpi_device_enumerated(device)) {
- pr_err(PREFIX "Missing device object\n");
- break;
- }
- ost_code = ACPI_OST_SC_SUCCESS;
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "received ACPI_NOTIFY_EJECT_REQUEST\n"));
-
- if (acpi_bus_get_device(handle, &device)) {
- pr_err(PREFIX "Device doesn't exist, dropping EJECT\n");
- break;
- }
- pr = acpi_driver_data(device);
- if (!pr) {
- pr_err(PREFIX "Driver data is NULL, dropping EJECT\n");
- break;
- }
-
- /*
- * TBD: implement acpi_processor_device_remove if Xen supports
- * CPU hot-remove in the future.
- */
- acpi_processor_device_remove(device);
- break;
-
- default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
-
- /* non-hotplug event; possibly handled by other handler */
- goto out;
- }
-
- (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
-
-out:
- acpi_scan_lock_release();
-}
-
-static acpi_status is_processor_device(acpi_handle handle)
-{
- struct acpi_device_info *info;
- char *hid;
- acpi_status status;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status))
- return status;
-
- if (info->type == ACPI_TYPE_PROCESSOR) {
- kfree(info);
- return AE_OK; /* found a processor object */
- }
-
- if (!(info->valid & ACPI_VALID_HID)) {
- kfree(info);
- return AE_ERROR;
- }
-
- hid = info->hardware_id.string;
- if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
- kfree(info);
- return AE_ERROR;
- }
-
- kfree(info);
- return AE_OK; /* found a processor device object */
-}
-
-static acpi_status
-processor_walk_namespace_cb(acpi_handle handle,
- u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- int *action = context;
-
- status = is_processor_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* not a processor; continue to walk */
-
- switch (*action) {
- case INSTALL_NOTIFY_HANDLER:
- acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- acpi_processor_hotplug_notify,
- NULL);
- break;
- case UNINSTALL_NOTIFY_HANDLER:
- acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- acpi_processor_hotplug_notify);
- break;
- default:
- break;
- }
-
- /* found a processor; skip walking underneath */
- return AE_CTRL_DEPTH;
-}
-
-static
-void acpi_processor_install_hotplug_notify(void)
-{
- int action = INSTALL_NOTIFY_HANDLER;
- acpi_walk_namespace(ACPI_TYPE_ANY,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- processor_walk_namespace_cb, NULL, &action, NULL);
-}
-
-static
-void acpi_processor_uninstall_hotplug_notify(void)
-{
- int action = UNINSTALL_NOTIFY_HANDLER;
- acpi_walk_namespace(ACPI_TYPE_ANY,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- processor_walk_namespace_cb, NULL, &action, NULL);
-}
-
-static const struct acpi_device_id processor_device_ids[] = {
- {ACPI_PROCESSOR_OBJECT_HID, 0},
- {ACPI_PROCESSOR_DEVICE_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, processor_device_ids);
-
-static struct acpi_driver xen_acpi_processor_driver = {
- .name = "processor",
- .class = ACPI_PROCESSOR_CLASS,
- .ids = processor_device_ids,
- .ops = {
- .add = xen_acpi_processor_add,
- .remove = xen_acpi_processor_remove,
- },
-};
-
-static int __init xen_acpi_processor_init(void)
-{
- int result = 0;
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* unregister the stub, which is only used to reserve driver space */
- xen_stub_processor_exit();
-
- result = acpi_bus_register_driver(&xen_acpi_processor_driver);
- if (result < 0) {
- xen_stub_processor_init();
- return result;
- }
-
- acpi_processor_install_hotplug_notify();
- return 0;
-}
-
-static void __exit xen_acpi_processor_exit(void)
-{
- if (!xen_initial_domain())
- return;
-
- acpi_processor_uninstall_hotplug_notify();
-
- acpi_bus_unregister_driver(&xen_acpi_processor_driver);
-
- /*
- * The stub reserves the space again to prevent any chance of the
- * native driver loading.
- */
- xen_stub_processor_init();
- return;
-}
-
-module_init(xen_acpi_processor_init);
-module_exit(xen_acpi_processor_exit);
-ACPI_MODULE_NAME("xen-acpi-cpuhotplug");
-MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
-MODULE_DESCRIPTION("Xen Hotplug CPU Driver");
-MODULE_LICENSE("GPL");
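
Before it was removed, processor_walk_namespace_cb() above drove both handler install and removal from one acpi_walk_namespace() pass by threading the action through the context pointer and returning AE_CTRL_DEPTH to skip the subtree under a matched processor. A toy sketch of that walk-with-action idiom, with plain ints standing in for the ACPI status codes:

#include <stdio.h>

enum action { INSTALL, UNINSTALL };

struct node { const char *name; int is_cpu; };

/* Stand-in for processor_walk_namespace_cb(): act on matching nodes
 * and return 1 (like AE_CTRL_DEPTH) to tell the walker to skip the
 * subtree; 0 (like AE_OK) means keep walking. */
static int visit(const struct node *n, void *context)
{
	enum action *act = context;

	if (!n->is_cpu)
		return 0;
	if (*act == INSTALL)
		printf("install handler on %s\n", n->name);
	else
		printf("remove handler from %s\n", n->name);
	return 1;
}

int main(void)
{
	struct node tree[] = { {"\\_SB", 0}, {"CPU0", 1}, {"CPU1", 1} };
	enum action act;
	unsigned i;

	act = INSTALL;    /* acpi_processor_install_hotplug_notify() */
	for (i = 0; i < 3; i++)
		visit(&tree[i], &act);

	act = UNINSTALL;  /* acpi_processor_uninstall_hotplug_notify() */
	for (i = 0; i < 3; i++)
		visit(&tree[i], &act);  /* a real walker would also prune */
	return 0;
}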
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
deleted file mode 100644
index f914b72557ef..000000000000
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ /dev/null
@@ -1,475 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 Intel Corporation
- * Author: Liu Jinsong <jinsong.liu@intel.com>
- * Author: Jiang Yunhong <yunhong.jiang@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/acpi.h>
-#include <xen/acpi.h>
-#include <xen/interface/platform.h>
-#include <asm/xen/hypercall.h>
-
-#define PREFIX "ACPI:xen_memory_hotplug:"
-
-struct acpi_memory_info {
- struct list_head list;
- u64 start_addr; /* Memory Range start physical addr */
- u64 length; /* Memory Range length */
- unsigned short caching; /* memory cache attribute */
- unsigned short write_protect; /* memory read/write attribute */
- /* copied from the buffer returned by _CRS */
- unsigned int enabled:1;
-};
-
-struct acpi_memory_device {
- struct acpi_device *device;
- struct list_head res_list;
-};
-
-static bool acpi_hotmem_initialized __read_mostly;
-
-static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info)
-{
- int rc;
- struct xen_platform_op op;
-
- op.cmd = XENPF_mem_hotadd;
- op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT;
- op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT;
- op.u.mem_add.pxm = pxm;
-
- rc = HYPERVISOR_dom0_op(&op);
- if (rc)
- pr_err(PREFIX "Xen Hotplug Memory Add failed on "
- "0x%lx -> 0x%lx, _PXM: %d, error: %d\n",
- (unsigned long)info->start_addr,
- (unsigned long)(info->start_addr + info->length),
- pxm, rc);
-
- return rc;
-}
-
-static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device)
-{
- int pxm, result;
- int num_enabled = 0;
- struct acpi_memory_info *info;
-
- if (!mem_device)
- return -EINVAL;
-
- pxm = xen_acpi_get_pxm(mem_device->device->handle);
- if (pxm < 0)
- return pxm;
-
- list_for_each_entry(info, &mem_device->res_list, list) {
- if (info->enabled) { /* just a sanity check... */
- num_enabled++;
- continue;
- }
-
- if (!info->length)
- continue;
-
- result = xen_hotadd_memory(pxm, info);
- if (result)
- continue;
- info->enabled = 1;
- num_enabled++;
- }
-
- if (!num_enabled)
- return -ENODEV;
-
- return 0;
-}
-
-static acpi_status
-acpi_memory_get_resource(struct acpi_resource *resource, void *context)
-{
- struct acpi_memory_device *mem_device = context;
- struct acpi_resource_address64 address64;
- struct acpi_memory_info *info, *new;
- acpi_status status;
-
- status = acpi_resource_to_address64(resource, &address64);
- if (ACPI_FAILURE(status) ||
- (address64.resource_type != ACPI_MEMORY_RANGE))
- return AE_OK;
-
- list_for_each_entry(info, &mem_device->res_list, list) {
- if ((info->caching == address64.info.mem.caching) &&
- (info->write_protect == address64.info.mem.write_protect) &&
- (info->start_addr + info->length == address64.address.minimum)) {
- info->length += address64.address.address_length;
- return AE_OK;
- }
- }
-
- new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
- if (!new)
- return AE_ERROR;
-
- INIT_LIST_HEAD(&new->list);
- new->caching = address64.info.mem.caching;
- new->write_protect = address64.info.mem.write_protect;
- new->start_addr = address64.address.minimum;
- new->length = address64.address.address_length;
- list_add_tail(&new->list, &mem_device->res_list);
-
- return AE_OK;
-}
-
-static int
-acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
-{
- acpi_status status;
- struct acpi_memory_info *info, *n;
-
- if (!list_empty(&mem_device->res_list))
- return 0;
-
- status = acpi_walk_resources(mem_device->device->handle,
- METHOD_NAME__CRS, acpi_memory_get_resource, mem_device);
-
- if (ACPI_FAILURE(status)) {
- list_for_each_entry_safe(info, n, &mem_device->res_list, list)
- kfree(info);
- INIT_LIST_HEAD(&mem_device->res_list);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int acpi_memory_get_device(acpi_handle handle,
- struct acpi_memory_device **mem_device)
-{
- struct acpi_device *device = NULL;
- int result = 0;
-
- acpi_scan_lock_acquire();
-
- acpi_bus_get_device(handle, &device);
- if (acpi_device_enumerated(device))
- goto end;
-
- /*
- * Now add the notified device. This creates the acpi_device
- * and invokes its .add() function.
- */
- result = acpi_bus_scan(handle);
- if (result) {
- pr_warn(PREFIX "ACPI namespace scan failed\n");
- result = -EINVAL;
- goto out;
- }
- device = NULL;
- acpi_bus_get_device(handle, &device);
- if (!acpi_device_enumerated(device)) {
- pr_warn(PREFIX "Missing device object\n");
- result = -EINVAL;
- goto out;
- }
-
-end:
- *mem_device = acpi_driver_data(device);
- if (!(*mem_device)) {
- pr_err(PREFIX "driver data not found\n");
- result = -ENODEV;
- goto out;
- }
-
-out:
- acpi_scan_lock_release();
- return result;
-}
-
-static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
-{
- unsigned long long current_status;
-
- /* Get device present/absent information from the _STA */
- if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
- "_STA", NULL, &current_status)))
- return -ENODEV;
- /*
- * Check for device status. Device should be
- * present/enabled/functioning.
- */
- if (!((current_status & ACPI_STA_DEVICE_PRESENT)
- && (current_status & ACPI_STA_DEVICE_ENABLED)
- && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
- return -ENODEV;
-
- return 0;
-}
-
-static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
-{
- pr_debug(PREFIX "Xen does not support memory hotremove\n");
-
- return -ENOSYS;
-}
-
-static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
-{
- struct acpi_memory_device *mem_device;
- struct acpi_device *device;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-
- switch (event) {
- case ACPI_NOTIFY_BUS_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived BUS CHECK notification for device\n"));
- fallthrough;
- case ACPI_NOTIFY_DEVICE_CHECK:
- if (event == ACPI_NOTIFY_DEVICE_CHECK)
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived DEVICE CHECK notification for device\n"));
-
- if (acpi_memory_get_device(handle, &mem_device)) {
- pr_err(PREFIX "Cannot find driver data\n");
- break;
- }
-
- ost_code = ACPI_OST_SC_SUCCESS;
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived EJECT REQUEST notification for device\n"));
-
- acpi_scan_lock_acquire();
- if (acpi_bus_get_device(handle, &device)) {
- acpi_scan_lock_release();
- pr_err(PREFIX "Device doesn't exist\n");
- break;
- }
- mem_device = acpi_driver_data(device);
- if (!mem_device) {
- acpi_scan_lock_release();
- pr_err(PREFIX "Driver Data is NULL\n");
- break;
- }
-
- /*
- * TBD: implement acpi_memory_disable_device and invoke
- * acpi_bus_remove if Xen supports hot-remove in the future.
- */
- acpi_memory_disable_device(mem_device);
- acpi_scan_lock_release();
- break;
-
- default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
- /* non-hotplug event; possibly handled by other handler */
- return;
- }
-
- (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
- return;
-}
-
-static int xen_acpi_memory_device_add(struct acpi_device *device)
-{
- int result;
- struct acpi_memory_device *mem_device = NULL;
-
-
- if (!device)
- return -EINVAL;
-
- mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
- if (!mem_device)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&mem_device->res_list);
- mem_device->device = device;
- sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
- sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
- device->driver_data = mem_device;
-
- /* Get the range from the _CRS */
- result = acpi_memory_get_device_resources(mem_device);
- if (result) {
- kfree(mem_device);
- return result;
- }
-
- /*
- * For memory devices that exist at boot, early boot code has already
- * recognized the memory area via EFI/E820. If the DSDT shows these
- * memory devices at boot, no hotplug is necessary for them.
- * For memory devices hot-added at runtime, a hypercall to the Xen
- * hypervisor is needed to add the memory.
- */
- if (!acpi_hotmem_initialized)
- return 0;
-
- if (!acpi_memory_check_device(mem_device))
- result = xen_acpi_memory_enable_device(mem_device);
-
- return result;
-}
-
-static int xen_acpi_memory_device_remove(struct acpi_device *device)
-{
- struct acpi_memory_device *mem_device = NULL;
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- mem_device = acpi_driver_data(device);
- kfree(mem_device);
-
- return 0;
-}
-
-/*
- * Helper function to check for memory device
- */
-static acpi_status is_memory_device(acpi_handle handle)
-{
- char *hardware_id;
- acpi_status status;
- struct acpi_device_info *info;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status))
- return status;
-
- if (!(info->valid & ACPI_VALID_HID)) {
- kfree(info);
- return AE_ERROR;
- }
-
- hardware_id = info->hardware_id.string;
- if ((hardware_id == NULL) ||
- (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
- status = AE_ERROR;
-
- kfree(info);
- return status;
-}
-
-static acpi_status
-acpi_memory_register_notify_handler(acpi_handle handle,
- u32 level, void *ctxt, void **retv)
-{
- acpi_status status;
-
- status = is_memory_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- acpi_memory_device_notify, NULL);
- /* continue */
- return AE_OK;
-}
-
-static acpi_status
-acpi_memory_deregister_notify_handler(acpi_handle handle,
- u32 level, void *ctxt, void **retv)
-{
- acpi_status status;
-
- status = is_memory_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- acpi_memory_device_notify);
-
- return AE_OK; /* continue */
-}
-
-static const struct acpi_device_id memory_device_ids[] = {
- {ACPI_MEMORY_DEVICE_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, memory_device_ids);
-
-static struct acpi_driver xen_acpi_memory_device_driver = {
- .name = "acpi_memhotplug",
- .class = ACPI_MEMORY_DEVICE_CLASS,
- .ids = memory_device_ids,
- .ops = {
- .add = xen_acpi_memory_device_add,
- .remove = xen_acpi_memory_device_remove,
- },
-};
-
-static int __init xen_acpi_memory_device_init(void)
-{
- int result;
- acpi_status status;
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* unregister the stub, which is only used to reserve driver space */
- xen_stub_memory_device_exit();
-
- result = acpi_bus_register_driver(&xen_acpi_memory_device_driver);
- if (result < 0) {
- xen_stub_memory_device_init();
- return -ENODEV;
- }
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_memory_register_notify_handler,
- NULL, NULL, NULL);
-
- if (ACPI_FAILURE(status)) {
- pr_warn(PREFIX "walk_namespace failed\n");
- acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
- xen_stub_memory_device_init();
- return -ENODEV;
- }
-
- acpi_hotmem_initialized = true;
- return 0;
-}
-
-static void __exit xen_acpi_memory_device_exit(void)
-{
- acpi_status status;
-
- if (!xen_initial_domain())
- return;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_memory_deregister_notify_handler,
- NULL, NULL, NULL);
- if (ACPI_FAILURE(status))
- pr_warn(PREFIX "walk_namespace failed\n");
-
- acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
-
- /*
- * The stub reserves the space again to prevent any chance of the
- * native driver loading.
- */
- xen_stub_memory_device_init();
- return;
-}
-
-module_init(xen_acpi_memory_device_init);
-module_exit(xen_acpi_memory_device_exit);
-ACPI_MODULE_NAME("xen-acpi-memhotplug");
-MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
-MODULE_DESCRIPTION("Xen Hotplug Mem Driver");
-MODULE_LICENSE("GPL");
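
One detail worth keeping from the deleted memhotplug driver: acpi_memory_get_resource() coalesced adjacent _CRS ranges, folding a new range into an existing record whenever the caching/write-protect attributes matched and the new range began exactly where the record ended. A self-contained sketch of that merge rule (field names are simplified):

#include <stdio.h>

struct range { unsigned long long start, length; int caching; };

/* Fold r into base if attributes match and r begins where base ends,
 * the same rule the deleted driver applied per _CRS entry. */
static int try_merge(struct range *base, const struct range *r)
{
	if (base->caching == r->caching &&
	    base->start + base->length == r->start) {
		base->length += r->length;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct range base = { 0x100000000ULL, 0x40000000ULL, 0 };
	struct range next = { 0x140000000ULL, 0x40000000ULL, 0 };

	if (try_merge(&base, &next))
		printf("merged: start=%#llx len=%#llx\n",
		       base.start, base.length);
	return 0;
}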
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index cb904ac83006..f8e4faa96ad6 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -802,7 +802,7 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
"guest with no AER driver should have been killed\n");
goto end;
}
- result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
+ result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_slotreset, result);
if (result == PCI_ERS_RESULT_NONE ||
result == PCI_ERS_RESULT_DISCONNECT) {
@@ -859,7 +859,7 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
"guest with no AER driver should have been killed\n");
goto end;
}
- result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
+ result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_mmio, result);
if (result == PCI_ERS_RESULT_NONE ||
result == PCI_ERS_RESULT_DISCONNECT) {
@@ -970,7 +970,7 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
kill_domain_by_device(psdev);
goto end;
}
- common_process(psdev, 1, XEN_PCI_OP_aer_resume,
+ common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_resume,
PCI_ERS_RESULT_RECOVERED);
end:
if (psdev)
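
The three pci_stub.c hunks swap a bare 1 for pci_channel_io_normal. The behavior is unchanged, since to my understanding the kernel defines the normal channel state as 1, but the symbolic name documents the parameter's meaning at the call site. A small sketch showing why the literal happened to be correct (the enum mirrors the kernel's values as assumed here):

#include <stdio.h>

/* Mirrors the kernel's pci channel states: io_normal == 1, which is
 * why the old literal `1` was functionally correct. */
enum pci_channel_state_demo {
	pci_channel_io_normal = 1,      /* I/O channel is healthy */
	pci_channel_io_frozen = 2,      /* I/O blocked pending reset */
	pci_channel_io_perm_failure = 3 /* device is gone for good */
};

static void common_process_demo(enum pci_channel_state_demo state,
				const char *op)
{
	printf("%s: channel state %d\n", op, state);
}

int main(void)
{
	common_process_demo(pci_channel_io_normal, "aer_slotreset");
	return 0;
}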
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 5447b5ab7c76..4162d0e7e00d 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -233,7 +233,6 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
unsigned int *devfn)
{
struct pci_dev_entry *entry;
- struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
int found = 0, slot;
@@ -242,11 +241,7 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
list_for_each_entry(entry,
&vpci_dev->dev_list[slot],
list) {
- dev = entry->dev;
- if (dev && dev->bus->number == pcidev->bus->number
- && pci_domain_nr(dev->bus) ==
- pci_domain_nr(pcidev->bus)
- && dev->devfn == pcidev->devfn) {
+ if (entry->dev == pcidev) {
found = 1;
*domain = 0;
*bus = 0;
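
The vpci.c simplification replaces a field-by-field match on domain, bus and devfn with a pointer-identity test: the dev_list entries store the very struct pci_dev the caller passes in, so comparing addresses suffices. A minimal sketch contrasting the two styles with a made-up device struct:

#include <stdio.h>
#include <stdbool.h>

struct pci_dev_demo { int domain, bus, devfn; };

/* Old style: compare identifying fields one by one. */
static bool match_fields(const struct pci_dev_demo *a,
			 const struct pci_dev_demo *b)
{
	return a->domain == b->domain && a->bus == b->bus &&
	       a->devfn == b->devfn;
}

/* New style: the list stores the same object, so identity suffices. */
static bool match_identity(const struct pci_dev_demo *a,
			   const struct pci_dev_demo *b)
{
	return a == b;
}

int main(void)
{
	struct pci_dev_demo dev = { 0, 3, 0x18 };
	struct pci_dev_demo twin = dev;  /* equal fields, distinct object */

	printf("fields:   %d\n", match_fields(&dev, &twin));    /* 1 */
	printf("identity: %d\n", match_identity(&dev, &twin));  /* 0 */
	printf("identity: %d\n", match_identity(&dev, &dev));   /* 1 */
	return 0;
}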
diff --git a/drivers/xen/xen-stub.c b/drivers/xen/xen-stub.c
deleted file mode 100644
index 3be4e74660b5..000000000000
--- a/drivers/xen/xen-stub.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * xen-stub.c - stub drivers to reserve space for Xen
- *
- * Copyright (C) 2012 Intel Corporation
- * Author: Liu Jinsong <jinsong.liu@intel.com>
- * Author: Jiang Yunhong <yunhong.jiang@intel.com>
- *
- * Copyright (C) 2012 Oracle Inc
- * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/acpi.h>
-#include <xen/acpi.h>
-
-#ifdef CONFIG_ACPI
-
-/*--------------------------------------------
- stub driver for Xen memory hotplug
---------------------------------------------*/
-
-static const struct acpi_device_id memory_device_ids[] = {
- {ACPI_MEMORY_DEVICE_HID, 0},
- {"", 0},
-};
-
-static struct acpi_driver xen_stub_memory_device_driver = {
- /* same name as the native memory driver, to block it from loading */
- .name = "acpi_memhotplug",
- .class = ACPI_MEMORY_DEVICE_CLASS,
- .ids = memory_device_ids,
-};
-
-int xen_stub_memory_device_init(void)
-{
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* just reserve space for Xen and block the native driver from loading */
- return acpi_bus_register_driver(&xen_stub_memory_device_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
-subsys_initcall(xen_stub_memory_device_init);
-
-void xen_stub_memory_device_exit(void)
-{
- acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);
-
-
-/*--------------------------------------------
- stub driver for Xen cpu hotplug
---------------------------------------------*/
-
-static const struct acpi_device_id processor_device_ids[] = {
- {ACPI_PROCESSOR_OBJECT_HID, 0},
- {ACPI_PROCESSOR_DEVICE_HID, 0},
- {"", 0},
-};
-
-static struct acpi_driver xen_stub_processor_driver = {
- /* same name as the native processor driver, to block it from loading */
- .name = "processor",
- .class = ACPI_PROCESSOR_CLASS,
- .ids = processor_device_ids,
-};
-
-int xen_stub_processor_init(void)
-{
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* just reserve space for Xen and block the native driver from loading */
- return acpi_bus_register_driver(&xen_stub_processor_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_processor_init);
-subsys_initcall(xen_stub_processor_init);
-
-void xen_stub_processor_exit(void)
-{
- acpi_bus_unregister_driver(&xen_stub_processor_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_processor_exit);
-
-#endif
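
The removed xen-stub.c relied on a reserve-then-swap pattern: register placeholder ACPI drivers under the same .name as the native memory and processor drivers so the native ones cannot bind, then unregister the stub immediately before the real Xen driver registers. A toy sketch of that pattern with a hypothetical name-keyed registry in place of the ACPI driver core:

#include <stdio.h>
#include <string.h>

#define MAX_DRIVERS 4

static const char *registry[MAX_DRIVERS];

/* Toy acpi_bus_register_driver(): one driver per name, first wins. */
static int register_driver(const char *name)
{
	int i;

	for (i = 0; i < MAX_DRIVERS; i++)
		if (registry[i] && !strcmp(registry[i], name))
			return -1;  /* name taken: later driver blocked */
	for (i = 0; i < MAX_DRIVERS; i++)
		if (!registry[i]) {
			registry[i] = name;
			return 0;
		}
	return -1;
}

static void unregister_driver(const char *name)
{
	int i;

	for (i = 0; i < MAX_DRIVERS; i++)
		if (registry[i] && !strcmp(registry[i], name))
			registry[i] = NULL;
}

int main(void)
{
	register_driver("processor");  /* stub reserves the slot early */
	printf("native: %d\n", register_driver("processor"));  /* -1 */
	unregister_driver("processor");  /* stub exits... */
	printf("xen:    %d\n", register_driver("processor"));  /* 0 */
	return 0;
}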