author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2020-03-21 12:02:20 +0100
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2020-03-21 12:02:20 +0100
commit | e79220db6349344b6e770f0cae7e26d8636c1440 (patch)
tree | 1bc3a28c7b072588fe8a28057978fb8b00801c28 /drivers/phy
parent | f62c1930674913b18daaa608c348000ff124a481 (diff)
parent | 89d715371a05b1dee32faf49014b1acff6138b83 (diff)
download | linux-e79220db6349344b6e770f0cae7e26d8636c1440.tar.gz linux-e79220db6349344b6e770f0cae7e26d8636c1440.tar.bz2 linux-e79220db6349344b6e770f0cae7e26d8636c1440.zip
Merge tag 'phy-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux-phy into usb-next
Kishon writes:
phy: for 5.7
*) Rename and re-design the phy-cadence-dp driver as the phy-cadence-torrent driver
*) Add new PHY driver for Qualcomm 28nm Hi-Speed USB PHY
*) Add new PHY driver for Qualcomm Super Speed PHY in QCS404
*) Add support for Qualcomm PCIe QMP/QHP PHY in SDM845 to phy-qcom-qmp driver
*) Add support for Qualcomm UFS PHY in MSM8996 to phy-qcom-qmp driver
*) Add support for an additional reference clock in Mediatek phy-mtk-tphy driver
*) Add support for configuring tuning parameters in Mediatek phy-mtk-tphy driver
*) Add support for GMII PHY in TI K3 AM654x/J721E SoCs to phy-gmii-sel driver
*) Add support for USB2 PHY in Amlogic A1 SoC Family to phy-meson-g12a-usb2
driver
*) Add support for the USB3/USB2/PCIe PHYs in the Socionext Pro5 SoC to the
phy-uniphier-usb3ss/phy-uniphier-usb3hs/phy-uniphier-pcie drivers respectively
*) Add support for the QUSB2 PHY in Qualcomm SC7180 to the phy-qcom-qusb2 driver
*) Convert the dt-bindings of Cadence DP and Qualcomm QUSB2 to YAML format
Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
* tag 'phy-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux-phy: (52 commits)
phy: qcom-qusb2: Add new overriding tuning parameters in QUSB2 V2 PHY
phy: qcom-qusb2: Add support for overriding tuning parameters in QUSB2 V2 PHY
dt-bindings: phy: qcom-qusb2: Add support for overriding Phy tuning parameters
phy: qcom-qusb2: Add generic QUSB2 V2 PHY support
dt-bindings: phy: qcom,qusb2: Add compatibles for QUSB2 V2 phy and SC7180
dt-bindings: phy: qcom,qusb2: Convert QUSB2 phy bindings to yaml
phy: rk-inno-usb2: Decrease verbosity of repeating log.
phy: amlogic: Add Amlogic A1 USB2 PHY Driver
dt-bindings: phy: Add Amlogic A1 USB2 PHY Bindings
phy: ti: gmii-sel: add support for am654x/j721e soc
dt-bindings: phy: ti: gmii-sel: add support for am654x/j721e soc
phy: qualcomm: usb: Add SuperSpeed PHY driver
dt-bindings: Add Qualcomm USB SuperSpeed PHY bindings
phy: qualcomm: Add Synopsys 28nm Hi-Speed USB PHY driver
dt-bindings: phy: Add Qualcomm Synopsys Hi-Speed USB PHY binding
dt-bindings: phy: remove qcom-dwc3-usb-phy
phy: phy-mtk-tphy: add a new reference clock
phy: phy-mtk-tphy: remove unused u3phya_ref clock
phy: phy-mtk-tphy: make the ref clock optional
phy: phy-mtk-tphy: add a property for internal resistance
...
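All of the drivers listed above register with the kernel's generic PHY framework, so a controller driver consumes them through the same `phy_*` calls regardless of vendor. As a point of reference only, here is a minimal consumer-side sketch; the con-id string "usb2-phy" and the function names are hypothetical and not taken from this merge.

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

/*
 * Illustrative only: look up a PHY by a hypothetical con-id, bring it up,
 * and tear it down again.  The point is the call order used by generic
 * PHY consumers: init -> power_on ... power_off -> exit.
 */
static int example_enable_phy(struct device *dev, struct phy **out)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "usb2-phy");	/* con-id is hypothetical */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	*out = phy;
	return 0;
}

static void example_disable_phy(struct phy *phy)
{
	phy_power_off(phy);
	phy_exit(phy);
}
```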
Diffstat (limited to 'drivers/phy')
-rw-r--r-- | drivers/phy/amlogic/phy-meson-g12a-usb2.c | 87
-rw-r--r-- | drivers/phy/cadence/Kconfig | 6
-rw-r--r-- | drivers/phy/cadence/Makefile | 2
-rw-r--r-- | drivers/phy/cadence/phy-cadence-dp.c | 541
-rw-r--r-- | drivers/phy/cadence/phy-cadence-torrent.c | 1944
-rw-r--r-- | drivers/phy/mediatek/phy-mtk-tphy.c | 64
-rw-r--r-- | drivers/phy/qualcomm/Kconfig | 20
-rw-r--r-- | drivers/phy/qualcomm/Makefile | 2
-rw-r--r-- | drivers/phy/qualcomm/phy-qcom-qmp.c | 425
-rw-r--r-- | drivers/phy/qualcomm/phy-qcom-qmp.h | 114
-rw-r--r-- | drivers/phy/qualcomm/phy-qcom-qusb2.c | 144
-rw-r--r-- | drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c | 415
-rw-r--r-- | drivers/phy/qualcomm/phy-qcom-usb-ss.c | 246
-rw-r--r-- | drivers/phy/rockchip/phy-rockchip-inno-usb2.c | 2
-rw-r--r-- | drivers/phy/socionext/phy-uniphier-pcie.c | 102
-rw-r--r-- | drivers/phy/socionext/phy-uniphier-usb3hs.c | 92
-rw-r--r-- | drivers/phy/socionext/phy-uniphier-usb3ss.c | 8
-rw-r--r-- | drivers/phy/ti/phy-gmii-sel.c | 19
18 files changed, 3551 insertions, 682 deletions
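The first hunk below extends phy-meson-g12a-usb2 to cover the Amlogic A1 by storing a SoC identifier in the OF match data, reading it back in probe with of_device_get_match_data(), and branching the register programming on that id. Stripped down to the bare idiom, the pattern looks roughly like the following sketch; the compatible strings and names here are invented for illustration and are not the driver's own.

```c
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

enum example_soc_id {
	EXAMPLE_SOC_G12A = 0,
	EXAMPLE_SOC_A1,
};

struct example_priv {
	int soc_id;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* .data below carries the enum as a void *; cast it back here. */
	priv->soc_id = (enum example_soc_id)(uintptr_t)
			of_device_get_match_data(&pdev->dev);

	/* SoC-specific register writes can now branch on priv->soc_id. */
	platform_set_drvdata(pdev, priv);
	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,g12a-example", .data = (void *)EXAMPLE_SOC_G12A },
	{ .compatible = "vendor,a1-example",   .data = (void *)EXAMPLE_SOC_A1 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-soc-id",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL v2");
```

The actual diff does the equivalent with MESON_SOC_G12A/MESON_SOC_A1 and the amlogic,g12a-usb2-phy / amlogic,a1-usb2-phy compatibles.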
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c index 9065ffc85eb4..b26e30e1afaf 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c @@ -66,7 +66,7 @@ #define PHY_CTRL_R14 0x38 #define PHY_CTRL_R14_I_RDP_EN BIT(0) #define PHY_CTRL_R14_I_RPU_SW1_EN BIT(1) - #define PHY_CTRL_R14_I_RPU_SW2_EN GENMASK(2, 3) + #define PHY_CTRL_R14_I_RPU_SW2_EN GENMASK(3, 2) #define PHY_CTRL_R14_PG_RSTN BIT(4) #define PHY_CTRL_R14_I_C2L_DATA_16_8 BIT(5) #define PHY_CTRL_R14_I_C2L_ASSERT_SINGLE_EN_ZERO BIT(6) @@ -146,11 +146,17 @@ #define RESET_COMPLETE_TIME 1000 #define PLL_RESET_COMPLETE_TIME 100 +enum meson_soc_id { + MESON_SOC_G12A = 0, + MESON_SOC_A1, +}; + struct phy_meson_g12a_usb2_priv { struct device *dev; struct regmap *regmap; struct clk *clk; struct reset_control *reset; + int soc_id; }; static const struct regmap_config phy_meson_g12a_usb2_regmap_conf = { @@ -164,6 +170,7 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) { struct phy_meson_g12a_usb2_priv *priv = phy_get_drvdata(phy); int ret; + unsigned int value; ret = reset_control_reset(priv->reset); if (ret) @@ -192,18 +199,22 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT2, 2) | FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT1, 9)); - regmap_write(priv->regmap, PHY_CTRL_R18, - FIELD_PREP(PHY_CTRL_R18_MPLL_LKW_SEL, 1) | - FIELD_PREP(PHY_CTRL_R18_MPLL_LK_W, 9) | - FIELD_PREP(PHY_CTRL_R18_MPLL_LK_S, 0x27) | - FIELD_PREP(PHY_CTRL_R18_MPLL_PFD_GAIN, 1) | - FIELD_PREP(PHY_CTRL_R18_MPLL_ROU, 7) | - FIELD_PREP(PHY_CTRL_R18_MPLL_DATA_SEL, 3) | - FIELD_PREP(PHY_CTRL_R18_MPLL_BIAS_ADJ, 1) | - FIELD_PREP(PHY_CTRL_R18_MPLL_BB_MODE, 0) | - FIELD_PREP(PHY_CTRL_R18_MPLL_ALPHA, 3) | - FIELD_PREP(PHY_CTRL_R18_MPLL_ADJ_LDO, 1) | - PHY_CTRL_R18_MPLL_ACG_RANGE); + value = FIELD_PREP(PHY_CTRL_R18_MPLL_LKW_SEL, 1) | + FIELD_PREP(PHY_CTRL_R18_MPLL_LK_W, 9) | + FIELD_PREP(PHY_CTRL_R18_MPLL_LK_S, 0x27) | + FIELD_PREP(PHY_CTRL_R18_MPLL_PFD_GAIN, 1) | + FIELD_PREP(PHY_CTRL_R18_MPLL_ROU, 7) | + FIELD_PREP(PHY_CTRL_R18_MPLL_DATA_SEL, 3) | + FIELD_PREP(PHY_CTRL_R18_MPLL_BIAS_ADJ, 1) | + FIELD_PREP(PHY_CTRL_R18_MPLL_BB_MODE, 0) | + FIELD_PREP(PHY_CTRL_R18_MPLL_ALPHA, 3) | + FIELD_PREP(PHY_CTRL_R18_MPLL_ADJ_LDO, 1) | + PHY_CTRL_R18_MPLL_ACG_RANGE; + + if (priv->soc_id == MESON_SOC_A1) + value |= PHY_CTRL_R18_MPLL_DCO_CLK_SEL; + + regmap_write(priv->regmap, PHY_CTRL_R18, value); udelay(PLL_RESET_COMPLETE_TIME); @@ -227,13 +238,24 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) FIELD_PREP(PHY_CTRL_R20_USB2_BGR_VREF_4_0, 0) | FIELD_PREP(PHY_CTRL_R20_USB2_BGR_DBG_1_0, 0)); - regmap_write(priv->regmap, PHY_CTRL_R4, - FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_7_0, 0xf) | - FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_15_8, 0xf) | - FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_23_16, 0xf) | - PHY_CTRL_R4_TEST_BYPASS_MODE_EN | - FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0, 0) | - FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2, 0)); + if (priv->soc_id == MESON_SOC_G12A) + regmap_write(priv->regmap, PHY_CTRL_R4, + FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_7_0, 0xf) | + FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_15_8, 0xf) | + FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_23_16, 0xf) | + PHY_CTRL_R4_TEST_BYPASS_MODE_EN | + FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0, 0) | + FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2, 0)); + else if (priv->soc_id == MESON_SOC_A1) { + regmap_write(priv->regmap, PHY_CTRL_R21, + PHY_CTRL_R21_USB2_CAL_ACK_EN | + PHY_CTRL_R21_USB2_TX_STRG_PD | + 
FIELD_PREP(PHY_CTRL_R21_USB2_OTG_ACA_TRIM_1_0, 2)); + + /* Analog Settings */ + regmap_write(priv->regmap, PHY_CTRL_R13, + FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7)); + } /* Tuning Disconnect Threshold */ regmap_write(priv->regmap, PHY_CTRL_R3, @@ -241,11 +263,13 @@ static int phy_meson_g12a_usb2_init(struct phy *phy) FIELD_PREP(PHY_CTRL_R3_HSDIC_REF, 1) | FIELD_PREP(PHY_CTRL_R3_DISC_THRESH, 3)); - /* Analog Settings */ - regmap_write(priv->regmap, PHY_CTRL_R14, 0); - regmap_write(priv->regmap, PHY_CTRL_R13, - PHY_CTRL_R13_UPDATE_PMA_SIGNALS | - FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7)); + if (priv->soc_id == MESON_SOC_G12A) { + /* Analog Settings */ + regmap_write(priv->regmap, PHY_CTRL_R14, 0); + regmap_write(priv->regmap, PHY_CTRL_R13, + PHY_CTRL_R13_UPDATE_PMA_SIGNALS | + FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7)); + } return 0; } @@ -286,6 +310,8 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); + priv->soc_id = (enum meson_soc_id)of_device_get_match_data(&pdev->dev); + priv->regmap = devm_regmap_init_mmio(dev, base, &phy_meson_g12a_usb2_regmap_conf); if (IS_ERR(priv->regmap)) @@ -321,8 +347,15 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev) } static const struct of_device_id phy_meson_g12a_usb2_of_match[] = { - { .compatible = "amlogic,g12a-usb2-phy", }, - { }, + { + .compatible = "amlogic,g12a-usb2-phy", + .data = (void *)MESON_SOC_G12A, + }, + { + .compatible = "amlogic,a1-usb2-phy", + .data = (void *)MESON_SOC_A1, + }, + { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, phy_meson_g12a_usb2_of_match); diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig index b2db916db64b..459545871608 100644 --- a/drivers/phy/cadence/Kconfig +++ b/drivers/phy/cadence/Kconfig @@ -3,13 +3,13 @@ # Phy drivers for Cadence PHYs # -config PHY_CADENCE_DP - tristate "Cadence MHDP DisplayPort PHY driver" +config PHY_CADENCE_TORRENT + tristate "Cadence Torrent PHY driver" depends on OF depends on HAS_IOMEM select GENERIC_PHY help - Support for Cadence MHDP DisplayPort PHY. + Support for Cadence Torrent PHY. config PHY_CADENCE_DPHY tristate "Cadence D-PHY Support" diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile index 8f89560f1711..6a7ffc6ea599 100644 --- a/drivers/phy/cadence/Makefile +++ b/drivers/phy/cadence/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o +obj-$(CONFIG_PHY_CADENCE_TORRENT) += phy-cadence-torrent.o obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o diff --git a/drivers/phy/cadence/phy-cadence-dp.c b/drivers/phy/cadence/phy-cadence-dp.c deleted file mode 100644 index bc10cb264b7a..000000000000 --- a/drivers/phy/cadence/phy-cadence-dp.c +++ /dev/null @@ -1,541 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Cadence MHDP DisplayPort SD0801 PHY driver. - * - * Copyright 2018 Cadence Design Systems, Inc. 
- * - */ - -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/iopoll.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/phy/phy.h> -#include <linux/platform_device.h> - -#define DEFAULT_NUM_LANES 2 -#define MAX_NUM_LANES 4 -#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */ - -#define POLL_TIMEOUT_US 2000 -#define LANE_MASK 0x7 - -/* - * register offsets from DPTX PHY register block base (i.e MHDP - * register base + 0x30a00) - */ -#define PHY_AUX_CONFIG 0x00 -#define PHY_AUX_CTRL 0x04 -#define PHY_RESET 0x20 -#define PHY_PMA_XCVR_PLLCLK_EN 0x24 -#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28 -#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c -#define PHY_POWER_STATE_LN_0 0x0000 -#define PHY_POWER_STATE_LN_1 0x0008 -#define PHY_POWER_STATE_LN_2 0x0010 -#define PHY_POWER_STATE_LN_3 0x0018 -#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30 -#define PHY_PMA_CMN_READY 0x34 -#define PHY_PMA_XCVR_TX_VMARGIN 0x38 -#define PHY_PMA_XCVR_TX_DEEMPH 0x3c - -/* - * register offsets from SD0801 PHY register block base (i.e MHDP - * register base + 0x500000) - */ -#define CMN_SSM_BANDGAP_TMR 0x00084 -#define CMN_SSM_BIAS_TMR 0x00088 -#define CMN_PLLSM0_PLLPRE_TMR 0x000a8 -#define CMN_PLLSM0_PLLLOCK_TMR 0x000b0 -#define CMN_PLLSM1_PLLPRE_TMR 0x000c8 -#define CMN_PLLSM1_PLLLOCK_TMR 0x000d0 -#define CMN_BGCAL_INIT_TMR 0x00190 -#define CMN_BGCAL_ITER_TMR 0x00194 -#define CMN_IBCAL_INIT_TMR 0x001d0 -#define CMN_PLL0_VCOCAL_INIT_TMR 0x00210 -#define CMN_PLL0_VCOCAL_ITER_TMR 0x00214 -#define CMN_PLL0_VCOCAL_REFTIM_START 0x00218 -#define CMN_PLL0_VCOCAL_PLLCNT_START 0x00220 -#define CMN_PLL0_INTDIV_M0 0x00240 -#define CMN_PLL0_FRACDIVL_M0 0x00244 -#define CMN_PLL0_FRACDIVH_M0 0x00248 -#define CMN_PLL0_HIGH_THR_M0 0x0024c -#define CMN_PLL0_DSM_DIAG_M0 0x00250 -#define CMN_PLL0_LOCK_PLLCNT_START 0x00278 -#define CMN_PLL1_VCOCAL_INIT_TMR 0x00310 -#define CMN_PLL1_VCOCAL_ITER_TMR 0x00314 -#define CMN_PLL1_DSM_DIAG_M0 0x00350 -#define CMN_TXPUCAL_INIT_TMR 0x00410 -#define CMN_TXPUCAL_ITER_TMR 0x00414 -#define CMN_TXPDCAL_INIT_TMR 0x00430 -#define CMN_TXPDCAL_ITER_TMR 0x00434 -#define CMN_RXCAL_INIT_TMR 0x00450 -#define CMN_RXCAL_ITER_TMR 0x00454 -#define CMN_SD_CAL_INIT_TMR 0x00490 -#define CMN_SD_CAL_ITER_TMR 0x00494 -#define CMN_SD_CAL_REFTIM_START 0x00498 -#define CMN_SD_CAL_PLLCNT_START 0x004a0 -#define CMN_PDIAG_PLL0_CTRL_M0 0x00680 -#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x00684 -#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x00690 -#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x00694 -#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x00698 -#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x006d0 -#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x006d4 -#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x00704 -#define XCVR_DIAG_PLLDRC_CTRL 0x10394 -#define XCVR_DIAG_HSCLK_SEL 0x10398 -#define XCVR_DIAG_HSCLK_DIV 0x1039c -#define TX_PSC_A0 0x10400 -#define TX_PSC_A1 0x10404 -#define TX_PSC_A2 0x10408 -#define TX_PSC_A3 0x1040c -#define RX_PSC_A0 0x20000 -#define RX_PSC_A1 0x20004 -#define RX_PSC_A2 0x20008 -#define RX_PSC_A3 0x2000c -#define PHY_PLL_CFG 0x30038 - -struct cdns_dp_phy { - void __iomem *base; /* DPTX registers base */ - void __iomem *sd_base; /* SD0801 registers base */ - u32 num_lanes; /* Number of lanes to use */ - u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */ - struct device *dev; -}; - -static int cdns_dp_phy_init(struct phy *phy); -static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_wait_pma_cmn_ready(struct 
cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy, - unsigned int lane); -static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy); -static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy, - unsigned int offset, - unsigned char start_bit, - unsigned char num_bits, - unsigned int val); - -static const struct phy_ops cdns_dp_phy_ops = { - .init = cdns_dp_phy_init, - .owner = THIS_MODULE, -}; - -static int cdns_dp_phy_init(struct phy *phy) -{ - unsigned char lane_bits; - - struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy); - - writel(0x0003, cdns_phy->base + PHY_AUX_CTRL); /* enable AUX */ - - /* PHY PMA registers configuration function */ - cdns_dp_phy_pma_cfg(cdns_phy); - - /* - * Set lines power state to A0 - * Set lines pll clk enable to 0 - */ - - cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_0, 6, 0x0000); - - if (cdns_phy->num_lanes >= 2) { - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_1, 6, 0x0000); - - if (cdns_phy->num_lanes == 4) { - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_2, 6, 0); - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_POWER_STATE_REQ, - PHY_POWER_STATE_LN_3, 6, 0); - } - } - - cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN, - 0, 1, 0x0000); - - if (cdns_phy->num_lanes >= 2) { - cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN, - 1, 1, 0x0000); - if (cdns_phy->num_lanes == 4) { - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_PLLCLK_EN, - 2, 1, 0x0000); - cdns_dp_phy_write_field(cdns_phy, - PHY_PMA_XCVR_PLLCLK_EN, - 3, 1, 0x0000); - } - } - - /* - * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on - * used lanes - */ - lane_bits = (1 << cdns_phy->num_lanes) - 1; - writel(((0xF & ~lane_bits) << 4) | (0xF & lane_bits), - cdns_phy->base + PHY_RESET); - - /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */ - writel(0x0001, cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN); - - /* PHY PMA registers configuration functions */ - cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy); - cdns_dp_phy_pma_cmn_rate(cdns_phy); - - /* take out of reset */ - cdns_dp_phy_write_field(cdns_phy, PHY_RESET, 8, 1, 1); - cdns_dp_phy_wait_pma_cmn_ready(cdns_phy); - cdns_dp_phy_run(cdns_phy); - - return 0; -} - -static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy) -{ - unsigned int reg; - int ret; - - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_CMN_READY, reg, - reg & 1, 0, 500); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for PMA common ready\n"); -} - -static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy) -{ - unsigned int i; - - /* PMA common configuration */ - cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy); - - /* PMA lane configuration to deal with multi-link operation */ - for (i = 0; i < cdns_phy->num_lanes; i++) - cdns_dp_phy_pma_lane_cfg(cdns_phy, i); -} - -static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy) -{ - /* refclock registers - assumes 25 MHz refclock */ - writel(0x0019, cdns_phy->sd_base + CMN_SSM_BIAS_TMR); - writel(0x0032, cdns_phy->sd_base + CMN_PLLSM0_PLLPRE_TMR); - writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM0_PLLLOCK_TMR); - writel(0x0032, cdns_phy->sd_base + 
CMN_PLLSM1_PLLPRE_TMR); - writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM1_PLLLOCK_TMR); - writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_INIT_TMR); - writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_ITER_TMR); - writel(0x0019, cdns_phy->sd_base + CMN_IBCAL_INIT_TMR); - writel(0x001E, cdns_phy->sd_base + CMN_TXPUCAL_INIT_TMR); - writel(0x0006, cdns_phy->sd_base + CMN_TXPUCAL_ITER_TMR); - writel(0x001E, cdns_phy->sd_base + CMN_TXPDCAL_INIT_TMR); - writel(0x0006, cdns_phy->sd_base + CMN_TXPDCAL_ITER_TMR); - writel(0x02EE, cdns_phy->sd_base + CMN_RXCAL_INIT_TMR); - writel(0x0006, cdns_phy->sd_base + CMN_RXCAL_ITER_TMR); - writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_INIT_TMR); - writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_ITER_TMR); - writel(0x000E, cdns_phy->sd_base + CMN_SD_CAL_REFTIM_START); - writel(0x012B, cdns_phy->sd_base + CMN_SD_CAL_PLLCNT_START); - /* PLL registers */ - writel(0x0409, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_PADJ_M0); - writel(0x1001, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_IADJ_M0); - writel(0x0F08, cdns_phy->sd_base + CMN_PDIAG_PLL0_FILT_PADJ_M0); - writel(0x0004, cdns_phy->sd_base + CMN_PLL0_DSM_DIAG_M0); - writel(0x00FA, cdns_phy->sd_base + CMN_PLL0_VCOCAL_INIT_TMR); - writel(0x0004, cdns_phy->sd_base + CMN_PLL0_VCOCAL_ITER_TMR); - writel(0x00FA, cdns_phy->sd_base + CMN_PLL1_VCOCAL_INIT_TMR); - writel(0x0004, cdns_phy->sd_base + CMN_PLL1_VCOCAL_ITER_TMR); - writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_REFTIM_START); -} - -static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy) -{ - /* Assumes 25 MHz refclock */ - switch (cdns_phy->max_bit_rate) { - /* Setting VCO for 10.8GHz */ - case 2700: - case 5400: - writel(0x01B0, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x0120, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - /* Setting VCO for 9.72GHz */ - case 2430: - case 3240: - writel(0x0184, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0xCCCD, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x0104, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - /* Setting VCO for 8.64GHz */ - case 2160: - case 4320: - writel(0x0159, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0x999A, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x00E7, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - /* Setting VCO for 8.1GHz */ - case 8100: - writel(0x0144, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0); - writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0); - writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0); - writel(0x00D8, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0); - break; - } - - writel(0x0002, cdns_phy->sd_base + CMN_PDIAG_PLL0_CTRL_M0); - writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_PLLCNT_START); -} - -static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy) -{ - unsigned int clk_sel_val = 0; - unsigned int hsclk_div_val = 0; - unsigned int i; - - /* 16'h0000 for single DP link configuration */ - writel(0x0000, cdns_phy->sd_base + PHY_PLL_CFG); - - switch (cdns_phy->max_bit_rate) { - case 1620: - clk_sel_val = 0x0f01; - hsclk_div_val = 2; - break; - case 2160: - case 2430: - case 2700: - clk_sel_val = 0x0701; - hsclk_div_val = 1; - break; - case 3240: - clk_sel_val = 0x0b00; - hsclk_div_val = 2; - break; - case 4320: - case 5400: - clk_sel_val = 0x0301; - hsclk_div_val = 
0; - break; - case 8100: - clk_sel_val = 0x0200; - hsclk_div_val = 0; - break; - } - - writel(clk_sel_val, cdns_phy->sd_base + CMN_PDIAG_PLL0_CLK_SEL_M0); - - /* PMA lane configuration to deal with multi-link operation */ - for (i = 0; i < cdns_phy->num_lanes; i++) { - writel(hsclk_div_val, - cdns_phy->sd_base + (XCVR_DIAG_HSCLK_DIV | (i<<11))); - } -} - -static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy, - unsigned int lane) -{ - unsigned int lane_bits = (lane & LANE_MASK) << 11; - - /* Writing Tx/Rx Power State Controllers registers */ - writel(0x00FB, cdns_phy->sd_base + (TX_PSC_A0 | lane_bits)); - writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A2 | lane_bits)); - writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A3 | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (RX_PSC_A0 | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (RX_PSC_A2 | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (RX_PSC_A3 | lane_bits)); - - writel(0x0001, cdns_phy->sd_base + (XCVR_DIAG_PLLDRC_CTRL | lane_bits)); - writel(0x0000, cdns_phy->sd_base + (XCVR_DIAG_HSCLK_SEL | lane_bits)); -} - -static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy) -{ - unsigned int read_val; - u32 write_val1 = 0; - u32 write_val2 = 0; - u32 mask = 0; - int ret; - - /* - * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the - * master lane - */ - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN_ACK, - read_val, read_val & 1, 0, POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for link PLL clock enable ack\n"); - - ndelay(100); - - switch (cdns_phy->num_lanes) { - - case 1: /* lane 0 */ - write_val1 = 0x00000004; - write_val2 = 0x00000001; - mask = 0x0000003f; - break; - case 2: /* lane 0-1 */ - write_val1 = 0x00000404; - write_val2 = 0x00000101; - mask = 0x00003f3f; - break; - case 4: /* lane 0-3 */ - write_val1 = 0x04040404; - write_val2 = 0x01010101; - mask = 0x3f3f3f3f; - break; - } - - writel(write_val1, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK, - read_val, (read_val & mask) == write_val1, 0, - POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for link power state ack\n"); - - writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - ndelay(100); - - writel(write_val2, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - - ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK, - read_val, (read_val & mask) == write_val2, 0, - POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) - dev_err(cdns_phy->dev, - "timeout waiting for link power state ack\n"); - - writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ); - ndelay(100); -} - -static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy, - unsigned int offset, - unsigned char start_bit, - unsigned char num_bits, - unsigned int val) -{ - unsigned int read_val; - - read_val = readl(cdns_phy->base + offset); - writel(((val << start_bit) | (read_val & ~(((1 << num_bits) - 1) << - start_bit))), cdns_phy->base + offset); -} - -static int cdns_dp_phy_probe(struct platform_device *pdev) -{ - struct resource *regs; - struct cdns_dp_phy *cdns_phy; - struct device *dev = &pdev->dev; - struct phy_provider *phy_provider; - struct phy *phy; - int err; - - cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL); - if (!cdns_phy) - return -ENOMEM; - - cdns_phy->dev = &pdev->dev; - - phy = devm_phy_create(dev, NULL, &cdns_dp_phy_ops); - if (IS_ERR(phy)) { - dev_err(dev, "failed to create 
DisplayPort PHY\n"); - return PTR_ERR(phy); - } - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - cdns_phy->base = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(cdns_phy->base)) - return PTR_ERR(cdns_phy->base); - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); - cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(cdns_phy->sd_base)) - return PTR_ERR(cdns_phy->sd_base); - - err = device_property_read_u32(dev, "num_lanes", - &(cdns_phy->num_lanes)); - if (err) - cdns_phy->num_lanes = DEFAULT_NUM_LANES; - - switch (cdns_phy->num_lanes) { - case 1: - case 2: - case 4: - /* valid number of lanes */ - break; - default: - dev_err(dev, "unsupported number of lanes: %d\n", - cdns_phy->num_lanes); - return -EINVAL; - } - - err = device_property_read_u32(dev, "max_bit_rate", - &(cdns_phy->max_bit_rate)); - if (err) - cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE; - - switch (cdns_phy->max_bit_rate) { - case 2160: - case 2430: - case 2700: - case 3240: - case 4320: - case 5400: - case 8100: - /* valid bit rate */ - break; - default: - dev_err(dev, "unsupported max bit rate: %dMbps\n", - cdns_phy->max_bit_rate); - return -EINVAL; - } - - phy_set_drvdata(phy, cdns_phy); - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - - dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n", - cdns_phy->num_lanes, - cdns_phy->max_bit_rate / 1000, - cdns_phy->max_bit_rate % 1000); - - return PTR_ERR_OR_ZERO(phy_provider); -} - -static const struct of_device_id cdns_dp_phy_of_match[] = { - { - .compatible = "cdns,dp-phy" - }, - {} -}; -MODULE_DEVICE_TABLE(of, cdns_dp_phy_of_match); - -static struct platform_driver cdns_dp_phy_driver = { - .probe = cdns_dp_phy_probe, - .driver = { - .name = "cdns-dp-phy", - .of_match_table = cdns_dp_phy_of_match, - } -}; -module_platform_driver(cdns_dp_phy_driver); - -MODULE_AUTHOR("Cadence Design Systems, Inc."); -MODULE_DESCRIPTION("Cadence MHDP PHY driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c new file mode 100644 index 000000000000..7116127358ee --- /dev/null +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -0,0 +1,1944 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Cadence Torrent SD0801 PHY driver. + * + * Copyright 2018 Cadence Design Systems, Inc. 
+ * + */ + +#include <dt-bindings/phy/phy.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/regmap.h> + +#define REF_CLK_19_2MHz 19200000 +#define REF_CLK_25MHz 25000000 + +#define DEFAULT_NUM_LANES 4 +#define MAX_NUM_LANES 4 +#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */ + +#define POLL_TIMEOUT_US 5000 + +#define TORRENT_COMMON_CDB_OFFSET 0x0 + +#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0x4000 << (block_offset)) + \ + (((ln) << 9) << (reg_offset))) + +#define TORRENT_RX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0x8000 << (block_offset)) + \ + (((ln) << 9) << (reg_offset))) + +#define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \ + (0xC000 << (block_offset)) + +#define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \ + (0xE000 << (block_offset)) + +#define TORRENT_DPTX_PHY_OFFSET 0x0 + +/* + * register offsets from DPTX PHY register block base (i.e MHDP + * register base + 0x30a00) + */ +#define PHY_AUX_CTRL 0x04 +#define PHY_RESET 0x20 +#define PMA_TX_ELEC_IDLE_MASK 0xF0U +#define PMA_TX_ELEC_IDLE_SHIFT 4 +#define PHY_L00_RESET_N_MASK 0x01U +#define PHY_PMA_XCVR_PLLCLK_EN 0x24 +#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28 +#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c +#define PHY_POWER_STATE_LN_0 0x0000 +#define PHY_POWER_STATE_LN_1 0x0008 +#define PHY_POWER_STATE_LN_2 0x0010 +#define PHY_POWER_STATE_LN_3 0x0018 +#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU +#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30 +#define PHY_PMA_CMN_READY 0x34 + +/* + * register offsets from SD0801 PHY register block base (i.e MHDP + * register base + 0x500000) + */ +#define CMN_SSM_BANDGAP_TMR 0x0021U +#define CMN_SSM_BIAS_TMR 0x0022U +#define CMN_PLLSM0_PLLPRE_TMR 0x002AU +#define CMN_PLLSM0_PLLLOCK_TMR 0x002CU +#define CMN_PLLSM1_PLLPRE_TMR 0x0032U +#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U +#define CMN_BGCAL_INIT_TMR 0x0064U +#define CMN_BGCAL_ITER_TMR 0x0065U +#define CMN_IBCAL_INIT_TMR 0x0074U +#define CMN_PLL0_VCOCAL_TCTRL 0x0082U +#define CMN_PLL0_VCOCAL_INIT_TMR 0x0084U +#define CMN_PLL0_VCOCAL_ITER_TMR 0x0085U +#define CMN_PLL0_VCOCAL_REFTIM_START 0x0086U +#define CMN_PLL0_VCOCAL_PLLCNT_START 0x0088U +#define CMN_PLL0_INTDIV_M0 0x0090U +#define CMN_PLL0_FRACDIVL_M0 0x0091U +#define CMN_PLL0_FRACDIVH_M0 0x0092U +#define CMN_PLL0_HIGH_THR_M0 0x0093U +#define CMN_PLL0_DSM_DIAG_M0 0x0094U +#define CMN_PLL0_SS_CTRL1_M0 0x0098U +#define CMN_PLL0_SS_CTRL2_M0 0x0099U +#define CMN_PLL0_SS_CTRL3_M0 0x009AU +#define CMN_PLL0_SS_CTRL4_M0 0x009BU +#define CMN_PLL0_LOCK_REFCNT_START 0x009CU +#define CMN_PLL0_LOCK_PLLCNT_START 0x009EU +#define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU +#define CMN_PLL1_VCOCAL_TCTRL 0x00C2U +#define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U +#define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U +#define CMN_PLL1_VCOCAL_REFTIM_START 0x00C6U +#define CMN_PLL1_VCOCAL_PLLCNT_START 0x00C8U +#define CMN_PLL1_INTDIV_M0 0x00D0U +#define CMN_PLL1_FRACDIVL_M0 0x00D1U +#define CMN_PLL1_FRACDIVH_M0 0x00D2U +#define CMN_PLL1_HIGH_THR_M0 0x00D3U +#define CMN_PLL1_DSM_DIAG_M0 0x00D4U +#define CMN_PLL1_SS_CTRL1_M0 0x00D8U +#define CMN_PLL1_SS_CTRL2_M0 0x00D9U +#define CMN_PLL1_SS_CTRL3_M0 0x00DAU +#define CMN_PLL1_SS_CTRL4_M0 0x00DBU +#define CMN_PLL1_LOCK_REFCNT_START 0x00DCU +#define 
CMN_PLL1_LOCK_PLLCNT_START 0x00DEU +#define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU +#define CMN_TXPUCAL_INIT_TMR 0x0104U +#define CMN_TXPUCAL_ITER_TMR 0x0105U +#define CMN_TXPDCAL_INIT_TMR 0x010CU +#define CMN_TXPDCAL_ITER_TMR 0x010DU +#define CMN_RXCAL_INIT_TMR 0x0114U +#define CMN_RXCAL_ITER_TMR 0x0115U +#define CMN_SD_CAL_INIT_TMR 0x0124U +#define CMN_SD_CAL_ITER_TMR 0x0125U +#define CMN_SD_CAL_REFTIM_START 0x0126U +#define CMN_SD_CAL_PLLCNT_START 0x0128U +#define CMN_PDIAG_PLL0_CTRL_M0 0x01A0U +#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x01A1U +#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U +#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U +#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U +#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U +#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U +#define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U +#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U +#define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U +#define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U +#define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U + +/* PMA TX Lane registers */ +#define TX_TXCC_CTRL 0x0040U +#define TX_TXCC_CPOST_MULT_00 0x004CU +#define TX_TXCC_MGNFS_MULT_000 0x0050U +#define DRV_DIAG_TX_DRV 0x00C6U +#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U +#define XCVR_DIAG_HSCLK_SEL 0x00E6U +#define XCVR_DIAG_HSCLK_DIV 0x00E7U +#define XCVR_DIAG_BIDI_CTRL 0x00EAU +#define TX_PSC_A0 0x0100U +#define TX_PSC_A2 0x0102U +#define TX_PSC_A3 0x0103U +#define TX_RCVDET_ST_TMR 0x0123U +#define TX_DIAG_ACYA 0x01E7U +#define TX_DIAG_ACYA_HBDC_MASK 0x0001U + +/* PMA RX Lane registers */ +#define RX_PSC_A0 0x0000U +#define RX_PSC_A2 0x0002U +#define RX_PSC_A3 0x0003U +#define RX_PSC_CAL 0x0006U +#define RX_REE_GCSM1_CTRL 0x0108U +#define RX_REE_GCSM2_CTRL 0x0110U +#define RX_REE_PERGCSM_CTRL 0x0118U + +/* PHY PCS common registers */ +#define PHY_PLL_CFG 0x000EU + +/* PHY PMA common registers */ +#define PHY_PMA_CMN_CTRL2 0x0001U +#define PHY_PMA_PLL_RAW_CTRL 0x0003U + +static const struct reg_field phy_pll_cfg = + REG_FIELD(PHY_PLL_CFG, 0, 1); + +static const struct reg_field phy_pma_cmn_ctrl_2 = + REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7); + +static const struct reg_field phy_pma_pll_raw_ctrl = + REG_FIELD(PHY_PMA_PLL_RAW_CTRL, 0, 1); + +static const struct reg_field phy_reset_ctrl = + REG_FIELD(PHY_RESET, 8, 8); + +static const struct of_device_id cdns_torrent_phy_of_match[]; + +struct cdns_torrent_inst { + struct phy *phy; + u32 mlane; + u32 phy_type; + u32 num_lanes; + struct reset_control *lnk_rst; +}; + +struct cdns_torrent_phy { + void __iomem *base; /* DPTX registers base */ + void __iomem *sd_base; /* SD0801 registers base */ + u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */ + struct reset_control *phy_rst; + struct device *dev; + struct clk *clk; + unsigned long ref_clk_rate; + struct cdns_torrent_inst phys[MAX_NUM_LANES]; + int nsubnodes; + struct regmap *regmap; + struct regmap *regmap_common_cdb; + struct regmap *regmap_phy_pcs_common_cdb; + struct regmap *regmap_phy_pma_common_cdb; + struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES]; + struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES]; + struct regmap *regmap_dptx_phy_reg; + struct regmap_field *phy_pll_cfg; + struct regmap_field *phy_pma_cmn_ctrl_2; + struct regmap_field *phy_pma_pll_raw_ctrl; + struct regmap_field *phy_reset_ctrl; +}; + +enum phy_powerstate { + POWERSTATE_A0 = 0, + /* Powerstate A1 is unused */ + POWERSTATE_A2 = 2, + POWERSTATE_A3 = 3, +}; + +static int cdns_torrent_dp_init(struct phy *phy); +static int cdns_torrent_dp_exit(struct phy *phy); +static int cdns_torrent_dp_run(struct cdns_torrent_phy 
*cdns_phy, + u32 num_lanes); +static +int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy); +static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy, + struct cdns_torrent_inst *inst); +static +void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy); +static +void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc); +static +void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy); +static +void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc); +static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy, + unsigned int lane); +static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy, + u32 rate, u32 num_lanes); +static int cdns_torrent_dp_configure(struct phy *phy, + union phy_configure_opts *opts); +static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, + u32 num_lanes, + enum phy_powerstate powerstate); +static int cdns_torrent_phy_on(struct phy *phy); +static int cdns_torrent_phy_off(struct phy *phy); + +static const struct phy_ops cdns_torrent_phy_ops = { + .init = cdns_torrent_dp_init, + .exit = cdns_torrent_dp_exit, + .configure = cdns_torrent_dp_configure, + .power_on = cdns_torrent_phy_on, + .power_off = cdns_torrent_phy_off, + .owner = THIS_MODULE, +}; + +struct cdns_torrent_data { + u8 block_offset_shift; + u8 reg_offset_shift; +}; + +struct cdns_regmap_cdb_context { + struct device *dev; + void __iomem *base; + u8 reg_offset_shift; +}; + +static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg << ctx->reg_offset_shift; + + writew(val, ctx->base + offset); + + return 0; +} + +static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg << ctx->reg_offset_shift; + + *val = readw(ctx->base + offset); + return 0; +} + +static int cdns_regmap_dptx_write(void *context, unsigned int reg, + unsigned int val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg; + + writel(val, ctx->base + offset); + + return 0; +} + +static int cdns_regmap_dptx_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct cdns_regmap_cdb_context *ctx = context; + u32 offset = reg; + + *val = readl(ctx->base + offset); + return 0; +} + +#define TORRENT_TX_LANE_CDB_REGMAP_CONF(n) \ +{ \ + .name = "torrent_tx_lane" n "_cdb", \ + .reg_stride = 1, \ + .fast_io = true, \ + .reg_write = cdns_regmap_write, \ + .reg_read = cdns_regmap_read, \ +} + +#define TORRENT_RX_LANE_CDB_REGMAP_CONF(n) \ +{ \ + .name = "torrent_rx_lane" n "_cdb", \ + .reg_stride = 1, \ + .fast_io = true, \ + .reg_write = cdns_regmap_write, \ + .reg_read = cdns_regmap_read, \ +} + +static struct regmap_config cdns_torrent_tx_lane_cdb_config[] = { + TORRENT_TX_LANE_CDB_REGMAP_CONF("0"), + TORRENT_TX_LANE_CDB_REGMAP_CONF("1"), + TORRENT_TX_LANE_CDB_REGMAP_CONF("2"), + TORRENT_TX_LANE_CDB_REGMAP_CONF("3"), +}; + +static struct regmap_config cdns_torrent_rx_lane_cdb_config[] = { + TORRENT_RX_LANE_CDB_REGMAP_CONF("0"), + TORRENT_RX_LANE_CDB_REGMAP_CONF("1"), + TORRENT_RX_LANE_CDB_REGMAP_CONF("2"), + TORRENT_RX_LANE_CDB_REGMAP_CONF("3"), +}; + +static struct regmap_config cdns_torrent_common_cdb_config = { + .name = "torrent_common_cdb", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_write, + .reg_read = 
cdns_regmap_read, +}; + +static struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = { + .name = "torrent_phy_pcs_cmn_cdb", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_write, + .reg_read = cdns_regmap_read, +}; + +static struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = { + .name = "torrent_phy_pma_cmn_cdb", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_write, + .reg_read = cdns_regmap_read, +}; + +static struct regmap_config cdns_torrent_dptx_phy_config = { + .name = "torrent_dptx_phy", + .reg_stride = 1, + .fast_io = true, + .reg_write = cdns_regmap_dptx_write, + .reg_read = cdns_regmap_dptx_read, +}; + +/* PHY mmr access functions */ + +static void cdns_torrent_phy_write(struct regmap *regmap, u32 offset, u32 val) +{ + regmap_write(regmap, offset, val); +} + +static u32 cdns_torrent_phy_read(struct regmap *regmap, u32 offset) +{ + unsigned int val; + + regmap_read(regmap, offset, &val); + return val; +} + +/* DPTX mmr access functions */ + +static void cdns_torrent_dp_write(struct regmap *regmap, u32 offset, u32 val) +{ + regmap_write(regmap, offset, val); +} + +static u32 cdns_torrent_dp_read(struct regmap *regmap, u32 offset) +{ + u32 val; + + regmap_read(regmap, offset, &val); + return val; +} + +/* + * Structure used to store values of PHY registers for voltage-related + * coefficients, for particular voltage swing and pre-emphasis level. Values + * are shared across all physical lanes. + */ +struct coefficients { + /* Value of DRV_DIAG_TX_DRV register to use */ + u16 diag_tx_drv; + /* Value of TX_TXCC_MGNFS_MULT_000 register to use */ + u16 mgnfs_mult; + /* Value of TX_TXCC_CPOST_MULT_00 register to use */ + u16 cpost_mult; +}; + +/* + * Array consists of values of voltage-related registers for sd0801 PHY. A value + * of 0xFFFF is a placeholder for invalid combination, and will never be used. + */ +static const struct coefficients vltg_coeff[4][4] = { + /* voltage swing 0, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x002A, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F, + .cpost_mult = 0x0014}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0012, + .cpost_mult = 0x0020}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x002A} + }, + + /* voltage swing 1, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013, + .cpost_mult = 0x0012}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x001F}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF} + }, + + /* voltage swing 2, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x0013}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF} + }, + + /* voltage swing 3, pre-emphasis 0->3 */ + { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000, + .cpost_mult = 0x0000}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF}, + {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF, + .cpost_mult = 0xFFFF} + } +}; + +/* + * Enable or disable PLL for selected lanes. 
+ */ +static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp, + bool enable) +{ + u32 rd_val; + u32 ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + /* + * Used to determine, which bits to check for or enable in + * PHY_PMA_XCVR_PLLCLK_EN register. + */ + u32 pll_bits; + /* Used to enable or disable lanes. */ + u32 pll_val; + + /* Select values of registers and mask, depending on enabled lane + * count. + */ + switch (dp->lanes) { + /* lane 0 */ + case (1): + pll_bits = 0x00000001; + break; + /* lanes 0-1 */ + case (2): + pll_bits = 0x00000003; + break; + /* lanes 0-3, all */ + default: + pll_bits = 0x0000000F; + break; + } + + if (enable) + pll_val = pll_bits; + else + pll_val = 0x00000000; + + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val); + + /* Wait for acknowledgment from PHY. */ + ret = regmap_read_poll_timeout(regmap, + PHY_PMA_XCVR_PLLCLK_EN_ACK, + rd_val, + (rd_val & pll_bits) == pll_val, + 0, POLL_TIMEOUT_US); + ndelay(100); + return ret; +} + +/* + * Perform register operations related to setting link rate, once powerstate is + * set and PLL disable request was processed. + */ +static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u32 ret; + u32 read_val; + + /* Disable the cmn_pll0_en before re-programming the new data rate. */ + regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0); + + /* + * Wait for PLL ready de-assertion. + * For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1 + */ + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2, + read_val, + ((read_val >> 2) & 0x01) != 0, + 0, POLL_TIMEOUT_US); + if (ret) + return ret; + ndelay(200); + + /* DP Rate Change - VCO Output settings. */ + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) { + /* PMA common configuration 19.2MHz */ + cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, + dp->ssc); + cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy); + } else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) { + /* PMA common configuration 25MHz */ + cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, + dp->ssc); + cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy); + } + cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes); + + /* Enable the cmn_pll0_en. */ + regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x3); + + /* + * Wait for PLL ready assertion. + * For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1 + */ + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2, + read_val, + (read_val & 0x01) != 0, + 0, POLL_TIMEOUT_US); + return ret; +} + +/* + * Verify, that parameters to configure PHY with are correct. + */ +static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst, + struct phy_configure_opts_dp *dp) +{ + u8 i; + + /* If changing link rate was required, verify it's supported. */ + if (dp->set_rate) { + switch (dp->link_rate) { + case 1620: + case 2160: + case 2430: + case 2700: + case 3240: + case 4320: + case 5400: + case 8100: + /* valid bit rate */ + break; + default: + return -EINVAL; + } + } + + /* Verify lane count. */ + switch (dp->lanes) { + case 1: + case 2: + case 4: + /* valid lane count. */ + break; + default: + return -EINVAL; + } + + /* Check against actual number of PHY's lanes. */ + if (dp->lanes > inst->num_lanes) + return -EINVAL; + + /* + * If changing voltages is required, check swing and pre-emphasis + * levels, per-lane. + */ + if (dp->set_voltages) { + /* Lane count verified previously. 
*/ + for (i = 0; i < dp->lanes; i++) { + if (dp->voltage[i] > 3 || dp->pre[i] > 3) + return -EINVAL; + + /* Sum of voltage swing and pre-emphasis levels cannot + * exceed 3. + */ + if (dp->voltage[i] + dp->pre[i] > 3) + return -EINVAL; + } + } + + return 0; +} + +/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */ +static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy, + u32 num_lanes) +{ + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + u32 pwr_state = cdns_torrent_dp_read(regmap, + PHY_PMA_XCVR_POWER_STATE_REQ); + u32 pll_clk_en = cdns_torrent_dp_read(regmap, + PHY_PMA_XCVR_PLLCLK_EN); + + /* Lane 0 is always enabled. */ + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_0); + pll_clk_en &= ~0x01U; + + if (num_lanes > 1) { + /* lane 1 */ + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_1); + pll_clk_en &= ~(0x01U << 1); + } + + if (num_lanes > 2) { + /* lanes 2 and 3 */ + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_2); + pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << + PHY_POWER_STATE_LN_3); + pll_clk_en &= ~(0x01U << 2); + pll_clk_en &= ~(0x01U << 3); + } + + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state); + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_clk_en); +} + +/* Configure lane count as required. */ +static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u32 value; + u32 ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + u8 lane_mask = (1 << dp->lanes) - 1; + + value = cdns_torrent_dp_read(regmap, PHY_RESET); + /* clear pma_tx_elec_idle_ln_* bits. */ + value &= ~PMA_TX_ELEC_IDLE_MASK; + /* Assert pma_tx_elec_idle_ln_* for disabled lanes. */ + value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) & + PMA_TX_ELEC_IDLE_MASK; + cdns_torrent_dp_write(regmap, PHY_RESET, value); + + /* reset the link by asserting phy_l00_reset_n low */ + cdns_torrent_dp_write(regmap, PHY_RESET, + value & (~PHY_L00_RESET_N_MASK)); + + /* + * Assert lane reset on unused lanes and lane 0 so they remain in reset + * and powered down when re-enabling the link + */ + value = (value & 0x0000FFF0) | (0x0000000E & lane_mask); + cdns_torrent_dp_write(regmap, PHY_RESET, value); + + cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes); + + /* release phy_l0*_reset_n based on used laneCount */ + value = (value & 0x0000FFF0) | (0x0000000F & lane_mask); + cdns_torrent_dp_write(regmap, PHY_RESET, value); + + /* Wait, until PHY gets ready after releasing PHY reset signal. */ + ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy); + if (ret) + return ret; + + ndelay(100); + + /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */ + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001); + + ret = cdns_torrent_dp_run(cdns_phy, dp->lanes); + + return ret; +} + +/* Configure link rate as required. 
*/ +static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u32 ret; + + ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, + POWERSTATE_A3); + if (ret) + return ret; + ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false); + if (ret) + return ret; + ndelay(200); + + ret = cdns_torrent_dp_configure_rate(cdns_phy, dp); + if (ret) + return ret; + ndelay(200); + + ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true); + if (ret) + return ret; + ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, + POWERSTATE_A2); + if (ret) + return ret; + ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, + POWERSTATE_A0); + if (ret) + return ret; + ndelay(900); + + return ret; +} + +/* Configure voltage swing and pre-emphasis for all enabled lanes. */ +static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy, + struct phy_configure_opts_dp *dp) +{ + u8 lane; + u16 val; + + for (lane = 0; lane < dp->lanes; lane++) { + val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA); + /* + * Write 1 to register bit TX_DIAG_ACYA[0] to freeze the + * current state of the analog TX driver. + */ + val |= TX_DIAG_ACYA_HBDC_MASK; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA, val); + + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_TXCC_CTRL, 0x08A4); + val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + DRV_DIAG_TX_DRV, val); + val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_TXCC_MGNFS_MULT_000, + val); + val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_TXCC_CPOST_MULT_00, + val); + + val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA); + /* + * Write 0 to register bit TX_DIAG_ACYA[0] to allow the state of + * analog TX driver to reflect the new programmed one. 
+ */ + val &= ~TX_DIAG_ACYA_HBDC_MASK; + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_DIAG_ACYA, val); + } +}; + +static int cdns_torrent_dp_configure(struct phy *phy, + union phy_configure_opts *opts) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + int ret; + + ret = cdns_torrent_dp_verify_config(inst, &opts->dp); + if (ret) { + dev_err(&phy->dev, "invalid params for phy configure\n"); + return ret; + } + + if (opts->dp.set_lanes) { + ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp); + if (ret) { + dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n"); + return ret; + } + } + + if (opts->dp.set_rate) { + ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp); + if (ret) { + dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n"); + return ret; + } + } + + if (opts->dp.set_voltages) + cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp); + + return ret; +} + +static int cdns_torrent_dp_init(struct phy *phy) +{ + unsigned char lane_bits; + int ret; + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + ret = clk_prepare_enable(cdns_phy->clk); + if (ret) { + dev_err(cdns_phy->dev, "Failed to prepare ref clock\n"); + return ret; + } + + cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk); + if (!(cdns_phy->ref_clk_rate)) { + dev_err(cdns_phy->dev, "Failed to get ref clock rate\n"); + clk_disable_unprepare(cdns_phy->clk); + return -EINVAL; + } + + switch (cdns_phy->ref_clk_rate) { + case REF_CLK_19_2MHz: + case REF_CLK_25MHz: + /* Valid Ref Clock Rate */ + break; + default: + dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n"); + return -EINVAL; + } + + cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */ + + /* PHY PMA registers configuration function */ + cdns_torrent_dp_pma_cfg(cdns_phy, inst); + + /* + * Set lines power state to A0 + * Set lines pll clk enable to 0 + */ + cdns_torrent_dp_set_a0_pll(cdns_phy, inst->num_lanes); + + /* + * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on + * used lanes + */ + lane_bits = (1 << inst->num_lanes) - 1; + cdns_torrent_dp_write(regmap, PHY_RESET, + ((0xF & ~lane_bits) << 4) | (0xF & lane_bits)); + + /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */ + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001); + + /* PHY PMA registers configuration functions */ + /* Initialize PHY with max supported link rate, without SSC. 
*/ + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) + cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, + cdns_phy->max_bit_rate, + false); + else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) + cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, + cdns_phy->max_bit_rate, + false); + cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate, + inst->num_lanes); + + /* take out of reset */ + regmap_field_write(cdns_phy->phy_reset_ctrl, 0x1); + + cdns_torrent_phy_on(phy); + + ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy); + if (ret) + return ret; + + ret = cdns_torrent_dp_run(cdns_phy, inst->num_lanes); + + return ret; +} + +static int cdns_torrent_dp_exit(struct phy *phy) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + + clk_disable_unprepare(cdns_phy->clk); + return 0; +} + +static +int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy) +{ + unsigned int reg; + int ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg, + reg & 1, 0, POLL_TIMEOUT_US); + if (ret == -ETIMEDOUT) { + dev_err(cdns_phy->dev, + "timeout waiting for PMA common ready\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy, + struct cdns_torrent_inst *inst) +{ + unsigned int i; + + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) + /* PMA common configuration 19.2MHz */ + cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy); + else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) + /* PMA common configuration 25MHz */ + cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy); + + /* PMA lane configuration to deal with multi-link operation */ + for (i = 0; i < inst->num_lanes; i++) + cdns_torrent_dp_pma_lane_cfg(cdns_phy, i); +} + +static +void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* refclock registers - assumes 19.2 MHz refclock */ + cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0014); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0027); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00A1); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0027); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00A1); + cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x0060); + cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x0060); + cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0014); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x0018); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0005); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x0018); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0005); + cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x0240); + cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0005); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000B); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x0137); + + /* PLL registers */ + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, 
CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00C0); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00C0); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0260); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0260); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003); +} + +/* + * Set registers responsible for enabling and configuring SSC, with second and + * third register values provided by parameters. + */ +static +void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 ctrl2_val, u32 ctrl3_val) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl3_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl3_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); +} + +static +void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* Assumes 19.2 MHz refclock */ + switch (rate) { + /* Setting VCO for 10.8GHz */ + case 2700: + case 5400: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x0119); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x00BC); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0012); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x0119); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x00BC); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0012); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, + 0x006A); + break; + /* Setting VCO for 9.72GHz */ + case 1620: + case 2430: + case 3240: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x01FA); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x0152); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x01FA); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x0152); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, + 0x0069); + break; + /* Setting VCO for 8.64GHz */ + case 2160: + case 4320: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x01C2); + 
cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x012C); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x01C2); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x012C); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, + 0x0069); + break; + /* Setting VCO for 8.1GHz */ + case 8100: + cdns_torrent_phy_write(regmap, + CMN_PLL0_INTDIV_M0, 0x01A5); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVL_M0, 0xE000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_HIGH_THR_M0, 0x011A); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_INTDIV_M0, 0x01A5); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVL_M0, 0xE000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_HIGH_THR_M0, 0x011A); + cdns_torrent_phy_write(regmap, + CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, + 0x006A); + break; + } + + if (ssc) { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); + } else { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260); + /* Set reset register values to disable SSC */ + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); + } + + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099); +} + +static +void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* refclock registers - assumes 25 MHz refclock */ + cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0019); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0032); + cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00D1); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0032); + cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00D1); + cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x007D); + 
cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x007D); + cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0019); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x001E); + cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0006); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x001E); + cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0006); + cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x02EE); + cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0006); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000E); + cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x012B); + + /* PLL registers */ + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00FA); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00FA); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0317); + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0317); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003); +} + +/* + * Set registers responsible for enabling and configuring SSC, with second + * register value provided by a parameter. 
+ */ +static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 ctrl2_val) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x007F); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x007F); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); +} + +static +void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* Assumes 25 MHz refclock */ + switch (rate) { + /* Setting VCO for 10.8GHz */ + case 2700: + case 5400: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423); + break; + /* Setting VCO for 9.72GHz */ + case 1620: + case 2430: + case 3240: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9); + break; + /* Setting VCO for 8.64GHz */ + case 2160: + case 4320: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F); + break; + /* Setting VCO for 8.1GHz */ + case 8100: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A); + break; + } + + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + 
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + + if (ssc) { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); + } else { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317); + /* Set reset register values to disable SSC */ + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); + } + + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7); +} + +static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy, + u32 rate, u32 num_lanes) +{ + unsigned int clk_sel_val = 0; + unsigned int hsclk_div_val = 0; + unsigned int i; + + /* 16'h0000 for single DP link configuration */ + regmap_field_write(cdns_phy->phy_pll_cfg, 0x0); + + switch (rate) { + case 1620: + clk_sel_val = 0x0f01; + hsclk_div_val = 2; + break; + case 2160: + case 2430: + case 2700: + clk_sel_val = 0x0701; + hsclk_div_val = 1; + break; + case 3240: + clk_sel_val = 0x0b00; + hsclk_div_val = 2; + break; + case 4320: + case 5400: + clk_sel_val = 0x0301; + hsclk_div_val = 0; + break; + case 8100: + clk_sel_val = 0x0200; + hsclk_div_val = 0; + break; + } + + cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, + CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val); + cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, + CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val); + + /* PMA lane configuration to deal with multi-link operation */ + for (i = 0; i < num_lanes; i++) + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i], + XCVR_DIAG_HSCLK_DIV, hsclk_div_val); +} + +static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy, + unsigned int lane) +{ + /* Per lane, refclock-dependent receiver detection setting */ + if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_RCVDET_ST_TMR, 0x0780); + else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_RCVDET_ST_TMR, 0x09C4); + + /* Writing Tx/Rx Power State Controllers registers */ + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_PSC_A0, 0x00FB); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_PSC_A2, 0x04AA); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + TX_PSC_A3, 0x04AA); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_A0, 0x0000); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_A2, 0x0000); + 
cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_A3, 0x0000); + + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_PSC_CAL, 0x0000); + + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_REE_GCSM1_CTRL, 0x0000); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_REE_GCSM2_CTRL, 0x0000); + cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], + RX_REE_PERGCSM_CTRL, 0x0000); + + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + XCVR_DIAG_BIDI_CTRL, 0x000F); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + XCVR_DIAG_PLLDRC_CTRL, 0x0001); + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], + XCVR_DIAG_HSCLK_SEL, 0x0000); +} + +static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, + u32 num_lanes, + enum phy_powerstate powerstate) +{ + /* Register value for power state for a single byte. */ + u32 value_part; + u32 value; + u32 mask; + u32 read_val; + u32 ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + switch (powerstate) { + case (POWERSTATE_A0): + value_part = 0x01U; + break; + case (POWERSTATE_A2): + value_part = 0x04U; + break; + default: + /* Powerstate A3 */ + value_part = 0x08U; + break; + } + + /* Select values of registers and mask, depending on enabled + * lane count. + */ + switch (num_lanes) { + /* lane 0 */ + case (1): + value = value_part; + mask = 0x0000003FU; + break; + /* lanes 0-1 */ + case (2): + value = (value_part + | (value_part << 8)); + mask = 0x00003F3FU; + break; + /* lanes 0-3, all */ + default: + value = (value_part + | (value_part << 8) + | (value_part << 16) + | (value_part << 24)); + mask = 0x3F3F3F3FU; + break; + } + + /* Set power state A<n>. */ + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value); + /* Wait, until PHY acknowledges power state completion. 
*/ + ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK, + read_val, (read_val & mask) == value, 0, + POLL_TIMEOUT_US); + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000); + ndelay(100); + + return ret; +} + +static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes) +{ + unsigned int read_val; + int ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + /* + * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the + * master lane + */ + ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK, + read_val, read_val & 1, + 0, POLL_TIMEOUT_US); + if (ret == -ETIMEDOUT) { + dev_err(cdns_phy->dev, + "timeout waiting for link PLL clock enable ack\n"); + return ret; + } + + ndelay(100); + + ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, + POWERSTATE_A2); + if (ret) + return ret; + + ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, + POWERSTATE_A0); + + return ret; +} + +static int cdns_torrent_phy_on(struct phy *phy) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + int ret; + + /* Take the PHY out of reset */ + ret = reset_control_deassert(cdns_phy->phy_rst); + if (ret) + return ret; + + /* Take the PHY lane group out of reset */ + return reset_control_deassert(inst->lnk_rst); +} + +static int cdns_torrent_phy_off(struct phy *phy) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + int ret; + + ret = reset_control_assert(cdns_phy->phy_rst); + if (ret) + return ret; + + return reset_control_assert(inst->lnk_rst); +} + +static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base, + u32 block_offset, + u8 reg_offset_shift, + const struct regmap_config *config) +{ + struct cdns_regmap_cdb_context *ctx; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->dev = dev; + ctx->base = base + block_offset; + ctx->reg_offset_shift = reg_offset_shift; + + return devm_regmap_init(dev, NULL, ctx, config); +} + +static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy) +{ + struct device *dev = cdns_phy->dev; + struct regmap_field *field; + struct regmap *regmap; + + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PLL_CFG reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pll_cfg = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_cmn_ctrl_2 = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_pll_raw_ctrl); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_PLL_RAW_CTRL reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_pll_raw_ctrl = field; + + regmap = cdns_phy->regmap_dptx_phy_reg; + field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl); + if (IS_ERR(field)) { + dev_err(dev, "PHY_RESET reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_reset_ctrl = field; + + return 0; +} + +static int cdns_regmap_init_torrent_dp(struct cdns_torrent_phy *cdns_phy, + void __iomem *sd_base, + void __iomem *base, + u8 block_offset_shift, + u8 
reg_offset_shift) +{ + struct device *dev = cdns_phy->dev; + struct regmap *regmap; + u32 block_offset; + int i; + + for (i = 0; i < MAX_NUM_LANES; i++) { + block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_tx_lane_cdb_config[i]); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init tx lane CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_tx_lane_cdb[i] = regmap; + + block_offset = TORRENT_RX_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_rx_lane_cdb_config[i]); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init rx lane CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_rx_lane_cdb[i] = regmap; + } + + block_offset = TORRENT_COMMON_CDB_OFFSET; + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_common_cdb_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_common_cdb = regmap; + + block_offset = TORRENT_PHY_PCS_COMMON_OFFSET(block_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_phy_pcs_cmn_cdb_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PCS common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_phy_pcs_common_cdb = regmap; + + block_offset = TORRENT_PHY_PMA_COMMON_OFFSET(block_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_phy_pma_cmn_cdb_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PMA common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_phy_pma_common_cdb = regmap; + + block_offset = TORRENT_DPTX_PHY_OFFSET; + regmap = cdns_regmap_init(dev, base, block_offset, + reg_offset_shift, + &cdns_torrent_dptx_phy_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init DPTX PHY regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_dptx_phy_reg = regmap; + + return 0; +} + +static int cdns_torrent_phy_probe(struct platform_device *pdev) +{ + struct resource *regs; + struct cdns_torrent_phy *cdns_phy; + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + const struct of_device_id *match; + struct cdns_torrent_data *data; + struct device_node *child; + int ret, subnodes, node = 0, i; + + /* Get init data for this PHY */ + match = of_match_device(cdns_torrent_phy_of_match, dev); + if (!match) + return -EINVAL; + + data = (struct cdns_torrent_data *)match->data; + + cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL); + if (!cdns_phy) + return -ENOMEM; + + dev_set_drvdata(dev, cdns_phy); + cdns_phy->dev = dev; + + cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0); + if (IS_ERR(cdns_phy->phy_rst)) { + dev_err(dev, "%s: failed to get reset\n", + dev->of_node->full_name); + return PTR_ERR(cdns_phy->phy_rst); + } + + cdns_phy->clk = devm_clk_get(dev, "refclk"); + if (IS_ERR(cdns_phy->clk)) { + dev_err(dev, "phy ref clock not found\n"); + return PTR_ERR(cdns_phy->clk); + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(cdns_phy->sd_base)) + return PTR_ERR(cdns_phy->sd_base); + + subnodes = of_get_available_child_count(dev->of_node); + if (subnodes == 0) { + dev_err(dev, "No 
available link subnodes found\n"); + return -EINVAL; + } else if (subnodes != 1) { + dev_err(dev, "Driver supports only one link subnode.\n"); + return -EINVAL; + } + + for_each_available_child_of_node(dev->of_node, child) { + struct phy *gphy; + + cdns_phy->phys[node].lnk_rst = + of_reset_control_array_get_exclusive(child); + if (IS_ERR(cdns_phy->phys[node].lnk_rst)) { + dev_err(dev, "%s: failed to get reset\n", + child->full_name); + ret = PTR_ERR(cdns_phy->phys[node].lnk_rst); + goto put_lnk_rst; + } + + if (of_property_read_u32(child, "reg", + &cdns_phy->phys[node].mlane)) { + dev_err(dev, "%s: No \"reg\"-property.\n", + child->full_name); + ret = -EINVAL; + goto put_child; + } + + if (cdns_phy->phys[node].mlane != 0) { + dev_err(dev, + "%s: Driver supports only lane-0 as master lane.\n", + child->full_name); + ret = -EINVAL; + goto put_child; + } + + if (of_property_read_u32(child, "cdns,phy-type", + &cdns_phy->phys[node].phy_type)) { + dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n", + child->full_name); + ret = -EINVAL; + goto put_child; + } + + cdns_phy->phys[node].num_lanes = DEFAULT_NUM_LANES; + of_property_read_u32(child, "cdns,num-lanes", + &cdns_phy->phys[node].num_lanes); + + if (cdns_phy->phys[node].phy_type == PHY_TYPE_DP) { + switch (cdns_phy->phys[node].num_lanes) { + case 1: + case 2: + case 4: + /* valid number of lanes */ + break; + default: + dev_err(dev, "unsupported number of lanes: %d\n", + cdns_phy->phys[node].num_lanes); + ret = -EINVAL; + goto put_child; + } + + cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE; + of_property_read_u32(child, "cdns,max-bit-rate", + &cdns_phy->max_bit_rate); + + switch (cdns_phy->max_bit_rate) { + case 1620: + case 2160: + case 2430: + case 2700: + case 3240: + case 4320: + case 5400: + case 8100: + /* valid bit rate */ + break; + default: + dev_err(dev, "unsupported max bit rate: %dMbps\n", + cdns_phy->max_bit_rate); + ret = -EINVAL; + goto put_child; + } + + /* DPTX registers */ + regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); + cdns_phy->base = devm_ioremap_resource(&pdev->dev, + regs); + if (IS_ERR(cdns_phy->base)) { + ret = PTR_ERR(cdns_phy->base); + goto put_child; + } + + gphy = devm_phy_create(dev, child, + &cdns_torrent_phy_ops); + if (IS_ERR(gphy)) { + ret = PTR_ERR(gphy); + goto put_child; + } + + dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n", + cdns_phy->phys[node].num_lanes, + cdns_phy->max_bit_rate / 1000, + cdns_phy->max_bit_rate % 1000); + } else { + dev_err(dev, "Driver supports only PHY_TYPE_DP\n"); + ret = -ENOTSUPP; + goto put_child; + } + cdns_phy->phys[node].phy = gphy; + phy_set_drvdata(gphy, &cdns_phy->phys[node]); + + node++; + } + cdns_phy->nsubnodes = node; + + ret = cdns_regmap_init_torrent_dp(cdns_phy, cdns_phy->sd_base, + cdns_phy->base, + data->block_offset_shift, + data->reg_offset_shift); + if (ret) + goto put_lnk_rst; + + ret = cdns_regfield_init(cdns_phy); + if (ret) + goto put_lnk_rst; + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) { + ret = PTR_ERR(phy_provider); + goto put_lnk_rst; + } + + return 0; + +put_child: + node++; +put_lnk_rst: + for (i = 0; i < node; i++) + reset_control_put(cdns_phy->phys[i].lnk_rst); + of_node_put(child); + return ret; +} + +static int cdns_torrent_phy_remove(struct platform_device *pdev) +{ + struct cdns_torrent_phy *cdns_phy = platform_get_drvdata(pdev); + int i; + + reset_control_assert(cdns_phy->phy_rst); + for (i = 0; i < cdns_phy->nsubnodes; i++) { + 
reset_control_assert(cdns_phy->phys[i].lnk_rst); + reset_control_put(cdns_phy->phys[i].lnk_rst); + } + + return 0; +} + +static const struct cdns_torrent_data cdns_map_torrent = { + .block_offset_shift = 0x2, + .reg_offset_shift = 0x2, +}; + +static const struct cdns_torrent_data ti_j721e_map_torrent = { + .block_offset_shift = 0x0, + .reg_offset_shift = 0x1, +}; + +static const struct of_device_id cdns_torrent_phy_of_match[] = { + { + .compatible = "cdns,torrent-phy", + .data = &cdns_map_torrent, + }, + { + .compatible = "ti,j721e-serdes-10g", + .data = &ti_j721e_map_torrent, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match); + +static struct platform_driver cdns_torrent_phy_driver = { + .probe = cdns_torrent_phy_probe, + .remove = cdns_torrent_phy_remove, + .driver = { + .name = "cdns-torrent-phy", + .of_match_table = cdns_torrent_phy_of_match, + } +}; +module_platform_driver(cdns_torrent_phy_driver); + +MODULE_AUTHOR("Cadence Design Systems, Inc."); +MODULE_DESCRIPTION("Cadence Torrent PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index cb2ed3b25068..cdbcc49f7115 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -43,6 +43,8 @@ #define PA0_RG_USB20_INTR_EN BIT(5) #define U3P_USBPHYACR1 0x004 +#define PA1_RG_INTR_CAL GENMASK(23, 19) +#define PA1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19) #define PA1_RG_VRT_SEL GENMASK(14, 12) #define PA1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12) #define PA1_RG_TERM_SEL GENMASK(10, 8) @@ -60,6 +62,8 @@ #define U3P_USBPHYACR6 0x018 #define PA6_RG_U2_BC11_SW_EN BIT(23) #define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20) +#define PA6_RG_U2_DISCTH GENMASK(7, 4) +#define PA6_RG_U2_DISCTH_VAL(x) ((0xf & (x)) << 4) #define PA6_RG_U2_SQTH GENMASK(3, 0) #define PA6_RG_U2_SQTH_VAL(x) (0xf & (x)) @@ -294,20 +298,21 @@ struct mtk_phy_instance { struct u2phy_banks u2_banks; struct u3phy_banks u3_banks; }; - struct clk *ref_clk; /* reference clock of anolog phy */ + struct clk *ref_clk; /* reference clock of (digital) phy */ + struct clk *da_ref_clk; /* reference clock of analog phy */ u32 index; u8 type; int eye_src; int eye_vrt; int eye_term; + int intr; + int discth; bool bc12_en; }; struct mtk_tphy { struct device *dev; void __iomem *sif_base; /* only shared sif */ - /* deprecated, use @ref_clk instead in phy instance */ - struct clk *u3phya_ref; /* reference clock of usb3 anolog phy */ const struct mtk_phy_pdata *pdata; struct mtk_phy_instance **phys; int nphys; @@ -850,9 +855,14 @@ static void phy_parse_property(struct mtk_tphy *tphy, &instance->eye_vrt); device_property_read_u32(dev, "mediatek,eye-term", &instance->eye_term); - dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d\n", + device_property_read_u32(dev, "mediatek,intr", + &instance->intr); + device_property_read_u32(dev, "mediatek,discth", + &instance->discth); + dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d, intr:%d, disc:%d\n", instance->bc12_en, instance->eye_src, - instance->eye_vrt, instance->eye_term); + instance->eye_vrt, instance->eye_term, + instance->intr, instance->discth); } static void u2_phy_props_set(struct mtk_tphy *tphy, @@ -888,6 +898,20 @@ static void u2_phy_props_set(struct mtk_tphy *tphy, tmp |= PA1_RG_TERM_SEL_VAL(instance->eye_term); writel(tmp, com + U3P_USBPHYACR1); } + + if (instance->intr) { + tmp = readl(com + U3P_USBPHYACR1); + tmp &= ~PA1_RG_INTR_CAL; + tmp |= PA1_RG_INTR_CAL_VAL(instance->intr); + writel(tmp, com + U3P_USBPHYACR1); + } + + if 
(instance->discth) { + tmp = readl(com + U3P_USBPHYACR6); + tmp &= ~PA6_RG_U2_DISCTH; + tmp |= PA6_RG_U2_DISCTH_VAL(instance->discth); + writel(tmp, com + U3P_USBPHYACR6); + } } static int mtk_phy_init(struct phy *phy) @@ -896,15 +920,16 @@ static int mtk_phy_init(struct phy *phy) struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent); int ret; - ret = clk_prepare_enable(tphy->u3phya_ref); + ret = clk_prepare_enable(instance->ref_clk); if (ret) { - dev_err(tphy->dev, "failed to enable u3phya_ref\n"); + dev_err(tphy->dev, "failed to enable ref_clk\n"); return ret; } - ret = clk_prepare_enable(instance->ref_clk); + ret = clk_prepare_enable(instance->da_ref_clk); if (ret) { - dev_err(tphy->dev, "failed to enable ref_clk\n"); + dev_err(tphy->dev, "failed to enable da_ref\n"); + clk_disable_unprepare(instance->ref_clk); return ret; } @@ -967,7 +992,7 @@ static int mtk_phy_exit(struct phy *phy) u2_phy_instance_exit(tphy, instance); clk_disable_unprepare(instance->ref_clk); - clk_disable_unprepare(tphy->u3phya_ref); + clk_disable_unprepare(instance->da_ref_clk); return 0; } @@ -1102,11 +1127,6 @@ static int mtk_tphy_probe(struct platform_device *pdev) } } - /* it's deprecated, make it optional for backward compatibility */ - tphy->u3phya_ref = devm_clk_get_optional(dev, "u3phya_ref"); - if (IS_ERR(tphy->u3phya_ref)) - return PTR_ERR(tphy->u3phya_ref); - tphy->src_ref_clk = U3P_REF_CLK; tphy->src_coef = U3P_SLEW_RATE_COEF; /* update parameters of slew rate calibrate if exist */ @@ -1153,16 +1173,20 @@ static int mtk_tphy_probe(struct platform_device *pdev) phy_set_drvdata(phy, instance); port++; - /* if deprecated clock is provided, ignore instance's one */ - if (tphy->u3phya_ref) - continue; - - instance->ref_clk = devm_clk_get(&phy->dev, "ref"); + instance->ref_clk = devm_clk_get_optional(&phy->dev, "ref"); if (IS_ERR(instance->ref_clk)) { dev_err(dev, "failed to get ref_clk(id-%d)\n", port); retval = PTR_ERR(instance->ref_clk); goto put_child; } + + instance->da_ref_clk = + devm_clk_get_optional(&phy->dev, "da_ref"); + if (IS_ERR(instance->da_ref_clk)) { + dev_err(dev, "failed to get da_ref_clk(id-%d)\n", port); + retval = PTR_ERR(instance->da_ref_clk); + goto put_child; + } } provider = devm_of_phy_provider_register(dev, mtk_phy_xlate); diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig index e46824da29f6..98674ed094d9 100644 --- a/drivers/phy/qualcomm/Kconfig +++ b/drivers/phy/qualcomm/Kconfig @@ -91,3 +91,23 @@ config PHY_QCOM_USB_HSIC select GENERIC_PHY help Support for the USB HSIC ULPI compliant PHY on QCOM chipsets. + +config PHY_QCOM_USB_HS_28NM + tristate "Qualcomm 28nm High-Speed PHY" + depends on ARCH_QCOM || COMPILE_TEST + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in + select GENERIC_PHY + help + Enable this to support the Qualcomm Synopsys DesignWare Core 28nm + High-Speed PHY driver. This driver supports the Hi-Speed PHY which + is usually paired with either the ChipIdea or Synopsys DWC3 USB + IPs on MSM SOCs. + +config PHY_QCOM_USB_SS + tristate "Qualcomm USB Super-Speed PHY driver" + depends on ARCH_QCOM || COMPILE_TEST + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in + select GENERIC_PHY + help + Enable this to support the Super-Speed USB transceiver on various + Qualcomm chipsets. 
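For context on how the two Qualcomm USB PHYs selected by the Kconfig entries above get used, the sketch below shows a typical bring-up sequence from a consumer (controller) driver's point of view via the generic PHY framework. It is a minimal illustration under assumptions, not code from this series; in particular the "usb-phy" lookup name and the function name are made-up examples.

/*
 * Illustrative sketch only -- not part of this merge.  A USB controller
 * driver would usually bring one of the PHYs above up through the
 * generic PHY framework roughly like this; the "usb-phy" con_id is an
 * assumed example name.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int example_enable_usb_phy(struct device *dev, struct phy **out)
{
	struct phy *phy;
	int ret;

	/* Look up the PHY referenced by this controller's DT node */
	phy = devm_phy_get(dev, "usb-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* Invokes the PHY driver's init op (clocks, register programming) */
	ret = phy_init(phy);
	if (ret)
		return ret;

	/* Invokes the PHY driver's power_on op */
	ret = phy_power_on(phy);
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	*out = phy;
	return 0;
}

The matching teardown path would call phy_power_off() followed by phy_exit().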
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile index 283251d6a5d9..1f14aeacbd70 100644 --- a/drivers/phy/qualcomm/Makefile +++ b/drivers/phy/qualcomm/Makefile @@ -10,3 +10,5 @@ obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o +obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o +obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 7db2a94f7a99..c190406246ab 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -121,6 +121,11 @@ enum qphy_reg_layout { QPHY_PCS_LFPS_RXTERM_IRQ_STATUS, }; +static const unsigned int msm8996_ufsphy_regs_layout[] = { + [QPHY_START_CTRL] = 0x00, + [QPHY_PCS_READY_STATUS] = 0x168, +}; + static const unsigned int pciephy_regs_layout[] = { [QPHY_COM_SW_RESET] = 0x400, [QPHY_COM_POWER_DOWN_CONTROL] = 0x404, @@ -160,6 +165,18 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = { [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170, }; +static const unsigned int sdm845_qmp_pciephy_regs_layout[] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x174, +}; + +static const unsigned int sdm845_qhp_pciephy_regs_layout[] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x08, + [QPHY_PCS_STATUS] = 0x2ac, +}; + static const unsigned int sdm845_ufsphy_regs_layout[] = { [QPHY_START_CTRL] = 0x00, [QPHY_PCS_READY_STATUS] = 0x160, @@ -331,6 +348,75 @@ static const struct qmp_phy_init_tbl msm8998_pcie_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03), }; +static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xd7), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x10), + QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x54), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff), + 
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00), +}; + +static const struct qmp_phy_init_tbl msm8996_ufs_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45), + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x02), +}; + +static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x00), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5b), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xff), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xff), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E), +}; + static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14), QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), @@ -481,6 +567,229 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3), }; +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x007), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00), + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_01, 0x59), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xbb), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x0d), + + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG4, 0x00), +}; + +static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_misc_tbl[] = { + 
QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2, 0x52), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2, 0x10), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4, 0x1a), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5, 0x06), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1, 0x00), +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL, 0x27), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1, 0xde), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1, 0x4c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1, 0x06), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN, 0x18), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_ENABLE1, 0xb0), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0, 0x8c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0, 0x20), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1, 0x14), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_RESTRIM_CTRL2, 0x05), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VCO_TUNE_MAP, 0x10), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_SELECT, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_HSCLK_SEL1, 0x30), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORE_CLK_EN, 0x73), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL, 0x15), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_MODE, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV1, 0x22), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV2, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BGV_TRIM, 0x20), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BG_CTRL, 0x07), +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL0, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_TAP_EN, 0x0d), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TX_BAND_MODE, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_LANE_MODE, 0x1a), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PARALLEL_RATE, 0x2f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE0, 0x09), + 
QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE1, 0x09), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE2, 0x1b), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0, 0x31), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1, 0x31), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2, 0x03), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE, 0x02), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CGA_THRESH_DFE, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXENGINE_EN0, 0x12), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME, 0x25), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME, 0x05), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_GAIN, 0x26), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_GAIN, 0x12), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_GAIN, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_OFFSET_GAIN, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PRE_GAIN, 0x09), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_INTVAL, 0x15), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EDAC_INITVAL, 0x28), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB0, 0x7f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB1, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_CTRL, 0x70), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0, 0x8b), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1, 0x08), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2, 0x0a), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0, 0x03), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_BAND, 0x02), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0, 0x5c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1, 0x3e), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_ENABLES, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_CNTRL, 0xa0), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL, 0x08), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DCC_GAIN, 0x01), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_EN_SIGNAL, 0xc3), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL, 0x00), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0, 0xbc), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TS0_TIMER, 0x7f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE, 0x15), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL2, 0x0f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET, 0x04), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_INITVAL, 0x20), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RSM_START, 0x01), +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_rx_tbl[] = { +}; + +static const struct qmp_phy_init_tbl sdm845_qhp_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG, 0x3f), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG, 0x50), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB, 0x19), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB, 0x07), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB, 0x17), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB, 0x09), + QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5, 0x9f), +}; + static const 
struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), @@ -988,6 +1297,8 @@ struct qmp_phy_cfg { int rx_tbl_num; const struct qmp_phy_init_tbl *pcs_tbl; int pcs_tbl_num; + const struct qmp_phy_init_tbl *pcs_misc_tbl; + int pcs_misc_tbl_num; /* clock ids to be requested */ const char * const *clk_list; @@ -1122,10 +1433,18 @@ static const char * const msm8996_phy_clk_l[] = { "aux", "cfg_ahb", "ref", }; +static const char * const msm8996_ufs_phy_clk_l[] = { + "ref", +}; + static const char * const qmp_v3_phy_clk_l[] = { "aux", "cfg_ahb", "ref", "com_aux", }; +static const char * const sdm845_pciephy_clk_l[] = { + "aux", "cfg_ahb", "ref", "refgen", +}; + static const char * const sdm845_ufs_phy_clk_l[] = { "ref", "ref_aux", }; @@ -1139,6 +1458,10 @@ static const char * const msm8996_usb3phy_reset_l[] = { "phy", "common", }; +static const char * const sdm845_pciephy_reset_l[] = { + "phy", +}; + /* list of regulators */ static const char * const qmp_phy_vreg_l[] = { "vdda-phy", "vdda-pll", @@ -1175,6 +1498,31 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = { .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; +static const struct qmp_phy_cfg msm8996_ufs_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 1, + + .serdes_tbl = msm8996_ufs_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl), + .tx_tbl = msm8996_ufs_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8996_ufs_tx_tbl), + .rx_tbl = msm8996_ufs_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(msm8996_ufs_rx_tbl), + + .clk_list = msm8996_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l), + + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + + .regs = msm8996_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + + .no_pcs_sw_reset = true, +}; + static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -1234,6 +1582,64 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .pwrdn_delay_max = 1005, /* us */ }; +static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 1, + + .serdes_tbl = sdm845_qmp_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl), + .tx_tbl = sdm845_qmp_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl), + .rx_tbl = sdm845_qmp_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl), + .pcs_tbl = sdm845_qmp_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl), + .pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sdm845_qmp_pciephy_regs_layout, + + .start_ctrl = PCS_START | SERDES_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + +static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 1, + + .serdes_tbl = sdm845_qhp_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl), + .tx_tbl = sdm845_qhp_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl), + .rx_tbl = sdm845_qhp_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl), + .pcs_tbl = 
sdm845_qhp_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sdm845_qhp_pciephy_regs_layout, + + .start_ctrl = PCS_START | SERDES_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -1563,6 +1969,7 @@ static int qcom_qmp_phy_enable(struct phy *phy) void __iomem *tx = qphy->tx; void __iomem *rx = qphy->rx; void __iomem *pcs = qphy->pcs; + void __iomem *pcs_misc = qphy->pcs_misc; void __iomem *dp_com = qmp->dp_com; void __iomem *status; unsigned int mask, val, ready; @@ -1633,6 +2040,9 @@ static int qcom_qmp_phy_enable(struct phy *phy) if (ret) goto err_lane_rst; + qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, + cfg->pcs_misc_tbl_num); + /* * Pull out PHY from POWER DOWN state. * This is active low enable signal to power-down PHY. @@ -1967,7 +2377,7 @@ static const struct phy_ops qcom_qmp_phy_gen_ops = { .owner = THIS_MODULE, }; -static const struct phy_ops qcom_qmp_ufs_ops = { +static const struct phy_ops qcom_qmp_pcie_ufs_ops = { .power_on = qcom_qmp_phy_enable, .power_off = qcom_qmp_phy_disable, .set_mode = qcom_qmp_phy_set_mode, @@ -2067,8 +2477,8 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) } } - if (qmp->cfg->type == PHY_TYPE_UFS) - ops = &qcom_qmp_ufs_ops; + if (qmp->cfg->type == PHY_TYPE_UFS || qmp->cfg->type == PHY_TYPE_PCIE) + ops = &qcom_qmp_pcie_ufs_ops; generic_phy = devm_phy_create(dev, np, ops); if (IS_ERR(generic_phy)) { @@ -2091,6 +2501,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,msm8996-qmp-pcie-phy", .data = &msm8996_pciephy_cfg, }, { + .compatible = "qcom,msm8996-qmp-ufs-phy", + .data = &msm8996_ufs_cfg, + }, { .compatible = "qcom,msm8996-qmp-usb3-phy", .data = &msm8996_usb3phy_cfg, }, { @@ -2103,6 +2516,12 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,ipq8074-qmp-pcie-phy", .data = &ipq8074_pciephy_cfg, }, { + .compatible = "qcom,sdm845-qhp-pcie-phy", + .data = &sdm845_qhp_pciephy_cfg, + }, { + .compatible = "qcom,sdm845-qmp-pcie-phy", + .data = &sdm845_qmp_pciephy_cfg, + }, { .compatible = "qcom,sdm845-qmp-usb3-phy", .data = &qmp_v3_usb3phy_cfg, }, { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 90f793c2293d..dece0e67704b 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -409,4 +409,118 @@ #define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8 #define QPHY_V4_MULTI_LANE_CTRL1 0x1e0 +/* PCIE GEN3 COM registers */ +#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14 +#define PCIE_GEN3_QHP_COM_SSC_PER1 0x20 +#define PCIE_GEN3_QHP_COM_SSC_PER2 0x24 +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1 0x28 +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2 0x2c +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1 0x34 +#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1 0x38 +#define PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN 0x54 +#define PCIE_GEN3_QHP_COM_CLK_ENABLE1 0x58 +#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0 0x6c +#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0 0x70 +#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1 0x78 +#define 
PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1 0x7c +#define PCIE_GEN3_QHP_COM_BGV_TRIM 0x98 +#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE0 0xb4 +#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE1 0xb8 +#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0 0xc0 +#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1 0xc4 +#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0 0xcc +#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1 0xd0 +#define PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL 0xdc +#define PCIE_GEN3_QHP_COM_RESTRIM_CTRL2 0xf0 +#define PCIE_GEN3_QHP_COM_LOCK_CMP_EN 0xf8 +#define PCIE_GEN3_QHP_COM_DEC_START_MODE0 0x100 +#define PCIE_GEN3_QHP_COM_DEC_START_MODE1 0x108 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0 0x11c +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0 0x120 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0 0x124 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1 0x128 +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1 0x12c +#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1 0x130 +#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0 0x150 +#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1 0x158 +#define PCIE_GEN3_QHP_COM_VCO_TUNE_MAP 0x178 +#define PCIE_GEN3_QHP_COM_BG_CTRL 0x1c8 +#define PCIE_GEN3_QHP_COM_CLK_SELECT 0x1cc +#define PCIE_GEN3_QHP_COM_HSCLK_SEL1 0x1d0 +#define PCIE_GEN3_QHP_COM_CORECLK_DIV 0x1e0 +#define PCIE_GEN3_QHP_COM_CORE_CLK_EN 0x1e8 +#define PCIE_GEN3_QHP_COM_CMN_CONFIG 0x1f0 +#define PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL 0x1fc +#define PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1 0x21c +#define PCIE_GEN3_QHP_COM_CMN_MODE 0x224 +#define PCIE_GEN3_QHP_COM_VREGCLK_DIV1 0x228 +#define PCIE_GEN3_QHP_COM_VREGCLK_DIV2 0x22c + +/* PCIE GEN3 QHP Lane registers */ +#define PCIE_GEN3_QHP_L0_DRVR_CTRL0 0xc +#define PCIE_GEN3_QHP_L0_DRVR_CTRL1 0x10 +#define PCIE_GEN3_QHP_L0_DRVR_CTRL2 0x14 +#define PCIE_GEN3_QHP_L0_DRVR_TAP_EN 0x18 +#define PCIE_GEN3_QHP_L0_TX_BAND_MODE 0x60 +#define PCIE_GEN3_QHP_L0_LANE_MODE 0x64 +#define PCIE_GEN3_QHP_L0_PARALLEL_RATE 0x7c +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE0 0xc0 +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE1 0xc4 +#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE2 0xc8 +#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1 0xd0 +#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2 0xd4 +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0 0xd8 +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1 0xdc +#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2 0xe0 +#define PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE 0xfc +#define PCIE_GEN3_QHP_L0_CGA_THRESH_DFE 0x100 +#define PCIE_GEN3_QHP_L0_RXENGINE_EN0 0x108 +#define PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME 0x114 +#define PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME 0x118 +#define PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME 0x11c +#define PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME 0x120 +#define PCIE_GEN3_QHP_L0_VGA_GAIN 0x124 +#define PCIE_GEN3_QHP_L0_DFE_GAIN 0x128 +#define PCIE_GEN3_QHP_L0_EQ_GAIN 0x130 +#define PCIE_GEN3_QHP_L0_OFFSET_GAIN 0x134 +#define PCIE_GEN3_QHP_L0_PRE_GAIN 0x138 +#define PCIE_GEN3_QHP_L0_VGA_INITVAL 0x13c +#define PCIE_GEN3_QHP_L0_EQ_INTVAL 0x154 +#define PCIE_GEN3_QHP_L0_EDAC_INITVAL 0x160 +#define PCIE_GEN3_QHP_L0_RXEQ_INITB0 0x168 +#define PCIE_GEN3_QHP_L0_RXEQ_INITB1 0x16c +#define PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1 0x178 +#define PCIE_GEN3_QHP_L0_RXEQ_CTRL 0x180 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0 0x184 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1 0x188 +#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2 0x18c +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0 0x190 +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1 0x194 +#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2 0x198 +#define PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG 0x19c +#define PCIE_GEN3_QHP_L0_RX_BAND 0x1a4 +#define 
PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0 0x1c0 +#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1 0x1c4 +#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2 0x1c8 +#define PCIE_GEN3_QHP_L0_SIGDET_ENABLES 0x230 +#define PCIE_GEN3_QHP_L0_SIGDET_CNTRL 0x234 +#define PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL 0x238 +#define PCIE_GEN3_QHP_L0_DCC_GAIN 0x2a4 +#define PCIE_GEN3_QHP_L0_RSM_START 0x2a8 +#define PCIE_GEN3_QHP_L0_RX_EN_SIGNAL 0x2ac +#define PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL 0x2b0 +#define PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0 0x2b8 +#define PCIE_GEN3_QHP_L0_TS0_TIMER 0x2c0 +#define PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE 0x2c4 +#define PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET 0x2cc + +/* PCIE GEN3 PCS registers */ +#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB 0x2c +#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB 0x40 +#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB 0x54 +#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB 0x68 +#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG 0x15c +#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c +#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174 + #endif diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index bf94a52d3087..3708d43b7508 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved. */ #include <linux/clk.h> @@ -66,6 +66,14 @@ #define IMP_RES_OFFSET_MASK GENMASK(5, 0) #define IMP_RES_OFFSET_SHIFT 0x0 +/* QUSB2PHY_PLL_BIAS_CONTROL_2 register bits */ +#define BIAS_CTRL2_RES_OFFSET_MASK GENMASK(5, 0) +#define BIAS_CTRL2_RES_OFFSET_SHIFT 0x0 + +/* QUSB2PHY_CHG_CONTROL_2 register bits */ +#define CHG_CTRL2_OFFSET_MASK GENMASK(5, 4) +#define CHG_CTRL2_OFFSET_SHIFT 0x4 + /* QUSB2PHY_PORT_TUNE1 register bits */ #define HSTX_TRIM_MASK GENMASK(7, 4) #define HSTX_TRIM_SHIFT 0x4 @@ -73,6 +81,10 @@ #define PREEMPHASIS_EN_MASK GENMASK(1, 0) #define PREEMPHASIS_EN_SHIFT 0x0 +/* QUSB2PHY_PORT_TUNE2 register bits */ +#define HSDISC_TRIM_MASK GENMASK(1, 0) +#define HSDISC_TRIM_SHIFT 0x0 + #define QUSB2PHY_PLL_ANALOG_CONTROLS_TWO 0x04 #define QUSB2PHY_PLL_CLOCK_INVERTERS 0x18c #define QUSB2PHY_PLL_CMODE 0x2c @@ -177,7 +189,7 @@ static const struct qusb2_phy_init_tbl msm8998_init_tbl[] = { QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_DIGITAL_TIMERS_TWO, 0x19), }; -static const unsigned int sdm845_regs_layout[] = { +static const unsigned int qusb2_v2_regs_layout[] = { [QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8, [QUSB2PHY_PLL_STATUS] = 0x1a0, [QUSB2PHY_PORT_TUNE1] = 0x240, @@ -191,7 +203,7 @@ static const unsigned int sdm845_regs_layout[] = { [QUSB2PHY_INTR_CTRL] = 0x230, }; -static const struct qusb2_phy_init_tbl sdm845_init_tbl[] = { +static const struct qusb2_phy_init_tbl qusb2_v2_init_tbl[] = { QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_ANALOG_CONTROLS_TWO, 0x03), QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CLOCK_INVERTERS, 0x7c), QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CMODE, 0x80), @@ -258,10 +270,10 @@ static const struct qusb2_phy_cfg msm8998_phy_cfg = { .update_tune1_with_efuse = true, }; -static const struct qusb2_phy_cfg sdm845_phy_cfg = { - .tbl = sdm845_init_tbl, - .tbl_num = ARRAY_SIZE(sdm845_init_tbl), - .regs = sdm845_regs_layout, +static const struct qusb2_phy_cfg qusb2_v2_phy_cfg = { + .tbl = qusb2_v2_init_tbl, + .tbl_num = ARRAY_SIZE(qusb2_v2_init_tbl), + .regs = qusb2_v2_regs_layout, .disable_ctrl = (PWR_CTRL1_VREF_SUPPLY_TRIM | PWR_CTRL1_CLAMP_N_EN | POWER_DOWN), @@ 
-277,6 +289,34 @@ static const char * const qusb2_phy_vreg_names[] = { #define QUSB2_NUM_VREGS ARRAY_SIZE(qusb2_phy_vreg_names) +/* struct override_param - structure holding qusb2 v2 phy overriding param + * set override true if the device tree property exists and read and assign + * to value + */ +struct override_param { + bool override; + u8 value; +}; + +/*struct override_params - structure holding qusb2 v2 phy overriding params + * @imp_res_offset: rescode offset to be updated in IMP_CTRL1 register + * @hstx_trim: HSTX_TRIM to be updated in TUNE1 register + * @preemphasis: Amplitude Pre-Emphasis to be updated in TUNE1 register + * @preemphasis_width: half/full-width Pre-Emphasis updated via TUNE1 + * @bias_ctrl: bias ctrl to be updated in BIAS_CONTROL_2 register + * @charge_ctrl: charge ctrl to be updated in CHG_CTRL2 register + * @hsdisc_trim: disconnect threshold to be updated in TUNE2 register + */ +struct override_params { + struct override_param imp_res_offset; + struct override_param hstx_trim; + struct override_param preemphasis; + struct override_param preemphasis_width; + struct override_param bias_ctrl; + struct override_param charge_ctrl; + struct override_param hsdisc_trim; +}; + /** * struct qusb2_phy - structure holding qusb2 phy attributes * @@ -292,14 +332,7 @@ static const char * const qusb2_phy_vreg_names[] = { * @tcsr: TCSR syscon register map * @cell: nvmem cell containing phy tuning value * - * @override_imp_res_offset: PHY should use different rescode offset - * @imp_res_offset_value: rescode offset to be updated in IMP_CTRL1 register - * @override_hstx_trim: PHY should use different HSTX o/p current value - * @hstx_trim_value: HSTX_TRIM value to be updated in TUNE1 register - * @override_preemphasis: PHY should use different pre-amphasis amplitude - * @preemphasis_level: Amplitude Pre-Emphasis to be updated in TUNE1 register - * @override_preemphasis_width: PHY should use different pre-emphasis duration - * @preemphasis_width: half/full-width Pre-Emphasis updated via TUNE1 + * @overrides: pointer to structure for all overriding tuning params * * @cfg: phy config data * @has_se_clk_scheme: indicate if PHY has single-ended ref clock scheme @@ -319,14 +352,7 @@ struct qusb2_phy { struct regmap *tcsr; struct nvmem_cell *cell; - bool override_imp_res_offset; - u8 imp_res_offset_value; - bool override_hstx_trim; - u8 hstx_trim_value; - bool override_preemphasis; - u8 preemphasis_level; - bool override_preemphasis_width; - u8 preemphasis_width; + struct override_params overrides; const struct qusb2_phy_cfg *cfg; bool has_se_clk_scheme; @@ -394,24 +420,35 @@ void qcom_qusb2_phy_configure(void __iomem *base, static void qusb2_phy_override_phy_params(struct qusb2_phy *qphy) { const struct qusb2_phy_cfg *cfg = qphy->cfg; + struct override_params *or = &qphy->overrides; - if (qphy->override_imp_res_offset) + if (or->imp_res_offset.override) qusb2_write_mask(qphy->base, QUSB2PHY_IMP_CTRL1, - qphy->imp_res_offset_value << IMP_RES_OFFSET_SHIFT, + or->imp_res_offset.value << IMP_RES_OFFSET_SHIFT, IMP_RES_OFFSET_MASK); - if (qphy->override_hstx_trim) + if (or->bias_ctrl.override) + qusb2_write_mask(qphy->base, QUSB2PHY_PLL_BIAS_CONTROL_2, + or->bias_ctrl.value << BIAS_CTRL2_RES_OFFSET_SHIFT, + BIAS_CTRL2_RES_OFFSET_MASK); + + if (or->charge_ctrl.override) + qusb2_write_mask(qphy->base, QUSB2PHY_CHG_CTRL2, + or->charge_ctrl.value << CHG_CTRL2_OFFSET_SHIFT, + CHG_CTRL2_OFFSET_MASK); + + if (or->hstx_trim.override) qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], - 
qphy->hstx_trim_value << HSTX_TRIM_SHIFT, + or->hstx_trim.value << HSTX_TRIM_SHIFT, HSTX_TRIM_MASK); - if (qphy->override_preemphasis) + if (or->preemphasis.override) qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], - qphy->preemphasis_level << PREEMPHASIS_EN_SHIFT, + or->preemphasis.value << PREEMPHASIS_EN_SHIFT, PREEMPHASIS_EN_MASK); - if (qphy->override_preemphasis_width) { - if (qphy->preemphasis_width == + if (or->preemphasis_width.override) { + if (or->preemphasis_width.value == QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT) qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], @@ -421,6 +458,11 @@ static void qusb2_phy_override_phy_params(struct qusb2_phy *qphy) cfg->regs[QUSB2PHY_PORT_TUNE1], PREEMPH_WIDTH_HALF_BIT); } + + if (or->hsdisc_trim.override) + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], + or->hsdisc_trim.value << HSDISC_TRIM_SHIFT, + HSDISC_TRIM_MASK); } /* @@ -774,8 +816,8 @@ static const struct of_device_id qusb2_phy_of_match_table[] = { .compatible = "qcom,msm8998-qusb2-phy", .data = &msm8998_phy_cfg, }, { - .compatible = "qcom,sdm845-qusb2-phy", - .data = &sdm845_phy_cfg, + .compatible = "qcom,qusb2-v2-phy", + .data = &qusb2_v2_phy_cfg, }, { }, }; @@ -796,10 +838,12 @@ static int qusb2_phy_probe(struct platform_device *pdev) int ret, i; int num; u32 value; + struct override_params *or; qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); if (!qphy) return -ENOMEM; + or = &qphy->overrides; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); qphy->base = devm_ioremap_resource(dev, res); @@ -864,26 +908,44 @@ static int qusb2_phy_probe(struct platform_device *pdev) if (!of_property_read_u32(dev->of_node, "qcom,imp-res-offset-value", &value)) { - qphy->imp_res_offset_value = (u8)value; - qphy->override_imp_res_offset = true; + or->imp_res_offset.value = (u8)value; + or->imp_res_offset.override = true; + } + + if (!of_property_read_u32(dev->of_node, "qcom,bias-ctrl-value", + &value)) { + or->bias_ctrl.value = (u8)value; + or->bias_ctrl.override = true; + } + + if (!of_property_read_u32(dev->of_node, "qcom,charge-ctrl-value", + &value)) { + or->charge_ctrl.value = (u8)value; + or->charge_ctrl.override = true; } if (!of_property_read_u32(dev->of_node, "qcom,hstx-trim-value", &value)) { - qphy->hstx_trim_value = (u8)value; - qphy->override_hstx_trim = true; + or->hstx_trim.value = (u8)value; + or->hstx_trim.override = true; } if (!of_property_read_u32(dev->of_node, "qcom,preemphasis-level", &value)) { - qphy->preemphasis_level = (u8)value; - qphy->override_preemphasis = true; + or->preemphasis.value = (u8)value; + or->preemphasis.override = true; } if (!of_property_read_u32(dev->of_node, "qcom,preemphasis-width", &value)) { - qphy->preemphasis_width = (u8)value; - qphy->override_preemphasis_width = true; + or->preemphasis_width.value = (u8)value; + or->preemphasis_width.override = true; + } + + if (!of_property_read_u32(dev->of_node, "qcom,hsdisc-trim-value", + &value)) { + or->hsdisc_trim.value = (u8)value; + or->hsdisc_trim.override = true; } pm_runtime_set_active(dev); diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c new file mode 100644 index 000000000000..d998e65c89c8 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2009-2018, Linux Foundation. All rights reserved. 
+ * Copyright (c) 2018-2020, Linaro Limited + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/slab.h> + +/* PHY register and bit definitions */ +#define PHY_CTRL_COMMON0 0x078 +#define SIDDQ BIT(2) +#define PHY_IRQ_CMD 0x0d0 +#define PHY_INTR_MASK0 0x0d4 +#define PHY_INTR_CLEAR0 0x0dc +#define DPDM_MASK 0x1e +#define DP_1_0 BIT(4) +#define DP_0_1 BIT(3) +#define DM_1_0 BIT(2) +#define DM_0_1 BIT(1) + +enum hsphy_voltage { + VOL_NONE, + VOL_MIN, + VOL_MAX, + VOL_NUM, +}; + +enum hsphy_vreg { + VDD, + VDDA_1P8, + VDDA_3P3, + VREG_NUM, +}; + +struct hsphy_init_seq { + int offset; + int val; + int delay; +}; + +struct hsphy_data { + const struct hsphy_init_seq *init_seq; + unsigned int init_seq_num; +}; + +struct hsphy_priv { + void __iomem *base; + struct clk_bulk_data *clks; + int num_clks; + struct reset_control *phy_reset; + struct reset_control *por_reset; + struct regulator_bulk_data vregs[VREG_NUM]; + const struct hsphy_data *data; + enum phy_mode mode; +}; + +static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode, + int submode) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + + priv->mode = PHY_MODE_INVALID; + + if (mode > 0) + priv->mode = mode; + + return 0; +} + +static void qcom_snps_hsphy_enable_hv_interrupts(struct hsphy_priv *priv) +{ + u32 val; + + /* Clear any existing interrupts before enabling the interrupts */ + val = readb(priv->base + PHY_INTR_CLEAR0); + val |= DPDM_MASK; + writeb(val, priv->base + PHY_INTR_CLEAR0); + + writeb(0x0, priv->base + PHY_IRQ_CMD); + usleep_range(200, 220); + writeb(0x1, priv->base + PHY_IRQ_CMD); + + /* Make sure the interrupts are cleared */ + usleep_range(200, 220); + + val = readb(priv->base + PHY_INTR_MASK0); + switch (priv->mode) { + case PHY_MODE_USB_HOST_HS: + case PHY_MODE_USB_HOST_FS: + case PHY_MODE_USB_DEVICE_HS: + case PHY_MODE_USB_DEVICE_FS: + val |= DP_1_0 | DM_0_1; + break; + case PHY_MODE_USB_HOST_LS: + case PHY_MODE_USB_DEVICE_LS: + val |= DP_0_1 | DM_1_0; + break; + default: + /* No device connected */ + val |= DP_0_1 | DM_0_1; + break; + } + writeb(val, priv->base + PHY_INTR_MASK0); +} + +static void qcom_snps_hsphy_disable_hv_interrupts(struct hsphy_priv *priv) +{ + u32 val; + + val = readb(priv->base + PHY_INTR_MASK0); + val &= ~DPDM_MASK; + writeb(val, priv->base + PHY_INTR_MASK0); + + /* Clear any pending interrupts */ + val = readb(priv->base + PHY_INTR_CLEAR0); + val |= DPDM_MASK; + writeb(val, priv->base + PHY_INTR_CLEAR0); + + writeb(0x0, priv->base + PHY_IRQ_CMD); + usleep_range(200, 220); + + writeb(0x1, priv->base + PHY_IRQ_CMD); + usleep_range(200, 220); +} + +static void qcom_snps_hsphy_enter_retention(struct hsphy_priv *priv) +{ + u32 val; + + val = readb(priv->base + PHY_CTRL_COMMON0); + val |= SIDDQ; + writeb(val, priv->base + PHY_CTRL_COMMON0); +} + +static void qcom_snps_hsphy_exit_retention(struct hsphy_priv *priv) +{ + u32 val; + + val = readb(priv->base + PHY_CTRL_COMMON0); + val &= ~SIDDQ; + writeb(val, priv->base + PHY_CTRL_COMMON0); +} + +static int qcom_snps_hsphy_power_on(struct phy *phy) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = regulator_bulk_enable(VREG_NUM, priv->vregs); + if (ret) + return ret; + ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks); + if 
(ret) + goto err_disable_regulator; + qcom_snps_hsphy_disable_hv_interrupts(priv); + qcom_snps_hsphy_exit_retention(priv); + + return 0; + +err_disable_regulator: + regulator_bulk_disable(VREG_NUM, priv->vregs); + + return ret; +} + +static int qcom_snps_hsphy_power_off(struct phy *phy) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + + qcom_snps_hsphy_enter_retention(priv); + qcom_snps_hsphy_enable_hv_interrupts(priv); + clk_bulk_disable_unprepare(priv->num_clks, priv->clks); + regulator_bulk_disable(VREG_NUM, priv->vregs); + + return 0; +} + +static int qcom_snps_hsphy_reset(struct hsphy_priv *priv) +{ + int ret; + + ret = reset_control_assert(priv->phy_reset); + if (ret) + return ret; + + usleep_range(10, 15); + + ret = reset_control_deassert(priv->phy_reset); + if (ret) + return ret; + + usleep_range(80, 100); + + return 0; +} + +static void qcom_snps_hsphy_init_sequence(struct hsphy_priv *priv) +{ + const struct hsphy_data *data = priv->data; + const struct hsphy_init_seq *seq; + int i; + + /* Device match data is optional. */ + if (!data) + return; + + seq = data->init_seq; + + for (i = 0; i < data->init_seq_num; i++, seq++) { + writeb(seq->val, priv->base + seq->offset); + if (seq->delay) + usleep_range(seq->delay, seq->delay + 10); + } +} + +static int qcom_snps_hsphy_por_reset(struct hsphy_priv *priv) +{ + int ret; + + ret = reset_control_assert(priv->por_reset); + if (ret) + return ret; + + /* + * The Femto PHY is POR reset in the following scenarios. + * + * 1. After overriding the parameter registers. + * 2. Low power mode exit from PHY retention. + * + * Ensure that SIDDQ is cleared before bringing the PHY + * out of reset. + */ + qcom_snps_hsphy_exit_retention(priv); + + /* + * As per databook, 10 usec delay is required between + * PHY POR assert and de-assert. + */ + usleep_range(10, 20); + ret = reset_control_deassert(priv->por_reset); + if (ret) + return ret; + + /* + * As per databook, it takes 75 usec for PHY to stabilize + * after the reset. 
+ */ + usleep_range(80, 100); + + return 0; +} + +static int qcom_snps_hsphy_init(struct phy *phy) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = qcom_snps_hsphy_reset(priv); + if (ret) + return ret; + + qcom_snps_hsphy_init_sequence(priv); + + ret = qcom_snps_hsphy_por_reset(priv); + if (ret) + return ret; + + return 0; +} + +static const struct phy_ops qcom_snps_hsphy_ops = { + .init = qcom_snps_hsphy_init, + .power_on = qcom_snps_hsphy_power_on, + .power_off = qcom_snps_hsphy_power_off, + .set_mode = qcom_snps_hsphy_set_mode, + .owner = THIS_MODULE, +}; + +static const char * const qcom_snps_hsphy_clks[] = { + "ref", + "ahb", + "sleep", +}; + +static int qcom_snps_hsphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *provider; + struct hsphy_priv *priv; + struct phy *phy; + int ret; + int i; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->num_clks = ARRAY_SIZE(qcom_snps_hsphy_clks); + priv->clks = devm_kcalloc(dev, priv->num_clks, sizeof(*priv->clks), + GFP_KERNEL); + if (!priv->clks) + return -ENOMEM; + + for (i = 0; i < priv->num_clks; i++) + priv->clks[i].id = qcom_snps_hsphy_clks[i]; + + ret = devm_clk_bulk_get(dev, priv->num_clks, priv->clks); + if (ret) + return ret; + + priv->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(priv->phy_reset)) + return PTR_ERR(priv->phy_reset); + + priv->por_reset = devm_reset_control_get_exclusive(dev, "por"); + if (IS_ERR(priv->por_reset)) + return PTR_ERR(priv->por_reset); + + priv->vregs[VDD].supply = "vdd"; + priv->vregs[VDDA_1P8].supply = "vdda1p8"; + priv->vregs[VDDA_3P3].supply = "vdda3p3"; + + ret = devm_regulator_bulk_get(dev, VREG_NUM, priv->vregs); + if (ret) + return ret; + + /* Get device match data */ + priv->data = device_get_match_data(dev); + + phy = devm_phy_create(dev, dev->of_node, &qcom_snps_hsphy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + phy_set_drvdata(phy, priv); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return PTR_ERR(provider); + + ret = regulator_set_load(priv->vregs[VDDA_1P8].consumer, 19000); + if (ret < 0) + return ret; + + ret = regulator_set_load(priv->vregs[VDDA_3P3].consumer, 16000); + if (ret < 0) + goto unset_1p8_load; + + return 0; + +unset_1p8_load: + regulator_set_load(priv->vregs[VDDA_1P8].consumer, 0); + + return ret; +} + +/* + * The macro is used to define an initialization sequence. Each tuple + * is meant to program 'value' into phy register at 'offset' with 'delay' + * in us followed. 
+ */ +#define HSPHY_INIT_CFG(o, v, d) { .offset = o, .val = v, .delay = d, } + +static const struct hsphy_init_seq init_seq_femtophy[] = { + HSPHY_INIT_CFG(0xc0, 0x01, 0), + HSPHY_INIT_CFG(0xe8, 0x0d, 0), + HSPHY_INIT_CFG(0x74, 0x12, 0), + HSPHY_INIT_CFG(0x98, 0x63, 0), + HSPHY_INIT_CFG(0x9c, 0x03, 0), + HSPHY_INIT_CFG(0xa0, 0x1d, 0), + HSPHY_INIT_CFG(0xa4, 0x03, 0), + HSPHY_INIT_CFG(0x8c, 0x23, 0), + HSPHY_INIT_CFG(0x78, 0x08, 0), + HSPHY_INIT_CFG(0x7c, 0xdc, 0), + HSPHY_INIT_CFG(0x90, 0xe0, 20), + HSPHY_INIT_CFG(0x74, 0x10, 0), + HSPHY_INIT_CFG(0x90, 0x60, 0), +}; + +static const struct hsphy_data hsphy_data_femtophy = { + .init_seq = init_seq_femtophy, + .init_seq_num = ARRAY_SIZE(init_seq_femtophy), +}; + +static const struct of_device_id qcom_snps_hsphy_match[] = { + { .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match); + +static struct platform_driver qcom_snps_hsphy_driver = { + .probe = qcom_snps_hsphy_probe, + .driver = { + .name = "qcom,usb-hs-28nm-phy", + .of_match_table = qcom_snps_hsphy_match, + }, +}; +module_platform_driver(qcom_snps_hsphy_driver); + +MODULE_DESCRIPTION("Qualcomm 28nm Hi-Speed USB PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/qualcomm/phy-qcom-usb-ss.c b/drivers/phy/qualcomm/phy-qcom-usb-ss.c new file mode 100644 index 000000000000..a3a6d3ce7ea1 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-usb-ss.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2014,2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, Linaro Limited + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#define PHY_CTRL0 0x6C +#define PHY_CTRL1 0x70 +#define PHY_CTRL2 0x74 +#define PHY_CTRL4 0x7C + +/* PHY_CTRL bits */ +#define REF_PHY_EN BIT(0) +#define LANE0_PWR_ON BIT(2) +#define SWI_PCS_CLK_SEL BIT(4) +#define TST_PWR_DOWN BIT(4) +#define PHY_RESET BIT(7) + +#define NUM_BULK_CLKS 3 +#define NUM_BULK_REGS 2 + +struct ssphy_priv { + void __iomem *base; + struct device *dev; + struct reset_control *reset_com; + struct reset_control *reset_phy; + struct regulator_bulk_data regs[NUM_BULK_REGS]; + struct clk_bulk_data clks[NUM_BULK_CLKS]; + enum phy_mode mode; +}; + +static inline void qcom_ssphy_updatel(void __iomem *addr, u32 mask, u32 val) +{ + writel((readl(addr) & ~mask) | val, addr); +} + +static int qcom_ssphy_do_reset(struct ssphy_priv *priv) +{ + int ret; + + if (!priv->reset_com) { + qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET, + PHY_RESET); + usleep_range(10, 20); + qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET, 0); + } else { + ret = reset_control_assert(priv->reset_com); + if (ret) { + dev_err(priv->dev, "Failed to assert reset com\n"); + return ret; + } + + ret = reset_control_assert(priv->reset_phy); + if (ret) { + dev_err(priv->dev, "Failed to assert reset phy\n"); + return ret; + } + + usleep_range(10, 20); + + ret = reset_control_deassert(priv->reset_com); + if (ret) { + dev_err(priv->dev, "Failed to deassert reset com\n"); + return ret; + } + + ret = reset_control_deassert(priv->reset_phy); + if (ret) { + dev_err(priv->dev, "Failed to deassert reset phy\n"); + return ret; + } + } + + return 0; +} + +static int 
qcom_ssphy_power_on(struct phy *phy) +{ + struct ssphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = regulator_bulk_enable(NUM_BULK_REGS, priv->regs); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(NUM_BULK_CLKS, priv->clks); + if (ret) + goto err_disable_regulator; + + ret = qcom_ssphy_do_reset(priv); + if (ret) + goto err_disable_clock; + + writeb(SWI_PCS_CLK_SEL, priv->base + PHY_CTRL0); + qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, LANE0_PWR_ON); + qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, REF_PHY_EN); + qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, 0); + + return 0; +err_disable_clock: + clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks); +err_disable_regulator: + regulator_bulk_disable(NUM_BULK_REGS, priv->regs); + + return ret; +} + +static int qcom_ssphy_power_off(struct phy *phy) +{ + struct ssphy_priv *priv = phy_get_drvdata(phy); + + qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, 0); + qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, 0); + qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, TST_PWR_DOWN); + + clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks); + regulator_bulk_disable(NUM_BULK_REGS, priv->regs); + + return 0; +} + +static int qcom_ssphy_init_clock(struct ssphy_priv *priv) +{ + priv->clks[0].id = "ref"; + priv->clks[1].id = "ahb"; + priv->clks[2].id = "pipe"; + + return devm_clk_bulk_get(priv->dev, NUM_BULK_CLKS, priv->clks); +} + +static int qcom_ssphy_init_regulator(struct ssphy_priv *priv) +{ + int ret; + + priv->regs[0].supply = "vdd"; + priv->regs[1].supply = "vdda1p8"; + ret = devm_regulator_bulk_get(priv->dev, NUM_BULK_REGS, priv->regs); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(priv->dev, "Failed to get regulators\n"); + return ret; + } + + return ret; +} + +static int qcom_ssphy_init_reset(struct ssphy_priv *priv) +{ + priv->reset_com = devm_reset_control_get_optional_exclusive(priv->dev, "com"); + if (IS_ERR(priv->reset_com)) { + dev_err(priv->dev, "Failed to get reset control com\n"); + return PTR_ERR(priv->reset_com); + } + + if (priv->reset_com) { + /* if reset_com is present, reset_phy is no longer optional */ + priv->reset_phy = devm_reset_control_get_exclusive(priv->dev, "phy"); + if (IS_ERR(priv->reset_phy)) { + dev_err(priv->dev, "Failed to get reset control phy\n"); + return PTR_ERR(priv->reset_phy); + } + } + + return 0; +} + +static const struct phy_ops qcom_ssphy_ops = { + .power_off = qcom_ssphy_power_off, + .power_on = qcom_ssphy_power_on, + .owner = THIS_MODULE, +}; + +static int qcom_ssphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *provider; + struct ssphy_priv *priv; + struct phy *phy; + int ret; + + priv = devm_kzalloc(dev, sizeof(struct ssphy_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->mode = PHY_MODE_INVALID; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + ret = qcom_ssphy_init_clock(priv); + if (ret) + return ret; + + ret = qcom_ssphy_init_reset(priv); + if (ret) + return ret; + + ret = qcom_ssphy_init_regulator(priv); + if (ret) + return ret; + + phy = devm_phy_create(dev, dev->of_node, &qcom_ssphy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "Failed to create the SS phy\n"); + return PTR_ERR(phy); + } + + phy_set_drvdata(phy, priv); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id 
qcom_ssphy_match[] = { + { .compatible = "qcom,usb-ss-28nm-phy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_ssphy_match); + +static struct platform_driver qcom_ssphy_driver = { + .probe = qcom_ssphy_probe, + .driver = { + .name = "qcom-usb-ssphy", + .of_match_table = qcom_ssphy_match, + }, +}; +module_platform_driver(qcom_ssphy_driver); + +MODULE_DESCRIPTION("Qualcomm SuperSpeed USB PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 680cc0c8825c..a84e9f027fc4 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -763,7 +763,7 @@ static void rockchip_chg_detect_work(struct work_struct *work) /* put the controller in normal mode */ property_enable(base, &rphy->phy_cfg->chg_det.opmode, true); rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work); - dev_info(&rport->phy->dev, "charger = %s\n", + dev_dbg(&rport->phy->dev, "charger = %s\n", chg_to_string(rphy->chg_type)); return; default: diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c index 93ffbd2940fa..e4adab375c73 100644 --- a/drivers/phy/socionext/phy-uniphier-pcie.c +++ b/drivers/phy/socionext/phy-uniphier-pcie.c @@ -19,6 +19,10 @@ #include <linux/resource.h> /* PHY */ +#define PCL_PHY_CLKCTRL 0x0000 +#define PORT_SEL_MASK GENMASK(11, 9) +#define PORT_SEL_1 FIELD_PREP(PORT_SEL_MASK, 1) + #define PCL_PHY_TEST_I 0x2000 #define PCL_PHY_TEST_O 0x2004 #define TESTI_DAT_MASK GENMASK(13, 6) @@ -45,13 +49,14 @@ struct uniphier_pciephy_priv { void __iomem *base; struct device *dev; - struct clk *clk; - struct reset_control *rst; + struct clk *clk, *clk_gio; + struct reset_control *rst, *rst_gio; const struct uniphier_pciephy_soc_data *data; }; struct uniphier_pciephy_soc_data { - bool has_syscon; + bool is_legacy; + void (*set_phymode)(struct regmap *regmap); }; static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv, @@ -111,16 +116,35 @@ static void uniphier_pciephy_deassert(struct uniphier_pciephy_priv *priv) static int uniphier_pciephy_init(struct phy *phy) { struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy); + u32 val; int ret; ret = clk_prepare_enable(priv->clk); if (ret) return ret; - ret = reset_control_deassert(priv->rst); + ret = clk_prepare_enable(priv->clk_gio); if (ret) goto out_clk_disable; + ret = reset_control_deassert(priv->rst); + if (ret) + goto out_clk_gio_disable; + + ret = reset_control_deassert(priv->rst_gio); + if (ret) + goto out_rst_assert; + + /* support only 1 port */ + val = readl(priv->base + PCL_PHY_CLKCTRL); + val &= ~PORT_SEL_MASK; + val |= PORT_SEL_1; + writel(val, priv->base + PCL_PHY_CLKCTRL); + + /* legacy controller doesn't have phy_reset and parameters */ + if (priv->data->is_legacy) + return 0; + uniphier_pciephy_set_param(priv, PCL_PHY_R00, RX_EQ_ADJ_EN, RX_EQ_ADJ_EN); uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ, @@ -134,6 +158,10 @@ static int uniphier_pciephy_init(struct phy *phy) return 0; +out_rst_assert: + reset_control_assert(priv->rst); +out_clk_gio_disable: + clk_disable_unprepare(priv->clk_gio); out_clk_disable: clk_disable_unprepare(priv->clk); @@ -144,8 +172,11 @@ static int uniphier_pciephy_exit(struct phy *phy) { struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy); - uniphier_pciephy_assert(priv); + if (!priv->data->is_legacy) + uniphier_pciephy_assert(priv); + reset_control_assert(priv->rst_gio); reset_control_assert(priv->rst); + 
clk_disable_unprepare(priv->clk_gio); clk_disable_unprepare(priv->clk); return 0; @@ -163,7 +194,6 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct regmap *regmap; - struct resource *res; struct phy *phy; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -176,18 +206,36 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) priv->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk = devm_clk_get(dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - priv->rst = devm_reset_control_get_shared(dev, NULL); - if (IS_ERR(priv->rst)) - return PTR_ERR(priv->rst); + if (priv->data->is_legacy) { + priv->clk_gio = devm_clk_get(dev, "gio"); + if (IS_ERR(priv->clk_gio)) + return PTR_ERR(priv->clk_gio); + + priv->rst_gio = + devm_reset_control_get_shared(dev, "gio"); + if (IS_ERR(priv->rst_gio)) + return PTR_ERR(priv->rst_gio); + + priv->clk = devm_clk_get(dev, "link"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->rst = devm_reset_control_get_shared(dev, "link"); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + } else { + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->rst = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + } phy = devm_phy_create(dev, dev->of_node, &uniphier_pciephy_ops); if (IS_ERR(phy)) @@ -195,9 +243,8 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "socionext,syscon"); - if (!IS_ERR(regmap) && priv->data->has_syscon) - regmap_update_bits(regmap, SG_USBPCIESEL, - SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE); + if (!IS_ERR(regmap) && priv->data->set_phymode) + priv->data->set_phymode(regmap); phy_set_drvdata(phy, priv); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); @@ -205,16 +252,31 @@ static int uniphier_pciephy_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(phy_provider); } +static void uniphier_pciephy_ld20_setmode(struct regmap *regmap) +{ + regmap_update_bits(regmap, SG_USBPCIESEL, + SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE); +} + +static const struct uniphier_pciephy_soc_data uniphier_pro5_data = { + .is_legacy = true, +}; + static const struct uniphier_pciephy_soc_data uniphier_ld20_data = { - .has_syscon = true, + .is_legacy = false, + .set_phymode = uniphier_pciephy_ld20_setmode, }; static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = { - .has_syscon = false, + .is_legacy = false, }; static const struct of_device_id uniphier_pciephy_match[] = { { + .compatible = "socionext,uniphier-pro5-pcie-phy", + .data = &uniphier_pro5_data, + }, + { .compatible = "socionext,uniphier-ld20-pcie-phy", .data = &uniphier_ld20_data, }, diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c index 50f379fc4e06..a9bc74121f38 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3hs.c +++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c @@ -41,10 +41,12 @@ #define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) } +#define RX_CHK_SYNC PHY_F(0, 5, 5) /* RX sync mode */ +#define RX_SYNC_SEL PHY_F(1, 1, 0) /* RX sync length */ #define LS_SLEW PHY_F(10, 6, 6) /* LS mode slew rate */ #define FS_LS_DRV PHY_F(10, 5, 5) 
/* FS/LS slew rate */ -#define MAX_PHY_PARAMS 2 +#define MAX_PHY_PARAMS 4 struct uniphier_u3hsphy_param { struct { @@ -66,13 +68,14 @@ struct uniphier_u3hsphy_trim_param { struct uniphier_u3hsphy_priv { struct device *dev; void __iomem *base; - struct clk *clk, *clk_parent, *clk_ext; - struct reset_control *rst, *rst_parent; + struct clk *clk, *clk_parent, *clk_ext, *clk_parent_gio; + struct reset_control *rst, *rst_parent, *rst_parent_gio; struct regulator *vbus; const struct uniphier_u3hsphy_soc_data *data; }; struct uniphier_u3hsphy_soc_data { + bool is_legacy; int nparams; const struct uniphier_u3hsphy_param param[MAX_PHY_PARAMS]; u32 config0; @@ -256,11 +259,20 @@ static int uniphier_u3hsphy_init(struct phy *phy) if (ret) return ret; - ret = reset_control_deassert(priv->rst_parent); + ret = clk_prepare_enable(priv->clk_parent_gio); if (ret) goto out_clk_disable; - if (!priv->data->config0 && !priv->data->config1) + ret = reset_control_deassert(priv->rst_parent); + if (ret) + goto out_clk_gio_disable; + + ret = reset_control_deassert(priv->rst_parent_gio); + if (ret) + goto out_rst_assert; + + if ((priv->data->is_legacy) + || (!priv->data->config0 && !priv->data->config1)) return 0; config0 = priv->data->config0; @@ -280,6 +292,8 @@ static int uniphier_u3hsphy_init(struct phy *phy) out_rst_assert: reset_control_assert(priv->rst_parent); +out_clk_gio_disable: + clk_disable_unprepare(priv->clk_parent_gio); out_clk_disable: clk_disable_unprepare(priv->clk_parent); @@ -290,7 +304,9 @@ static int uniphier_u3hsphy_exit(struct phy *phy) { struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy); + reset_control_assert(priv->rst_parent_gio); reset_control_assert(priv->rst_parent); + clk_disable_unprepare(priv->clk_parent_gio); clk_disable_unprepare(priv->clk_parent); return 0; @@ -309,7 +325,6 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct uniphier_u3hsphy_priv *priv; struct phy_provider *phy_provider; - struct resource *res; struct phy *phy; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -322,27 +337,38 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev) priv->data->nparams > MAX_PHY_PARAMS)) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk = devm_clk_get(dev, "phy"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); + if (!priv->data->is_legacy) { + priv->clk = devm_clk_get(dev, "phy"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->clk_ext = devm_clk_get_optional(dev, "phy-ext"); + if (IS_ERR(priv->clk_ext)) + return PTR_ERR(priv->clk_ext); + + priv->rst = devm_reset_control_get_shared(dev, "phy"); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + + } else { + priv->clk_parent_gio = devm_clk_get(dev, "gio"); + if (IS_ERR(priv->clk_parent_gio)) + return PTR_ERR(priv->clk_parent_gio); + + priv->rst_parent_gio = + devm_reset_control_get_shared(dev, "gio"); + if (IS_ERR(priv->rst_parent_gio)) + return PTR_ERR(priv->rst_parent_gio); + } priv->clk_parent = devm_clk_get(dev, "link"); if (IS_ERR(priv->clk_parent)) return PTR_ERR(priv->clk_parent); - priv->clk_ext = devm_clk_get_optional(dev, "phy-ext"); - if (IS_ERR(priv->clk_ext)) - return PTR_ERR(priv->clk_ext); - - priv->rst = devm_reset_control_get_shared(dev, "phy"); - if (IS_ERR(priv->rst)) - return PTR_ERR(priv->rst); - priv->rst_parent = 
devm_reset_control_get_shared(dev, "link"); if (IS_ERR(priv->rst_parent)) return PTR_ERR(priv->rst_parent); @@ -364,13 +390,26 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(phy_provider); } -static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = { +static const struct uniphier_u3hsphy_soc_data uniphier_pro5_data = { + .is_legacy = true, .nparams = 0, }; -static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = { +static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = { + .is_legacy = false, .nparams = 2, .param = { + { RX_CHK_SYNC, 1 }, + { RX_SYNC_SEL, 1 }, + }, +}; + +static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = { + .is_legacy = false, + .nparams = 4, + .param = { + { RX_CHK_SYNC, 1 }, + { RX_SYNC_SEL, 1 }, { LS_SLEW, 1 }, { FS_LS_DRV, 1 }, }, @@ -380,7 +419,12 @@ static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = { }; static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = { - .nparams = 0, + .is_legacy = false, + .nparams = 2, + .param = { + { RX_CHK_SYNC, 1 }, + { RX_SYNC_SEL, 1 }, + }, .trim_func = uniphier_u3hsphy_trim_ld20, .config0 = 0x92316680, .config1 = 0x00000106, @@ -388,6 +432,10 @@ static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = { static const struct of_device_id uniphier_u3hsphy_match[] = { { + .compatible = "socionext,uniphier-pro5-usb3-hsphy", + .data = &uniphier_pro5_data, + }, + { .compatible = "socionext,uniphier-pxs2-usb3-hsphy", .data = &uniphier_pxs2_data, }, diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c index ec231e40ef2a..6700645bcbe6 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3ss.c +++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c @@ -215,7 +215,6 @@ static int uniphier_u3ssphy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct uniphier_u3ssphy_priv *priv; struct phy_provider *phy_provider; - struct resource *res; struct phy *phy; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -228,8 +227,7 @@ static int uniphier_u3ssphy_probe(struct platform_device *pdev) priv->data->nparams > MAX_PHY_PARAMS)) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); @@ -315,6 +313,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = { .data = &uniphier_pro4_data, }, { + .compatible = "socionext,uniphier-pro5-usb3-ssphy", + .data = &uniphier_pro4_data, + }, + { .compatible = "socionext,uniphier-pxs2-usb3-ssphy", .data = &uniphier_pxs2_data, }, diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 1c536fc03c83..7edd5c3bc536 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -170,6 +170,21 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = { .regfields = phy_gmii_sel_fields_am33xx, }; +static const +struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = { + { + [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4040, 0, 1), + [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0), + [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0), + }, +}; + +static const +struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = { + .num_ports = 1, + .regfields = phy_gmii_sel_fields_am654, +}; + static const struct of_device_id phy_gmii_sel_id_table[] = { { .compatible = "ti,am3352-phy-gmii-sel", @@ -187,6 +202,10 
@@ static const struct of_device_id phy_gmii_sel_id_table[] = { .compatible = "ti,dm814-phy-gmii-sel", .data = &phy_gmii_sel_soc_dm814, }, + { + .compatible = "ti,am654-phy-gmii-sel", + .data = &phy_gmii_sel_soc_am654, + }, {} }; MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table);
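
Two of the patterns introduced in the hunks above lend themselves to small standalone illustrations. First, the QUSB2 V2 changes replace the per-parameter bool/u8 pairs with a compact override_param struct and apply each tuning value through a masked read-modify-write of the relevant register field. The sketch below models that flow in plain C outside the kernel: the regs[] array, reg_read()/reg_write() helpers and main() are illustrative stand-ins rather than driver API, while the TUNE1 offset (0x240) and the HSTX_TRIM mask/shift come from the diff above.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a memory-mapped PHY register block. */
static uint8_t regs[0x400];

static uint8_t reg_read(unsigned int off)              { return regs[off]; }
static void    reg_write(unsigned int off, uint8_t v)  { regs[off] = v; }

/* Mirrors struct override_param: the value is applied only when set. */
struct override_param {
	bool override;
	uint8_t value;
};

/* Masked read-modify-write, in the spirit of qusb2_write_mask(). */
static void write_mask(unsigned int off, uint8_t val, uint8_t mask)
{
	uint8_t reg = reg_read(off);

	reg = (reg & ~mask) | (val & mask);
	reg_write(off, reg);
}

/* Example field: HSTX_TRIM sits in bits [7:4] of the TUNE1 register. */
#define TUNE1_OFF	0x240
#define HSTX_TRIM_SHIFT	4
#define HSTX_TRIM_MASK	(0xf << HSTX_TRIM_SHIFT)

static void apply_hstx_trim(const struct override_param *p)
{
	if (!p->override)
		return;	/* no DT property -> keep the reset/efuse default */

	write_mask(TUNE1_OFF, (uint8_t)(p->value << HSTX_TRIM_SHIFT),
		   HSTX_TRIM_MASK);
}

int main(void)
{
	struct override_param hstx_trim = { .override = true, .value = 0x3 };

	regs[TUNE1_OFF] = 0xa5;	/* pretend reset value */
	apply_hstx_trim(&hstx_trim);
	printf("TUNE1 = 0x%02x\n", reg_read(TUNE1_OFF));	/* prints 0x35 */
	return 0;
}
```

Only the four bits covered by the mask change; the rest of TUNE1 is preserved, which is why an absent device-tree property can simply leave the override flag false.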
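Second, the new Synopsys 28nm Hi-Speed USB PHY driver programs the femto PHY from a table of { offset, value, delay } tuples (HSPHY_INIT_CFG) and walks that table at init time, sleeping after any entry that requests a delay. A minimal user-space model of that table walk follows; write_reg() and the two sample tuples are placeholders (the real init_seq_femtophy table has 13 entries and the driver uses writeb()/usleep_range() on the mapped PHY base), but the tuple layout and the delay-after-write behaviour match the code added above.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Mirrors struct hsphy_init_seq: program 'val' at 'offset', then wait 'delay_us'. */
struct init_seq {
	unsigned int offset;
	uint8_t val;
	unsigned int delay_us;
};

#define INIT_CFG(o, v, d)	{ .offset = (o), .val = (v), .delay_us = (d) }

/* Placeholder register write; a real driver writes to the ioremapped base. */
static void write_reg(unsigned int offset, uint8_t val)
{
	printf("reg[0x%03x] <= 0x%02x\n", offset, val);
}

static void run_init_sequence(const struct init_seq *seq, size_t num)
{
	size_t i;

	for (i = 0; i < num; i++, seq++) {
		write_reg(seq->offset, seq->val);
		if (seq->delay_us)
			usleep(seq->delay_us);	/* driver: usleep_range() */
	}
}

int main(void)
{
	/* Two sample tuples taken from init_seq_femtophy, for illustration. */
	static const struct init_seq demo[] = {
		INIT_CFG(0xc0, 0x01, 0),
		INIT_CFG(0x90, 0xe0, 20),
	};

	run_init_sequence(demo, sizeof(demo) / sizeof(demo[0]));
	return 0;
}
```

Keeping the per-SoC differences in a match-data table like this is what lets qcom_snps_hsphy_init_sequence() treat the device match data as optional and skip programming entirely when no table is supplied.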