Diffstat (limited to 'drivers'): 43 files changed, 1351 insertions, 553 deletions
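The first hunk below (drivers/acpi/pci_root.c) teaches acpi_pci_root_create() to evaluate the "PCI Boot Configuration" _DSM function and, if it exists and returns 0, to set host_bridge->preserve_config so firmware-made resource assignments are kept. For reference, a minimal sketch of that probe pattern pulled out as a standalone helper; the helper name is hypothetical, while the GUID and function index are the ones used by the hunk:

#include <linux/acpi.h>

/*
 * Hypothetical helper mirroring the pci_root.c hunk: evaluate _DSM
 * revision 1, function IGNORE_PCI_BOOT_CONFIG_DSM, and report whether
 * firmware-assigned PCI resources must be preserved.
 */
static bool pci_root_preserve_config(acpi_handle handle)
{
        union acpi_object *obj;
        bool preserve = false;

        obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 1,
                                IGNORE_PCI_BOOT_CONFIG_DSM, NULL);
        /* An integer return value of 0 means "preserve the boot config" */
        if (obj && obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0)
                preserve = true;

        ACPI_FREE(obj);
        return preserve;
}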
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 39f5d172e84f..314a187ed572 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -881,6 +881,7 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, int node = acpi_get_node(device->handle); struct pci_bus *bus; struct pci_host_bridge *host_bridge; + union acpi_object *obj; info->root = root; info->bridge = device; @@ -917,6 +918,17 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL)) host_bridge->native_ltr = 0; + /* + * Evaluate the "PCI Boot Configuration" _DSM Function. If it + * exists and returns 0, we must preserve any PCI resource + * assignments made by firmware for this host bridge. + */ + obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1, + IGNORE_PCI_BOOT_CONFIG_DSM, NULL); + if (obj && obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0) + host_bridge->preserve_config = 1; + ACPI_FREE(obj); + pci_scan_child_bus(bus); pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info, info); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 473c4850c01d..2ee8f9522e05 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -45,7 +45,7 @@ enum i915_drm_suspend_mode { * to be disabled. This shouldn't happen and we'll print some error messages in * case it happens. * - * For more, read the Documentation/power/runtime_pm.txt. + * For more, read the Documentation/power/runtime_pm.rst. */ struct intel_runtime_pm { atomic_t wakeref_count; diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig index fe54d349d2e1..35dfc7e80f92 100644 --- a/drivers/opp/Kconfig +++ b/drivers/opp/Kconfig @@ -11,4 +11,4 @@ config PM_OPP OPP layer organizes the data internally using device pointers representing individual voltage domains and provides SOC implementations a ready to use framework to manage OPPs. - For more information, read <file:Documentation/power/opp.txt> + For more information, read <file:Documentation/power/opp.rst> diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 97c08146534a..e18499243f84 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required); * @pdev: PCI device structure * * Returns negative value when PASID capability is not present. - * Otherwise it returns the numer of supported PASIDs. + * Otherwise it returns the number of supported PASIDs. */ int pci_max_pasids(struct pci_dev *pdev) { diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 011c57cae4b0..fe9f9f13ce11 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -174,14 +174,14 @@ config PCIE_IPROC_MSI PCIe controller config PCIE_ALTERA - bool "Altera PCIe controller" + tristate "Altera PCIe controller" depends on ARM || NIOS2 || ARM64 || COMPILE_TEST help Say Y here if you want to enable PCIe controller support on Altera FPGA. 
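Switching PCIE_ALTERA (and PCIE_ALTERA_MSI below) from bool to tristate lets the Altera host controller drivers be built as loadable modules; the later pcie-altera.c and pcie-altera-msi.c hunks add the matching remove() paths, MODULE_DEVICE_TABLE() entries, and MODULE_LICENSE() tags, and drop suppress_bind_attrs. A rough sketch of the boilerplate a modular platform driver needs, with hypothetical names:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_pcie_of_match[] = {
        { .compatible = "vendor,example-pcie" },        /* hypothetical */
        {},
};
MODULE_DEVICE_TABLE(of, example_pcie_of_match);

static int example_pcie_probe(struct platform_device *pdev)
{
        return 0;       /* map resources, scan the root bus, ... */
}

static int example_pcie_remove(struct platform_device *pdev)
{
        return 0;       /* stop/remove the root bus, tear down IRQ domains */
}

static struct platform_driver example_pcie_driver = {
        .probe = example_pcie_probe,
        .remove = example_pcie_remove,
        .driver = {
                .name = "example-pcie",
                .of_match_table = example_pcie_of_match,
        },
};
module_platform_driver(example_pcie_driver);
MODULE_LICENSE("GPL v2");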
config PCIE_ALTERA_MSI - bool "Altera PCIe MSI feature" + tristate "Altera PCIe MSI feature" depends on PCIE_ALTERA depends on PCI_MSI_IRQ_DOMAIN help diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index a6ce1ee51b4c..6ea778ae4877 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -90,7 +90,7 @@ config PCI_EXYNOS config PCI_IMX6 bool "Freescale i.MX6/7/8 PCIe controller" - depends on SOC_IMX6Q || SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST + depends on ARCH_MXC || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 419451efd58c..4234ddb4722f 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -26,6 +26,7 @@ #include <linux/types.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> +#include <linux/gpio/consumer.h> #include "../../pci.h" #include "pcie-designware.h" diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 0c389a30ef5d..3d55dc78d999 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -25,10 +25,14 @@ #include "pcie-designware.h" +#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4 + struct armada8k_pcie { struct dw_pcie *pci; struct clk *clk; struct clk *clk_reg; + struct phy *phy[ARMADA8K_PCIE_MAX_LANES]; + unsigned int phy_count; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 @@ -55,7 +59,7 @@ struct armada8k_pcie { #define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) #define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) /* - * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write + * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write * allocate */ #define ARCACHE_DEFAULT_VALUE 0x3511 @@ -67,6 +71,76 @@ struct armada8k_pcie { #define to_armada8k_pcie(x) dev_get_drvdata((x)->dev) +static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie) +{ + int i; + + for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { + phy_power_off(pcie->phy[i]); + phy_exit(pcie->phy[i]); + } +} + +static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie) +{ + int ret; + int i; + + for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { + ret = phy_init(pcie->phy[i]); + if (ret) + return ret; + + ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE, + pcie->phy_count); + if (ret) { + phy_exit(pcie->phy[i]); + return ret; + } + + ret = phy_power_on(pcie->phy[i]); + if (ret) { + phy_exit(pcie->phy[i]); + return ret; + } + } + + return 0; +} + +static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + struct device_node *node = dev->of_node; + int ret = 0; + int i; + + for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { + pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i); + if (IS_ERR(pcie->phy[i]) && + (PTR_ERR(pcie->phy[i]) == -EPROBE_DEFER)) + return PTR_ERR(pcie->phy[i]); + + if (IS_ERR(pcie->phy[i])) { + pcie->phy[i] = NULL; + continue; + } + + pcie->phy_count++; + } + + /* Old bindings miss the PHY handle, so just warn if there is no PHY */ + if (!pcie->phy_count) + dev_warn(dev, "No available PHY\n"); + + ret = armada8k_pcie_enable_phys(pcie); + if (ret) + dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret); + + return ret; +} + static int armada8k_pcie_link_up(struct dw_pcie *pci) { u32 reg; @@ -249,14 +323,20 @@ static int armada8k_pcie_probe(struct 
platform_device *pdev) goto fail_clkreg; } + ret = armada8k_pcie_setup_phys(pcie); + if (ret) + goto fail_clkreg; + platform_set_drvdata(pdev, pcie); ret = armada8k_add_pcie_port(pcie, pdev); if (ret) - goto fail_clkreg; + goto disable_phy; return 0; +disable_phy: + armada8k_pcie_disable_phys(pcie); fail_clkreg: clk_disable_unprepare(pcie->clk_reg); fail: diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 77db32529319..f93252d0da5b 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -311,6 +311,7 @@ void dw_pcie_msi_init(struct pcie_port *pp) dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, upper_32_bits(msi_target)); } +EXPORT_SYMBOL_GPL(dw_pcie_msi_init); int dw_pcie_host_init(struct pcie_port *pp) { @@ -495,6 +496,16 @@ err_free_msi: dw_pcie_free_msi(pp); return ret; } +EXPORT_SYMBOL_GPL(dw_pcie_host_init); + +void dw_pcie_host_deinit(struct pcie_port *pp) +{ + pci_stop_root_bus(pp->root_bus); + pci_remove_root_bus(pp->root_bus); + if (pci_msi_enabled() && !pp->ops->msi_host_init) + dw_pcie_free_msi(pp); +} +EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val, @@ -687,3 +698,4 @@ void dw_pcie_setup_rc(struct pcie_port *pp) val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); } +EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 9d7c51c32b3b..7d25102c304c 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -34,6 +34,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val) return PCIBIOS_SUCCESSFUL; } +EXPORT_SYMBOL_GPL(dw_pcie_read); int dw_pcie_write(void __iomem *addr, int size, u32 val) { @@ -51,69 +52,97 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val) return PCIBIOS_SUCCESSFUL; } +EXPORT_SYMBOL_GPL(dw_pcie_write); -u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size) +u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size) { int ret; u32 val; if (pci->ops->read_dbi) - return pci->ops->read_dbi(pci, base, reg, size); + return pci->ops->read_dbi(pci, pci->dbi_base, reg, size); - ret = dw_pcie_read(base + reg, size, &val); + ret = dw_pcie_read(pci->dbi_base + reg, size, &val); if (ret) dev_err(pci->dev, "Read DBI address failed\n"); return val; } +EXPORT_SYMBOL_GPL(dw_pcie_read_dbi); -void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val) +void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val) { int ret; if (pci->ops->write_dbi) { - pci->ops->write_dbi(pci, base, reg, size, val); + pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val); return; } - ret = dw_pcie_write(base + reg, size, val); + ret = dw_pcie_write(pci->dbi_base + reg, size, val); if (ret) dev_err(pci->dev, "Write DBI address failed\n"); } +EXPORT_SYMBOL_GPL(dw_pcie_write_dbi); -u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size) +u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size) { int ret; u32 val; if (pci->ops->read_dbi2) - return pci->ops->read_dbi2(pci, base, reg, size); + return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size); - ret = dw_pcie_read(base + reg, size, &val); + ret = 
dw_pcie_read(pci->dbi_base2 + reg, size, &val); if (ret) dev_err(pci->dev, "read DBI address failed\n"); return val; } -void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val) +void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val) { int ret; if (pci->ops->write_dbi2) { - pci->ops->write_dbi2(pci, base, reg, size, val); + pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val); return; } - ret = dw_pcie_write(base + reg, size, val); + ret = dw_pcie_write(pci->dbi_base2 + reg, size, val); if (ret) dev_err(pci->dev, "write DBI address failed\n"); } +u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size) +{ + int ret; + u32 val; + + if (pci->ops->read_dbi) + return pci->ops->read_dbi(pci, pci->atu_base, reg, size); + + ret = dw_pcie_read(pci->atu_base + reg, size, &val); + if (ret) + dev_err(pci->dev, "Read ATU address failed\n"); + + return val; +} + +void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val) +{ + int ret; + + if (pci->ops->write_dbi) { + pci->ops->write_dbi(pci, pci->atu_base, reg, size, val); + return; + } + + ret = dw_pcie_write(pci->atu_base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write ATU address failed\n"); +} + static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) { u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index b8993f2b78df..ffed084a0b4f 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -254,14 +254,12 @@ struct dw_pcie { int dw_pcie_read(void __iomem *addr, int size, u32 *val); int dw_pcie_write(void __iomem *addr, int size, u32 val); -u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size); -void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val); -u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size); -void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val); +u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size); +void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val); +u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size); +void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val); +u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size); +void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val); int dw_pcie_link_up(struct dw_pcie *pci); int dw_pcie_wait_for_link(struct dw_pcie *pci); void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, @@ -275,52 +273,52 @@ void dw_pcie_setup(struct dw_pcie *pci); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); + dw_pcie_write_dbi(pci, reg, 0x4, val); } static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); + return dw_pcie_read_dbi(pci, reg, 0x4); } static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) { - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); + dw_pcie_write_dbi(pci, reg, 0x2, val); } static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); + return dw_pcie_read_dbi(pci, reg, 0x2); } static inline void 
dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) { - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); + dw_pcie_write_dbi(pci, reg, 0x1, val); } static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); + return dw_pcie_read_dbi(pci, reg, 0x1); } static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) { - __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val); + dw_pcie_write_dbi2(pci, reg, 0x4, val); } static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4); + return dw_pcie_read_dbi2(pci, reg, 0x4); } static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) { - __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val); + dw_pcie_write_atu(pci, reg, 0x4, val); } static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg) { - return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4); + return dw_pcie_read_atu(pci, reg, 0x4); } static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) @@ -351,6 +349,7 @@ void dw_pcie_msi_init(struct pcie_port *pp); void dw_pcie_free_msi(struct pcie_port *pp); void dw_pcie_setup_rc(struct pcie_port *pp); int dw_pcie_host_init(struct pcie_port *pp); +void dw_pcie_host_deinit(struct pcie_port *pp); int dw_pcie_allocate_domains(struct pcie_port *pp); #else static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) @@ -375,6 +374,10 @@ static inline int dw_pcie_host_init(struct pcie_port *pp) return 0; } +static inline void dw_pcie_host_deinit(struct pcie_port *pp) +{ +} + static inline int dw_pcie_allocate_domains(struct pcie_port *pp) { return 0; diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 9b599296205d..8df1914226be 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -2,7 +2,7 @@ /* * PCIe host controller driver for Kirin Phone SoCs * - * Copyright (C) 2017 Hilisicon Electronics Co., Ltd. + * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. 
* http://www.huawei.com * * Author: Xiaowei Song <songxiaowei@huawei.com> diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 0ed235d560e3..7e581748ee9f 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -112,10 +112,10 @@ struct qcom_pcie_resources_2_3_2 { struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; }; +#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4 struct qcom_pcie_resources_2_4_0 { - struct clk *aux_clk; - struct clk *master_clk; - struct clk *slave_clk; + struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS]; + int num_clks; struct reset_control *axi_m_reset; struct reset_control *axi_s_reset; struct reset_control *pipe_reset; @@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie) static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) { + /* Ensure that PERST has been asserted for at least 100 ms */ + msleep(100); gpiod_set_value_cansleep(pcie->reset, 0); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } @@ -638,18 +640,20 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); + int ret; - res->aux_clk = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux_clk)) - return PTR_ERR(res->aux_clk); + res->clks[0].id = "aux"; + res->clks[1].id = "master_bus"; + res->clks[2].id = "slave_bus"; + res->clks[3].id = "iface"; - res->master_clk = devm_clk_get(dev, "master_bus"); - if (IS_ERR(res->master_clk)) - return PTR_ERR(res->master_clk); + /* qcom,pcie-ipq4019 is defined without "iface" */ + res->num_clks = is_ipq ? 
3 : 4; - res->slave_clk = devm_clk_get(dev, "slave_bus"); - if (IS_ERR(res->slave_clk)) - return PTR_ERR(res->slave_clk); + ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); + if (ret < 0) + return ret; res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); if (IS_ERR(res->axi_m_reset)) @@ -659,27 +663,33 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) if (IS_ERR(res->axi_s_reset)) return PTR_ERR(res->axi_s_reset); - res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); - if (IS_ERR(res->pipe_reset)) - return PTR_ERR(res->pipe_reset); - - res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, - "axi_m_vmid"); - if (IS_ERR(res->axi_m_vmid_reset)) - return PTR_ERR(res->axi_m_vmid_reset); - - res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, - "axi_s_xpu"); - if (IS_ERR(res->axi_s_xpu_reset)) - return PTR_ERR(res->axi_s_xpu_reset); - - res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); - if (IS_ERR(res->parf_reset)) - return PTR_ERR(res->parf_reset); - - res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); - if (IS_ERR(res->phy_reset)) - return PTR_ERR(res->phy_reset); + if (is_ipq) { + /* + * These resources relates to the PHY or are secure clocks, but + * are controlled here for IPQ4019 + */ + res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, + "axi_m_vmid"); + if (IS_ERR(res->axi_m_vmid_reset)) + return PTR_ERR(res->axi_m_vmid_reset); + + res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, + "axi_s_xpu"); + if (IS_ERR(res->axi_s_xpu_reset)) + return PTR_ERR(res->axi_s_xpu_reset); + + res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); + if (IS_ERR(res->parf_reset)) + return PTR_ERR(res->parf_reset); + + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + } res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, "axi_m_sticky"); @@ -699,9 +709,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) if (IS_ERR(res->ahb_reset)) return PTR_ERR(res->ahb_reset); - res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); - if (IS_ERR(res->phy_ahb_reset)) - return PTR_ERR(res->phy_ahb_reset); + if (is_ipq) { + res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); + if (IS_ERR(res->phy_ahb_reset)) + return PTR_ERR(res->phy_ahb_reset); + } return 0; } @@ -719,9 +731,7 @@ static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) reset_control_assert(res->axi_m_sticky_reset); reset_control_assert(res->pwr_reset); reset_control_assert(res->ahb_reset); - clk_disable_unprepare(res->aux_clk); - clk_disable_unprepare(res->master_clk); - clk_disable_unprepare(res->slave_clk); + clk_bulk_disable_unprepare(res->num_clks, res->clks); } static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) @@ -850,23 +860,9 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) usleep_range(10000, 12000); - ret = clk_prepare_enable(res->aux_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_clk_aux; - } - - ret = clk_prepare_enable(res->master_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable core clock\n"); - goto err_clk_axi_m; - } - - ret = clk_prepare_enable(res->slave_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_axi_s; - } + ret = 
clk_bulk_prepare_enable(res->num_clks, res->clks); + if (ret) + goto err_clks; /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); @@ -891,11 +887,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) return 0; -err_clk_axi_s: - clk_disable_unprepare(res->master_clk); -err_clk_axi_m: - clk_disable_unprepare(res->aux_clk); -err_clk_aux: +err_clks: reset_control_assert(res->ahb_reset); err_rst_ahb: reset_control_assert(res->pwr_reset); @@ -1289,6 +1281,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, + { .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 }, { } }; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 134e0306ff00..fc0fe4d4de49 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -308,7 +308,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); - /* Unmask all MSI's */ + /* Unmask all MSIs */ advk_writel(pcie, 0, PCIE_MSI_MASK_REG); /* Enable summary interrupt for GIC SPI source */ diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 82acd6155adf..40b625458afa 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1875,6 +1875,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, static void hv_eject_device_work(struct work_struct *work) { struct pci_eject_response *ejct_pkt; + struct hv_pcibus_device *hbus; struct hv_pci_dev *hpdev; struct pci_dev *pdev; unsigned long flags; @@ -1885,6 +1886,7 @@ static void hv_eject_device_work(struct work_struct *work) } ctxt; hpdev = container_of(work, struct hv_pci_dev, wrk); + hbus = hpdev->hbus; WARN_ON(hpdev->state != hv_pcichild_ejecting); @@ -1895,8 +1897,7 @@ static void hv_eject_device_work(struct work_struct *work) * because hbus->pci_bus may not exist yet. */ wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); - pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, - wslot); + pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot); if (pdev) { pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pdev); @@ -1904,9 +1905,9 @@ static void hv_eject_device_work(struct work_struct *work) pci_unlock_rescan_remove(); } - spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); + spin_lock_irqsave(&hbus->device_list_lock, flags); list_del(&hpdev->list_entry); - spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (hpdev->pci_slot) pci_destroy_slot(hpdev->pci_slot); @@ -1915,7 +1916,7 @@ static void hv_eject_device_work(struct work_struct *work) ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; - vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt, + vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); @@ -1924,7 +1925,9 @@ static void hv_eject_device_work(struct work_struct *work) /* For the two refs got in new_pcichild_device() */ put_pcichild(hpdev); put_pcichild(hpdev); - put_hvpcibus(hpdev->hbus); + /* hpdev has been freed. Do not use it any more. 
*/ + + put_hvpcibus(hbus); } /** diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 464ba2538d52..9a917b2456f6 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -17,6 +17,7 @@ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/export.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> @@ -30,6 +31,7 @@ #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/phy/phy.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/sizes.h> @@ -95,7 +97,8 @@ #define AFI_MSI_EN_VEC7 0xa8 #define AFI_CONFIGURATION 0xac -#define AFI_CONFIGURATION_EN_FPCI (1 << 0) +#define AFI_CONFIGURATION_EN_FPCI (1 << 0) +#define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31) #define AFI_FPCI_ERROR_MASKS 0xb0 @@ -159,13 +162,14 @@ #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20) +#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29)) +#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29) #define AFI_FUSE 0x104 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) #define AFI_PEX0_CTRL 0x110 #define AFI_PEX1_CTRL 0x118 -#define AFI_PEX2_CTRL 0x128 #define AFI_PEX_CTRL_RST (1 << 0) #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) #define AFI_PEX_CTRL_REFCLK_EN (1 << 3) @@ -177,20 +181,74 @@ #define AFI_PEXBIAS_CTRL_0 0x168 +#define RP_PRIV_XP_DL 0x00000494 +#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1) + +#define RP_RX_HDR_LIMIT 0x00000e00 +#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8) +#define RP_RX_HDR_LIMIT_PW (0x0e << 8) + +#define RP_ECTL_2_R1 0x00000e84 +#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff + +#define RP_ECTL_4_R1 0x00000e8c +#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16) +#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16 + +#define RP_ECTL_5_R1 0x00000e90 +#define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff + +#define RP_ECTL_6_R1 0x00000e94 +#define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff + +#define RP_ECTL_2_R2 0x00000ea4 +#define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff + +#define RP_ECTL_4_R2 0x00000eac +#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16) +#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16 + +#define RP_ECTL_5_R2 0x00000eb0 +#define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff + +#define RP_ECTL_6_R2 0x00000eb4 +#define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff + #define RP_VEND_XP 0x00000f00 -#define RP_VEND_XP_DL_UP (1 << 30) +#define RP_VEND_XP_DL_UP (1 << 30) +#define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27) +#define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28) +#define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18) + +#define RP_VEND_CTL0 0x00000f44 +#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12) +#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12) + +#define RP_VEND_CTL1 0x00000f48 +#define RP_VEND_CTL1_ERPT (1 << 13) + +#define RP_VEND_XP_BIST 0x00000f4c +#define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28) #define RP_VEND_CTL2 0x00000fa8 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7) #define RP_PRIV_MISC 0x00000fe0 -#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) -#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) +#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) +#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) +#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16) +#define 
RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16) +#define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23) +#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24) +#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24) +#define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31) #define RP_LINK_CONTROL_STATUS 0x00000090 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 +#define RP_LINK_CONTROL_STATUS_2 0x000000b0 + #define PADS_CTL_SEL 0x0000009c #define PADS_CTL 0x000000a0 @@ -226,6 +284,7 @@ #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ #define PME_ACK_TIMEOUT 10000 +#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */ struct tegra_msi { struct msi_controller chip; @@ -249,10 +308,12 @@ struct tegra_pcie_soc { unsigned int num_ports; const struct tegra_pcie_port_soc *ports; unsigned int msi_base_shift; + unsigned long afi_pex2_ctrl; u32 pads_pll_ctl; u32 tx_ref_sel; u32 pads_refclk_cfg0; u32 pads_refclk_cfg1; + u32 update_fc_threshold; bool has_pex_clkreq_en; bool has_pex_bias_ctrl; bool has_intr_prsnt_sense; @@ -260,6 +321,24 @@ struct tegra_pcie_soc { bool has_gen2; bool force_pca_enable; bool program_uphy; + bool update_clamp_threshold; + bool program_deskew_time; + bool raw_violation_fixup; + bool update_fc_timer; + bool has_cache_bars; + struct { + struct { + u32 rp_ectl_2_r1; + u32 rp_ectl_4_r1; + u32 rp_ectl_5_r1; + u32 rp_ectl_6_r1; + u32 rp_ectl_2_r2; + u32 rp_ectl_4_r2; + u32 rp_ectl_5_r2; + u32 rp_ectl_6_r2; + } regs; + bool enable; + } ectl; }; static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) @@ -321,6 +400,8 @@ struct tegra_pcie_port { unsigned int lanes; struct phy **phys; + + struct gpio_desc *reset_gpio; }; struct tegra_pcie_bus { @@ -440,6 +521,7 @@ static struct pci_ops tegra_pcie_ops = { static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) { + const struct tegra_pcie_soc *soc = port->pcie->soc; unsigned long ret = 0; switch (port->index) { @@ -452,7 +534,7 @@ static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) break; case 2: - ret = AFI_PEX2_CTRL; + ret = soc->afi_pex2_ctrl; break; } @@ -465,15 +547,162 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port) unsigned long value; /* pulse reset signal */ - value = afi_readl(port->pcie, ctrl); - value &= ~AFI_PEX_CTRL_RST; - afi_writel(port->pcie, value, ctrl); + if (port->reset_gpio) { + gpiod_set_value(port->reset_gpio, 1); + } else { + value = afi_readl(port->pcie, ctrl); + value &= ~AFI_PEX_CTRL_RST; + afi_writel(port->pcie, value, ctrl); + } usleep_range(1000, 2000); - value = afi_readl(port->pcie, ctrl); - value |= AFI_PEX_CTRL_RST; - afi_writel(port->pcie, value, ctrl); + if (port->reset_gpio) { + gpiod_set_value(port->reset_gpio, 0); + } else { + value = afi_readl(port->pcie, ctrl); + value |= AFI_PEX_CTRL_RST; + afi_writel(port->pcie, value, ctrl); + } +} + +static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port) +{ + const struct tegra_pcie_soc *soc = port->pcie->soc; + u32 value; + + /* Enable AER capability */ + value = readl(port->base + RP_VEND_CTL1); + value |= RP_VEND_CTL1_ERPT; + writel(value, port->base + RP_VEND_CTL1); + + /* Optimal settings to enhance bandwidth */ + value = readl(port->base + RP_VEND_XP); + value |= RP_VEND_XP_OPPORTUNISTIC_ACK; + value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC; + writel(value, port->base + RP_VEND_XP); + + /* + * LTSSM will wait for DLLP to finish before entering L1 or L2, + * to avoid truncation of PM 
messages which results in receiver errors + */ + value = readl(port->base + RP_VEND_XP_BIST); + value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE; + writel(value, port->base + RP_VEND_XP_BIST); + + value = readl(port->base + RP_PRIV_MISC); + value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE; + value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE; + + if (soc->update_clamp_threshold) { + value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK | + RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK); + value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD | + RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD; + } + + writel(value, port->base + RP_PRIV_MISC); +} + +static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port) +{ + const struct tegra_pcie_soc *soc = port->pcie->soc; + u32 value; + + value = readl(port->base + RP_ECTL_2_R1); + value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK; + value |= soc->ectl.regs.rp_ectl_2_r1; + writel(value, port->base + RP_ECTL_2_R1); + + value = readl(port->base + RP_ECTL_4_R1); + value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK; + value |= soc->ectl.regs.rp_ectl_4_r1 << + RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT; + writel(value, port->base + RP_ECTL_4_R1); + + value = readl(port->base + RP_ECTL_5_R1); + value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK; + value |= soc->ectl.regs.rp_ectl_5_r1; + writel(value, port->base + RP_ECTL_5_R1); + + value = readl(port->base + RP_ECTL_6_R1); + value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK; + value |= soc->ectl.regs.rp_ectl_6_r1; + writel(value, port->base + RP_ECTL_6_R1); + + value = readl(port->base + RP_ECTL_2_R2); + value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK; + value |= soc->ectl.regs.rp_ectl_2_r2; + writel(value, port->base + RP_ECTL_2_R2); + + value = readl(port->base + RP_ECTL_4_R2); + value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK; + value |= soc->ectl.regs.rp_ectl_4_r2 << + RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT; + writel(value, port->base + RP_ECTL_4_R2); + + value = readl(port->base + RP_ECTL_5_R2); + value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK; + value |= soc->ectl.regs.rp_ectl_5_r2; + writel(value, port->base + RP_ECTL_5_R2); + + value = readl(port->base + RP_ECTL_6_R2); + value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK; + value |= soc->ectl.regs.rp_ectl_6_r2; + writel(value, port->base + RP_ECTL_6_R2); +} + +static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port) +{ + const struct tegra_pcie_soc *soc = port->pcie->soc; + u32 value; + + /* + * Sometimes link speed change from Gen2 to Gen1 fails due to + * instability in deskew logic on lane-0. Increase the deskew + * retry time to resolve this issue. + */ + if (soc->program_deskew_time) { + value = readl(port->base + RP_VEND_CTL0); + value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK; + value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH; + writel(value, port->base + RP_VEND_CTL0); + } + + /* Fixup for read after write violation. 
*/ + if (soc->raw_violation_fixup) { + value = readl(port->base + RP_RX_HDR_LIMIT); + value &= ~RP_RX_HDR_LIMIT_PW_MASK; + value |= RP_RX_HDR_LIMIT_PW; + writel(value, port->base + RP_RX_HDR_LIMIT); + + value = readl(port->base + RP_PRIV_XP_DL); + value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD; + writel(value, port->base + RP_PRIV_XP_DL); + + value = readl(port->base + RP_VEND_XP); + value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; + value |= soc->update_fc_threshold; + writel(value, port->base + RP_VEND_XP); + } + + if (soc->update_fc_timer) { + value = readl(port->base + RP_VEND_XP); + value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; + value |= soc->update_fc_threshold; + writel(value, port->base + RP_VEND_XP); + } + + /* + * PCIe link doesn't come up with few legacy PCIe endpoints if + * root port advertises both Gen-1 and Gen-2 speeds in Tegra. + * Hence, the strategy followed here is to initially advertise + * only Gen-1 and after link is up, retrain link to Gen-2 speed + */ + value = readl(port->base + RP_LINK_CONTROL_STATUS_2); + value &= ~PCI_EXP_LNKSTA_CLS; + value |= PCI_EXP_LNKSTA_CLS_2_5GB; + writel(value, port->base + RP_LINK_CONTROL_STATUS_2); } static void tegra_pcie_port_enable(struct tegra_pcie_port *port) @@ -500,6 +729,13 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port) value |= RP_VEND_CTL2_PCA_ENABLE; writel(value, port->base + RP_VEND_CTL2); } + + tegra_pcie_enable_rp_features(port); + + if (soc->ectl.enable) + tegra_pcie_program_ectl_settings(port); + + tegra_pcie_apply_sw_fixup(port); } static void tegra_pcie_port_disable(struct tegra_pcie_port *port) @@ -521,6 +757,12 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port) value &= ~AFI_PEX_CTRL_REFCLK_EN; afi_writel(port->pcie, value, ctrl); + + /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */ + value = afi_readl(port->pcie, AFI_PCIE_CONFIG); + value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); + value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); + afi_writel(port->pcie, value, AFI_PCIE_CONFIG); } static void tegra_pcie_port_free(struct tegra_pcie_port *port) @@ -545,12 +787,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class); -/* Tegra PCIE requires relaxed ordering */ +/* Tegra20 and Tegra30 PCIE requires relaxed ordering */ static void tegra_pcie_relax_enable(struct pci_dev *dev) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } -DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable); static int tegra_pcie_request_resources(struct tegra_pcie *pcie) { @@ -635,7 +880,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg) * do not pollute kernel log with master abort reports since they * happen a lot during enumeration */ - if (code == AFI_INTR_MASTER_ABORT) + if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE) dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature); else dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature); @@ -704,11 +949,13 @@ static void 
tegra_pcie_setup_translations(struct tegra_pcie *pcie) afi_writel(pcie, 0, AFI_AXI_BAR5_SZ); afi_writel(pcie, 0, AFI_FPCI_BAR5); - /* map all upstream transactions as uncached */ - afi_writel(pcie, 0, AFI_CACHE_BAR0_ST); - afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); - afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); - afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); + if (pcie->soc->has_cache_bars) { + /* map all upstream transactions as uncached */ + afi_writel(pcie, 0, AFI_CACHE_BAR0_ST); + afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); + afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); + afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); + } /* MSI translations are setup only when needed */ afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST); @@ -852,7 +1099,6 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; int err; @@ -878,12 +1124,6 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) } } - /* Configure the reference clock driver */ - pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); - - if (soc->num_ports > 2) - pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); - return 0; } @@ -918,13 +1158,11 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) return 0; } -static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) +static void tegra_pcie_enable_controller(struct tegra_pcie *pcie) { - struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; unsigned long value; - int err; /* enable PLL power down */ if (pcie->phy) { @@ -942,9 +1180,12 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) value = afi_readl(pcie, AFI_PCIE_CONFIG); value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config; + value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL; - list_for_each_entry(port, &pcie->ports, list) + list_for_each_entry(port, &pcie->ports, list) { value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); + value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); + } afi_writel(pcie, value, AFI_PCIE_CONFIG); @@ -958,20 +1199,10 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) afi_writel(pcie, value, AFI_FUSE); } - if (soc->program_uphy) { - err = tegra_pcie_phy_power_on(pcie); - if (err < 0) { - dev_err(dev, "failed to power on PHY(s): %d\n", err); - return err; - } - } - - /* take the PCIe interface module out of reset */ - reset_control_deassert(pcie->pcie_xrst); - - /* finally enable PCIe */ + /* Disable AFI dynamic clock gating and enable PCIe */ value = afi_readl(pcie, AFI_CONFIGURATION); value |= AFI_CONFIGURATION_EN_FPCI; + value |= AFI_CONFIGURATION_CLKEN_OVERRIDE; afi_writel(pcie, value, AFI_CONFIGURATION); value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR | @@ -989,22 +1220,6 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) /* disable all exceptions */ afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS); - - return 0; -} - -static void tegra_pcie_disable_controller(struct tegra_pcie *pcie) -{ - int err; - - reset_control_assert(pcie->pcie_xrst); - - if (pcie->soc->program_uphy) { - err = tegra_pcie_phy_power_off(pcie); - if (err < 0) - dev_err(pcie->dev, "failed to power off PHY(s): %d\n", - err); - } } static void tegra_pcie_power_off(struct tegra_pcie *pcie) @@ -1014,13 +1229,11 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie) int err; 
reset_control_assert(pcie->afi_rst); - reset_control_assert(pcie->pex_rst); clk_disable_unprepare(pcie->pll_e); if (soc->has_cml_clk) clk_disable_unprepare(pcie->cml_clk); clk_disable_unprepare(pcie->afi_clk); - clk_disable_unprepare(pcie->pex_clk); if (!dev->pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); @@ -1048,46 +1261,66 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie) if (err < 0) dev_err(dev, "failed to enable regulators: %d\n", err); - if (dev->pm_domain) { - err = clk_prepare_enable(pcie->pex_clk); + if (!dev->pm_domain) { + err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE); if (err) { - dev_err(dev, "failed to enable PEX clock: %d\n", err); - return err; + dev_err(dev, "failed to power ungate: %d\n", err); + goto regulator_disable; } - reset_control_deassert(pcie->pex_rst); - } else { - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, - pcie->pex_clk, - pcie->pex_rst); + err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE); if (err) { - dev_err(dev, "powerup sequence failed: %d\n", err); - return err; + dev_err(dev, "failed to remove clamp: %d\n", err); + goto powergate; } } - reset_control_deassert(pcie->afi_rst); - err = clk_prepare_enable(pcie->afi_clk); if (err < 0) { dev_err(dev, "failed to enable AFI clock: %d\n", err); - return err; + goto powergate; } if (soc->has_cml_clk) { err = clk_prepare_enable(pcie->cml_clk); if (err < 0) { dev_err(dev, "failed to enable CML clock: %d\n", err); - return err; + goto disable_afi_clk; } } err = clk_prepare_enable(pcie->pll_e); if (err < 0) { dev_err(dev, "failed to enable PLLE clock: %d\n", err); - return err; + goto disable_cml_clk; } + reset_control_deassert(pcie->afi_rst); + return 0; + +disable_cml_clk: + if (soc->has_cml_clk) + clk_disable_unprepare(pcie->cml_clk); +disable_afi_clk: + clk_disable_unprepare(pcie->afi_clk); +powergate: + if (!dev->pm_domain) + tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); +regulator_disable: + regulator_bulk_disable(pcie->num_supplies, pcie->supplies); + + return err; +} + +static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie) +{ + const struct tegra_pcie_soc *soc = pcie->soc; + + /* Configure the reference clock driver */ + pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); + + if (soc->num_ports > 2) + pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); } static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) @@ -1647,6 +1880,15 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) return 0; } +static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie) +{ + u32 value; + + value = afi_readl(pcie, AFI_INTR_MASK); + value &= ~AFI_INTR_MASK_INT_MASK; + afi_writel(pcie, value, AFI_INTR_MASK); +} + static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, u32 *xbar) { @@ -1990,6 +2232,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) struct tegra_pcie_port *rp; unsigned int index; u32 value; + char *label; err = of_pci_get_devfn(port); if (err < 0) { @@ -2048,6 +2291,31 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) if (IS_ERR(rp->base)) return PTR_ERR(rp->base); + label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index); + if (!label) { + dev_err(dev, "failed to create reset GPIO label\n"); + return -ENOMEM; + } + + /* + * Returns -ENOENT if reset-gpios property is not populated + * and in this case fall back to using AFI per port register + * to toggle PERST# SFIO line. 
+ */ + rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port, + "reset-gpios", 0, + GPIOD_OUT_LOW, + label); + if (IS_ERR(rp->reset_gpio)) { + if (PTR_ERR(rp->reset_gpio) == -ENOENT) { + rp->reset_gpio = NULL; + } else { + dev_err(dev, "failed to get reset GPIO: %d\n", + err); + return PTR_ERR(rp->reset_gpio); + } + } + list_add_tail(&rp->list, &pcie->ports); } @@ -2095,7 +2363,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) } while (--timeout); if (!timeout) { - dev_err(dev, "link %u down, retrying\n", port->index); + dev_dbg(dev, "link %u down, retrying\n", port->index); goto retry; } @@ -2117,6 +2385,64 @@ retry: return false; } +static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct tegra_pcie_port *port; + ktime_t deadline; + u32 value; + + list_for_each_entry(port, &pcie->ports, list) { + /* + * "Supported Link Speeds Vector" in "Link Capabilities 2" + * is not supported by Tegra. tegra_pcie_change_link_speed() + * is called only for Tegra chips which support Gen2. + * So there no harm if supported link speed is not verified. + */ + value = readl(port->base + RP_LINK_CONTROL_STATUS_2); + value &= ~PCI_EXP_LNKSTA_CLS; + value |= PCI_EXP_LNKSTA_CLS_5_0GB; + writel(value, port->base + RP_LINK_CONTROL_STATUS_2); + + /* + * Poll until link comes back from recovery to avoid race + * condition. + */ + deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT); + + while (ktime_before(ktime_get(), deadline)) { + value = readl(port->base + RP_LINK_CONTROL_STATUS); + if ((value & PCI_EXP_LNKSTA_LT) == 0) + break; + + usleep_range(2000, 3000); + } + + if (value & PCI_EXP_LNKSTA_LT) + dev_warn(dev, "PCIe port %u link is in recovery\n", + port->index); + + /* Retrain the link */ + value = readl(port->base + RP_LINK_CONTROL_STATUS); + value |= PCI_EXP_LNKCTL_RL; + writel(value, port->base + RP_LINK_CONTROL_STATUS); + + deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT); + + while (ktime_before(ktime_get(), deadline)) { + value = readl(port->base + RP_LINK_CONTROL_STATUS); + if ((value & PCI_EXP_LNKSTA_LT) == 0) + break; + + usleep_range(2000, 3000); + } + + if (value & PCI_EXP_LNKSTA_LT) + dev_err(dev, "failed to retrain link of port %u\n", + port->index); + } +} + static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; @@ -2127,7 +2453,12 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) port->index, port->lanes); tegra_pcie_port_enable(port); + } + /* Start LTSSM from Tegra side */ + reset_control_deassert(pcie->pcie_xrst); + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { if (tegra_pcie_port_check_link(port)) continue; @@ -2136,12 +2467,17 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) tegra_pcie_port_disable(port); tegra_pcie_port_free(port); } + + if (pcie->soc->has_gen2) + tegra_pcie_change_link_speed(pcie); } static void tegra_pcie_disable_ports(struct tegra_pcie *pcie) { struct tegra_pcie_port *port, *tmp; + reset_control_assert(pcie->pcie_xrst); + list_for_each_entry_safe(port, tmp, &pcie->ports, list) tegra_pcie_port_disable(port); } @@ -2155,6 +2491,7 @@ static const struct tegra_pcie_soc tegra20_pcie = { .num_ports = 2, .ports = tegra20_pcie_ports, .msi_base_shift = 0, + .afi_pex2_ctrl = 0x128, .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, .pads_refclk_cfg0 = 0xfa5cfa5c, @@ -2165,6 +2502,12 @@ static const struct tegra_pcie_soc tegra20_pcie = { .has_gen2 = false, .force_pca_enable 
= false, .program_uphy = true, + .update_clamp_threshold = false, + .program_deskew_time = false, + .raw_violation_fixup = false, + .update_fc_timer = false, + .has_cache_bars = true, + .ectl.enable = false, }; static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = { @@ -2188,6 +2531,12 @@ static const struct tegra_pcie_soc tegra30_pcie = { .has_gen2 = false, .force_pca_enable = false, .program_uphy = true, + .update_clamp_threshold = false, + .program_deskew_time = false, + .raw_violation_fixup = false, + .update_fc_timer = false, + .has_cache_bars = false, + .ectl.enable = false, }; static const struct tegra_pcie_soc tegra124_pcie = { @@ -2197,6 +2546,8 @@ static const struct tegra_pcie_soc tegra124_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x44ac44ac, + /* FC threshold is bit[25:18] */ + .update_fc_threshold = 0x03fc0000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2204,6 +2555,12 @@ static const struct tegra_pcie_soc tegra124_pcie = { .has_gen2 = true, .force_pca_enable = false, .program_uphy = true, + .update_clamp_threshold = true, + .program_deskew_time = false, + .raw_violation_fixup = true, + .update_fc_timer = false, + .has_cache_bars = false, + .ectl.enable = false, }; static const struct tegra_pcie_soc tegra210_pcie = { @@ -2213,6 +2570,8 @@ static const struct tegra_pcie_soc tegra210_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x90b890b8, + /* FC threshold is bit[25:18] */ + .update_fc_threshold = 0x01800000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2220,6 +2579,24 @@ static const struct tegra_pcie_soc tegra210_pcie = { .has_gen2 = true, .force_pca_enable = true, .program_uphy = true, + .update_clamp_threshold = true, + .program_deskew_time = true, + .raw_violation_fixup = false, + .update_fc_timer = true, + .has_cache_bars = false, + .ectl = { + .regs = { + .rp_ectl_2_r1 = 0x0000000f, + .rp_ectl_4_r1 = 0x00000067, + .rp_ectl_5_r1 = 0x55010000, + .rp_ectl_6_r1 = 0x00000001, + .rp_ectl_2_r2 = 0x0000008f, + .rp_ectl_4_r2 = 0x000000c7, + .rp_ectl_5_r2 = 0x55010000, + .rp_ectl_6_r2 = 0x00000001, + }, + .enable = true, + }, }; static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = { @@ -2232,6 +2609,7 @@ static const struct tegra_pcie_soc tegra186_pcie = { .num_ports = 3, .ports = tegra186_pcie_ports, .msi_base_shift = 8, + .afi_pex2_ctrl = 0x19c, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x80b880b8, @@ -2243,6 +2621,12 @@ static const struct tegra_pcie_soc tegra186_pcie = { .has_gen2 = true, .force_pca_enable = false, .program_uphy = false, + .update_clamp_threshold = false, + .program_deskew_time = false, + .raw_violation_fixup = false, + .update_fc_timer = false, + .has_cache_bars = false, + .ectl.enable = false, }; static const struct of_device_id tegra_pcie_of_match[] = { @@ -2485,16 +2869,32 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) { struct tegra_pcie *pcie = dev_get_drvdata(dev); struct tegra_pcie_port *port; + int err; list_for_each_entry(port, &pcie->ports, list) tegra_pcie_pme_turnoff(port); tegra_pcie_disable_ports(pcie); + /* + * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to + * avoid unwanted interrupts raised by AFI after pex_rst is asserted. 
+ */ + tegra_pcie_disable_interrupts(pcie); + + if (pcie->soc->program_uphy) { + err = tegra_pcie_phy_power_off(pcie); + if (err < 0) + dev_err(dev, "failed to power off PHY(s): %d\n", err); + } + + reset_control_assert(pcie->pex_rst); + clk_disable_unprepare(pcie->pex_clk); + if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_disable_msi(pcie); - tegra_pcie_disable_controller(pcie); + pinctrl_pm_select_idle_state(dev); tegra_pcie_power_off(pcie); return 0; @@ -2510,20 +2910,45 @@ static int __maybe_unused tegra_pcie_pm_resume(struct device *dev) dev_err(dev, "tegra pcie power on fail: %d\n", err); return err; } - err = tegra_pcie_enable_controller(pcie); - if (err) { - dev_err(dev, "tegra pcie controller enable fail: %d\n", err); + + err = pinctrl_pm_select_default_state(dev); + if (err < 0) { + dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err); goto poweroff; } + + tegra_pcie_enable_controller(pcie); tegra_pcie_setup_translations(pcie); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_enable_msi(pcie); + err = clk_prepare_enable(pcie->pex_clk); + if (err) { + dev_err(dev, "failed to enable PEX clock: %d\n", err); + goto pex_dpd_enable; + } + + reset_control_deassert(pcie->pex_rst); + + if (pcie->soc->program_uphy) { + err = tegra_pcie_phy_power_on(pcie); + if (err < 0) { + dev_err(dev, "failed to power on PHY(s): %d\n", err); + goto disable_pex_clk; + } + } + + tegra_pcie_apply_pad_settings(pcie); tegra_pcie_enable_ports(pcie); return 0; +disable_pex_clk: + reset_control_assert(pcie->pex_rst); + clk_disable_unprepare(pcie->pex_clk); +pex_dpd_enable: + pinctrl_pm_select_idle_state(dev); poweroff: tegra_pcie_power_off(pcie); diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index 025ef7d9a046..16d938920ca5 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -288,4 +289,13 @@ static int __init altera_msi_init(void) { return platform_driver_register(&altera_msi_driver); } + +static void __exit altera_msi_exit(void) +{ + platform_driver_unregister(&altera_msi_driver); +} + subsys_initcall(altera_msi_init); +MODULE_DEVICE_TABLE(of, altera_msi_of_match); +module_exit(altera_msi_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 27edcebd1726..d2497ca43828 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> @@ -43,6 +44,8 @@ #define S10_RP_RXCPL_STATUS 0x200C #define S10_RP_CFG_ADDR(pcie, reg) \ (((pcie)->hip_base) + (reg) + (1 << 20)) +#define S10_RP_SECONDARY(pcie) \ + readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS)) /* TLP configuration type 0 and 1 */ #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ @@ -54,14 +57,9 @@ #define TLP_WRITE_TAG 0x10 #define RP_DEVFN 0 #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) -#define TLP_CFGRD_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgrd0 \ - : pcie->pcie_data->cfgrd1) << 24) | \ - TLP_PAYLOAD_SIZE) -#define TLP_CFGWR_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? 
pcie->pcie_data->cfgwr0 \ - : pcie->pcie_data->cfgwr1) << 24) | \ - TLP_PAYLOAD_SIZE) +#define TLP_CFG_DW0(pcie, cfg) \ + (((cfg) << 24) | \ + TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ @@ -321,14 +319,31 @@ static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers, s10_tlp_write_tx(pcie, data, RP_TX_EOP); } +static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn, + int where, u8 byte_en, bool read, u32 *headers) +{ + u8 cfg; + u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0; + u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1; + u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG; + + if (pcie->pcie_data->version == ALTERA_PCIE_V1) + cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1; + else + cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1; + + headers[0] = TLP_CFG_DW0(pcie, cfg); + headers[1] = TLP_CFG_DW1(pcie, tag, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); +} + static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, int where, u8 byte_en, u32 *value) { u32 headers[TLP_HDR_SIZE]; - headers[0] = TLP_CFGRD_DW0(pcie, bus); - headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); - headers[2] = TLP_CFG_DW2(bus, devfn, where); + get_tlp_header(pcie, bus, devfn, where, byte_en, true, + headers); pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false); @@ -341,9 +356,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, u32 headers[TLP_HDR_SIZE]; int ret; - headers[0] = TLP_CFGWR_DW0(pcie, bus); - headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); - headers[2] = TLP_CFG_DW2(bus, devfn, where); + get_tlp_header(pcie, bus, devfn, where, byte_en, false, + headers); /* check alignment to Qword */ if ((where & 0x7) == 0) @@ -705,6 +719,13 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) return 0; } +static void altera_pcie_irq_teardown(struct altera_pcie *pcie) +{ + irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); + irq_domain_remove(pcie->irq_domain); + irq_dispose_mapping(pcie->irq); +} + static int altera_pcie_parse_dt(struct altera_pcie *pcie) { struct device *dev = &pcie->pdev->dev; @@ -798,6 +819,7 @@ static int altera_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; + platform_set_drvdata(pdev, pcie); match = of_match_device(altera_pcie_of_match, &pdev->dev); if (!match) @@ -855,13 +877,28 @@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } +static int altera_pcie_remove(struct platform_device *pdev) +{ + struct altera_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); + pci_free_resource_list(&pcie->resources); + altera_pcie_irq_teardown(pcie); + + return 0; +} + static struct platform_driver altera_pcie_driver = { .probe = altera_pcie_probe, + .remove = altera_pcie_remove, .driver = { .name = "altera-pcie", .of_match_table = altera_pcie_of_match, - .suppress_bind_attrs = true, }, }; -builtin_platform_driver(altera_pcie_driver); +MODULE_DEVICE_TABLE(of, altera_pcie_of_match); +module_platform_driver(altera_pcie_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index f30f5f3fb5c1..5a3550b6bb29 100644 --- 
a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -87,7 +87,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) /* * DT nodes are not used by all platforms that use the iProc PCIe - * core driver. For platforms that require explict inbound mapping + * core driver. For platforms that require explicit inbound mapping * configuration, "dma-ranges" would have been present in DT */ pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index e3ca46497470..2d457bfdaf66 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -163,7 +163,7 @@ enum iproc_pcie_ib_map_type { * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or * SZ_1G * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or - * GB, depedning on the size unit + * GB, depending on the size unit * @nr_sizes: number of supported inbound mapping region sizes * @nr_windows: number of supported inbound mapping windows for the region * @imap_addr_offset: register offset between the upper and lower 32-bit diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index 77052a0712d0..672e633601c7 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -31,56 +31,61 @@ * translation tables are grouped into windows, each window registers are * grouped into blocks of 4 or 16 registers each */ -#define PAB_REG_BLOCK_SIZE 16 -#define PAB_EXT_REG_BLOCK_SIZE 4 +#define PAB_REG_BLOCK_SIZE 16 +#define PAB_EXT_REG_BLOCK_SIZE 4 -#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE)) -#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) +#define PAB_REG_ADDR(offset, win) \ + (offset + (win * PAB_REG_BLOCK_SIZE)) +#define PAB_EXT_REG_ADDR(offset, win) \ + (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) -#define LTSSM_STATUS 0x0404 -#define LTSSM_STATUS_L0_MASK 0x3f -#define LTSSM_STATUS_L0 0x2d +#define LTSSM_STATUS 0x0404 +#define LTSSM_STATUS_L0_MASK 0x3f +#define LTSSM_STATUS_L0 0x2d -#define PAB_CTRL 0x0808 -#define AMBA_PIO_ENABLE_SHIFT 0 -#define PEX_PIO_ENABLE_SHIFT 1 -#define PAGE_SEL_SHIFT 13 -#define PAGE_SEL_MASK 0x3f -#define PAGE_LO_MASK 0x3ff -#define PAGE_SEL_EN 0xc00 -#define PAGE_SEL_OFFSET_SHIFT 10 +#define PAB_CTRL 0x0808 +#define AMBA_PIO_ENABLE_SHIFT 0 +#define PEX_PIO_ENABLE_SHIFT 1 +#define PAGE_SEL_SHIFT 13 +#define PAGE_SEL_MASK 0x3f +#define PAGE_LO_MASK 0x3ff +#define PAGE_SEL_OFFSET_SHIFT 10 -#define PAB_AXI_PIO_CTRL 0x0840 -#define APIO_EN_MASK 0xf +#define PAB_AXI_PIO_CTRL 0x0840 +#define APIO_EN_MASK 0xf -#define PAB_PEX_PIO_CTRL 0x08c0 -#define PIO_ENABLE_SHIFT 0 +#define PAB_PEX_PIO_CTRL 0x08c0 +#define PIO_ENABLE_SHIFT 0 #define PAB_INTP_AMBA_MISC_ENB 0x0b0c -#define PAB_INTP_AMBA_MISC_STAT 0x0b1c +#define PAB_INTP_AMBA_MISC_STAT 0x0b1c #define PAB_INTP_INTX_MASK 0x01e0 #define PAB_INTP_MSI_MASK 0x8 -#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) -#define WIN_ENABLE_SHIFT 0 -#define WIN_TYPE_SHIFT 1 +#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) +#define WIN_ENABLE_SHIFT 0 +#define WIN_TYPE_SHIFT 1 +#define WIN_TYPE_MASK 0x3 +#define WIN_SIZE_MASK 0xfffffc00 #define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) +#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win) #define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win) 
#define AXI_WINDOW_ALIGN_MASK 3
#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
-#define PAB_BUS_SHIFT 24
-#define PAB_DEVICE_SHIFT 19
-#define PAB_FUNCTION_SHIFT 16
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
#define PAB_INTP_AXI_PIO_CLASS 0x474
-#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
-#define AMAP_CTRL_EN_SHIFT 0
-#define AMAP_CTRL_TYPE_SHIFT 1
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+#define AMAP_CTRL_TYPE_MASK 3
#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
@@ -88,34 +93,40 @@
#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
/* starting offset of INTX bits in status register */
-#define PAB_INTX_START 5
+#define PAB_INTX_START 5
/* supported number of MSI interrupts */
-#define PCI_NUM_MSI 16
+#define PCI_NUM_MSI 16
/* MSI registers */
-#define MSI_BASE_LO_OFFSET 0x04
-#define MSI_BASE_HI_OFFSET 0x08
-#define MSI_SIZE_OFFSET 0x0c
-#define MSI_ENABLE_OFFSET 0x14
-#define MSI_STATUS_OFFSET 0x18
-#define MSI_DATA_OFFSET 0x20
-#define MSI_ADDR_L_OFFSET 0x24
-#define MSI_ADDR_H_OFFSET 0x28
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
/* outbound and inbound window definitions */
-#define WIN_NUM_0 0
-#define WIN_NUM_1 1
-#define CFG_WINDOW_TYPE 0
-#define IO_WINDOW_TYPE 1
-#define MEM_WINDOW_TYPE 2
-#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
-#define MAX_PIO_WINDOWS 8
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_MIN 90000
-#define LINK_WAIT_MAX 100000
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+#define PAGED_ADDR_BNDRY 0xc00
+#define OFFSET_TO_PAGE_ADDR(off) \
+ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off) \
+ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
struct mobiveil_msi { /* MSI information */
 struct mutex lock; /* protect bitmap variable */
@@ -145,15 +156,119 @@ struct mobiveil_pcie {
 struct mobiveil_msi msi;
};
-static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
- const u32 reg)
+/*
+ * mobiveil_pcie_sel_page - routine to access paged register
+ *
+ * Registers whose address is greater than PAGED_ADDR_BNDRY (0xc00) are
+ * paged; for this scheme to work, the upper 6 bits of the offset are
+ * written to the pg_sel field of the PAB_CTRL register and the lower 10
+ * bits, ORed with PAGED_ADDR_BNDRY, are used as the offset of the register.
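+ *
+ * For example, offset 0x4ba0 (PAB_PEX_AMAP_CTRL window 0) selects page
+ * OFFSET_TO_PAGE_IDX(0x4ba0) = (0x4ba0 >> 10) & 0x3f = 0x12 and is then
+ * accessed at OFFSET_TO_PAGE_ADDR(0x4ba0) = (0x4ba0 & 0x3ff) | 0xc00 =
+ * 0xfa0 within the selected page.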
+ */ +static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx) +{ + u32 val; + + val = readl(pcie->csr_axi_slave_base + PAB_CTRL); + val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT); + val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT; + + writel(val, pcie->csr_axi_slave_base + PAB_CTRL); +} + +static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off) +{ + if (off < PAGED_ADDR_BNDRY) { + /* For directly accessed registers, clear the pg_sel field */ + mobiveil_pcie_sel_page(pcie, 0); + return pcie->csr_axi_slave_base + off; + } + + mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off)); + return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off); +} + +static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val) { - writel_relaxed(value, pcie->csr_axi_slave_base + reg); + if ((uintptr_t)addr & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + switch (size) { + case 4: + *val = readl(addr); + break; + case 2: + *val = readw(addr); + break; + case 1: + *val = readb(addr); + break; + default: + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; } -static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg) +static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val) { - return readl_relaxed(pcie->csr_axi_slave_base + reg); + if ((uintptr_t)addr & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 4: + writel(val, addr); + break; + case 2: + writew(val, addr); + break; + case 1: + writeb(val, addr); + break; + default: + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; +} + +static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) +{ + void *addr; + u32 val; + int ret; + + addr = mobiveil_pcie_comp_addr(pcie, off); + + ret = mobiveil_pcie_read(addr, size, &val); + if (ret) + dev_err(&pcie->pdev->dev, "read CSR address failed\n"); + + return val; +} + +static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size) +{ + void *addr; + int ret; + + addr = mobiveil_pcie_comp_addr(pcie, off); + + ret = mobiveil_pcie_write(addr, size, val); + if (ret) + dev_err(&pcie->pdev->dev, "write CSR address failed\n"); +} + +static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off) +{ + return csr_read(pcie, off, 0x4); +} + +static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off) +{ + csr_write(pcie, val, off, 0x4); } static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) @@ -174,7 +289,7 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) * Do not read more than one device on the bus directly * attached to RC */ - if ((bus->primary == pcie->root_bus_nr) && (devfn > 0)) + if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0)) return false; return true; @@ -185,17 +300,17 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) * root port or endpoint */ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, - unsigned int devfn, int where) + unsigned int devfn, int where) { struct mobiveil_pcie *pcie = bus->sysdata; + u32 value; if (!mobiveil_pcie_valid_device(bus, devfn)) return NULL; - if (bus->number == pcie->root_bus_nr) { - /* RC config access */ + /* RC config access */ + if (bus->number == pcie->root_bus_nr) return pcie->csr_axi_slave_base + where; - } /* * EP config access (in Config/APIO space) @@ -203,10 +318,12 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 
Register. * Relies on pci_lock serialization */ - csr_writel(pcie, bus->number << PAB_BUS_SHIFT | - PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | - PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT, - PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); + value = bus->number << PAB_BUS_SHIFT | + PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | + PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT; + + csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); + return pcie->config_axi_slave_base + where; } @@ -241,24 +358,29 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) /* Handle INTx */ if (intr_status & PAB_INTP_INTX_MASK) { - shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >> - PAB_INTX_START; + shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); + shifted_status &= PAB_INTP_INTX_MASK; + shifted_status >>= PAB_INTX_START; do { for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) { virq = irq_find_mapping(pcie->intx_domain, - bit + 1); + bit + 1); if (virq) generic_handle_irq(virq); else - dev_err_ratelimited(dev, - "unexpected IRQ, INT%d\n", bit); + dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", + bit); - /* clear interrupt */ - csr_writel(pcie, - shifted_status << PAB_INTX_START, - PAB_INTP_AMBA_MISC_STAT); + /* clear interrupt handled */ + csr_writel(pcie, 1 << (PAB_INTX_START + bit), + PAB_INTP_AMBA_MISC_STAT); } - } while ((shifted_status >> PAB_INTX_START) != 0); + + shifted_status = csr_readl(pcie, + PAB_INTP_AMBA_MISC_STAT); + shifted_status &= PAB_INTP_INTX_MASK; + shifted_status >>= PAB_INTX_START; + } while (shifted_status != 0); } /* read extra MSI status register */ @@ -266,8 +388,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) /* handle MSI interrupts */ while (msi_status & 1) { - msi_data = readl_relaxed(pcie->apb_csr_base - + MSI_DATA_OFFSET); + msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET); /* * MSI_STATUS_OFFSET register gets updated to zero @@ -276,18 +397,18 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) * two dummy reads. 
*/ msi_addr_lo = readl_relaxed(pcie->apb_csr_base + - MSI_ADDR_L_OFFSET); + MSI_ADDR_L_OFFSET); msi_addr_hi = readl_relaxed(pcie->apb_csr_base + - MSI_ADDR_H_OFFSET); + MSI_ADDR_H_OFFSET); dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n", - msi_data, msi_addr_hi, msi_addr_lo); + msi_data, msi_addr_hi, msi_addr_lo); virq = irq_find_mapping(msi->dev_domain, msi_data); if (virq) generic_handle_irq(virq); msi_status = readl_relaxed(pcie->apb_csr_base + - MSI_STATUS_OFFSET); + MSI_STATUS_OFFSET); } /* Clear the interrupt status */ @@ -304,7 +425,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) /* map config resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "config_axi_slave"); + "config_axi_slave"); pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->config_axi_slave_base)) return PTR_ERR(pcie->config_axi_slave_base); @@ -312,7 +433,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) /* map csr resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "csr_axi_slave"); + "csr_axi_slave"); pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->csr_axi_slave_base)) return PTR_ERR(pcie->csr_axi_slave_base); @@ -337,92 +458,50 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) return -ENODEV; } - irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); - return 0; } -/* - * select_paged_register - routine to access paged register of root complex - * - * registers of RC are paged, for this scheme to work - * extracted higher 6 bits of the offset will be written to pg_sel - * field of PAB_CTRL register and rest of the lower 10 bits enabled with - * PAGE_SEL_EN are used as offset of the register. 
- */ -static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset) -{ - int pab_ctrl_dw, pg_sel; - - /* clear pg_sel field */ - pab_ctrl_dw = csr_readl(pcie, PAB_CTRL); - pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT)); - - /* set pg_sel field */ - pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK; - pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT)); - csr_writel(pcie, pab_ctrl_dw, PAB_CTRL); -} - -static void write_paged_register(struct mobiveil_pcie *pcie, - u32 val, u32 offset) -{ - u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; - - select_paged_register(pcie, offset); - csr_writel(pcie, val, off); -} - -static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset) -{ - u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; - - select_paged_register(pcie, offset); - return csr_readl(pcie, off); -} - static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, - int pci_addr, u32 type, u64 size) + u64 pci_addr, u32 type, u64 size) { - int pio_ctrl_val; - int amap_ctrl_dw; + u32 value; u64 size64 = ~(size - 1); - if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) { + if (win_num >= pcie->ppio_wins) { dev_err(&pcie->pdev->dev, "ERROR: max inbound windows reached !\n"); return; } - pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL); - csr_writel(pcie, - pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL); - amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num)); - amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT)); - amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT)); + value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num)); + value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK); + value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT | + (lower_32_bits(size64) & WIN_SIZE_MASK); + csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num)); + + csr_writel(pcie, upper_32_bits(size64), + PAB_EXT_PEX_AMAP_SIZEN(win_num)); - write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64), - PAB_PEX_AMAP_CTRL(win_num)); + csr_writel(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); - write_paged_register(pcie, upper_32_bits(size64), - PAB_EXT_PEX_AMAP_SIZEN(win_num)); + csr_writel(pcie, lower_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_L(win_num)); + csr_writel(pcie, upper_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_H(win_num)); - write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); - write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num)); - write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num)); + pcie->ib_wins_configured++; } /* * routine to program the outbound windows */ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, - u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size) + u64 cpu_addr, u64 pci_addr, u32 type, u64 size) { - - u32 value, type; + u32 value; u64 size64 = ~(size - 1); - if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) { + if (win_num >= pcie->apio_wins) { dev_err(&pcie->pdev->dev, "ERROR: max outbound windows reached !\n"); return; @@ -432,28 +511,27 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit * to 4 KB in PAB_AXI_AMAP_CTRL register */ - type = config_io_bit; value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); - csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | - lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num)); + value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK); + value |= 1 
<< WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | + (lower_32_bits(size64) & WIN_SIZE_MASK); + csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num)); - write_paged_register(pcie, upper_32_bits(size64), - PAB_EXT_AXI_AMAP_SIZE(win_num)); + csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num)); /* * program AXI window base with appropriate value in * PAB_AXI_AMAP_AXI_WIN0 register */ - value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num)); - csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK), - PAB_AXI_AMAP_AXI_WIN(win_num)); - - value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num)); + csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK), + PAB_AXI_AMAP_AXI_WIN(win_num)); + csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_AXI_AMAP_AXI_WIN(win_num)); csr_writel(pcie, lower_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_L(win_num)); + PAB_AXI_AMAP_PEX_WIN_L(win_num)); csr_writel(pcie, upper_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_H(win_num)); + PAB_AXI_AMAP_PEX_WIN_H(win_num)); pcie->ob_wins_configured++; } @@ -469,7 +547,9 @@ static int mobiveil_bringup_link(struct mobiveil_pcie *pcie) usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); } + dev_err(&pcie->pdev->dev, "link never came up\n"); + return -ETIMEDOUT; } @@ -482,50 +562,55 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) msi->msi_pages_phys = (phys_addr_t)msg_addr; writel_relaxed(lower_32_bits(msg_addr), - pcie->apb_csr_base + MSI_BASE_LO_OFFSET); + pcie->apb_csr_base + MSI_BASE_LO_OFFSET); writel_relaxed(upper_32_bits(msg_addr), - pcie->apb_csr_base + MSI_BASE_HI_OFFSET); + pcie->apb_csr_base + MSI_BASE_HI_OFFSET); writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET); writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET); } static int mobiveil_host_init(struct mobiveil_pcie *pcie) { - u32 value, pab_ctrl, type = 0; - int err; - struct resource_entry *win, *tmp; - - err = mobiveil_bringup_link(pcie); - if (err) { - dev_info(&pcie->pdev->dev, "link bring-up failed\n"); - return err; - } + u32 value, pab_ctrl, type; + struct resource_entry *win; + + /* setup bus numbers */ + value = csr_readl(pcie, PCI_PRIMARY_BUS); + value &= 0xff000000; + value |= 0x00ff0100; + csr_writel(pcie, value, PCI_PRIMARY_BUS); /* * program Bus Master Enable Bit in Command Register in PAB Config * Space */ value = csr_readl(pcie, PCI_COMMAND); - csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER, PCI_COMMAND); + value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + csr_writel(pcie, value, PCI_COMMAND); /* * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL * register */ pab_ctrl = csr_readl(pcie, PAB_CTRL); - csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) | - (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL); + pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT); + csr_writel(pcie, pab_ctrl, PAB_CTRL); csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), - PAB_INTP_AMBA_MISC_ENB); + PAB_INTP_AMBA_MISC_ENB); /* * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in * PAB_AXI_PIO_CTRL Register */ value = csr_readl(pcie, PAB_AXI_PIO_CTRL); - csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL); + value |= APIO_EN_MASK; + csr_writel(pcie, value, PAB_AXI_PIO_CTRL); + + /* Enable PCIe PIO master */ + value = csr_readl(pcie, PAB_PEX_PIO_CTRL); + value |= 1 << PIO_ENABLE_SHIFT; + csr_writel(pcie, value, PAB_PEX_PIO_CTRL); /* * we'll program one outbound window for config reads and @@ -535,32 +620,38 @@ static 
int mobiveil_host_init(struct mobiveil_pcie *pcie) */ /* config outbound translation window */ - program_ob_windows(pcie, pcie->ob_wins_configured, - pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE, - resource_size(pcie->ob_io_res)); + program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0, + CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res)); /* memory inbound translation window */ - program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); + program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { - type = 0; + resource_list_for_each_entry(win, &pcie->resources) { if (resource_type(win->res) == IORESOURCE_MEM) type = MEM_WINDOW_TYPE; - if (resource_type(win->res) == IORESOURCE_IO) + else if (resource_type(win->res) == IORESOURCE_IO) type = IO_WINDOW_TYPE; - if (type) { - /* configure outbound translation window */ - program_ob_windows(pcie, pcie->ob_wins_configured, - win->res->start, 0, type, - resource_size(win->res)); - } + else + continue; + + /* configure outbound translation window */ + program_ob_windows(pcie, pcie->ob_wins_configured, + win->res->start, + win->res->start - win->offset, + type, resource_size(win->res)); } + /* fixup for PCIe class register */ + value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); + value &= 0xff; + value |= (PCI_CLASS_BRIDGE_PCI << 16); + csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); + /* setup MSI hardware registers */ mobiveil_pcie_enable_msi(pcie); - return err; + return 0; } static void mobiveil_mask_intx_irq(struct irq_data *data) @@ -574,7 +665,8 @@ static void mobiveil_mask_intx_irq(struct irq_data *data) mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); - csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB); + shifted_val &= ~mask; + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); } @@ -589,7 +681,8 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data) mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); - csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB); + shifted_val |= mask; + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); } @@ -603,10 +696,11 @@ static struct irq_chip intx_irq_chip = { /* routine to setup the INTx related data */ static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) + irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); + return 0; } @@ -623,7 +717,7 @@ static struct irq_chip mobiveil_msi_irq_chip = { static struct msi_domain_info mobiveil_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), + MSI_FLAG_PCI_MSIX), .chip = &mobiveil_msi_irq_chip, }; @@ -641,7 +735,7 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) } static int mobiveil_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) + const struct cpumask *mask, bool force) { return -EINVAL; } @@ -653,7 +747,8 @@ static struct irq_chip mobiveil_msi_bottom_irq_chip = { }; static int 
mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs, void *args) + unsigned int virq, + unsigned int nr_irqs, void *args) { struct mobiveil_pcie *pcie = domain->host_data; struct mobiveil_msi *msi = &pcie->msi; @@ -673,13 +768,13 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, mutex_unlock(&msi->lock); irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip, - domain->host_data, handle_level_irq, - NULL, NULL); + domain->host_data, handle_level_irq, NULL, NULL); return 0; } static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) + unsigned int virq, + unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d); @@ -687,12 +782,11 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, mutex_lock(&msi->lock); - if (!test_bit(d->hwirq, msi->msi_irq_in_use)) { + if (!test_bit(d->hwirq, msi->msi_irq_in_use)) dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n", d->hwirq); - } else { + else __clear_bit(d->hwirq, msi->msi_irq_in_use); - } mutex_unlock(&msi->lock); } @@ -716,12 +810,14 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) } msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &mobiveil_msi_domain_info, msi->dev_domain); + &mobiveil_msi_domain_info, + msi->dev_domain); if (!msi->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); irq_domain_remove(msi->dev_domain); return -ENOMEM; } + return 0; } @@ -732,12 +828,12 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) int ret; /* setup INTx */ - pcie->intx_domain = irq_domain_add_linear(node, - PCI_NUM_INTX, &intx_domain_ops, pcie); + pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, + &intx_domain_ops, pcie); if (!pcie->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENODEV; + return -ENOMEM; } raw_spin_lock_init(&pcie->intx_mask_lock); @@ -763,11 +859,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) /* allocate the PCIe port */ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) - return -ENODEV; + return -ENOMEM; pcie = pci_host_bridge_priv(bridge); - if (!pcie) - return -ENOMEM; pcie->pdev = pdev; @@ -784,7 +878,7 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) &pcie->resources, &iobase); if (ret) { dev_err(dev, "Getting bridge resources failed\n"); - return -ENOMEM; + return ret; } /* @@ -797,9 +891,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) goto error; } - /* fixup for PCIe class register */ - csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS); - /* initialize the IRQ domains */ ret = mobiveil_pcie_init_irq_domain(pcie); if (ret) { @@ -807,6 +898,8 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) goto error; } + irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); + ret = devm_request_pci_bus_resources(dev, &pcie->resources); if (ret) goto error; @@ -820,6 +913,12 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) bridge->map_irq = of_irq_parse_and_map_pci; bridge->swizzle_irq = pci_common_swizzle; + ret = mobiveil_bringup_link(pcie); + if (ret) { + dev_info(dev, "link bring-up failed\n"); + goto error; + } + /* setup the kernel resources for the newly added PCIe root bus */ ret = pci_scan_root_bus_bridge(bridge); if (ret) @@ -848,10 +947,10 @@ MODULE_DEVICE_TABLE(of, 
mobiveil_pcie_of_match); static struct platform_driver mobiveil_pcie_driver = { .probe = mobiveil_pcie_probe, .driver = { - .name = "mobiveil-pcie", - .of_match_table = mobiveil_pcie_of_match, - .suppress_bind_attrs = true, - }, + .name = "mobiveil-pcie", + .of_match_table = mobiveil_pcie_of_match, + .suppress_bind_attrs = true, + }, }; builtin_platform_driver(mobiveil_pcie_driver); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 3b031f00a94a..45c0f344ccd1 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -482,15 +482,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, int i; mutex_lock(&msi->lock); - bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0, - nr_irqs, 0); - if (bit >= INT_PCI_MSI_NR) { + bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR, + get_count_order(nr_irqs)); + if (bit < 0) { mutex_unlock(&msi->lock); return -ENOSPC; } - bitmap_set(msi->bitmap, bit, nr_irqs); - for (i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, domain->host_data, handle_simple_irq, @@ -508,7 +506,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct nwl_msi *msi = &pcie->msi; mutex_lock(&msi->lock); - bitmap_clear(msi->bitmap, data->hwirq, nr_irqs); + bitmap_release_region(msi->bitmap, data->hwirq, + get_count_order(nr_irqs)); mutex_unlock(&msi->lock); } diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 999a5509e57e..4575e0c6dc4b 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -627,7 +627,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) * 32-bit resources. __pci_assign_resource() enforces that * artificial restriction to make sure everything will fit. * - * The only way we could use a 64-bit non-prefechable MEMBAR is + * The only way we could use a 64-bit non-prefetchable MEMBAR is * if its address is <4GB so that we can convert it to a 32-bit * resource. To be visible to the host OS, all VMD endpoints must * be initially configured by platform BIOS, which includes setting diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 27806987e93b..1cfe3687a211 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -381,15 +381,15 @@ static void pci_epf_test_unbind(struct pci_epf *epf) epf_bar = &epf->bar[bar]; if (epf_test->reg[bar]) { - pci_epf_free_space(epf, epf_test->reg[bar], bar); pci_epc_clear_bar(epc, epf->func_no, epf_bar); + pci_epf_free_space(epf, epf_test->reg[bar], bar); } } } static int pci_epf_test_set_bar(struct pci_epf *epf) { - int bar; + int bar, add; int ret; struct pci_epf_bar *epf_bar; struct pci_epc *epc = epf->epc; @@ -400,8 +400,14 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) epc_features = epf_test->epc_features; - for (bar = BAR_0; bar <= BAR_5; bar++) { + for (bar = BAR_0; bar <= BAR_5; bar += add) { epf_bar = &epf->bar[bar]; + /* + * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 + * if the specific implementation required a 64-bit BAR, + * even if we only requested a 32-bit BAR. + */ + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 
2 : 1; if (!!(epc_features->reserved_bar & (1 << bar))) continue; @@ -413,13 +419,6 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) if (bar == test_reg_bar) return ret; } - /* - * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 - * if the specific implementation required a 64-bit BAR, - * even if we only requested a 32-bit BAR. - */ - if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) - bar++; } return 0; @@ -431,13 +430,19 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) struct device *dev = &epf->dev; struct pci_epf_bar *epf_bar; void *base; - int bar; + int bar, add; enum pci_barno test_reg_bar = epf_test->test_reg_bar; const struct pci_epc_features *epc_features; + size_t test_reg_size; epc_features = epf_test->epc_features; - base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), + if (epc_features->bar_fixed_size[test_reg_bar]) + test_reg_size = bar_size[test_reg_bar]; + else + test_reg_size = sizeof(struct pci_epf_test_reg); + + base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, epc_features->align); if (!base) { dev_err(dev, "Failed to allocated register space\n"); @@ -445,8 +450,10 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) } epf_test->reg[test_reg_bar] = base; - for (bar = BAR_0; bar <= BAR_5; bar++) { + for (bar = BAR_0; bar <= BAR_5; bar += add) { epf_bar = &epf->bar[bar]; + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; + if (bar == test_reg_bar) continue; @@ -459,8 +466,6 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) dev_err(dev, "Failed to allocate space for BAR%d\n", bar); epf_test->reg[bar] = base; - if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) - bar++; } return 0; diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index e4712a0f249c..2091508c1620 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -519,11 +519,12 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf) { unsigned long flags; - if (!epc || IS_ERR(epc)) + if (!epc || IS_ERR(epc) || !epf) return; spin_lock_irqsave(&epc->lock, flags); list_del(&epf->list); + epf->epc = NULL; spin_unlock_irqrestore(&epc->lock, flags); } EXPORT_SYMBOL_GPL(pci_epc_remove_epf); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 3aa115ed3a65..525fd3f272b3 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -132,8 +132,6 @@ static void pci_read_vf_config_common(struct pci_dev *virtfn) &physfn->sriov->subsystem_vendor); pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID, &physfn->sriov->subsystem_device); - - physfn->sriov->cfg_size = pci_cfg_space_size(virtfn); } int pci_iov_add_virtfn(struct pci_dev *dev, int id) diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c index 24505b08de40..b8c9011987f4 100644 --- a/drivers/pci/mmap.c +++ b/drivers/pci/mmap.c @@ -73,7 +73,7 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar, #elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */ /* - * Legacy setup: Impement pci_mmap_resource_range() as a wrapper around + * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around * the architecture's pci_mmap_page_range(), converting to "user visible" * addresses as necessary. 
*/ diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index e039b740fe74..59a6d232f77a 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -237,7 +237,7 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag) } /** - * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts + * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts * @data: pointer to irqdata associated to that interrupt */ void pci_msi_mask_irq(struct irq_data *data) @@ -247,7 +247,7 @@ void pci_msi_mask_irq(struct irq_data *data) EXPORT_SYMBOL_GPL(pci_msi_mask_irq); /** - * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts + * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts * @data: pointer to irqdata associated to that interrupt */ void pci_msi_unmask_irq(struct irq_data *data) @@ -588,11 +588,11 @@ static int msi_verify_entries(struct pci_dev *dev) * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function * @nvec: number of interrupts to allocate - * @affd: description of automatic irq affinity assignments (may be %NULL) + * @affd: description of automatic IRQ affinity assignments (may be %NULL) * * Setup the MSI capability structure of the device with the requested * number of interrupts. A return value of zero indicates the successful - * setup of an entry with the new MSI irq. A negative return value indicates + * setup of an entry with the new MSI IRQ. A negative return value indicates * an error, and a positive return value indicates the number of interrupts * which could have been allocated. */ @@ -609,7 +609,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, if (!entry) return -ENOMEM; - /* All MSIs are unmasked by default, Mask them all */ + /* All MSIs are unmasked by default; mask them all */ mask = msi_mask(entry->msi_attrib.multi_cap); msi_mask_irq(entry, mask, mask); @@ -637,7 +637,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, return ret; } - /* Set MSI enabled bits */ + /* Set MSI enabled bits */ pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 1); dev->msi_enabled = 1; @@ -729,11 +729,11 @@ static void msix_program_entries(struct pci_dev *dev, * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of struct msix_entry entries * @nvec: number of @entries - * @affd: Optional pointer to enable automatic affinity assignement + * @affd: Optional pointer to enable automatic affinity assignment * * Setup the MSI-X capability structure of device function with a - * single MSI-X irq. A return of zero indicates the successful setup of - * requested MSI-X entries with allocated irqs or non-zero for otherwise. + * single MSI-X IRQ. A return of zero indicates the successful setup of + * requested MSI-X entries with allocated IRQs or non-zero for otherwise. **/ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity *affd) @@ -789,7 +789,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, out_avail: if (ret < 0) { /* - * If we had some success, report the number of irqs + * If we had some success, report the number of IRQs * we succeeded in setting up. 
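+ * For example, if the driver requested 8 MSI-X vectors but only 4 could
+ * be set up, return 4 so the caller can retry with a smaller request.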
*/ struct msi_desc *entry; @@ -812,7 +812,7 @@ out_free: /** * pci_msi_supported - check whether MSI may be enabled on a device * @dev: pointer to the pci_dev data structure of MSI device function - * @nvec: how many MSIs have been requested ? + * @nvec: how many MSIs have been requested? * * Look at global flags, the device itself, and its parent buses * to determine if MSI/-X are supported for the device. If MSI/-X is @@ -896,7 +896,7 @@ static void pci_msi_shutdown(struct pci_dev *dev) /* Keep cached state to be restored */ __pci_msi_desc_mask_irq(desc, mask, ~mask); - /* Restore dev->irq to its default pin-assertion irq */ + /* Restore dev->irq to its default pin-assertion IRQ */ dev->irq = desc->msi_attrib.default_irq; pcibios_alloc_irq(dev); } @@ -958,7 +958,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, } } - /* Check whether driver already requested for MSI irq */ + /* Check whether driver already requested for MSI IRQ */ if (dev->msi_enabled) { pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); return -EINVAL; @@ -1026,7 +1026,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (!pci_msi_supported(dev, minvec)) return -EINVAL; - /* Check whether driver already requested MSI-X irqs */ + /* Check whether driver already requested MSI-X IRQs */ if (dev->msix_enabled) { pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); return -EINVAL; @@ -1113,8 +1113,8 @@ static int __pci_enable_msix_range(struct pci_dev *dev, * pci_enable_msix_range - configure device's MSI-X capability structure * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of MSI-X entries - * @minvec: minimum number of MSI-X irqs requested - * @maxvec: maximum number of MSI-X irqs requested + * @minvec: minimum number of MSI-X IRQs requested + * @maxvec: maximum number of MSI-X IRQs requested * * Setup the MSI-X capability structure of device function with a maximum * possible number of interrupts in the range between @minvec and @maxvec @@ -1179,7 +1179,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, return msi_vecs; } - /* use legacy irq if allowed */ + /* use legacy IRQ if allowed */ if (flags & PCI_IRQ_LEGACY) { if (min_vecs == 1 && dev->irq) { /* @@ -1248,7 +1248,7 @@ int pci_irq_vector(struct pci_dev *dev, unsigned int nr) EXPORT_SYMBOL(pci_irq_vector); /** - * pci_irq_get_affinity - return the affinity of a particular msi vector + * pci_irq_get_affinity - return the affinity of a particular MSI vector * @dev: PCI device to operate on * @nr: device-relative interrupt vector index (0-based). */ @@ -1280,7 +1280,7 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) EXPORT_SYMBOL(pci_irq_get_affinity); /** - * pci_irq_get_node - return the numa node of a particular msi vector + * pci_irq_get_node - return the NUMA node of a particular MSI vector * @pdev: PCI device to operate on * @vec: device-relative interrupt vector index (0-based). */ @@ -1330,7 +1330,7 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) /** * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source * @dev: Pointer to the PCI device - * @desc: Pointer to the msi descriptor + * @desc: Pointer to the MSI descriptor * * The ID number is only used within the irqdomain. 
*/ @@ -1348,7 +1348,8 @@ static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) } /** - * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev + * pci_msi_domain_check_cap - Verify that @domain supports the capabilities + * for @dev * @domain: The interrupt domain to check * @info: The domain info for verification * @dev: The device to check diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index a3073ce16520..234476226529 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -195,7 +195,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource); /* * Note this function returns the parent PCI device with a - * reference taken. It is the caller's responsibily to drop + * reference taken. It is the caller's responsibility to drop * the reference. */ static struct pci_dev *find_parent_pci_dev(struct device *dev) @@ -355,7 +355,7 @@ static int upstream_bridge_distance(struct pci_dev *provider, /* * Allow the connection if both devices are on a whitelisted root - * complex, but add an arbitary large value to the distance. + * complex, but add an arbitrary large value to the distance. */ if (root_complex_whitelist(provider) && root_complex_whitelist(client)) @@ -414,7 +414,7 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider, } /** - * pci_p2pdma_distance_many - Determive the cumulative distance between + * pci_p2pdma_distance_many - Determine the cumulative distance between * a p2pdma provider and the clients in use. * @provider: p2pdma provider to check against the client list * @clients: array of devices to check (NULL-terminated) @@ -443,6 +443,14 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, return -1; for (i = 0; i < num_clients; i++) { + if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) && + clients[i]->dma_ops == &dma_virt_ops) { + if (verbose) + dev_warn(clients[i], + "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n"); + return -1; + } + pci_client = find_parent_pci_dev(clients[i]); if (!pci_client) { if (verbose) @@ -721,7 +729,7 @@ int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, * p2pdma mappings are not compatible with devices that use * dma_virt_ops. If the upper layers do the right thing * this should never happen because it will be prevented - * by the check in pci_p2pdma_add_client() + * by the check in pci_p2pdma_distance_many() */ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) && dev->dma_ops == &dma_virt_ops)) diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index 83fb077d0b41..06083b86d4f4 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -305,7 +305,7 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, } /* - * Cleanup a pci_bridge_emul structure that was previously initilized + * Cleanup a pci_bridge_emul structure that was previously initialized * using pci_bridge_emul_init(). 
*/ void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 36dbe960306b..a8124e47bf6e 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev) #ifdef CONFIG_PCI_IOV static inline bool pci_device_can_probe(struct pci_dev *pdev) { - return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe); + return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe || + pdev->driver_override); } #else static inline bool pci_device_can_probe(struct pci_dev *pdev) @@ -414,6 +415,9 @@ static int pci_device_probe(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = to_pci_driver(dev->driver); + if (!pci_device_can_probe(pci_dev)) + return -ENODEV; + pci_assign_irq(pci_dev); error = pcibios_alloc_irq(pci_dev); @@ -421,12 +425,10 @@ static int pci_device_probe(struct device *dev) return error; pci_dev_get(pci_dev); - if (pci_device_can_probe(pci_dev)) { - error = __pci_device_probe(drv, pci_dev); - if (error) { - pcibios_free_irq(pci_dev); - pci_dev_put(pci_dev); - } + error = __pci_device_probe(drv, pci_dev); + if (error) { + pcibios_free_irq(pci_dev); + pci_dev_put(pci_dev); } return error; diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c index 9795649fc6f9..ef293e735c55 100644 --- a/drivers/pci/pci-pf-stub.c +++ b/drivers/pci/pci-pf-stub.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* pci-pf-stub - simple stub driver for PCI SR-IOV PF device * - * This driver is meant to act as a "whitelist" for devices that provde + * This driver is meant to act as a "whitelist" for devices that provide * SR-IOV functionality while at the same time not actually needing a * driver of their own. */ diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 6d27475e39b2..965c72104150 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -182,6 +182,9 @@ static ssize_t current_link_speed_show(struct device *dev, return -EINVAL; switch (linkstat & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_32_0GB: + speed = "32 GT/s"; + break; case PCI_EXP_LNKSTA_CLS_16_0GB: speed = "16 GT/s"; break; @@ -477,7 +480,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr, pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); return count; } -static struct device_attribute dev_remove_attr = __ATTR(remove, +static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove, (S_IWUSR|S_IWGRP), NULL, remove_store); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b1f563916036..29ed5ec1ac27 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4535,7 +4535,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe) /* * Wait for Transaction Pending bit to clear. A word-aligned test - * is used, so we use the conrol offset rather than status and shift + * is used, so we use the control offset rather than status and shift * the test bit to match. 
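+ * PCI_AF_STATUS is byte offset 5, one past the word-aligned PCI_AF_CTRL
+ * at offset 4, so PCI_AF_STATUS_TP is shifted left by 8 to line up with
+ * the word that is read.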
*/ if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, @@ -5669,7 +5669,9 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); if (lnkcap2) { /* PCIe r3.0-compliant */ - if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB) + return PCIE_SPEED_32_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) return PCIE_SPEED_16_0GT; else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) return PCIE_SPEED_8_0GT; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 5db6f985f16d..1be03a97cb92 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -298,7 +298,6 @@ struct pci_sriov { u16 driver_max_VFs; /* Max num VFs driver supports */ struct pci_dev *dev; /* Lowest numbered PF */ struct pci_dev *self; /* This PF */ - u32 cfg_size; /* VF config space size */ u32 class; /* VF device */ u8 hdr_type; /* VF header type */ u16 subsystem_vendor; /* VF subsystem vendor */ diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 043b8b0cfcc5..6988fe7389b9 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -2,7 +2,7 @@ /* * PCIe AER software error injection support. * - * Debuging PCIe AER code is quite difficult because it is hard to + * Debugging PCIe AER code is quite difficult because it is hard to * trigger various real hardware errors. Software based error * injection can fake almost all kinds of errors with the help of a * user space helper tool aer-inject, which can be gotten from: diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f9ef7ad3f75d..a3c7338fad86 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -668,7 +668,7 @@ const unsigned char pcie_link_speed[] = { PCIE_SPEED_5_0GT, /* 2 */ PCIE_SPEED_8_0GT, /* 3 */ PCIE_SPEED_16_0GT, /* 4 */ - PCI_SPEED_UNKNOWN, /* 5 */ + PCIE_SPEED_32_0GT, /* 5 */ PCI_SPEED_UNKNOWN, /* 6 */ PCI_SPEED_UNKNOWN, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ @@ -1555,17 +1555,6 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev) return PCI_CFG_SPACE_EXP_SIZE; } -#ifdef CONFIG_PCI_IOV -static bool is_vf0(struct pci_dev *dev) -{ - if (pci_iov_virtfn_devfn(dev->physfn, 0) == dev->devfn && - pci_iov_virtfn_bus(dev->physfn, 0) == dev->bus->number) - return true; - - return false; -} -#endif - int pci_cfg_space_size(struct pci_dev *dev) { int pos; @@ -1573,9 +1562,18 @@ int pci_cfg_space_size(struct pci_dev *dev) u16 class; #ifdef CONFIG_PCI_IOV - /* Read cached value for all VFs except for VF0 */ - if (dev->is_virtfn && !is_vf0(dev)) - return dev->physfn->sriov->cfg_size; + /* + * Per the SR-IOV specification (rev 1.1, sec 3.5), VFs are required to + * implement a PCIe capability and therefore must implement extended + * config space. We can skip the NO_EXTCFG test below and the + * reachability/aliasing test in pci_cfg_space_size_ext() by virtue of + * the fact that the SR-IOV capability on the PF resides in extended + * config space and must be accessible and non-aliased to have enabled + * support for this VF. This is a micro performance optimization for + * systems supporting many VFs. 
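+ *
+ * For example, enumerating a PF with several hundred VFs would otherwise
+ * cost at least one extended config space read per VF just to size its
+ * config space.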
+ */ + if (dev->is_virtfn) + return PCI_CFG_SPACE_EXP_SIZE; #endif if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG) diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 445b51db75b0..fe7fe678965b 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -377,7 +377,7 @@ static int show_device(struct seq_file *m, void *v) } seq_putc(m, '\t'); if (drv) - seq_printf(m, "%s", drv->name); + seq_puts(m, drv->name); seq_putc(m, '\n'); return 0; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0f16acc323c6..208aacf39329 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4934,35 +4934,49 @@ static void quirk_fsl_no_msi(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi); /* - * GPUs with integrated HDA controller for streaming audio to attached displays - * need a device link from the HDA controller (consumer) to the GPU (supplier) - * so that the GPU is powered up whenever the HDA controller is accessed. - * The GPU and HDA controller are functions 0 and 1 of the same PCI device. - * The device link stays in place until shutdown (or removal of the PCI device - * if it's hotplugged). Runtime PM is allowed by default on the HDA controller - * to prevent it from permanently keeping the GPU awake. + * Although not allowed by the spec, some multi-function devices have + * dependencies of one function (consumer) on another (supplier). For the + * consumer to work in D0, the supplier must also be in D0. Create a + * device link from the consumer to the supplier to enforce this + * dependency. Runtime PM is allowed by default on the consumer to prevent + * it from permanently keeping the supplier awake. */ -static void quirk_gpu_hda(struct pci_dev *hda) +static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer, + unsigned int supplier, unsigned int class, + unsigned int class_shift) { - struct pci_dev *gpu; + struct pci_dev *supplier_pdev; - if (PCI_FUNC(hda->devfn) != 1) + if (PCI_FUNC(pdev->devfn) != consumer) return; - gpu = pci_get_domain_bus_and_slot(pci_domain_nr(hda->bus), - hda->bus->number, - PCI_DEVFN(PCI_SLOT(hda->devfn), 0)); - if (!gpu || (gpu->class >> 16) != PCI_BASE_CLASS_DISPLAY) { - pci_dev_put(gpu); + supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier)); + if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) { + pci_dev_put(supplier_pdev); return; } - if (!device_link_add(&hda->dev, &gpu->dev, - DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) - pci_err(hda, "cannot link HDA to GPU %s\n", pci_name(gpu)); + if (device_link_add(&pdev->dev, &supplier_pdev->dev, + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) + pci_info(pdev, "D0 power state depends on %s\n", + pci_name(supplier_pdev)); + else + pci_err(pdev, "Cannot enforce power dependency on %s\n", + pci_name(supplier_pdev)); + + pm_runtime_allow(&pdev->dev); + pci_dev_put(supplier_pdev); +} - pm_runtime_allow(&hda->dev); - pci_dev_put(gpu); +/* + * Create device link for GPUs with integrated HDA controller for streaming + * audio to attached displays. 
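+ * The GPU and HDA controller are functions 0 and 1 of the same PCI
+ * device, hence the (consumer, supplier) pair (1, 0) below.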
+ */
+static void quirk_gpu_hda(struct pci_dev *hda)
+{
+ pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
 PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
@@ -4972,6 +4986,62 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
 PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
+ * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * controller to VGA.
+ */
+static void quirk_gpu_usb(struct pci_dev *usb)
+{
+ pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+
+/*
+ * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * to VGA. Currently there is no class code defined for a UCSI device over
+ * PCI, so use the UNKNOWN class for now; this will be updated once UCSI
+ * over PCI gets a class code.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
+static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
+{
+ pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN, 8,
+ quirk_gpu_usb_typec_ucsi);
+
+/*
+ * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
+ * disabled. https://devtalk.nvidia.com/default/topic/1024022
+ */
+static void quirk_nvidia_hda(struct pci_dev *gpu)
+{
+ u8 hdr_type;
+ u32 val;
+
+ /* There was no integrated HDA controller before MCP89 */
+ if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
+ return;
+
+ /* Bit 25 at offset 0x488 enables the HDA controller */
+ pci_read_config_dword(gpu, 0x488, &val);
+ if (val & BIT(25))
+ return;
+
+ pci_info(gpu, "Enabling HDA controller\n");
+ pci_write_config_dword(gpu, 0x488, val | BIT(25));
+
+ /* The GPU becomes a multi-function device when the HDA is enabled */
+ pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
+ gpu->multifunction = !!(hdr_type & 0x80);
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
+DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
+
+/*
 * Some IDT switches incorrectly flag an ACS Source Validation error on
 * completions for config read requests even though PCIe r4.0, sec
 * 6.12.1.1, says that completions are never affected by ACS Source
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0cdd5ff389de..79b1fa6519be 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1684,10 +1684,15 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
 enum enable_type enable_local)
{
 bool unassigned = false;
+ struct pci_host_bridge *host;
 if (enable_local != undefined)
 return enable_local;
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ return auto_disabled;
+
 pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
 if (unassigned)
 return auto_enabled;
@@ -1861,16 +1866,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 available_mmio_pref);
 /*
- * Calculate the total amount of extra resource space we can
- * pass to bridges below this one. This is basically the
- * extra space reduced by the minimal required space for the
- * non-hotplug bridges.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0cdd5ff389de..79b1fa6519be 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1684,10 +1684,15 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
 					   enum enable_type enable_local)
 {
 	bool unassigned = false;
+	struct pci_host_bridge *host;
 
 	if (enable_local != undefined)
 		return enable_local;
 
+	host = pci_find_host_bridge(bus);
+	if (host->preserve_config)
+		return auto_disabled;
+
 	pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
 	if (unassigned)
 		return auto_enabled;
@@ -1861,16 +1866,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 					available_mmio_pref);
 
 	/*
-	 * Calculate the total amount of extra resource space we can
-	 * pass to bridges below this one. This is basically the
-	 * extra space reduced by the minimal required space for the
-	 * non-hotplug bridges.
-	 */
-	remaining_io = available_io;
-	remaining_mmio = available_mmio;
-	remaining_mmio_pref = available_mmio_pref;
-
-	/*
 	 * Calculate how many hotplug bridges and normal bridges there
 	 * are on this bus. We will distribute the additional available
 	 * resources between hotplug bridges.
@@ -1882,6 +1877,34 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 			normal_bridges++;
 	}
 
+	/*
+	 * There is only one bridge on the bus so it gets all available
+	 * resources which it can then distribute to the possible hotplug
+	 * bridges below.
+	 */
+	if (hotplug_bridges + normal_bridges == 1) {
+		dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+		if (dev->subordinate) {
+			pci_bus_distribute_available_resources(dev->subordinate,
+				add_list, available_io, available_mmio,
+				available_mmio_pref);
+		}
+		return;
+	}
+
+	if (hotplug_bridges == 0)
+		return;
+
+	/*
+	 * Calculate the total amount of extra resource space we can
+	 * pass to bridges below this one. This is basically the
+	 * extra space reduced by the minimal required space for the
+	 * non-hotplug bridges.
+	 */
+	remaining_io = available_io;
+	remaining_mmio = available_mmio;
+	remaining_mmio_pref = available_mmio_pref;
+
 	for_each_pci_bridge(dev, bus) {
 		const struct resource *res;
@@ -1906,21 +1929,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 	}
 
 	/*
-	 * There is only one bridge on the bus so it gets all available
-	 * resources which it can then distribute to the possible hotplug
-	 * bridges below.
-	 */
-	if (hotplug_bridges + normal_bridges == 1) {
-		dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
-		if (dev->subordinate) {
-			pci_bus_distribute_available_resources(dev->subordinate,
-				add_list, available_io, available_mmio,
-				available_mmio_pref);
-		}
-		return;
-	}
-
-	/*
 	 * Go over devices on this bus and distribute the remaining
 	 * resource space between hotplug bridges.
 	 */
@@ -1936,8 +1944,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 	 * Distribute available extra resources equally between
 	 * hotplug-capable downstream ports taking alignment into
 	 * account.
-	 *
-	 * Here hotplug_bridges is always != 0.
 	 */
 	align = pci_resource_alignment(bridge, io_res);
 	io = div64_ul(available_io, hotplug_bridges);
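[Editor's note on the setup-bus.c hunk above: the single-bridge and zero-hotplug-bridge cases now return before the remaining_* totals are computed, so the division by hotplug_bridges at the end can no longer see a zero count. What is left is split evenly between hotplug bridges, respecting window alignment. A toy model of that split, compilable as plain C; the numbers are invented and this is not the kernel algorithm verbatim.]

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Each hotplug bridge gets an equal share of the spare window,
	 * rounded down to the window alignment (a power of two).
	 */
	static uint64_t per_bridge_share(uint64_t available, unsigned int bridges,
					 uint64_t align)
	{
		uint64_t share = available / bridges;

		return share & ~(align - 1);	/* align down */
	}

	int main(void)
	{
		uint64_t mmio = 0x20000000;	/* e.g. 512 MB of spare MMIO */

		printf("each of 3 hotplug bridges gets 0x%llx\n",
		       (unsigned long long)per_bridge_share(mmio, 3, 0x100000));
		return 0;
	}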
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index f4d92b1afe7b..ae4aa0e1f2f4 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -75,6 +75,7 @@ static const char *pci_bus_speed_strings[] = {
 	"5.0 GT/s PCIe",	/* 0x15 */
 	"8.0 GT/s PCIe",	/* 0x16 */
 	"16.0 GT/s PCIe",	/* 0x17 */
+	"32.0 GT/s PCIe",	/* 0x18 */
 };
 
 static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 136e8f64848a..b55cdfe22a2e 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -606,7 +606,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
 
 	/* The property and field names below must correspond to elements
 	 * in enum power_supply_property. For reasoning, see
-	 * Documentation/power/power_supply_class.txt.
+	 * Documentation/power/power_supply_class.rst.
 	 */
 	of_property_read_u32(battery_np, "energy-full-design-microwatt-hours",
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 8878720dd779..17e7796a832b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -700,6 +700,7 @@ int tegra_powergate_power_on(unsigned int id)
 
 	return tegra_powergate_set(pmc, id, true);
 }
+EXPORT_SYMBOL(tegra_powergate_power_on);
 
 /**
  * tegra_powergate_power_off() - power off partition
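[Editor's note on the pmc.c hunk above: exporting tegra_powergate_power_on() lets modular code power a partition on. A minimal sketch of a hypothetical caller; only the exported function comes from the patch, while the partition ID and the surrounding function are illustrative assumptions.]

	#include <linux/errno.h>
	#include <soc/tegra/pmc.h>

	/* Hypothetical consumer: power a partition on during driver setup. */
	static int example_enable_partition(void)
	{
		int err;

		err = tegra_powergate_power_on(TEGRA_POWERGATE_SATA);
		if (err < 0)
			return err;	/* partition stays off on failure */

		return 0;
	}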