author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-08 19:03:25 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-08 19:03:25 -0700
commit		857f8640147c9fb43f20e43cbca6452710e1ca5d (patch)
tree		76a92068d703b8001ca790ffa096d435fa24ae81 /drivers/pci
parent		8f3207c7eab9d885cc64c778416537034a7d9c5b (diff)
parent		3146c8f4de9b0858794a902f273aec13f168596e (diff)
Merge tag 'pci-v4.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:
- add framework for supporting PCIe devices in Endpoint mode (Kishon
Vijay Abraham I)
- use non-postable PCI config space mappings when possible (Lorenzo
Pieralisi)
- clean up and unify mmap of PCI BARs (David Woodhouse)
- export and unify Function Level Reset support (Christoph Hellwig)
- avoid FLR for Intel 82579 NICs (Sasha Neftin)
- add pci_request_irq() and pci_free_irq() helpers (Christoph Hellwig)
- short-circuit config access failures for disconnected devices (Keith
Busch)
- remove D3 sleep delay when possible (Adrian Hunter)
- freeze PME scan before suspending devices (Lukas Wunner)
- stop disabling MSI/MSI-X in pci_device_shutdown() (Prarit Bhargava)
- disable boot interrupt quirk for ASUS M2N-LR (Stefan Assmann)
- add arch-specific alignment control to improve device passthrough by
avoiding multiple BARs in a page (Yongji Xie)
- add sysfs sriov_drivers_autoprobe to control VF driver binding
(Bodong Wang)
- allow slots below PCI-to-PCIe "reverse bridges" (Bjorn Helgaas)
- fix crashes when unbinding host controllers that don't support
removal (Brian Norris)
- add driver for MicroSemi Switchtec management interface (Logan
Gunthorpe)
- add driver for Faraday Technology FTPCI100 host bridge (Linus
Walleij)
- add i.MX7D support (Andrey Smirnov)
- use generic MSI support for Aardvark (Thomas Petazzoni)
- make Rockchip driver modular (Brian Norris)
- advertise 128-byte Read Completion Boundary support for Rockchip
(Shawn Lin)
- advertise PCI_EXP_LNKSTA_SLC for Rockchip root port (Shawn Lin)
- convert atomic_t to refcount_t in HV driver (Elena Reshetova)
- add CPU IRQ affinity in HV driver (K. Y. Srinivasan)
- fix PCI bus removal in HV driver (Long Li)
- add support for ThunderX2 DMA alias topology (Jayachandran C)
- add ThunderX pass2.x 2nd node MCFG quirk (Tomasz Nowicki)
- add ITE 8893 bridge DMA alias quirk (Jarod Wilson)
- restrict Cavium ACS quirk only to CN81xx/CN83xx/CN88xx devices
(Manish Jaggi)
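
The pci_request_irq()/pci_free_irq() helpers mentioned above pair the vector numbering of pci_alloc_irq_vectors() with request_threaded_irq() and a printf-style name for /proc/interrupts. A minimal usage sketch follows; the "foo" driver, its handler, and the vector count are illustrative, not part of this merge:

static irqreturn_t foo_handle_irq(int irq, void *data)
{
    struct foo_dev *foo = data;    /* hypothetical driver state */

    /* acknowledge and handle the device interrupt here */
    return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev, struct foo_dev *foo)
{
    int nvec, i, ret;

    /* MSI-X, MSI, or legacy INTx - whatever the device supports */
    nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
    if (nvec < 0)
        return nvec;

    for (i = 0; i < nvec; i++) {
        /* the format string names vector i in /proc/interrupts */
        ret = pci_request_irq(pdev, i, foo_handle_irq, NULL, foo,
                      "foo-%d", i);
        if (ret)
            goto err_free;
    }
    return 0;

err_free:
    while (--i >= 0)
        pci_free_irq(pdev, i, foo);
    pci_free_irq_vectors(pdev);
    return ret;
}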
* tag 'pci-v4.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (146 commits)
PCI: Don't allow unbinding host controllers that aren't prepared
ARM: DRA7: clockdomain: Change the CLKTRCTRL of CM_PCIE_CLKSTCTRL to SW_WKUP
MAINTAINERS: Add PCI Endpoint maintainer
Documentation: PCI: Add userguide for PCI endpoint test function
tools: PCI: Add sample test script to invoke pcitest
tools: PCI: Add a userspace tool to test PCI endpoint
Documentation: misc-devices: Add Documentation for pci-endpoint-test driver
misc: Add host side PCI driver for PCI test function device
PCI: Add device IDs for DRA74x and DRA72x
dt-bindings: PCI: dra7xx: Add DT bindings to enable unaligned access
PCI: dwc: dra7xx: Workaround for errata id i870
dt-bindings: PCI: dra7xx: Add DT bindings for PCI dra7xx EP mode
PCI: dwc: dra7xx: Add EP mode support
PCI: dwc: dra7xx: Facilitate wrapper and MSI interrupts to be enabled independently
dt-bindings: PCI: Add DT bindings for PCI designware EP mode
PCI: dwc: designware: Add EP mode support
Documentation: PCI: Add binding documentation for pci-test endpoint function
ixgbe: Use pcie_flr() instead of duplicating it
IB/hfi1: Use pcie_flr() instead of duplicating it
PCI: imx6: Fix spelling mistake: "contol" -> "control"
...
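
The endpoint-mode commits above hang together as follows: a DesignWare-based controller driver fills in a struct dw_pcie_ep_ops and calls dw_pcie_ep_init(), after which endpoint function drivers (such as the pci-test function) drive it through the pci_epc API. A condensed sketch of the controller side, mirroring the dra7xx conversion in the diff below (the "foo" names are illustrative; dbi_base/dbi_base2 are assumed to be mapped by the caller):

static void foo_pcie_ep_init(struct dw_pcie_ep *ep)
{
    /* controller-specific setup, e.g. unmask wrapper interrupts */
}

static int foo_pcie_raise_irq(struct dw_pcie_ep *ep,
                  enum pci_epc_irq_type type, u8 interrupt_num)
{
    /* poke controller glue registers to send INTx or MSI upstream */
    return 0;
}

static struct dw_pcie_ep_ops foo_pcie_ep_ops = {
    .ep_init   = foo_pcie_ep_init,
    .raise_irq = foo_pcie_raise_irq,
};

static int foo_pcie_add_ep(struct dw_pcie *pci, struct platform_device *pdev)
{
    struct dw_pcie_ep *ep = &pci->ep;
    struct resource *res;

    ep->ops = &foo_pcie_ep_ops;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
    if (!res)
        return -EINVAL;

    ep->phys_base = res->start;    /* outbound window for host memory */
    ep->addr_size = resource_size(res);

    return dw_pcie_ep_init(ep);
}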
Diffstat (limited to 'drivers/pci')
65 files changed, 6355 insertions, 497 deletions
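
The first access.c hunk below implements the disconnected-device short-circuit: reads fill the caller's buffer with all-ones and return -ENODEV without touching the bus, and writes are silently dropped. A driver polling config space can therefore detect surprise removal cheaply; a minimal sketch under those semantics (the foo_* naming is illustrative):

static bool foo_device_gone(struct pci_dev *pdev)
{
    u16 vendor;

    /*
     * After this series, a config read on a disconnected device
     * returns -ENODEV and leaves vendor as 0xffff, so either
     * check suffices.
     */
    if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
        return true;

    return vendor == 0xffff;
}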
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index df141420c902..e0cacb7b8563 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -134,3 +134,5 @@ config PCI_HYPERV
 source "drivers/pci/hotplug/Kconfig"
 source "drivers/pci/dwc/Kconfig"
 source "drivers/pci/host/Kconfig"
+source "drivers/pci/endpoint/Kconfig"
+source "drivers/pci/switch/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 8db5079f09a7..462c1f5f5546 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,7 @@
 obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
     pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
-    irq.o vpd.o setup-bus.o vc.o
+    irq.o vpd.o setup-bus.o vc.o mmap.o
 
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSFS) += slot.o
@@ -68,3 +68,4 @@ ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
 
 # PCI host controller drivers
 obj-y += host/
+obj-y += switch/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 8b7382705bf2..74cf5fffb1e1 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -629,7 +629,7 @@ void pci_vpd_release(struct pci_dev *dev)
  *
  * When access is locked, any userspace reads or writes to config
  * space and concurrent lock requests will sleep until access is
- * allowed via pci_cfg_access_unlocked again.
+ * allowed via pci_cfg_access_unlock() again.
  */
 void pci_cfg_access_lock(struct pci_dev *dev)
 {
@@ -700,7 +700,8 @@ static bool pcie_downstream_port(const struct pci_dev *dev)
     int type = pci_pcie_type(dev);
 
     return type == PCI_EXP_TYPE_ROOT_PORT ||
-           type == PCI_EXP_TYPE_DOWNSTREAM;
+           type == PCI_EXP_TYPE_DOWNSTREAM ||
+           type == PCI_EXP_TYPE_PCIE_BRIDGE;
 }
 
 bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
@@ -890,3 +891,59 @@ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
     return ret;
 }
 EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
+
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
+{
+    if (pci_dev_is_disconnected(dev)) {
+        *val = ~0;
+        return -ENODEV;
+    }
+    return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_byte);
+
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
+{
+    if (pci_dev_is_disconnected(dev)) {
+        *val = ~0;
+        return -ENODEV;
+    }
+    return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_word);
+
+int pci_read_config_dword(const struct pci_dev *dev, int where,
+              u32 *val)
+{
+    if (pci_dev_is_disconnected(dev)) {
+        *val = ~0;
+        return -ENODEV;
+    }
+    return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_dword);
+
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
+{
+    if (pci_dev_is_disconnected(dev))
+        return -ENODEV;
+    return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_byte);
+
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
+{
+    if (pci_dev_is_disconnected(dev))
+        return -ENODEV;
+    return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_word);
+
+int pci_write_config_dword(const struct pci_dev *dev, int where,
+               u32 val)
+{
+    if (pci_dev_is_disconnected(dev))
+        return -ENODEV;
+    return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_dword);
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index d2d2ba5b8a68..b7e15526d676 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -9,16 +9,44 @@ config PCIE_DW_HOST
     depends on PCI_MSI_IRQ_DOMAIN
     select PCIE_DW
 
+config PCIE_DW_EP
+    bool
+    depends on PCI_ENDPOINT
+    select PCIE_DW
+
 config PCI_DRA7XX
     bool "TI DRA7xx PCIe controller"
-    depends on PCI
+    depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
     depends on OF && HAS_IOMEM && TI_PIPE3
+    help
+     Enables support for the PCIe controller in the DRA7xx SoC. There
+     are two instances of PCIe controller in DRA7xx. This controller can
+     work either as EP or RC. In order to enable host-specific features
+     PCI_DRA7XX_HOST must be selected and in order to enable device-
+     specific features PCI_DRA7XX_EP must be selected. This uses
+     the Designware core.
+
+if PCI_DRA7XX
+
+config PCI_DRA7XX_HOST
+    bool "PCI DRA7xx Host Mode"
+    depends on PCI
     depends on PCI_MSI_IRQ_DOMAIN
     select PCIE_DW_HOST
+    default y
     help
-     Enables support for the PCIe controller in the DRA7xx SoC. There
-     are two instances of PCIe controller in DRA7xx. This controller can
-     act both as EP and RC. This reuses the Designware core.
+     Enables support for the PCIe controller in the DRA7xx SoC to work in
+     host mode.
+
+config PCI_DRA7XX_EP
+    bool "PCI DRA7xx Endpoint Mode"
+    depends on PCI_ENDPOINT
+    select PCIE_DW_EP
+    help
+     Enables support for the PCIe controller in the DRA7xx SoC to work in
+     endpoint mode.
+
+endif
 
 config PCIE_DW_PLAT
     bool "Platform bus based DesignWare PCIe Controller"
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index a2df13c28798..f31a8596442a 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -1,7 +1,10 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
 obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
 obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
-obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),)
+        obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+endif
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 0984baff07e3..8decf46cf525 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -10,12 +10,14 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
@@ -24,6 +26,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/resource.h>
 #include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #include "pcie-designware.h"
@@ -57,6 +61,11 @@
 #define MSI BIT(4)
 #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
 
+#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100
+#define DEVICE_TYPE_EP 0x0
+#define DEVICE_TYPE_LEG_EP 0x1
+#define DEVICE_TYPE_RC 0x4
+
 #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
 #define LTSSM_EN 0x1
@@ -66,6 +75,13 @@
 #define EXP_CAP_ID_OFFSET 0x70
 
+#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
+#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
+
+#define PCIECTRL_TI_CONF_MSI_XMT 0x012c
+#define MSI_REQ_GRANT BIT(0)
+#define MSI_VECTOR_SHIFT 7
+
 struct dra7xx_pcie {
     struct dw_pcie *pci;
     void __iomem *base; /* DT ti_conf */
@@ -73,6 +89,11 @@ struct dra7xx_pcie {
     struct phy **phy;
     int link_gen;
     struct irq_domain *irq_domain;
+    enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+    enum dw_pcie_device_mode mode;
 };
 
 #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
@@ -88,6 +109,11 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
     writel(value, pcie->base + offset);
 }
 
+static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+    return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
 static int dra7xx_pcie_link_up(struct dw_pcie *pci)
 {
     struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -96,9 +122,19 @@ static int dra7xx_pcie_link_up(struct dw_pcie *pci)
     return !!(reg & LINK_UP);
 }
 
-static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
 {
-    struct dw_pcie *pci = dra7xx->pci;
+    struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+    u32 reg;
+
+    reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+    reg &= ~LTSSM_EN;
+    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+    struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
     struct device *dev = pci->dev;
     u32 reg;
     u32 exp_cap_off = EXP_CAP_ID_OFFSET;
@@ -132,34 +168,42 @@ static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
     reg |= LTSSM_EN;
     dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 
-    return dw_pcie_wait_for_link(pci);
+    return 0;
 }
 
-static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
 {
-    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
-               ~INTERRUPTS);
-    dra7xx_pcie_writel(dra7xx,
-               PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
     dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
                ~LEG_EP_INTERRUPTS & ~MSI);
-    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+
+    dra7xx_pcie_writel(dra7xx,
+               PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
                MSI | LEG_EP_INTERRUPTS);
 }
 
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+               ~INTERRUPTS);
+    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+               INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+    dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+    dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
 static void dra7xx_pcie_host_init(struct pcie_port *pp)
 {
     struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
     struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 
-    pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
-    pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
-    pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
-    pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
-
     dw_pcie_setup_rc(pp);
-    dra7xx_pcie_establish_link(dra7xx);
+
+    dra7xx_pcie_establish_link(pci);
+    dw_pcie_wait_for_link(pci);
     dw_pcie_msi_init(pp);
     dra7xx_pcie_enable_interrupts(dra7xx);
 }
@@ -237,6 +281,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
     struct dra7xx_pcie *dra7xx = arg;
     struct dw_pcie *pci = dra7xx->pci;
     struct device *dev = pci->dev;
+    struct dw_pcie_ep *ep = &pci->ep;
     u32 reg;
 
     reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
@@ -273,8 +318,11 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
     if (reg & LINK_REQ_RST)
         dev_dbg(dev, "Link Request Reset\n");
 
-    if (reg & LINK_UP_EVT)
+    if (reg & LINK_UP_EVT) {
+        if (dra7xx->mode == DW_PCIE_EP_TYPE)
+            dw_pcie_ep_linkup(ep);
         dev_dbg(dev, "Link-up state change\n");
+    }
 
     if (reg & CFG_BME_EVT)
         dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
@@ -287,6 +335,94 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
     return IRQ_HANDLED;
 }
 
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+    struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+    dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+    dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+    mdelay(1);
+    dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+                      u8 interrupt_num)
+{
+    u32 reg;
+
+    reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+    reg |= MSI_REQ_GRANT;
+    dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
+                 enum pci_epc_irq_type type, u8 interrupt_num)
+{
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+    struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+    switch (type) {
+    case PCI_EPC_IRQ_LEGACY:
+        dra7xx_pcie_raise_legacy_irq(dra7xx);
+        break;
+    case PCI_EPC_IRQ_MSI:
+        dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+        break;
+    default:
+        dev_err(pci->dev, "UNKNOWN IRQ type\n");
+    }
+
+    return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+    .ep_init = dra7xx_pcie_ep_init,
+    .raise_irq = dra7xx_pcie_raise_irq,
+};
+
+static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+                     struct platform_device *pdev)
+{
+    int ret;
+    struct dw_pcie_ep *ep;
+    struct resource *res;
+    struct device *dev = &pdev->dev;
+    struct dw_pcie *pci = dra7xx->pci;
+
+    ep = &pci->ep;
+    ep->ops = &pcie_ep_ops;
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
+    pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+    if (!pci->dbi_base)
+        return -ENOMEM;
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
+    pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
+    if (!pci->dbi_base2)
+        return -ENOMEM;
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+    if (!res)
+        return -EINVAL;
+
+    ep->phys_base = res->start;
+    ep->addr_size = resource_size(res);
+
+    ret = dw_pcie_ep_init(ep);
+    if (ret) {
+        dev_err(dev, "failed to initialize endpoint\n");
+        return ret;
+    }
+
+    return 0;
+}
+
 static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
                        struct platform_device *pdev)
 {
@@ -329,6 +465,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
+    .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+    .start_link = dra7xx_pcie_establish_link,
+    .stop_link = dra7xx_pcie_stop_link,
     .link_up = dra7xx_pcie_link_up,
 };
 
@@ -371,6 +510,68 @@ err_phy:
     return ret;
 }
 
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+    .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+    .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+    {
+        .compatible = "ti,dra7-pcie",
+        .data = &dra7xx_pcie_rc_of_data,
+    },
+    {
+        .compatible = "ti,dra7-pcie-ep",
+        .data = &dra7xx_pcie_ep_of_data,
+    },
+    {},
+};
+
+/*
+ * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Access to the PCIe slave port that are not 32-bit aligned will result
+ * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
+ * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
+ * 0x3.
+ *
+ * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+{
+    int ret;
+    struct device_node *np = dev->of_node;
+    struct of_phandle_args args;
+    struct regmap *regmap;
+
+    regmap = syscon_regmap_lookup_by_phandle(np,
+                         "ti,syscon-unaligned-access");
+    if (IS_ERR(regmap)) {
+        dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+        return -EINVAL;
+    }
+
+    ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+                           2, 0, &args);
+    if (ret) {
+        dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+        return ret;
+    }
+
+    ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+                 args.args[1]);
+    if (ret)
+        dev_err(dev, "failed to enable unaligned access\n");
+
+    of_node_put(args.np);
+
+    return ret;
+}
+
 static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 {
     u32 reg;
@@ -388,6 +589,16 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
     struct device_node *np = dev->of_node;
     char name[10];
     struct gpio_desc *reset;
+    const struct of_device_id *match;
+    const struct dra7xx_pcie_of_data *data;
+    enum dw_pcie_device_mode mode;
+
+    match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
+    if (!match)
+        return -EINVAL;
+
+    data = (struct dra7xx_pcie_of_data *)match->data;
+    mode = (enum dw_pcie_device_mode)data->mode;
 
     dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
     if (!dra7xx)
@@ -409,13 +620,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
         return -EINVAL;
     }
 
-    ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
-                   IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
-    if (ret) {
-        dev_err(dev, "failed to request irq\n");
-        return ret;
-    }
-
     res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
     base = devm_ioremap_nocache(dev, res->start, resource_size(res));
     if (!base)
@@ -473,9 +677,37 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
     if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
         dra7xx->link_gen = 2;
 
-    ret = dra7xx_add_pcie_port(dra7xx, pdev);
-    if (ret < 0)
+    switch (mode) {
+    case DW_PCIE_RC_TYPE:
+        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+                   DEVICE_TYPE_RC);
+        ret = dra7xx_add_pcie_port(dra7xx, pdev);
+        if (ret < 0)
+            goto err_gpio;
+        break;
+    case DW_PCIE_EP_TYPE:
+        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+                   DEVICE_TYPE_EP);
+
+        ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+        if (ret)
+            goto err_gpio;
+
+        ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+        if (ret < 0)
+            goto err_gpio;
+        break;
+    default:
+        dev_err(dev, "INVALID device type %d\n", mode);
+    }
+    dra7xx->mode = mode;
+
+    ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+                   IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+    if (ret) {
+        dev_err(dev, "failed to request irq\n");
         goto err_gpio;
+    }
 
     return 0;
 
@@ -496,6 +728,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
     struct dw_pcie *pci = dra7xx->pci;
     u32 val;
 
+    if (dra7xx->mode != DW_PCIE_RC_TYPE)
+        return 0;
+
     /* clear MSE */
     val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
     val &= ~PCI_COMMAND_MEMORY;
@@ -510,6 +745,9 @@ static int dra7xx_pcie_resume(struct device *dev)
     struct dw_pcie *pci = dra7xx->pci;
     u32 val;
 
+    if (dra7xx->mode != DW_PCIE_RC_TYPE)
+        return 0;
+
     /* set MSE */
     val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
     val |= PCI_COMMAND_MEMORY;
@@ -548,11 +786,6 @@ static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
                      dra7xx_pcie_resume_noirq)
 };
 
-static const struct of_device_id of_dra7xx_pcie_match[] = {
-    { .compatible = "ti,dra7-pcie", },
-    {},
-};
-
 static struct platform_driver dra7xx_pcie_driver = {
     .driver = {
         .name = "dra7-pcie",
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 44f774c12fb2..546082ad5a3f 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -521,23 +521,25 @@ static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
     exynos_pcie_msi_init(ep);
 }
 
-static u32 exynos_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+                u32 reg, size_t size)
 {
     struct exynos_pcie *ep = to_exynos_pcie(pci);
     u32 val;
 
     exynos_pcie_sideband_dbi_r_mode(ep, true);
-    val = readl(pci->dbi_base + reg);
+    dw_pcie_read(base + reg, size, &val);
     exynos_pcie_sideband_dbi_r_mode(ep, false);
     return val;
 }
 
-static void exynos_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+                  u32 reg, size_t size, u32 val)
 {
     struct exynos_pcie *ep = to_exynos_pcie(pci);
 
     exynos_pcie_sideband_dbi_w_mode(ep, true);
-    writel(val, pci->dbi_base + reg);
+    dw_pcie_write(base + reg, size, val);
     exynos_pcie_sideband_dbi_w_mode(ep, false);
 }
 
@@ -644,8 +646,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
-    .readl_dbi = exynos_pcie_readl_dbi,
-    .writel_dbi = exynos_pcie_writel_dbi,
+    .read_dbi = exynos_pcie_read_dbi,
+    .write_dbi = exynos_pcie_write_dbi,
     .link_up = exynos_pcie_link_up,
 };
 
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 801e46cd266d..a98cba55c7f0 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
@@ -27,6 +28,7 @@
 #include <linux/signal.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
+#include <linux/reset.h>
 
 #include "pcie-designware.h"
 
@@ -36,6 +38,7 @@ enum imx6_pcie_variants {
     IMX6Q,
     IMX6SX,
     IMX6QP,
+    IMX7D,
 };
 
 struct imx6_pcie {
@@ -47,6 +50,8 @@ struct imx6_pcie {
     struct clk *pcie_inbound_axi;
     struct clk *pcie;
     struct regmap *iomuxc_gpr;
+    struct reset_control *pciephy_reset;
+    struct reset_control *apps_reset;
     enum imx6_pcie_variants variant;
     u32 tx_deemph_gen1;
     u32 tx_deemph_gen2_3p5db;
@@ -56,6 +61,11 @@ struct imx6_pcie {
     int link_gen;
 };
 
+/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
+#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+
 /* PCIe Root Complex registers (memory-mapped) */
 #define PCIE_RC_LCR 0x7c
 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
@@ -248,6 +258,10 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 {
     switch (imx6_pcie->variant) {
+    case IMX7D:
+        reset_control_assert(imx6_pcie->pciephy_reset);
+        reset_control_assert(imx6_pcie->apps_reset);
+        break;
     case IMX6SX:
         regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                    IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -303,11 +317,32 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
         regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                    IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
         break;
+    case IMX7D:
+        break;
     }
 
     return ret;
 }
 
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+    u32 val;
+    unsigned int retries;
+    struct device *dev = imx6_pcie->pci->dev;
+
+    for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
+        regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
+
+        if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
+            return;
+
+        usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
+                 PHY_PLL_LOCK_WAIT_USLEEP_MAX);
+    }
+
+    dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
 static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 {
     struct dw_pcie *pci = imx6_pcie->pci;
@@ -351,6 +386,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
     }
 
     switch (imx6_pcie->variant) {
+    case IMX7D:
+        reset_control_deassert(imx6_pcie->pciephy_reset);
+        imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+        break;
     case IMX6SX:
         regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
                    IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
@@ -377,35 +416,44 @@ err_pcie_bus:
 
 static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 {
-    if (imx6_pcie->variant == IMX6SX)
+    switch (imx6_pcie->variant) {
+    case IMX7D:
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+        break;
+    case IMX6SX:
         regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                    IMX6SX_GPR12_PCIE_RX_EQ_MASK,
                    IMX6SX_GPR12_PCIE_RX_EQ_2);
+        /* FALLTHROUGH */
+    default:
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
 
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-               IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+        /* configure constant input signal to the pcie ctrl and phy */
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                   IMX6Q_GPR8_TX_DEEMPH_GEN1,
+                   imx6_pcie->tx_deemph_gen1 << 0);
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+                   imx6_pcie->tx_deemph_gen2_3p5db << 6);
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+                   imx6_pcie->tx_deemph_gen2_6db << 12);
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                   IMX6Q_GPR8_TX_SWING_FULL,
+                   imx6_pcie->tx_swing_full << 18);
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                   IMX6Q_GPR8_TX_SWING_LOW,
+                   imx6_pcie->tx_swing_low << 25);
+        break;
+    }
 
-    /* configure constant input signal to the pcie ctrl and phy */
     regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-               IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-               IMX6Q_GPR8_TX_DEEMPH_GEN1,
-               imx6_pcie->tx_deemph_gen1 << 0);
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-               IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
-               imx6_pcie->tx_deemph_gen2_3p5db << 6);
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-               IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
-               imx6_pcie->tx_deemph_gen2_6db << 12);
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-               IMX6Q_GPR8_TX_SWING_FULL,
-               imx6_pcie->tx_swing_full << 18);
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-               IMX6Q_GPR8_TX_SWING_LOW,
-               imx6_pcie->tx_swing_low << 25);
 }
 
 static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
@@ -469,8 +517,11 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
     dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
 
     /* Start LTSSM. */
-    regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-               IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+    if (imx6_pcie->variant == IMX7D)
+        reset_control_deassert(imx6_pcie->apps_reset);
+    else
+        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
 
     ret = imx6_pcie_wait_for_link(imx6_pcie);
     if (ret)
@@ -482,29 +533,40 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
         tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
         tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
         dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
-    } else {
-        dev_info(dev, "Link: Gen2 disabled\n");
-    }
-
-    /*
-     * Start Directed Speed Change so the best possible speed both link
-     * partners support can be negotiated.
-     */
-    tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
-    tmp |= PORT_LOGIC_SPEED_CHANGE;
-    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
-
-    ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
-    if (ret) {
-        dev_err(dev, "Failed to bring link up!\n");
-        goto err_reset_phy;
-    }
 
+        /*
+         * Start Directed Speed Change so the best possible
+         * speed both link partners support can be negotiated.
+         */
+        tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+        tmp |= PORT_LOGIC_SPEED_CHANGE;
+        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+        if (imx6_pcie->variant != IMX7D) {
+            /*
+             * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+             * from i.MX6 family when no link speed transition
+             * occurs and we go Gen1 -> yep, Gen1. The difference
+             * is that, in such case, it will not be cleared by HW
+             * which will cause the following code to report false
+             * failure.
+             */
+
+            ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+            if (ret) {
+                dev_err(dev, "Failed to bring link up!\n");
+                goto err_reset_phy;
+            }
+        }
 
-    /* Make sure link training is finished as well! */
-    ret = imx6_pcie_wait_for_link(imx6_pcie);
-    if (ret) {
-        dev_err(dev, "Failed to bring link up!\n");
-        goto err_reset_phy;
+        /* Make sure link training is finished as well! */
+        ret = imx6_pcie_wait_for_link(imx6_pcie);
+        if (ret) {
+            dev_err(dev, "Failed to bring link up!\n");
+            goto err_reset_phy;
+        }
+    } else {
+        dev_info(dev, "Link: Gen2 disabled\n");
     }
 
     tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
@@ -544,8 +606,8 @@ static struct dw_pcie_host_ops imx6_pcie_host_ops = {
     .host_init = imx6_pcie_host_init,
 };
 
-static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
-                     struct platform_device *pdev)
+static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+                  struct platform_device *pdev)
 {
     struct dw_pcie *pci = imx6_pcie->pci;
     struct pcie_port *pp = &pci->pp;
@@ -585,7 +647,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
     .link_up = imx6_pcie_link_up,
 };
 
-static int __init imx6_pcie_probe(struct platform_device *pdev)
+static int imx6_pcie_probe(struct platform_device *pdev)
 {
     struct device *dev = &pdev->dev;
     struct dw_pcie *pci;
@@ -609,10 +671,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
     imx6_pcie->variant =
         (enum imx6_pcie_variants)of_device_get_match_data(dev);
 
-    /* Added for PCI abort handling */
-    hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
-        "imprecise external abort");
-
     dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
     pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
     if (IS_ERR(pci->dbi_base))
@@ -632,6 +690,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
             dev_err(dev, "unable to get reset gpio\n");
             return ret;
         }
+    } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+        return imx6_pcie->reset_gpio;
     }
 
     /* Fetch clocks */
@@ -653,13 +713,31 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
         return PTR_ERR(imx6_pcie->pcie);
     }
 
-    if (imx6_pcie->variant == IMX6SX) {
+    switch (imx6_pcie->variant) {
+    case IMX6SX:
         imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
                                "pcie_inbound_axi");
         if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
             dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
             return PTR_ERR(imx6_pcie->pcie_inbound_axi);
         }
+        break;
+    case IMX7D:
+        imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
+                                  "pciephy");
+        if (IS_ERR(imx6_pcie->pciephy_reset)) {
+            dev_err(dev, "Failed to get PCIEPHY reset control\n");
+            return PTR_ERR(imx6_pcie->pciephy_reset);
+        }
+
+        imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
+        if (IS_ERR(imx6_pcie->apps_reset)) {
+            dev_err(dev, "Failed to get PCIE APPS reset control\n");
+            return PTR_ERR(imx6_pcie->apps_reset);
+        }
+        break;
+    default:
+        break;
     }
 
     /* Grab GPR config register range */
@@ -718,6 +796,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
     { .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
    { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
     { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
+    { .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
     {},
 };
 
@@ -725,12 +804,24 @@ static struct platform_driver imx6_pcie_driver = {
     .driver = {
         .name = "imx6q-pcie",
         .of_match_table = imx6_pcie_of_match,
+        .suppress_bind_attrs = true,
     },
+    .probe = imx6_pcie_probe,
     .shutdown = imx6_pcie_shutdown,
 };
 
 static int __init imx6_pcie_init(void)
 {
-    return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+    /*
+     * Since probe() can be deferred we need to make sure that
+     * hook_fault_code is not called after __init memory is freed
+     * by kernel and since imx6q_pcie_abort_handler() is a no-op,
+     * we can install the handler here without risking it
+     * accessing some uninitialized driver state.
+     */
+    hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+            "imprecise external abort");
+
+    return platform_driver_register(&imx6_pcie_driver);
 }
 device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 6b396f6b4615..8bc626e640c8 100644
--- a/drivers/pci/dwc/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -543,7 +543,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
 
     /* Index 0 is the config reg. space address */
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-    pci->dbi_base = devm_ioremap_resource(dev, res);
+    pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
     if (IS_ERR(pci->dbi_base))
         return PTR_ERR(pci->dbi_base);
 
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index c32e392a0ae6..27d638c4e134 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -283,7 +283,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
     pcie->pci = pci;
 
     dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-    pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+    pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
     if (IS_ERR(pci->dbi_base))
         return PTR_ERR(pci->dbi_base);
 
@@ -305,6 +305,7 @@ static struct platform_driver ls_pcie_driver = {
     .driver = {
         .name = "layerscape-pcie",
         .of_match_table = ls_pcie_of_match,
+        .suppress_bind_attrs = true,
     },
 };
 builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index f110e3b24a26..495b023042b3 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -230,7 +230,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
 
     /* Get the dw-pcie unit configuration/control registers base. */
     base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
-    pci->dbi_base = devm_ioremap_resource(dev, base);
+    pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
     if (IS_ERR(pci->dbi_base)) {
         dev_err(dev, "couldn't remap regs base %p\n", base);
         ret = PTR_ERR(pci->dbi_base);
@@ -262,6 +262,7 @@ static struct platform_driver armada8k_pcie_driver = {
     .driver = {
         .name = "armada8k-pcie",
         .of_match_table = of_match_ptr(armada8k_pcie_of_match),
+        .suppress_bind_attrs = true,
     },
 };
 builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 6d23683c0892..82a04acc42fd 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -78,6 +78,11 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
     regmap_write(artpec6_pcie->regmap, offset, val);
 }
 
+static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+    return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR;
+}
+
 static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
 {
     struct dw_pcie *pci = artpec6_pcie->pci;
@@ -142,11 +147,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
      */
     dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
 
-    pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-    pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-    pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-    pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-
     /* setup root complex */
     dw_pcie_setup_rc(pp);
 
@@ -235,6 +235,7 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
+    .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
 };
 
 static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -294,6 +295,7 @@ static struct platform_driver artpec6_pcie_driver = {
     .driver = {
         .name = "artpec6-pcie",
         .of_match_table = artpec6_pcie_of_match,
+        .suppress_bind_attrs = true,
     },
 };
 builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
new file mode 100644
index 000000000000..398406393f37
--- /dev/null
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -0,0 +1,342 @@
+/**
+ * Synopsys Designware PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+    struct pci_epc *epc = ep->epc;
+
+    pci_epc_linkup(epc);
+}
+
+static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+    u32 reg;
+
+    reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+    dw_pcie_writel_dbi2(pci, reg, 0x0);
+    dw_pcie_writel_dbi(pci, reg, 0x0);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc,
+                   struct pci_epf_header *hdr)
+{
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
+    dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
+    dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
+    dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
+    dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+               hdr->subclass_code | hdr->baseclass_code << 8);
+    dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+               hdr->cache_line_size);
+    dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+               hdr->subsys_vendor_id);
+    dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+    dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+               hdr->interrupt_pin);
+
+    return 0;
+}
+
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
+                  dma_addr_t cpu_addr,
+                  enum dw_pcie_as_type as_type)
+{
+    int ret;
+    u32 free_win;
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    free_win = find_first_zero_bit(&ep->ib_window_map,
+                       sizeof(ep->ib_window_map));
+    if (free_win >= ep->num_ib_windows) {
+        dev_err(pci->dev, "no free inbound window\n");
+        return -EINVAL;
+    }
+
+    ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+                       as_type);
+    if (ret < 0) {
+        dev_err(pci->dev, "Failed to program IB window\n");
+        return ret;
+    }
+
+    ep->bar_to_atu[bar] = free_win;
+    set_bit(free_win, &ep->ib_window_map);
+
+    return 0;
+}
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+                   u64 pci_addr, size_t size)
+{
+    u32 free_win;
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    free_win = find_first_zero_bit(&ep->ob_window_map,
+                       sizeof(ep->ob_window_map));
+    if (free_win >= ep->num_ob_windows) {
+        dev_err(pci->dev, "no free outbound window\n");
+        return -EINVAL;
+    }
+
+    dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+                  phys_addr, pci_addr, size);
+
+    set_bit(free_win, &ep->ob_window_map);
+    ep->outbound_addr[free_win] = phys_addr;
+
+    return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
+{
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+    u32 atu_index = ep->bar_to_atu[bar];
+
+    dw_pcie_ep_reset_bar(pci, bar);
+
+    dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+    clear_bit(atu_index, &ep->ib_window_map);
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+                  dma_addr_t bar_phys, size_t size, int flags)
+{
+    int ret;
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+    enum dw_pcie_as_type as_type;
+    u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+    if (!(flags & PCI_BASE_ADDRESS_SPACE))
+        as_type = DW_PCIE_AS_MEM;
+    else
+        as_type = DW_PCIE_AS_IO;
+
+    ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
+    if (ret)
+        return ret;
+
+    dw_pcie_writel_dbi2(pci, reg, size - 1);
+    dw_pcie_writel_dbi(pci, reg, flags);
+
+    return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+                  u32 *atu_index)
+{
+    u32 index;
+
+    for (index = 0; index < ep->num_ob_windows; index++) {
+        if (ep->outbound_addr[index] != addr)
+            continue;
+        *atu_index = index;
+        return 0;
+    }
+
+    return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
+{
+    int ret;
+    u32 atu_index;
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    ret = dw_pcie_find_index(ep, addr, &atu_index);
+    if (ret < 0)
+        return;
+
+    dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+    clear_bit(atu_index, &ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
+                   u64 pci_addr, size_t size)
+{
+    int ret;
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+    if (ret) {
+        dev_err(pci->dev, "failed to enable address\n");
+        return ret;
+    }
+
+    return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc)
+{
+    int val;
+    u32 lower_addr;
+    u32 upper_addr;
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL);
+    val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+
+    lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+    upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
+
+    if (!(lower_addr || upper_addr))
+        return -EINVAL;
+
+    return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
+{
+    int val;
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    val = (encode_int << MSI_CAP_MMC_SHIFT);
+    dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+
+    return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+                enum pci_epc_irq_type type, u8 interrupt_num)
+{
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+    if (!ep->ops->raise_irq)
+        return -EINVAL;
+
+    return ep->ops->raise_irq(ep, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    if (!pci->ops->stop_link)
+        return;
+
+    pci->ops->stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+    struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+    if (!pci->ops->start_link)
+        return -EINVAL;
+
+    return pci->ops->start_link(pci);
+}
+
+static const struct pci_epc_ops epc_ops = {
+    .write_header = dw_pcie_ep_write_header,
+    .set_bar = dw_pcie_ep_set_bar,
+    .clear_bar = dw_pcie_ep_clear_bar,
+    .map_addr = dw_pcie_ep_map_addr,
+    .unmap_addr = dw_pcie_ep_unmap_addr,
+    .set_msi = dw_pcie_ep_set_msi,
+    .get_msi = dw_pcie_ep_get_msi,
+    .raise_irq = dw_pcie_ep_raise_irq,
+    .start = dw_pcie_ep_start,
+    .stop = dw_pcie_ep_stop,
+};
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+    struct pci_epc *epc = ep->epc;
+
+    pci_epc_mem_exit(epc);
+}
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+    int ret;
+    void *addr;
+    enum pci_barno bar;
+    struct pci_epc *epc;
+    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+    struct device *dev = pci->dev;
+    struct device_node *np = dev->of_node;
+
+    if (!pci->dbi_base || !pci->dbi_base2) {
+        dev_err(dev, "dbi_base/deb_base2 is not populated\n");
+        return -EINVAL;
+    }
+
+    ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
+    if (ret < 0) {
+        dev_err(dev, "unable to read *num-ib-windows* property\n");
+        return ret;
+    }
+
+    ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+    if (ret < 0) {
+        dev_err(dev, "unable to read *num-ob-windows* property\n");
+        return ret;
+    }
+
+    addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
+                GFP_KERNEL);
+    if (!addr)
+        return -ENOMEM;
+    ep->outbound_addr = addr;
+
+    for (bar = BAR_0; bar <= BAR_5; bar++)
+        dw_pcie_ep_reset_bar(pci, bar);
+
+    if (ep->ops->ep_init)
+        ep->ops->ep_init(ep);
+
+    epc = devm_pci_epc_create(dev, &epc_ops);
+    if (IS_ERR(epc)) {
+        dev_err(dev, "failed to create epc device\n");
+        return PTR_ERR(epc);
+    }
+
+    ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+    if (ret < 0)
+        epc->max_functions = 1;
+
+    ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size);
+    if (ret < 0) {
+        dev_err(dev, "Failed to initialize address space\n");
+        return ret;
+    }
+
+    ep->epc = epc;
+    epc_set_drvdata(epc, ep);
+    dw_pcie_setup(pci);
+
+    return 0;
+}
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 5ba334938b52..28ed32ba4f1b 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -56,24 +56,25 @@ static struct irq_chip dw_msi_irq_chip = {
 /* MSI int handler */
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
 {
-    unsigned long val;
+    u32 val;
     int i, pos, irq;
     irqreturn_t ret = IRQ_NONE;
 
     for (i = 0; i < MAX_MSI_CTRLS; i++) {
         dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
-                    (u32 *)&val);
-        if (val) {
-            ret = IRQ_HANDLED;
-            pos = 0;
-            while ((pos = find_next_bit(&val, 32, pos)) != 32) {
-                irq = irq_find_mapping(pp->irq_domain,
-                               i * 32 + pos);
-                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
-                            i * 12, 4, 1 << pos);
-                generic_handle_irq(irq);
-                pos++;
-            }
+                    &val);
+        if (!val)
+            continue;
+
+        ret = IRQ_HANDLED;
+        pos = 0;
+        while ((pos = find_next_bit((unsigned long *) &val, 32,
+                        pos)) != 32) {
+            irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+            dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+                        4, 1 << pos);
+            generic_handle_irq(irq);
+            pos++;
         }
     }
 
@@ -338,8 +339,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
     }
 
     if (!pci->dbi_base) {
-        pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
-                         resource_size(pp->cfg));
+        pci->dbi_base = devm_pci_remap_cfgspace(dev,
+                        pp->cfg->start,
+                        resource_size(pp->cfg));
         if (!pci->dbi_base) {
             dev_err(dev, "error with ioremap\n");
             ret = -ENOMEM;
@@ -350,8 +352,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
     pp->mem_base = pp->mem->start;
 
     if (!pp->va_cfg0_base) {
-        pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
-                        pp->cfg0_size);
+        pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
+                    pp->cfg0_base, pp->cfg0_size);
         if (!pp->va_cfg0_base) {
             dev_err(dev, "error with ioremap in function\n");
             ret = -ENOMEM;
@@ -360,7 +362,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
     }
 
     if (!pp->va_cfg1_base) {
-        pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
+        pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
+                    pp->cfg1_base,
                         pp->cfg1_size);
         if (!pp->va_cfg1_base) {
             dev_err(dev, "error with ioremap\n");
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index f20d494922ab..32091b32f6e1 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -133,6 +133,7 @@ static struct platform_driver dw_plat_pcie_driver = {
     .driver = {
         .name = "dw-pcie",
         .of_match_table = dw_plat_pcie_of_match,
+        .suppress_bind_attrs = true,
     },
     .probe = dw_plat_pcie_probe,
 };
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 7e1fb7d6643c..0e03af279259 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -61,91 +61,253 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
     return PCIBIOS_SUCCESSFUL;
 }
 
-u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+               size_t size)
 {
-    if (pci->ops->readl_dbi)
-        return pci->ops->readl_dbi(pci, reg);
+    int ret;
+    u32 val;
 
-    return readl(pci->dbi_base + reg);
+    if (pci->ops->read_dbi)
+        return pci->ops->read_dbi(pci, base, reg, size);
+
+    ret = dw_pcie_read(base + reg, size, &val);
+    if (ret)
+        dev_err(pci->dev, "read DBI address failed\n");
+
+    return val;
 }
 
-void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+             size_t size, u32 val)
 {
-    if (pci->ops->writel_dbi)
-        pci->ops->writel_dbi(pci, reg, val);
-    else
-        writel(val, pci->dbi_base + reg);
+    int ret;
+
+    if (pci->ops->write_dbi) {
+        pci->ops->write_dbi(pci, base, reg, size, val);
+        return;
+    }
+
+    ret = dw_pcie_write(base + reg, size, val);
+    if (ret)
+        dev_err(pci->dev, "write DBI address failed\n");
 }
 
-static u32 dw_pcie_readl_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
 {
     u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
     return dw_pcie_readl_dbi(pci, offset + reg);
 }
 
-static void dw_pcie_writel_unroll(struct dw_pcie *pci, u32 index, u32 reg,
-                  u32 val)
+static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+                     u32 val)
 {
     u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
     dw_pcie_writel_dbi(pci, offset + reg, val);
 }
 
+void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
+                      u64 cpu_addr, u64 pci_addr, u32 size)
+{
+    u32 retries, val;
+
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
+                 lower_32_bits(cpu_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
+                 upper_32_bits(cpu_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
+                 lower_32_bits(cpu_addr + size - 1));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+                 lower_32_bits(pci_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+                 upper_32_bits(pci_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+                 type);
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+                 PCIE_ATU_ENABLE);
+
+    /*
+     * Make sure ATU enable takes effect before any subsequent config
+     * and I/O accesses.
+     */
+    for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+        val = dw_pcie_readl_ob_unroll(pci, index,
+                          PCIE_ATU_UNR_REGION_CTRL2);
+        if (val & PCIE_ATU_ENABLE)
+            return;
+
+        usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+    }
+    dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
 void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                    u64 cpu_addr, u64 pci_addr, u32 size)
 {
     u32 retries, val;
 
+    if (pci->ops->cpu_addr_fixup)
+        cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
+
     if (pci->iatu_unroll_enabled) {
-        dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
-                      lower_32_bits(cpu_addr));
-        dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
-                      upper_32_bits(cpu_addr));
-        dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
-                      lower_32_bits(cpu_addr + size - 1));
-        dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
-                      lower_32_bits(pci_addr));
-        dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
-                      upper_32_bits(pci_addr));
-        dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
-                      type);
-        dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
-                      PCIE_ATU_ENABLE);
-    } else {
-        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
-                   PCIE_ATU_REGION_OUTBOUND | index);
-        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
-                   lower_32_bits(cpu_addr));
-        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
-                   upper_32_bits(cpu_addr));
-        dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
-                   lower_32_bits(cpu_addr + size - 1));
-        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
-                   lower_32_bits(pci_addr));
-        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
-                   upper_32_bits(pci_addr));
-        dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
-        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+        dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
+                         pci_addr, size);
+        return;
     }
 
+    dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
+               PCIE_ATU_REGION_OUTBOUND | index);
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
+               lower_32_bits(cpu_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
+               upper_32_bits(cpu_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
+               lower_32_bits(cpu_addr + size - 1));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
+               lower_32_bits(pci_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
+               upper_32_bits(pci_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+
     /*
      * Make sure ATU enable takes effect before any subsequent config
     * and I/O accesses.
      */
     for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-        if (pci->iatu_unroll_enabled)
-            val = dw_pcie_readl_unroll(pci, index,
-                           PCIE_ATU_UNR_REGION_CTRL2);
-        else
-            val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
-
+        val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
         if (val == PCIE_ATU_ENABLE)
             return;
 
         usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
     }
-    dev_err(pci->dev, "iATU is not being enabled\n");
+    dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
+static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+    u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+    return dw_pcie_readl_dbi(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+                     u32 val)
+{
+    u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+    dw_pcie_writel_dbi(pci, offset + reg, val);
+}
+
+int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
+                    u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+    int type;
+    u32 retries, val;
+
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+                 lower_32_bits(cpu_addr));
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+                 upper_32_bits(cpu_addr));
+
+    switch (as_type) {
+    case DW_PCIE_AS_MEM:
+        type = PCIE_ATU_TYPE_MEM;
+        break;
+    case DW_PCIE_AS_IO:
+        type = PCIE_ATU_TYPE_IO;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+                 PCIE_ATU_ENABLE |
+                 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+    /*
+     * Make sure ATU enable takes effect before any subsequent config
+     * and I/O accesses.
+     */
+    for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+        val = dw_pcie_readl_ib_unroll(pci, index,
+                          PCIE_ATU_UNR_REGION_CTRL2);
+        if (val & PCIE_ATU_ENABLE)
+            return 0;
+
+        usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+    }
+    dev_err(pci->dev, "inbound iATU is not being enabled\n");
+
+    return -EBUSY;
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+                 u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+    int type;
+    u32 retries, val;
+
+    if (pci->iatu_unroll_enabled)
+        return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+                               cpu_addr, as_type);
+
+    dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
+               index);
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
+
+    switch (as_type) {
+    case DW_PCIE_AS_MEM:
+        type = PCIE_ATU_TYPE_MEM;
+        break;
+    case DW_PCIE_AS_IO:
+        type = PCIE_ATU_TYPE_IO;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
+               | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+    /*
+     * Make sure ATU enable takes effect before any subsequent config
+     * and I/O accesses.
+ */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); + if (val & PCIE_ATU_ENABLE) + return 0; + + usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + } + dev_err(pci->dev, "inbound iATU is not being enabled\n"); + + return -EBUSY; +} + +void dw_pcie_disable_atu(struct dw_pcie *pci, int index, + enum dw_pcie_region_type type) +{ + int region; + + switch (type) { + case DW_PCIE_REGION_INBOUND: + region = PCIE_ATU_REGION_INBOUND; + break; + case DW_PCIE_REGION_OUTBOUND: + region = PCIE_ATU_REGION_OUTBOUND; + break; + default: + return; + } + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); } int dw_pcie_wait_for_link(struct dw_pcie *pci) diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index cd3b8713fe50..c6a840575796 100644 --- a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h @@ -18,6 +18,9 @@ #include <linux/msi.h> #include <linux/pci.h> +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> + /* Parameters for the waiting for link up routine */ #define LINK_WAIT_MAX_RETRIES 10 #define LINK_WAIT_USLEEP_MIN 90000 @@ -89,6 +92,16 @@ #define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ ((0x3 << 20) | ((region) << 9)) +#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ + ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) + +#define MSI_MESSAGE_CONTROL 0x52 +#define MSI_CAP_MMC_SHIFT 1 +#define MSI_CAP_MME_SHIFT 4 +#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) +#define MSI_MESSAGE_ADDR_L32 0x54 +#define MSI_MESSAGE_ADDR_U32 0x58 + /* * Maximum number of MSI IRQs can be 256 per controller. But keep * it 32 as of now. Probably we will never need more than 32. 
If needed, @@ -99,6 +112,20 @@ struct pcie_port; struct dw_pcie; +struct dw_pcie_ep; + +enum dw_pcie_region_type { + DW_PCIE_REGION_UNKNOWN, + DW_PCIE_REGION_INBOUND, + DW_PCIE_REGION_OUTBOUND, +}; + +enum dw_pcie_device_mode { + DW_PCIE_UNKNOWN_TYPE, + DW_PCIE_EP_TYPE, + DW_PCIE_LEG_EP_TYPE, + DW_PCIE_RC_TYPE, +}; struct dw_pcie_host_ops { int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); @@ -142,35 +169,116 @@ struct pcie_port { DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); }; +enum dw_pcie_as_type { + DW_PCIE_AS_UNKNOWN, + DW_PCIE_AS_MEM, + DW_PCIE_AS_IO, +}; + +struct dw_pcie_ep_ops { + void (*ep_init)(struct dw_pcie_ep *ep); + int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type, + u8 interrupt_num); +}; + +struct dw_pcie_ep { + struct pci_epc *epc; + struct dw_pcie_ep_ops *ops; + phys_addr_t phys_base; + size_t addr_size; + u8 bar_to_atu[6]; + phys_addr_t *outbound_addr; + unsigned long ib_window_map; + unsigned long ob_window_map; + u32 num_ib_windows; + u32 num_ob_windows; +}; + struct dw_pcie_ops { - u32 (*readl_dbi)(struct dw_pcie *pcie, u32 reg); - void (*writel_dbi)(struct dw_pcie *pcie, u32 reg, u32 val); + u64 (*cpu_addr_fixup)(u64 cpu_addr); + u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size); + void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size, u32 val); int (*link_up)(struct dw_pcie *pcie); + int (*start_link)(struct dw_pcie *pcie); + void (*stop_link)(struct dw_pcie *pcie); }; struct dw_pcie { struct device *dev; void __iomem *dbi_base; + void __iomem *dbi_base2; u32 num_viewport; u8 iatu_unroll_enabled; struct pcie_port pp; + struct dw_pcie_ep ep; const struct dw_pcie_ops *ops; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) +#define to_dw_pcie_from_ep(endpoint) \ + container_of((endpoint), struct dw_pcie, ep) + int dw_pcie_read(void __iomem *addr, int size, u32 *val); int dw_pcie_write(void __iomem *addr, int size, u32 val); -u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg); -void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val); +u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size); +void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val); int dw_pcie_link_up(struct dw_pcie *pci); int dw_pcie_wait_for_link(struct dw_pcie *pci); void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, u64 cpu_addr, u64 pci_addr, u32 size); +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, + u64 cpu_addr, enum dw_pcie_as_type as_type); +void dw_pcie_disable_atu(struct dw_pcie *pci, int index, + enum dw_pcie_region_type type); void dw_pcie_setup(struct dw_pcie *pci); +static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); +} + +static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); +} + +static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); +} + +static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); +} + +static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); +} + +static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) +{ + return 
__dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); +} + +static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); +} + +static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); +} + #ifdef CONFIG_PCIE_DW_HOST irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); void dw_pcie_msi_init(struct pcie_port *pp); @@ -195,4 +303,23 @@ static inline int dw_pcie_host_init(struct pcie_port *pp) return 0; } #endif + +#ifdef CONFIG_PCIE_DW_EP +void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); +int dw_pcie_ep_init(struct dw_pcie_ep *ep); +void dw_pcie_ep_exit(struct dw_pcie_ep *ep); +#else +static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) +{ +} + +static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ + return 0; +} + +static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) +{ +} +#endif #endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c index cf9d6a9d9fd4..e51acee0ddf3 100644 --- a/drivers/pci/dwc/pcie-hisi.c +++ b/drivers/pci/dwc/pcie-hisi.c @@ -99,7 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg) return -ENOMEM; } - reg_base = devm_ioremap(dev, res->start, resource_size(res)); + reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); if (!reg_base) return -ENOMEM; @@ -296,10 +296,9 @@ static int hisi_pcie_probe(struct platform_device *pdev) } reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); - pci->dbi_base = devm_ioremap_resource(dev, reg); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - platform_set_drvdata(pdev, hisi_pcie); ret = hisi_add_pcie_port(hisi_pcie, pdev); @@ -334,6 +333,7 @@ static struct platform_driver hisi_pcie_driver = { .driver = { .name = "hisi-pcie", .of_match_table = hisi_pcie_of_match, + .suppress_bind_attrs = true, }, }; builtin_platform_driver(hisi_pcie_driver); @@ -360,7 +360,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg) return -EINVAL; } - reg_base = devm_ioremap(dev, res->start, resource_size(res)); + reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); if (!reg_base) return -ENOMEM; @@ -395,6 +395,7 @@ static struct platform_driver hisi_pcie_almost_ecam_driver = { .driver = { .name = "hisi-pcie-almost-ecam", .of_match_table = hisi_pcie_almost_ecam_of_match, + .suppress_bind_attrs = true, }, }; builtin_platform_driver(hisi_pcie_almost_ecam_driver); diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 67eb7f5926dd..5bf23d432fdb 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c @@ -700,7 +700,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) return PTR_ERR(pcie->parf); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index eaa4ea8e2ea4..8ff36b3dbbdf 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c @@ -273,7 +273,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) } dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + pci->dbi_base = 
devm_pci_remap_cfg_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) { dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); ret = PTR_ERR(pci->dbi_base); @@ -308,6 +308,7 @@ static struct platform_driver spear13xx_pcie_driver = { .driver = { .name = "spear-pcie", .of_match_table = of_match_ptr(spear13xx_pcie_of_match), + .suppress_bind_attrs = true, }, }; diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 2fee61bb6559..c228a2eb7faa 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -84,12 +84,14 @@ struct pci_config_window *pci_ecam_create(struct device *dev, if (!cfg->winp) goto err_exit_malloc; for (i = 0; i < bus_range; i++) { - cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz); + cfg->winp[i] = + pci_remap_cfgspace(cfgres->start + i * bsz, + bsz); if (!cfg->winp[i]) goto err_exit_iomap; } } else { - cfg->win = ioremap(cfgres->start, bus_range * bsz); + cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz); if (!cfg->win) goto err_exit_iomap; } diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig new file mode 100644 index 000000000000..c23f146fb5a6 --- /dev/null +++ b/drivers/pci/endpoint/Kconfig @@ -0,0 +1,31 @@ +# +# PCI Endpoint Support +# + +menu "PCI Endpoint" + +config PCI_ENDPOINT + bool "PCI Endpoint Support" + help + Enable this configuration option to support a configurable PCI + endpoint. This should be enabled if the platform has a PCI + controller that can operate in endpoint mode. + + Enabling this option will build the endpoint library, which + includes the endpoint controller library and the endpoint + function library. + + If in doubt, say "N" to disable Endpoint support. + +config PCI_ENDPOINT_CONFIGFS + bool "PCI Endpoint Configfs Support" + depends on PCI_ENDPOINT + select CONFIGFS_FS + help + This will enable the configfs entry that can be used to + configure the endpoint function and to bind the + function to an endpoint controller. + +source "drivers/pci/endpoint/functions/Kconfig" + +endmenu diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile new file mode 100644 index 000000000000..1041f80a4645 --- /dev/null +++ b/drivers/pci/endpoint/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for PCI Endpoint Support +# + +obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o +obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\ + pci-epc-mem.o functions/ diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig new file mode 100644 index 000000000000..175edad42d2f --- /dev/null +++ b/drivers/pci/endpoint/functions/Kconfig @@ -0,0 +1,12 @@ +# +# PCI Endpoint Functions +# + +config PCI_EPF_TEST + tristate "PCI Endpoint Test driver" + depends on PCI_ENDPOINT + help + Enable this configuration option to enable the test driver + for PCI Endpoint. + + If in doubt, say "N" to disable Endpoint test driver. 
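For orientation before the test driver below: a function driver selected by an entry like PCI_EPF_TEST reduces to the following skeleton. This is a minimal sketch against the pci-epf API introduced in this series, not part of the patch; the pci_epf_skel names are hypothetical placeholders, and the real pci-epf-test driver that follows fills in the actual bind/unbind/linkup logic.

#include <linux/module.h>
#include <linux/pci-epf.h>

/* Hypothetical skeleton: bind, unbind and linkup are all mandatory,
 * since __pci_epf_register_driver() rejects a driver that omits any
 * of the three ops. */
static int pci_epf_skel_bind(struct pci_epf *epf)
{
	/* write the config header and set up BARs through epf->epc */
	return 0;
}

static void pci_epf_skel_unbind(struct pci_epf *epf)
{
	/* undo whatever bind() configured on the controller */
}

static void pci_epf_skel_linkup(struct pci_epf *epf)
{
	/* the link to the host is up; start servicing the function */
}

static int pci_epf_skel_probe(struct pci_epf *epf)
{
	return 0;	/* allocate per-function state here */
}

static struct pci_epf_ops pci_epf_skel_ops = {
	.bind	= pci_epf_skel_bind,
	.unbind	= pci_epf_skel_unbind,
	.linkup	= pci_epf_skel_linkup,
};

static struct pci_epf_driver pci_epf_skel_driver = {
	.driver.name	= "pci_epf_skel",
	.probe		= pci_epf_skel_probe,
	.ops		= &pci_epf_skel_ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_skel_init(void)
{
	return pci_epf_register_driver(&pci_epf_skel_driver);
}
module_init(pci_epf_skel_init);

static void __exit pci_epf_skel_exit(void)
{
	pci_epf_unregister_driver(&pci_epf_skel_driver);
}
module_exit(pci_epf_skel_exit);

MODULE_LICENSE("GPL v2");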
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile new file mode 100644 index 000000000000..6d94a4801838 --- /dev/null +++ b/drivers/pci/endpoint/functions/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for PCI Endpoint Functions +# + +obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c new file mode 100644 index 000000000000..53fff8030337 --- /dev/null +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -0,0 +1,510 @@ +/** + * Test driver to test endpoint functionality + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci_ids.h> +#include <linux/random.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci_regs.h> + +#define COMMAND_RAISE_LEGACY_IRQ BIT(0) +#define COMMAND_RAISE_MSI_IRQ BIT(1) +#define MSI_NUMBER_SHIFT 2 +#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT) +#define COMMAND_READ BIT(8) +#define COMMAND_WRITE BIT(9) +#define COMMAND_COPY BIT(10) + +#define STATUS_READ_SUCCESS BIT(0) +#define STATUS_READ_FAIL BIT(1) +#define STATUS_WRITE_SUCCESS BIT(2) +#define STATUS_WRITE_FAIL BIT(3) +#define STATUS_COPY_SUCCESS BIT(4) +#define STATUS_COPY_FAIL BIT(5) +#define STATUS_IRQ_RAISED BIT(6) +#define STATUS_SRC_ADDR_INVALID BIT(7) +#define STATUS_DST_ADDR_INVALID BIT(8) + +#define TIMER_RESOLUTION 1 + +static struct workqueue_struct *kpcitest_workqueue; + +struct pci_epf_test { + void *reg[6]; + struct pci_epf *epf; + struct delayed_work cmd_handler; +}; + +struct pci_epf_test_reg { + u32 magic; + u32 command; + u32 status; + u64 src_addr; + u64 dst_addr; + u32 size; + u32 checksum; +} __packed; + +static struct pci_epf_header test_header = { + .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .baseclass_code = PCI_CLASS_OTHERS, + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +static int bar_size[] = { 512, 1024, 16384, 131072, 1048576 }; + +static int pci_epf_test_copy(struct pci_epf_test *epf_test) +{ + int ret; + void __iomem *src_addr; + void __iomem *dst_addr; + phys_addr_t src_phys_addr; + phys_addr_t dst_phys_addr; + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + struct pci_epf_test_reg *reg = epf_test->reg[0]; + + src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); + if (!src_addr) { + dev_err(dev, "failed to allocate source address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + ret = -ENOMEM; + goto err; + } + + ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size); + if (ret) { + dev_err(dev, "failed to map source address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + goto err_src_addr; + } + + dst_addr = 
pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); + if (!dst_addr) { + dev_err(dev, "failed to allocate destination address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + ret = -ENOMEM; + goto err_src_map_addr; + } + + ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size); + if (ret) { + dev_err(dev, "failed to map destination address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + goto err_dst_addr; + } + + memcpy(dst_addr, src_addr, reg->size); + + pci_epc_unmap_addr(epc, dst_phys_addr); + +err_dst_addr: + pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); + +err_src_map_addr: + pci_epc_unmap_addr(epc, src_phys_addr); + +err_src_addr: + pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); + +err: + return ret; +} + +static int pci_epf_test_read(struct pci_epf_test *epf_test) +{ + int ret; + void __iomem *src_addr; + void *buf; + u32 crc32; + phys_addr_t phys_addr; + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + struct pci_epf_test_reg *reg = epf_test->reg[0]; + + src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); + if (!src_addr) { + dev_err(dev, "failed to allocate address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + ret = -ENOMEM; + goto err; + } + + ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size); + if (ret) { + dev_err(dev, "failed to map address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + goto err_addr; + } + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + memcpy(buf, src_addr, reg->size); + + crc32 = crc32_le(~0, buf, reg->size); + if (crc32 != reg->checksum) + ret = -EIO; + + kfree(buf); + +err_map_addr: + pci_epc_unmap_addr(epc, phys_addr); + +err_addr: + pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); + +err: + return ret; +} + +static int pci_epf_test_write(struct pci_epf_test *epf_test) +{ + int ret; + void __iomem *dst_addr; + void *buf; + phys_addr_t phys_addr; + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + struct pci_epf_test_reg *reg = epf_test->reg[0]; + + dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); + if (!dst_addr) { + dev_err(dev, "failed to allocate address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + ret = -ENOMEM; + goto err; + } + + ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size); + if (ret) { + dev_err(dev, "failed to map address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + goto err_addr; + } + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + get_random_bytes(buf, reg->size); + reg->checksum = crc32_le(~0, buf, reg->size); + + memcpy(dst_addr, buf, reg->size); + + /* + * Wait 1ms in order for the write to complete. Without this delay, an L3 + * error is observed in the host system. 
+ */ + mdelay(1); + + kfree(buf); + +err_map_addr: + pci_epc_unmap_addr(epc, phys_addr); + +err_addr: + pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); + +err: + return ret; +} + +static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) +{ + u8 irq; + u8 msi_count; + struct pci_epf *epf = epf_test->epf; + struct pci_epc *epc = epf->epc; + struct pci_epf_test_reg *reg = epf_test->reg[0]; + + reg->status |= STATUS_IRQ_RAISED; + msi_count = pci_epc_get_msi(epc); + irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; + if (irq > msi_count || msi_count <= 0) + pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); + else + pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq); +} + +static void pci_epf_test_cmd_handler(struct work_struct *work) +{ + int ret; + u8 irq; + u8 msi_count; + struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, + cmd_handler.work); + struct pci_epf *epf = epf_test->epf; + struct pci_epc *epc = epf->epc; + struct pci_epf_test_reg *reg = epf_test->reg[0]; + + if (!reg->command) + goto reset_handler; + + if (reg->command & COMMAND_RAISE_LEGACY_IRQ) { + reg->status = STATUS_IRQ_RAISED; + pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); + goto reset_handler; + } + + if (reg->command & COMMAND_WRITE) { + ret = pci_epf_test_write(epf_test); + if (ret) + reg->status |= STATUS_WRITE_FAIL; + else + reg->status |= STATUS_WRITE_SUCCESS; + pci_epf_test_raise_irq(epf_test); + goto reset_handler; + } + + if (reg->command & COMMAND_READ) { + ret = pci_epf_test_read(epf_test); + if (!ret) + reg->status |= STATUS_READ_SUCCESS; + else + reg->status |= STATUS_READ_FAIL; + pci_epf_test_raise_irq(epf_test); + goto reset_handler; + } + + if (reg->command & COMMAND_COPY) { + ret = pci_epf_test_copy(epf_test); + if (!ret) + reg->status |= STATUS_COPY_SUCCESS; + else + reg->status |= STATUS_COPY_FAIL; + pci_epf_test_raise_irq(epf_test); + goto reset_handler; + } + + if (reg->command & COMMAND_RAISE_MSI_IRQ) { + msi_count = pci_epc_get_msi(epc); + irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; + if (irq > msi_count || msi_count <= 0) + goto reset_handler; + reg->status = STATUS_IRQ_RAISED; + pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq); + goto reset_handler; + } + +reset_handler: + reg->command = 0; + + queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, + msecs_to_jiffies(1)); +} + +static void pci_epf_test_linkup(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + + queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, + msecs_to_jiffies(1)); +} + +static void pci_epf_test_unbind(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + struct pci_epc *epc = epf->epc; + int bar; + + cancel_delayed_work(&epf_test->cmd_handler); + pci_epc_stop(epc); + for (bar = BAR_0; bar <= BAR_5; bar++) { + if (epf_test->reg[bar]) { + pci_epf_free_space(epf, epf_test->reg[bar], bar); + pci_epc_clear_bar(epc, bar); + } + } +} + +static int pci_epf_test_set_bar(struct pci_epf *epf) +{ + int flags; + int bar; + int ret; + struct pci_epf_bar *epf_bar; + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + + flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32; + if (sizeof(dma_addr_t) == 0x8) + flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + + for (bar = BAR_0; bar <= BAR_5; bar++) { + epf_bar = &epf->bar[bar]; + ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr, + epf_bar->size, flags); + if (ret) { + 
pci_epf_free_space(epf, epf_test->reg[bar], bar); + dev_err(dev, "failed to set BAR%d\n", bar); + if (bar == BAR_0) + return ret; + } + } + + return 0; +} + +static int pci_epf_test_alloc_space(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + struct device *dev = &epf->dev; + void *base; + int bar; + + base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), + BAR_0); + if (!base) { + dev_err(dev, "failed to allocate register space\n"); + return -ENOMEM; + } + epf_test->reg[0] = base; + + for (bar = BAR_1; bar <= BAR_5; bar++) { + base = pci_epf_alloc_space(epf, bar_size[bar - 1], bar); + if (!base) + dev_err(dev, "failed to allocate space for BAR%d\n", + bar); + epf_test->reg[bar] = base; + } + + return 0; +} + +static int pci_epf_test_bind(struct pci_epf *epf) +{ + int ret; + struct pci_epf_header *header = epf->header; + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + + if (WARN_ON_ONCE(!epc)) + return -EINVAL; + + ret = pci_epc_write_header(epc, header); + if (ret) { + dev_err(dev, "configuration header write failed\n"); + return ret; + } + + ret = pci_epf_test_alloc_space(epf); + if (ret) + return ret; + + ret = pci_epf_test_set_bar(epf); + if (ret) + return ret; + + ret = pci_epc_set_msi(epc, epf->msi_interrupts); + if (ret) + return ret; + + return 0; +} + +static int pci_epf_test_probe(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test; + struct device *dev = &epf->dev; + + epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); + if (!epf_test) + return -ENOMEM; + + epf->header = &test_header; + epf_test->epf = epf; + + INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); + + epf_set_drvdata(epf, epf_test); + return 0; +} + +static int pci_epf_test_remove(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + + kfree(epf_test); + return 0; +} + +static struct pci_epf_ops ops = { + .unbind = pci_epf_test_unbind, + .bind = pci_epf_test_bind, + .linkup = pci_epf_test_linkup, +}; + +static const struct pci_epf_device_id pci_epf_test_ids[] = { + { + .name = "pci_epf_test", + }, + {}, +}; + +static struct pci_epf_driver test_driver = { + .driver.name = "pci_epf_test", + .probe = pci_epf_test_probe, + .remove = pci_epf_test_remove, + .id_table = pci_epf_test_ids, + .ops = &ops, + .owner = THIS_MODULE, +}; + +static int __init pci_epf_test_init(void) +{ + int ret; + + kpcitest_workqueue = alloc_workqueue("kpcitest", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + ret = pci_epf_register_driver(&test_driver); + if (ret) { + pr_err("failed to register pci epf test driver --> %d\n", ret); + return ret; + } + + return 0; +} +module_init(pci_epf_test_init); + +static void __exit pci_epf_test_exit(void) +{ + pci_epf_unregister_driver(&test_driver); +} +module_exit(pci_epf_test_exit); + +MODULE_DESCRIPTION("PCI EPF TEST DRIVER"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c new file mode 100644 index 000000000000..424fdd6ed1ca --- /dev/null +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -0,0 +1,509 @@ +/** + * configfs to configure the PCI endpoint + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/module.h> +#include <linux/slab.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci-ep-cfs.h> + +static struct config_group *functions_group; +static struct config_group *controllers_group; + +struct pci_epf_group { + struct config_group group; + struct pci_epf *epf; +}; + +struct pci_epc_group { + struct config_group group; + struct pci_epc *epc; + bool start; + unsigned long function_num_map; +}; + +static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item) +{ + return container_of(to_config_group(item), struct pci_epf_group, group); +} + +static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item) +{ + return container_of(to_config_group(item), struct pci_epc_group, group); +} + +static ssize_t pci_epc_start_store(struct config_item *item, const char *page, + size_t len) +{ + int ret; + bool start; + struct pci_epc *epc; + struct pci_epc_group *epc_group = to_pci_epc_group(item); + + epc = epc_group->epc; + + ret = kstrtobool(page, &start); + if (ret) + return ret; + + if (!start) { + pci_epc_stop(epc); + return len; + } + + ret = pci_epc_start(epc); + if (ret) { + dev_err(&epc->dev, "failed to start endpoint controller\n"); + return -EINVAL; + } + + epc_group->start = start; + + return len; +} + +static ssize_t pci_epc_start_show(struct config_item *item, char *page) +{ + return sprintf(page, "%d\n", + to_pci_epc_group(item)->start); +} + +CONFIGFS_ATTR(pci_epc_, start); + +static struct configfs_attribute *pci_epc_attrs[] = { + &pci_epc_attr_start, + NULL, +}; + +static int pci_epc_epf_link(struct config_item *epc_item, + struct config_item *epf_item) +{ + int ret; + u32 func_no = 0; + struct pci_epc *epc; + struct pci_epf *epf; + struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); + struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); + + epc = epc_group->epc; + epf = epf_group->epf; + ret = pci_epc_add_epf(epc, epf); + if (ret) + goto err_add_epf; + + func_no = find_first_zero_bit(&epc_group->function_num_map, + sizeof(epc_group->function_num_map)); + set_bit(func_no, &epc_group->function_num_map); + epf->func_no = func_no; + + ret = pci_epf_bind(epf); + if (ret) + goto err_epf_bind; + + return 0; + +err_epf_bind: + pci_epc_remove_epf(epc, epf); + +err_add_epf: + clear_bit(func_no, &epc_group->function_num_map); + + return ret; +} + +static void pci_epc_epf_unlink(struct config_item *epc_item, + struct config_item *epf_item) +{ + struct pci_epc *epc; + struct pci_epf *epf; + struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); + struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); + + WARN_ON_ONCE(epc_group->start); + + epc = epc_group->epc; + epf = epf_group->epf; + clear_bit(epf->func_no, &epc_group->function_num_map); + pci_epf_unbind(epf); + pci_epc_remove_epf(epc, epf); +} + +static struct configfs_item_operations pci_epc_item_ops = { + .allow_link = pci_epc_epf_link, + .drop_link = pci_epc_epf_unlink, +}; + +static struct config_item_type pci_epc_type = { + .ct_item_ops = &pci_epc_item_ops, + .ct_attrs = pci_epc_attrs, + .ct_owner = THIS_MODULE, +}; + 
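The allow_link/drop_link ops above give the configfs symlink its meaning: linking a function item into a controller item allocates a free function number from function_num_map, adds the EPF to the EPC, and binds it; removing the link reverses all three steps. The bookkeeping itself is an ordinary bitmap, one bit per function number. The standalone sketch below is plain userspace C, purely illustrative of that pattern (it is not the kernel API; find_first_zero_bit()/set_bit()/clear_bit() are open-coded), sized to the eight functions a PCIe endpoint may expose:

#include <stdio.h>

/* Illustrative stand-in for the function_num_map handling in
 * pci_epc_epf_link()/pci_epc_epf_unlink(): one bit per function. */
static unsigned long function_num_map;

static int alloc_func_no(void)
{
	/* first-fit, like find_first_zero_bit() followed by set_bit() */
	for (int bit = 0; bit < 8; bit++) {	/* PCIe allows up to 8 functions */
		if (!(function_num_map & (1UL << bit))) {
			function_num_map |= 1UL << bit;
			return bit;
		}
	}
	return -1;	/* no free function number on this controller */
}

static void free_func_no(int bit)
{
	function_num_map &= ~(1UL << bit);	/* clear_bit() on unlink */
}

int main(void)
{
	int a = alloc_func_no();	/* 0: first linked function */
	int b = alloc_func_no();	/* 1: second function on the same EPC */

	free_func_no(a);		/* unlink function 0 */
	printf("a=%d b=%d next=%d\n", a, b, alloc_func_no()); /* reuses 0 */
	return 0;
}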
+struct config_group *pci_ep_cfs_add_epc_group(const char *name) +{ + int ret; + struct pci_epc *epc; + struct config_group *group; + struct pci_epc_group *epc_group; + + epc_group = kzalloc(sizeof(*epc_group), GFP_KERNEL); + if (!epc_group) { + ret = -ENOMEM; + goto err; + } + + group = &epc_group->group; + + config_group_init_type_name(group, name, &pci_epc_type); + ret = configfs_register_group(controllers_group, group); + if (ret) { + pr_err("failed to register configfs group for %s\n", name); + goto err_register_group; + } + + epc = pci_epc_get(name); + if (IS_ERR(epc)) { + ret = PTR_ERR(epc); + goto err_epc_get; + } + + epc_group->epc = epc; + + return group; + +err_epc_get: + configfs_unregister_group(group); + +err_register_group: + kfree(epc_group); + +err: + return ERR_PTR(ret); +} +EXPORT_SYMBOL(pci_ep_cfs_add_epc_group); + +void pci_ep_cfs_remove_epc_group(struct config_group *group) +{ + struct pci_epc_group *epc_group; + + if (!group) + return; + + epc_group = container_of(group, struct pci_epc_group, group); + pci_epc_put(epc_group->epc); + configfs_unregister_group(&epc_group->group); + kfree(epc_group); +} +EXPORT_SYMBOL(pci_ep_cfs_remove_epc_group); + +#define PCI_EPF_HEADER_R(_name) \ +static ssize_t pci_epf_##_name##_show(struct config_item *item, char *page) \ +{ \ + struct pci_epf *epf = to_pci_epf_group(item)->epf; \ + if (WARN_ON_ONCE(!epf->header)) \ + return -EINVAL; \ + return sprintf(page, "0x%04x\n", epf->header->_name); \ +} + +#define PCI_EPF_HEADER_W_u32(_name) \ +static ssize_t pci_epf_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + u32 val; \ + int ret; \ + struct pci_epf *epf = to_pci_epf_group(item)->epf; \ + if (WARN_ON_ONCE(!epf->header)) \ + return -EINVAL; \ + ret = kstrtou32(page, 0, &val); \ + if (ret) \ + return ret; \ + epf->header->_name = val; \ + return len; \ +} + +#define PCI_EPF_HEADER_W_u16(_name) \ +static ssize_t pci_epf_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + u16 val; \ + int ret; \ + struct pci_epf *epf = to_pci_epf_group(item)->epf; \ + if (WARN_ON_ONCE(!epf->header)) \ + return -EINVAL; \ + ret = kstrtou16(page, 0, &val); \ + if (ret) \ + return ret; \ + epf->header->_name = val; \ + return len; \ +} + +#define PCI_EPF_HEADER_W_u8(_name) \ +static ssize_t pci_epf_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + u8 val; \ + int ret; \ + struct pci_epf *epf = to_pci_epf_group(item)->epf; \ + if (WARN_ON_ONCE(!epf->header)) \ + return -EINVAL; \ + ret = kstrtou8(page, 0, &val); \ + if (ret) \ + return ret; \ + epf->header->_name = val; \ + return len; \ +} + +static ssize_t pci_epf_msi_interrupts_store(struct config_item *item, + const char *page, size_t len) +{ + u8 val; + int ret; + + ret = kstrtou8(page, 0, &val); + if (ret) + return ret; + + to_pci_epf_group(item)->epf->msi_interrupts = val; + + return len; +} + +static ssize_t pci_epf_msi_interrupts_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%d\n", + to_pci_epf_group(item)->epf->msi_interrupts); +} + +PCI_EPF_HEADER_R(vendorid) +PCI_EPF_HEADER_W_u16(vendorid) + +PCI_EPF_HEADER_R(deviceid) +PCI_EPF_HEADER_W_u16(deviceid) + +PCI_EPF_HEADER_R(revid) +PCI_EPF_HEADER_W_u8(revid) + +PCI_EPF_HEADER_R(progif_code) +PCI_EPF_HEADER_W_u8(progif_code) + +PCI_EPF_HEADER_R(subclass_code) +PCI_EPF_HEADER_W_u8(subclass_code) + +PCI_EPF_HEADER_R(baseclass_code) +PCI_EPF_HEADER_W_u8(baseclass_code) + +PCI_EPF_HEADER_R(cache_line_size) 
+PCI_EPF_HEADER_W_u8(cache_line_size) + +PCI_EPF_HEADER_R(subsys_vendor_id) +PCI_EPF_HEADER_W_u16(subsys_vendor_id) + +PCI_EPF_HEADER_R(subsys_id) +PCI_EPF_HEADER_W_u16(subsys_id) + +PCI_EPF_HEADER_R(interrupt_pin) +PCI_EPF_HEADER_W_u8(interrupt_pin) + +CONFIGFS_ATTR(pci_epf_, vendorid); +CONFIGFS_ATTR(pci_epf_, deviceid); +CONFIGFS_ATTR(pci_epf_, revid); +CONFIGFS_ATTR(pci_epf_, progif_code); +CONFIGFS_ATTR(pci_epf_, subclass_code); +CONFIGFS_ATTR(pci_epf_, baseclass_code); +CONFIGFS_ATTR(pci_epf_, cache_line_size); +CONFIGFS_ATTR(pci_epf_, subsys_vendor_id); +CONFIGFS_ATTR(pci_epf_, subsys_id); +CONFIGFS_ATTR(pci_epf_, interrupt_pin); +CONFIGFS_ATTR(pci_epf_, msi_interrupts); + +static struct configfs_attribute *pci_epf_attrs[] = { + &pci_epf_attr_vendorid, + &pci_epf_attr_deviceid, + &pci_epf_attr_revid, + &pci_epf_attr_progif_code, + &pci_epf_attr_subclass_code, + &pci_epf_attr_baseclass_code, + &pci_epf_attr_cache_line_size, + &pci_epf_attr_subsys_vendor_id, + &pci_epf_attr_subsys_id, + &pci_epf_attr_interrupt_pin, + &pci_epf_attr_msi_interrupts, + NULL, +}; + +static void pci_epf_release(struct config_item *item) +{ + struct pci_epf_group *epf_group = to_pci_epf_group(item); + + pci_epf_destroy(epf_group->epf); + kfree(epf_group); +} + +static struct configfs_item_operations pci_epf_ops = { + .release = pci_epf_release, +}; + +static struct config_item_type pci_epf_type = { + .ct_item_ops = &pci_epf_ops, + .ct_attrs = pci_epf_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *pci_epf_make(struct config_group *group, + const char *name) +{ + struct pci_epf_group *epf_group; + struct pci_epf *epf; + + epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL); + if (!epf_group) + return ERR_PTR(-ENOMEM); + + config_group_init_type_name(&epf_group->group, name, &pci_epf_type); + + epf = pci_epf_create(group->cg_item.ci_name); + if (IS_ERR(epf)) { + pr_err("failed to create endpoint function device\n"); + return ERR_PTR(-EINVAL); + } + + epf_group->epf = epf; + + return &epf_group->group; +} + +static void pci_epf_drop(struct config_group *group, struct config_item *item) +{ + config_item_put(item); +} + +static struct configfs_group_operations pci_epf_group_ops = { + .make_group = &pci_epf_make, + .drop_item = &pci_epf_drop, +}; + +static struct config_item_type pci_epf_group_type = { + .ct_group_ops = &pci_epf_group_ops, + .ct_owner = THIS_MODULE, +}; + +struct config_group *pci_ep_cfs_add_epf_group(const char *name) +{ + struct config_group *group; + + group = configfs_register_default_group(functions_group, name, + &pci_epf_group_type); + if (IS_ERR(group)) + pr_err("failed to register configfs group for %s function\n", + name); + + return group; +} +EXPORT_SYMBOL(pci_ep_cfs_add_epf_group); + +void pci_ep_cfs_remove_epf_group(struct config_group *group) +{ + if (IS_ERR_OR_NULL(group)) + return; + + configfs_unregister_default_group(group); +} +EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group); + +static struct config_item_type pci_functions_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type pci_controllers_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type pci_ep_type = { + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem pci_ep_cfs_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "pci_ep", + .ci_type = &pci_ep_type, + }, + }, + .su_mutex = __MUTEX_INITIALIZER(pci_ep_cfs_subsys.su_mutex), +}; + +static int __init pci_ep_cfs_init(void) +{ + int ret; + struct config_group *root = &pci_ep_cfs_subsys.su_group; + + 
config_group_init(root); + + ret = configfs_register_subsystem(&pci_ep_cfs_subsys); + if (ret) { + pr_err("Error %d while registering subsystem %s\n", + ret, root->cg_item.ci_namebuf); + goto err; + } + + functions_group = configfs_register_default_group(root, "functions", + &pci_functions_type); + if (IS_ERR(functions_group)) { + ret = PTR_ERR(functions_group); + pr_err("Error %d while registering functions group\n", + ret); + goto err_functions_group; + } + + controllers_group = + configfs_register_default_group(root, "controllers", + &pci_controllers_type); + if (IS_ERR(controllers_group)) { + ret = PTR_ERR(controllers_group); + pr_err("Error %d while registering controllers group\n", + ret); + goto err_controllers_group; + } + + return 0; + +err_controllers_group: + configfs_unregister_default_group(functions_group); + +err_functions_group: + configfs_unregister_subsystem(&pci_ep_cfs_subsys); + +err: + return ret; +} +module_init(pci_ep_cfs_init); + +static void __exit pci_ep_cfs_exit(void) +{ + configfs_unregister_default_group(controllers_group); + configfs_unregister_default_group(functions_group); + configfs_unregister_subsystem(&pci_ep_cfs_subsys); +} +module_exit(pci_ep_cfs_exit); + +MODULE_DESCRIPTION("PCI EP CONFIGFS"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c new file mode 100644 index 000000000000..caa7be10e473 --- /dev/null +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -0,0 +1,580 @@ +/** + * PCI Endpoint *Controller* (EPC) library + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci-ep-cfs.h> + +static struct class *pci_epc_class; + +static void devm_pci_epc_release(struct device *dev, void *res) +{ + struct pci_epc *epc = *(struct pci_epc **)res; + + pci_epc_destroy(epc); +} + +static int devm_pci_epc_match(struct device *dev, void *res, void *match_data) +{ + struct pci_epc **epc = res; + + return *epc == match_data; +} + +/** + * pci_epc_put() - release the PCI endpoint controller + * @epc: epc returned by pci_epc_get() + * + * Release the refcount the caller obtained by invoking pci_epc_get() + */ +void pci_epc_put(struct pci_epc *epc) +{ + if (!epc || IS_ERR(epc)) + return; + + module_put(epc->ops->owner); + put_device(&epc->dev); +} +EXPORT_SYMBOL_GPL(pci_epc_put); + +/** + * pci_epc_get() - get the PCI endpoint controller + * @epc_name: device name of the endpoint controller + * + * Invoke to get struct pci_epc * corresponding to the device name of the + * endpoint controller + */ +struct pci_epc *pci_epc_get(const char *epc_name) +{ + int ret = -EINVAL; + struct pci_epc *epc; + struct device *dev; + struct class_dev_iter iter; + + class_dev_iter_init(&iter, pci_epc_class, NULL, NULL); + while ((dev = class_dev_iter_next(&iter))) { + if (strcmp(epc_name, dev_name(dev))) + continue; + + epc = to_pci_epc(dev); + if (!try_module_get(epc->ops->owner)) { + ret = -EINVAL; + goto err; + } + + class_dev_iter_exit(&iter); + get_device(&epc->dev); + return epc; + } + +err: + class_dev_iter_exit(&iter); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(pci_epc_get); + +/** + * pci_epc_stop() - stop the PCI link + * @epc: the link of the EPC device that has to be stopped + * + * Invoke to stop the PCI link + */ +void pci_epc_stop(struct pci_epc *epc) +{ + unsigned long flags; + + if (IS_ERR(epc) || !epc->ops->stop) + return; + + spin_lock_irqsave(&epc->lock, flags); + epc->ops->stop(epc); + spin_unlock_irqrestore(&epc->lock, flags); +} +EXPORT_SYMBOL_GPL(pci_epc_stop); + +/** + * pci_epc_start() - start the PCI link + * @epc: the link of *this* EPC device has to be started + * + * Invoke to start the PCI link + */ +int pci_epc_start(struct pci_epc *epc) +{ + int ret; + unsigned long flags; + + if (IS_ERR(epc)) + return -EINVAL; + + if (!epc->ops->start) + return 0; + + spin_lock_irqsave(&epc->lock, flags); + ret = epc->ops->start(epc); + spin_unlock_irqrestore(&epc->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epc_start); + +/** + * pci_epc_raise_irq() - interrupt the host system + * @epc: the EPC device which has to interrupt the host + * @type: specify the type of interrupt; legacy or MSI + * @interrupt_num: the MSI interrupt number + * + * Invoke to raise an MSI or legacy interrupt + */ +int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, + u8 interrupt_num) +{ + int ret; + unsigned long flags; + + if (IS_ERR(epc)) + return -EINVAL; + + if (!epc->ops->raise_irq) + return 0; + + spin_lock_irqsave(&epc->lock, flags); + ret = epc->ops->raise_irq(epc, type, interrupt_num); + spin_unlock_irqrestore(&epc->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epc_raise_irq); + +/** + * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated + * @epc: the EPC device to which MSI interrupts were requested + * + * Invoke to get the number of MSI interrupts allocated by the RC + */ +int pci_epc_get_msi(struct pci_epc *epc) +{ + int interrupt; + 
unsigned long flags; + + if (IS_ERR(epc)) + return 0; + + if (!epc->ops->get_msi) + return 0; + + spin_lock_irqsave(&epc->lock, flags); + interrupt = epc->ops->get_msi(epc); + spin_unlock_irqrestore(&epc->lock, flags); + + if (interrupt < 0) + return 0; + + interrupt = 1 << interrupt; + + return interrupt; +} +EXPORT_SYMBOL_GPL(pci_epc_get_msi); + +/** + * pci_epc_set_msi() - set the number of MSI interrupt numbers required + * @epc: the EPC device on which MSI has to be configured + * @interrupts: number of MSI interrupts required by the EPF + * + * Invoke to set the required number of MSI interrupts. + */ +int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts) +{ + int ret; + u8 encode_int; + unsigned long flags; + + if (IS_ERR(epc)) + return -EINVAL; + + if (!epc->ops->set_msi) + return 0; + + encode_int = order_base_2(interrupts); + + spin_lock_irqsave(&epc->lock, flags); + ret = epc->ops->set_msi(epc, encode_int); + spin_unlock_irqrestore(&epc->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epc_set_msi); + +/** + * pci_epc_unmap_addr() - unmap CPU address from PCI address + * @epc: the EPC device on which address is allocated + * @phys_addr: physical address of the local system + * + * Invoke to unmap the CPU address from PCI address. + */ +void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr) +{ + unsigned long flags; + + if (IS_ERR(epc)) + return; + + if (!epc->ops->unmap_addr) + return; + + spin_lock_irqsave(&epc->lock, flags); + epc->ops->unmap_addr(epc, phys_addr); + spin_unlock_irqrestore(&epc->lock, flags); +} +EXPORT_SYMBOL_GPL(pci_epc_unmap_addr); + +/** + * pci_epc_map_addr() - map CPU address to PCI address + * @epc: the EPC device on which address is allocated + * @phys_addr: physical address of the local system + * @pci_addr: PCI address to which the physical address should be mapped + * @size: the size of the allocation + * + * Invoke to map CPU address with PCI address. + */ +int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, + u64 pci_addr, size_t size) +{ + int ret; + unsigned long flags; + + if (IS_ERR(epc)) + return -EINVAL; + + if (!epc->ops->map_addr) + return 0; + + spin_lock_irqsave(&epc->lock, flags); + ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size); + spin_unlock_irqrestore(&epc->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epc_map_addr); + +/** + * pci_epc_clear_bar() - reset the BAR + * @epc: the EPC device for which the BAR has to be cleared + * @bar: the BAR number that has to be reset + * + * Invoke to reset the BAR of the endpoint device. + */ +void pci_epc_clear_bar(struct pci_epc *epc, int bar) +{ + unsigned long flags; + + if (IS_ERR(epc)) + return; + + if (!epc->ops->clear_bar) + return; + + spin_lock_irqsave(&epc->lock, flags); + epc->ops->clear_bar(epc, bar); + spin_unlock_irqrestore(&epc->lock, flags); +} +EXPORT_SYMBOL_GPL(pci_epc_clear_bar); + +/** + * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space + * @epc: the EPC device on which BAR has to be configured + * @bar: the BAR number that has to be configured + * @size: the size of the addr space + * @flags: specify memory allocation/io allocation/32bit address/64 bit address + * + * Invoke to configure the BAR of the endpoint device. 
+ */ +int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, + dma_addr_t bar_phys, size_t size, int flags) +{ + int ret; + unsigned long irq_flags; + + if (IS_ERR(epc)) + return -EINVAL; + + if (!epc->ops->set_bar) + return 0; + + spin_lock_irqsave(&epc->lock, irq_flags); + ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags); + spin_unlock_irqrestore(&epc->lock, irq_flags); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epc_set_bar); + +/** + * pci_epc_write_header() - write standard configuration header + * @epc: the EPC device to which the configuration header should be written + * @header: standard configuration header fields + * + * Invoke to write the configuration header to the endpoint controller. Every + * endpoint controller will have a dedicated location to which the standard + * configuration header would be written. The callback function should write + * the header fields to this dedicated location. + */ +int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header) +{ + int ret; + unsigned long flags; + + if (IS_ERR(epc)) + return -EINVAL; + + if (!epc->ops->write_header) + return 0; + + spin_lock_irqsave(&epc->lock, flags); + ret = epc->ops->write_header(epc, header); + spin_unlock_irqrestore(&epc->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epc_write_header); + +/** + * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller + * @epc: the EPC device to which the endpoint function should be added + * @epf: the endpoint function to be added + * + * A PCI endpoint device can have one or more functions. In the case of PCIe, + * the specification allows up to 8 PCIe endpoint functions. Invoke + * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller. + */ +int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) +{ + unsigned long flags; + + if (epf->epc) + return -EBUSY; + + if (IS_ERR(epc)) + return -EINVAL; + + if (epf->func_no > epc->max_functions - 1) + return -EINVAL; + + epf->epc = epc; + dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask); + epf->dev.dma_mask = epc->dev.dma_mask; + + spin_lock_irqsave(&epc->lock, flags); + list_add_tail(&epf->list, &epc->pci_epf); + spin_unlock_irqrestore(&epc->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(pci_epc_add_epf); + +/** + * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller + * @epc: the EPC device from which the endpoint function should be removed + * @epf: the endpoint function to be removed + * + * Invoke to remove PCI endpoint function from the endpoint controller. + */ +void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf) +{ + unsigned long flags; + + if (!epc || IS_ERR(epc)) + return; + + spin_lock_irqsave(&epc->lock, flags); + list_del(&epf->list); + spin_unlock_irqrestore(&epc->lock, flags); +} +EXPORT_SYMBOL_GPL(pci_epc_remove_epf); + +/** + * pci_epc_linkup() - Notify the EPF device that the EPC device has established + * a connection with the Root Complex. + * @epc: the EPC device which has established link with the host + * + * Invoke to notify the EPF device that the EPC device has established a + * connection with the Root Complex. 
+ */ +void pci_epc_linkup(struct pci_epc *epc) +{ + unsigned long flags; + struct pci_epf *epf; + + if (!epc || IS_ERR(epc)) + return; + + spin_lock_irqsave(&epc->lock, flags); + list_for_each_entry(epf, &epc->pci_epf, list) + pci_epf_linkup(epf); + spin_unlock_irqrestore(&epc->lock, flags); +} +EXPORT_SYMBOL_GPL(pci_epc_linkup); + +/** + * pci_epc_destroy() - destroy the EPC device + * @epc: the EPC device that has to be destroyed + * + * Invoke to destroy the PCI EPC device + */ +void pci_epc_destroy(struct pci_epc *epc) +{ + pci_ep_cfs_remove_epc_group(epc->group); + device_unregister(&epc->dev); + kfree(epc); +} +EXPORT_SYMBOL_GPL(pci_epc_destroy); + +/** + * devm_pci_epc_destroy() - destroy the EPC device + * @dev: device that wants to destroy the EPC + * @epc: the EPC device that has to be destroyed + * + * Invoke to destroy the devres associated with this + * pci_epc and destroy the EPC device. + */ +void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc) +{ + int r; + + r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match, + epc); + dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n"); +} +EXPORT_SYMBOL_GPL(devm_pci_epc_destroy); + +/** + * __pci_epc_create() - create a new endpoint controller (EPC) device + * @dev: device that is creating the new EPC + * @ops: function pointers for performing EPC operations + * @owner: the owner of the module that creates the EPC device + * + * Invoke to create a new EPC device and add it to pci_epc class. + */ +struct pci_epc * +__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, + struct module *owner) +{ + int ret; + struct pci_epc *epc; + + if (WARN_ON(!dev)) { + ret = -EINVAL; + goto err_ret; + } + + epc = kzalloc(sizeof(*epc), GFP_KERNEL); + if (!epc) { + ret = -ENOMEM; + goto err_ret; + } + + spin_lock_init(&epc->lock); + INIT_LIST_HEAD(&epc->pci_epf); + + device_initialize(&epc->dev); + dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask); + epc->dev.class = pci_epc_class; + epc->dev.dma_mask = dev->dma_mask; + epc->ops = ops; + + ret = dev_set_name(&epc->dev, "%s", dev_name(dev)); + if (ret) + goto put_dev; + + ret = device_add(&epc->dev); + if (ret) + goto put_dev; + + epc->group = pci_ep_cfs_add_epc_group(dev_name(dev)); + + return epc; + +put_dev: + put_device(&epc->dev); + kfree(epc); + +err_ret: + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(__pci_epc_create); + +/** + * __devm_pci_epc_create() - create a new endpoint controller (EPC) device + * @dev: device that is creating the new EPC + * @ops: function pointers for performing EPC operations + * @owner: the owner of the module that creates the EPC device + * + * Invoke to create a new EPC device and add it to pci_epc class. + * While at that, it also associates the device with the pci_epc using devres. + * On driver detach, release function is invoked on the devres data, + * then, devres data is freed. 
+ */ +struct pci_epc * +__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, + struct module *owner) +{ + struct pci_epc **ptr, *epc; + + ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + epc = __pci_epc_create(dev, ops, owner); + if (!IS_ERR(epc)) { + *ptr = epc; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return epc; +} +EXPORT_SYMBOL_GPL(__devm_pci_epc_create); + +static int __init pci_epc_init(void) +{ + pci_epc_class = class_create(THIS_MODULE, "pci_epc"); + if (IS_ERR(pci_epc_class)) { + pr_err("failed to create pci epc class --> %ld\n", + PTR_ERR(pci_epc_class)); + return PTR_ERR(pci_epc_class); + } + + return 0; +} +module_init(pci_epc_init); + +static void __exit pci_epc_exit(void) +{ + class_destroy(pci_epc_class); +} +module_exit(pci_epc_exit); + +MODULE_DESCRIPTION("PCI EPC Library"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c new file mode 100644 index 000000000000..3a94cc1caf22 --- /dev/null +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -0,0 +1,143 @@ +/** + * PCI Endpoint *Controller* Address Space Management + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include <linux/pci-epc.h> + +/** + * pci_epc_mem_init() - initialize the pci_epc_mem structure + * @epc: the EPC device that invoked pci_epc_mem_init + * @phys_base: the physical address of the base + * @size: the size of the address space + * + * Invoke to initialize the pci_epc_mem structure used by the + * endpoint functions to allocate mapped PCI address. + */ +int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size) +{ + int ret; + struct pci_epc_mem *mem; + unsigned long *bitmap; + int pages = size >> PAGE_SHIFT; + int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) { + ret = -ENOMEM; + goto err; + } + + bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!bitmap) { + ret = -ENOMEM; + goto err_mem; + } + + mem->bitmap = bitmap; + mem->phys_base = phys_base; + mem->pages = pages; + mem->size = size; + + epc->mem = mem; + + return 0; + +err_mem: + kfree(mem); + +err: +return ret; +} +EXPORT_SYMBOL_GPL(pci_epc_mem_init); + +/** + * pci_epc_mem_exit() - cleanup the pci_epc_mem structure + * @epc: the EPC device that invoked pci_epc_mem_exit + * + * Invoke to cleanup the pci_epc_mem structure allocated in + * pci_epc_mem_init(). 
+ */ +void pci_epc_mem_exit(struct pci_epc *epc) +{ + struct pci_epc_mem *mem = epc->mem; + + epc->mem = NULL; + kfree(mem->bitmap); + kfree(mem); +} +EXPORT_SYMBOL_GPL(pci_epc_mem_exit); + +/** + * pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space + * @epc: the EPC device on which memory has to be allocated + * @phys_addr: populate the allocated physical address here + * @size: the size of the address space that has to be allocated + * + * Invoke to allocate memory address from the EPC address space. This + * is usually done to map the remote RC address into the local system. + */ +void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, + phys_addr_t *phys_addr, size_t size) +{ + int pageno; + void __iomem *virt_addr; + struct pci_epc_mem *mem = epc->mem; + int order = get_order(size); + + pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); + if (pageno < 0) + return NULL; + + *phys_addr = mem->phys_base + (pageno << PAGE_SHIFT); + virt_addr = ioremap(*phys_addr, size); + if (!virt_addr) + bitmap_release_region(mem->bitmap, pageno, order); + + return virt_addr; +} +EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); + +/** + * pci_epc_mem_free_addr() - free the allocated memory address + * @epc: the EPC device on which memory was allocated + * @phys_addr: the allocated physical address + * @virt_addr: virtual address of the allocated mem space + * @size: the size of the allocated address space + * + * Invoke to free the memory allocated using pci_epc_mem_alloc_addr. + */ +void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, + void __iomem *virt_addr, size_t size) +{ + int pageno; + int order = get_order(size); + struct pci_epc_mem *mem = epc->mem; + + iounmap(virt_addr); + pageno = (phys_addr - mem->phys_base) >> PAGE_SHIFT; + bitmap_release_region(mem->bitmap, pageno, order); +} +EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); + +MODULE_DESCRIPTION("PCI EPC Address Space Management"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c new file mode 100644 index 000000000000..6877d6a5bcc9 --- /dev/null +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -0,0 +1,359 @@ +/** + * PCI Endpoint *Function* (EPF) library + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci-ep-cfs.h> + +static struct bus_type pci_epf_bus_type; +static struct device_type pci_epf_type; + +/** + * pci_epf_linkup() - Notify the function driver that the EPC device has + * established a connection with the Root Complex. 
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
new file mode 100644
index 000000000000..6877d6a5bcc9
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -0,0 +1,359 @@
+/**
+ * PCI Endpoint *Function* (EPF) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct bus_type pci_epf_bus_type;
+static struct device_type pci_epf_type;
+
+/**
+ * pci_epf_linkup() - Notify the function driver that the EPC device has
+ *                    established a connection with the Root Complex
+ * @epf: the EPF device bound to the EPC device which has established
+ *       the connection with the host
+ *
+ * Invoke to notify the function driver that the EPC device has established
+ * a connection with the Root Complex.
+ */
+void pci_epf_linkup(struct pci_epf *epf)
+{
+        if (!epf->driver) {
+                dev_WARN(&epf->dev, "epf device not bound to driver\n");
+                return;
+        }
+
+        epf->driver->ops->linkup(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_linkup);
+
+/**
+ * pci_epf_unbind() - Notify the function driver that the binding between the
+ *                    EPF device and EPC device has been lost
+ * @epf: the EPF device which has lost the binding with the EPC device
+ *
+ * Invoke to notify the function driver that the binding between the EPF device
+ * and EPC device has been lost.
+ */
+void pci_epf_unbind(struct pci_epf *epf)
+{
+        if (!epf->driver) {
+                dev_WARN(&epf->dev, "epf device not bound to driver\n");
+                return;
+        }
+
+        epf->driver->ops->unbind(epf);
+        module_put(epf->driver->owner);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unbind);
+
+/**
+ * pci_epf_bind() - Notify the function driver that the EPF device has been
+ *                  bound to an EPC device
+ * @epf: the EPF device which has been bound to the EPC device
+ *
+ * Invoke to notify the function driver that it has been bound to an EPC
+ * device.
+ */
+int pci_epf_bind(struct pci_epf *epf)
+{
+        if (!epf->driver) {
+                dev_WARN(&epf->dev, "epf device not bound to driver\n");
+                return -EINVAL;
+        }
+
+        if (!try_module_get(epf->driver->owner))
+                return -EAGAIN;
+
+        return epf->driver->ops->bind(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_bind);
+
+/**
+ * pci_epf_free_space() - free the allocated PCI EPF register space
+ * @epf: the EPF device from which the register space was allocated
+ * @addr: the virtual address of the PCI EPF register space
+ * @bar: the BAR number corresponding to the register space
+ *
+ * Invoke to free the allocated PCI EPF register space.
+ */
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+{
+        struct device *dev = &epf->dev;
+
+        if (!addr)
+                return;
+
+        dma_free_coherent(dev, epf->bar[bar].size, addr,
+                          epf->bar[bar].phys_addr);
+
+        epf->bar[bar].phys_addr = 0;
+        epf->bar[bar].size = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_space);
+
+/**
+ * pci_epf_alloc_space() - allocate memory for the PCI EPF register space
+ * @epf: the EPF device for which the register space has to be allocated
+ * @size: the size of the memory that has to be allocated
+ * @bar: the BAR number corresponding to the allocated register space
+ *
+ * Invoke to allocate memory for the PCI EPF register space.
+ */
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
+{
+        void *space;
+        struct device *dev = &epf->dev;
+        dma_addr_t phys_addr;
+
+        if (size < 128)
+                size = 128;
+        size = roundup_pow_of_two(size);
+
+        space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+        if (!space) {
+                dev_err(dev, "failed to allocate mem space\n");
+                return NULL;
+        }
+
+        epf->bar[bar].phys_addr = phys_addr;
+        epf->bar[bar].size = size;
+
+        return space;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
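A function driver typically calls pci_epf_alloc_space() from its bind() callback so the BAR is backed before the controller is programmed. A minimal sketch, assuming hypothetical example_* callbacks and the drvdata helpers from pci-epf.h:

#include <linux/pci-epf.h>
#include <linux/string.h>

/* Hypothetical bind/unbind pair, not part of this patch. */
static int example_epf_bind(struct pci_epf *epf)
{
        void *base;

        /* Back BAR0 with at least 512 bytes of coherent memory */
        base = pci_epf_alloc_space(epf, 512, BAR_0);
        if (!base)
                return -ENOMEM;

        /* epf->bar[BAR_0].phys_addr and .size now describe the buffer */
        memset(base, 0, 512);
        epf_set_drvdata(epf, base);

        return 0;
}

static void example_epf_unbind(struct pci_epf *epf)
{
        pci_epf_free_space(epf, epf_get_drvdata(epf), BAR_0);
}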
+
+/**
+ * pci_epf_unregister_driver() - unregister the PCI EPF driver
+ * @driver: the PCI EPF driver that has to be unregistered
+ *
+ * Invoke to unregister the PCI EPF driver.
+ */
+void pci_epf_unregister_driver(struct pci_epf_driver *driver)
+{
+        pci_ep_cfs_remove_epf_group(driver->group);
+        driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+
+/**
+ * __pci_epf_register_driver() - register a new PCI EPF driver
+ * @driver: structure representing the PCI EPF driver
+ * @owner: the owner of the module that registers the PCI EPF driver
+ *
+ * Invoke to register a new PCI EPF driver.
+ */
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+                              struct module *owner)
+{
+        int ret;
+
+        if (!driver->ops)
+                return -EINVAL;
+
+        if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
+                return -EINVAL;
+
+        driver->driver.bus = &pci_epf_bus_type;
+        driver->driver.owner = owner;
+
+        ret = driver_register(&driver->driver);
+        if (ret)
+                return ret;
+
+        driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(__pci_epf_register_driver);
+
+/**
+ * pci_epf_destroy() - destroy the created PCI EPF device
+ * @epf: the PCI EPF device that has to be destroyed.
+ *
+ * Invoke to destroy the PCI EPF device created by invoking pci_epf_create().
+ */
+void pci_epf_destroy(struct pci_epf *epf)
+{
+        device_unregister(&epf->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epf_destroy);
+
+/**
+ * pci_epf_create() - create a new PCI EPF device
+ * @name: the name of the PCI EPF device. This name will be used to bind the
+ *        EPF device to an EPF driver
+ *
+ * Invoke to create a new PCI EPF device by providing the name of the function
+ * device.
+ */
+struct pci_epf *pci_epf_create(const char *name)
+{
+        int ret;
+        struct pci_epf *epf;
+        struct device *dev;
+        char *func_name;
+        char *buf;
+
+        epf = kzalloc(sizeof(*epf), GFP_KERNEL);
+        if (!epf) {
+                ret = -ENOMEM;
+                goto err_ret;
+        }
+
+        buf = kstrdup(name, GFP_KERNEL);
+        if (!buf) {
+                ret = -ENOMEM;
+                goto free_epf;
+        }
+
+        func_name = buf;
+        buf = strchrnul(buf, '.');
+        *buf = '\0';
+
+        epf->name = kstrdup(func_name, GFP_KERNEL);
+        if (!epf->name) {
+                ret = -ENOMEM;
+                goto free_func_name;
+        }
+
+        dev = &epf->dev;
+        device_initialize(dev);
+        dev->bus = &pci_epf_bus_type;
+        dev->type = &pci_epf_type;
+
+        ret = dev_set_name(dev, "%s", name);
+        if (ret)
+                goto put_dev;
+
+        ret = device_add(dev);
+        if (ret)
+                goto put_dev;
+
+        kfree(func_name);
+        return epf;
+
+put_dev:
+        put_device(dev);
+        kfree(epf->name);
+
+free_func_name:
+        kfree(func_name);
+
+free_epf:
+        kfree(epf);
+
+err_ret:
+        return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epf_create);
+
+static void pci_epf_dev_release(struct device *dev)
+{
+        struct pci_epf *epf = to_pci_epf(dev);
+
+        kfree(epf->name);
+        kfree(epf);
+}
+
+static struct device_type pci_epf_type = {
+        .release        = pci_epf_dev_release,
+};
+
+static int
+pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
+{
+        while (id->name[0]) {
+                if (strcmp(epf->name, id->name) == 0)
+                        return true;
+                id++;
+        }
+
+        return false;
+}
+
+static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+{
+        struct pci_epf *epf = to_pci_epf(dev);
+        struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+
+        if (driver->id_table)
+                return pci_epf_match_id(driver->id_table, epf);
+
+        return !strcmp(epf->name, drv->name);
+}
+
+static int pci_epf_device_probe(struct device *dev)
+{
+        struct pci_epf *epf = to_pci_epf(dev);
+        struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+        if (!driver->probe)
+                return -ENODEV;
+
+        epf->driver = driver;
+
+        return driver->probe(epf);
+}
+
+static int
pci_epf_device_remove(struct device *dev) +{ + int ret; + struct pci_epf *epf = to_pci_epf(dev); + struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver); + + ret = driver->remove(epf); + epf->driver = NULL; + + return ret; +} + +static struct bus_type pci_epf_bus_type = { + .name = "pci-epf", + .match = pci_epf_device_match, + .probe = pci_epf_device_probe, + .remove = pci_epf_device_remove, +}; + +static int __init pci_epf_init(void) +{ + int ret; + + ret = bus_register(&pci_epf_bus_type); + if (ret) { + pr_err("failed to register pci epf bus --> %d\n", ret); + return ret; + } + + return 0; +} +module_init(pci_epf_init); + +static void __exit pci_epf_exit(void) +{ + bus_unregister(&pci_epf_bus_type); +} +module_exit(pci_epf_exit); + +MODULE_DESCRIPTION("PCI EPF Library"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index f7c1d4d5c665..7f47cd5e10a5 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -27,6 +27,12 @@ config PCIE_XILINX_NWL or End Point. The current option selection will only support root port enabling. +config PCI_FTPCI100 + bool "Faraday Technology FTPCI100 PCI controller" + depends on OF + depends on ARM + default ARCH_GEMINI + config PCI_TEGRA bool "NVIDIA Tegra PCIe controller" depends on ARCH_TEGRA @@ -95,6 +101,7 @@ config PCI_VERSATILE config PCIE_IPROC tristate + select PCI_DOMAINS help This enables the iProc PCIe core controller support for Broadcom's iProc family of SoCs. An appropriate bus interface driver needs @@ -115,7 +122,6 @@ config PCIE_IPROC_BCMA depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST) select PCIE_IPROC select BCMA - select PCI_DOMAINS default ARCH_BCM_5301X help Say Y here if you want to use the Broadcom iProc PCIe controller @@ -164,7 +170,7 @@ config PCI_HOST_THUNDER_ECAM Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. 
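Stepping back to the endpoint library that closes above: function devices are normally instantiated from user space through the configfs interface added elsewhere in this series, but in-kernel the lifecycle is simply pci_epf_create()/pci_epf_destroy(). A hedged sketch (illustrative only; it assumes the "pci_epf_test" function driver from this merge is loaded):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/pci-epf.h>

static struct pci_epf *example_epf;

static int __init example_init(void)
{
        /* The name before '.' ("pci_epf_test") selects the EPF driver */
        example_epf = pci_epf_create("pci_epf_test.0");
        return PTR_ERR_OR_ZERO(example_epf);
}
module_init(example_init);

static void __exit example_exit(void)
{
        pci_epf_destroy(example_epf);
}
module_exit(example_exit);

MODULE_LICENSE("GPL v2");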
config PCIE_ROCKCHIP - bool "Rockchip PCIe controller" + tristate "Rockchip PCIe controller" depends on ARCH_ROCKCHIP || COMPILE_TEST depends on OF depends on PCI_MSI_IRQ_DOMAIN diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 4d3686676cc3..cab879578003 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -1,3 +1,4 @@ +obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 4fce494271cc..37d0bcd31f8a 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -200,10 +200,12 @@ struct advk_pcie { struct list_head resources; struct irq_domain *irq_domain; struct irq_chip irq_chip; - struct msi_controller msi; struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; + struct irq_chip msi_bottom_irq_chip; struct irq_chip msi_irq_chip; - DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM); + struct msi_domain_info msi_domain_info; + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); struct mutex msi_used_lock; u16 msi_msg; int root_bus_nr; @@ -545,94 +547,64 @@ static struct pci_ops advk_pcie_ops = { .write = advk_pcie_wr_conf, }; -static int advk_pcie_alloc_msi(struct advk_pcie *pcie) +static void advk_msi_irq_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) { - int hwirq; + struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); + phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); - mutex_lock(&pcie->msi_used_lock); - hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM); - if (hwirq >= MSI_IRQ_NUM) - hwirq = -ENOSPC; - else - set_bit(hwirq, pcie->msi_irq_in_use); - mutex_unlock(&pcie->msi_used_lock); - - return hwirq; + msg->address_lo = lower_32_bits(msi_msg); + msg->address_hi = upper_32_bits(msi_msg); + msg->data = data->irq; } -static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq) +static int advk_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) { - struct device *dev = &pcie->pdev->dev; - - mutex_lock(&pcie->msi_used_lock); - if (!test_bit(hwirq, pcie->msi_irq_in_use)) - dev_err(dev, "trying to free unused MSI#%d\n", hwirq); - else - clear_bit(hwirq, pcie->msi_irq_in_use); - mutex_unlock(&pcie->msi_used_lock); + return -EINVAL; } -static int advk_pcie_setup_msi_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) +static int advk_msi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *args) { - struct advk_pcie *pcie = pdev->bus->sysdata; - struct msi_msg msg; - int virq, hwirq; - phys_addr_t msi_msg_phys; - - /* We support MSI, but not MSI-X */ - if (desc->msi_attrib.is_msix) - return -EINVAL; - - hwirq = advk_pcie_alloc_msi(pcie); - if (hwirq < 0) - return hwirq; + struct advk_pcie *pcie = domain->host_data; + int hwirq, i; - virq = irq_create_mapping(pcie->msi_domain, hwirq); - if (!virq) { - advk_pcie_free_msi(pcie, hwirq); - return -EINVAL; + mutex_lock(&pcie->msi_used_lock); + hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, + 0, nr_irqs, 0); + if (hwirq >= MSI_IRQ_NUM) { + mutex_unlock(&pcie->msi_used_lock); + return -ENOSPC; } - irq_set_msi_desc(virq, desc); - - msi_msg_phys = virt_to_phys(&pcie->msi_msg); - - msg.address_lo = lower_32_bits(msi_msg_phys); - msg.address_hi = upper_32_bits(msi_msg_phys); - msg.data = virq; - - pci_write_msi_msg(virq, &msg); - - 
return 0; -} + bitmap_set(pcie->msi_used, hwirq, nr_irqs); + mutex_unlock(&pcie->msi_used_lock); -static void advk_pcie_teardown_msi_irq(struct msi_controller *chip, - unsigned int irq) -{ - struct irq_data *d = irq_get_irq_data(irq); - struct msi_desc *msi = irq_data_get_msi_desc(d); - struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi); - unsigned long hwirq = d->hwirq; + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, + &pcie->msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); - irq_dispose_mapping(irq); - advk_pcie_free_msi(pcie, hwirq); + return hwirq; } -static int advk_pcie_msi_map(struct irq_domain *domain, - unsigned int virq, irq_hw_number_t hw) +static void advk_msi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct advk_pcie *pcie = domain->host_data; - irq_set_chip_and_handler(virq, &pcie->msi_irq_chip, - handle_simple_irq); - - return 0; + mutex_lock(&pcie->msi_used_lock); + bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); + mutex_unlock(&pcie->msi_used_lock); } -static const struct irq_domain_ops advk_pcie_msi_irq_ops = { - .map = advk_pcie_msi_map, +static const struct irq_domain_ops advk_msi_domain_ops = { + .alloc = advk_msi_irq_domain_alloc, + .free = advk_msi_irq_domain_free, }; static void advk_pcie_irq_mask(struct irq_data *d) @@ -680,30 +652,25 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct device_node *node = dev->of_node; - struct irq_chip *msi_irq_chip; - struct msi_controller *msi; + struct irq_chip *bottom_ic, *msi_ic; + struct msi_domain_info *msi_di; phys_addr_t msi_msg_phys; - int ret; - msi_irq_chip = &pcie->msi_irq_chip; + mutex_init(&pcie->msi_used_lock); - msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi", - dev_name(dev)); - if (!msi_irq_chip->name) - return -ENOMEM; + bottom_ic = &pcie->msi_bottom_irq_chip; - msi_irq_chip->irq_enable = pci_msi_unmask_irq; - msi_irq_chip->irq_disable = pci_msi_mask_irq; - msi_irq_chip->irq_mask = pci_msi_mask_irq; - msi_irq_chip->irq_unmask = pci_msi_unmask_irq; + bottom_ic->name = "MSI"; + bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; + bottom_ic->irq_set_affinity = advk_msi_set_affinity; - msi = &pcie->msi; + msi_ic = &pcie->msi_irq_chip; + msi_ic->name = "advk-MSI"; - msi->setup_irq = advk_pcie_setup_msi_irq; - msi->teardown_irq = advk_pcie_teardown_msi_irq; - msi->of_node = node; - - mutex_init(&pcie->msi_used_lock); + msi_di = &pcie->msi_domain_info; + msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI; + msi_di->chip = msi_ic; msi_msg_phys = virt_to_phys(&pcie->msi_msg); @@ -712,16 +679,18 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) advk_writel(pcie, upper_32_bits(msi_msg_phys), PCIE_MSI_ADDR_HIGH_REG); - pcie->msi_domain = + pcie->msi_inner_domain = irq_domain_add_linear(NULL, MSI_IRQ_NUM, - &advk_pcie_msi_irq_ops, pcie); - if (!pcie->msi_domain) + &advk_msi_domain_ops, pcie); + if (!pcie->msi_inner_domain) return -ENOMEM; - ret = of_pci_msi_chip_add(msi); - if (ret < 0) { - irq_domain_remove(pcie->msi_domain); - return ret; + pcie->msi_domain = + pci_msi_create_irq_domain(of_node_to_fwnode(node), + msi_di, pcie->msi_inner_domain); + if (!pcie->msi_domain) { + irq_domain_remove(pcie->msi_inner_domain); + return -ENOMEM; } return 0; @@ -729,8 +698,8 @@ static int 
advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
 
 static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
 {
-        of_pci_msi_chip_remove(&pcie->msi);
         irq_domain_remove(pcie->msi_domain);
+        irq_domain_remove(pcie->msi_inner_domain);
 }
 
 static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
@@ -917,8 +886,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
         struct advk_pcie *pcie;
         struct resource *res;
         struct pci_bus *bus, *child;
-        struct msi_controller *msi;
-        struct device_node *msi_node;
         int ret, irq;
 
         pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
@@ -962,14 +929,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
                 return ret;
         }
 
-        msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
-        if (msi_node)
-                msi = of_pci_find_msi_chip_by_node(msi_node);
-        else
-                msi = NULL;
-
-        bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
-                                    pcie, &pcie->resources, &pcie->msi);
+        bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
+                                pcie, &pcie->resources);
         if (!bus) {
                 advk_pcie_remove_msi_irq_domain(pcie);
                 advk_pcie_remove_irq_domain(pcie);
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
new file mode 100644
index 000000000000..d26501c4145a
--- /dev/null
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -0,0 +1,563 @@
+/*
+ * Support for Faraday Technology FTPCI100 PCI Controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the out-of-tree OpenWRT patch for Cortina Gemini:
+ * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Based on SL2312 PCI controller code
+ * Storlink (C) 2003
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+
+/*
+ * Special configuration registers directly in the first few words
+ * in I/O space.
+ */ +#define PCI_IOSIZE 0x00 +#define PCI_PROT 0x04 /* AHB protection */ +#define PCI_CTRL 0x08 /* PCI control signal */ +#define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */ +#define PCI_CONFIG 0x28 /* PCI configuration command register */ +#define PCI_DATA 0x2C + +#define FARADAY_PCI_PMC 0x40 /* Power management control */ +#define FARADAY_PCI_PMCSR 0x44 /* Power management status */ +#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ +#define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */ +#define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */ +#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ +#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ + +/* Bits 31..28 gives INTD..INTA status */ +#define PCI_CTRL2_INTSTS_SHIFT 28 +#define PCI_CTRL2_INTMASK_CMDERR BIT(27) +#define PCI_CTRL2_INTMASK_PARERR BIT(26) +/* Bits 25..22 masks INTD..INTA */ +#define PCI_CTRL2_INTMASK_SHIFT 22 +#define PCI_CTRL2_INTMASK_MABRT_RX BIT(21) +#define PCI_CTRL2_INTMASK_TABRT_RX BIT(20) +#define PCI_CTRL2_INTMASK_TABRT_TX BIT(19) +#define PCI_CTRL2_INTMASK_RETRY4 BIT(18) +#define PCI_CTRL2_INTMASK_SERR_RX BIT(17) +#define PCI_CTRL2_INTMASK_PERR_RX BIT(16) +/* Bit 15 reserved */ +#define PCI_CTRL2_MSTPRI_REQ6 BIT(14) +#define PCI_CTRL2_MSTPRI_REQ5 BIT(13) +#define PCI_CTRL2_MSTPRI_REQ4 BIT(12) +#define PCI_CTRL2_MSTPRI_REQ3 BIT(11) +#define PCI_CTRL2_MSTPRI_REQ2 BIT(10) +#define PCI_CTRL2_MSTPRI_REQ1 BIT(9) +#define PCI_CTRL2_MSTPRI_REQ0 BIT(8) +/* Bits 7..4 reserved */ +/* Bits 3..0 TRDYW */ + +/* + * Memory configs: + * Bit 31..20 defines the PCI side memory base + * Bit 19..16 (4 bits) defines the size per below + */ +#define FARADAY_PCI_MEMBASE_MASK 0xfff00000 +#define FARADAY_PCI_MEMSIZE_1MB 0x0 +#define FARADAY_PCI_MEMSIZE_2MB 0x1 +#define FARADAY_PCI_MEMSIZE_4MB 0x2 +#define FARADAY_PCI_MEMSIZE_8MB 0x3 +#define FARADAY_PCI_MEMSIZE_16MB 0x4 +#define FARADAY_PCI_MEMSIZE_32MB 0x5 +#define FARADAY_PCI_MEMSIZE_64MB 0x6 +#define FARADAY_PCI_MEMSIZE_128MB 0x7 +#define FARADAY_PCI_MEMSIZE_256MB 0x8 +#define FARADAY_PCI_MEMSIZE_512MB 0x9 +#define FARADAY_PCI_MEMSIZE_1GB 0xa +#define FARADAY_PCI_MEMSIZE_2GB 0xb +#define FARADAY_PCI_MEMSIZE_SHIFT 16 + +/* + * The DMA base is set to 0x0 for all memory segments, it reflects the + * fact that the memory of the host system starts at 0x0. + */ +#define FARADAY_PCI_DMA_MEM1_BASE 0x00000000 +#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000 +#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000 + +/* Defines for PCI configuration command register */ +#define PCI_CONF_ENABLE BIT(31) +#define PCI_CONF_WHERE(r) ((r) & 0xFC) +#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16) +#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11) +#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8) + +/** + * struct faraday_pci_variant - encodes IP block differences + * @cascaded_irq: this host has cascaded IRQs from an interrupt controller + * embedded in the host bridge. 
+ */ +struct faraday_pci_variant { + bool cascaded_irq; +}; + +struct faraday_pci { + struct device *dev; + void __iomem *base; + struct irq_domain *irqdomain; + struct pci_bus *bus; +}; + +static int faraday_res_to_memcfg(resource_size_t mem_base, + resource_size_t mem_size, u32 *val) +{ + u32 outval; + + switch (mem_size) { + case SZ_1M: + outval = FARADAY_PCI_MEMSIZE_1MB; + break; + case SZ_2M: + outval = FARADAY_PCI_MEMSIZE_2MB; + break; + case SZ_4M: + outval = FARADAY_PCI_MEMSIZE_4MB; + break; + case SZ_8M: + outval = FARADAY_PCI_MEMSIZE_8MB; + break; + case SZ_16M: + outval = FARADAY_PCI_MEMSIZE_16MB; + break; + case SZ_32M: + outval = FARADAY_PCI_MEMSIZE_32MB; + break; + case SZ_64M: + outval = FARADAY_PCI_MEMSIZE_64MB; + break; + case SZ_128M: + outval = FARADAY_PCI_MEMSIZE_128MB; + break; + case SZ_256M: + outval = FARADAY_PCI_MEMSIZE_256MB; + break; + case SZ_512M: + outval = FARADAY_PCI_MEMSIZE_512MB; + break; + case SZ_1G: + outval = FARADAY_PCI_MEMSIZE_1GB; + break; + case SZ_2G: + outval = FARADAY_PCI_MEMSIZE_2GB; + break; + default: + return -EINVAL; + } + outval <<= FARADAY_PCI_MEMSIZE_SHIFT; + + /* This is probably not good */ + if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK)) + pr_warn("truncated PCI memory base\n"); + /* Translate to bridge side address space */ + outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK); + pr_debug("Translated pci base @%pap, size %pap to config %08x\n", + &mem_base, &mem_size, outval); + + *val = outval; + return 0; +} + +static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 *value) +{ + struct faraday_pci *p = bus->sysdata; + + writel(PCI_CONF_BUS(bus->number) | + PCI_CONF_DEVICE(PCI_SLOT(fn)) | + PCI_CONF_FUNCTION(PCI_FUNC(fn)) | + PCI_CONF_WHERE(config) | + PCI_CONF_ENABLE, + p->base + PCI_CONFIG); + + *value = readl(p->base + PCI_DATA); + + if (size == 1) + *value = (*value >> (8 * (config & 3))) & 0xFF; + else if (size == 2) + *value = (*value >> (8 * (config & 3))) & 0xFFFF; + + dev_dbg(&bus->dev, + "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", + PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); + + return PCIBIOS_SUCCESSFUL; +} + +static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 value) +{ + struct faraday_pci *p = bus->sysdata; + int ret = PCIBIOS_SUCCESSFUL; + + dev_dbg(&bus->dev, + "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", + PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); + + writel(PCI_CONF_BUS(bus->number) | + PCI_CONF_DEVICE(PCI_SLOT(fn)) | + PCI_CONF_FUNCTION(PCI_FUNC(fn)) | + PCI_CONF_WHERE(config) | + PCI_CONF_ENABLE, + p->base + PCI_CONFIG); + + switch (size) { + case 4: + writel(value, p->base + PCI_DATA); + break; + case 2: + writew(value, p->base + PCI_DATA + (config & 3)); + break; + case 1: + writeb(value, p->base + PCI_DATA + (config & 3)); + break; + default: + ret = PCIBIOS_BAD_REGISTER_NUMBER; + } + + return ret; +} + +static struct pci_ops faraday_pci_ops = { + .read = faraday_pci_read_config, + .write = faraday_pci_write_config, +}; + +static void faraday_pci_ack_irq(struct irq_data *d) +{ + struct faraday_pci *p = irq_data_get_irq_chip_data(d); + unsigned int reg; + + faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); + reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); + reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); + faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); +} + +static void faraday_pci_mask_irq(struct irq_data *d) +{ + struct faraday_pci 
*p = irq_data_get_irq_chip_data(d); + unsigned int reg; + + faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); + reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) + | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); + faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); +} + +static void faraday_pci_unmask_irq(struct irq_data *d) +{ + struct faraday_pci *p = irq_data_get_irq_chip_data(d); + unsigned int reg; + + faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); + reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); + reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); + faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); +} + +static void faraday_pci_irq_handler(struct irq_desc *desc) +{ + struct faraday_pci *p = irq_desc_get_handler_data(desc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + unsigned int irq_stat, reg, i; + + faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); + irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; + + chained_irq_enter(irqchip, desc); + + for (i = 0; i < 4; i++) { + if ((irq_stat & BIT(i)) == 0) + continue; + generic_handle_irq(irq_find_mapping(p->irqdomain, i)); + } + + chained_irq_exit(irqchip, desc); +} + +static struct irq_chip faraday_pci_irq_chip = { + .name = "PCI", + .irq_ack = faraday_pci_ack_irq, + .irq_mask = faraday_pci_mask_irq, + .irq_unmask = faraday_pci_unmask_irq, +}; + +static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops faraday_pci_irqdomain_ops = { + .map = faraday_pci_irq_map, +}; + +static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) +{ + struct device_node *intc = of_get_next_child(p->dev->of_node, NULL); + int irq; + int i; + + if (!intc) { + dev_err(p->dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + + /* All PCI IRQs cascade off this one */ + irq = of_irq_get(intc, 0); + if (!irq) { + dev_err(p->dev, "failed to get parent IRQ\n"); + return -EINVAL; + } + + p->irqdomain = irq_domain_add_linear(intc, 4, + &faraday_pci_irqdomain_ops, p); + if (!p->irqdomain) { + dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); + return -EINVAL; + } + + irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p); + + for (i = 0; i < 4; i++) + irq_create_mapping(p->irqdomain, i); + + return 0; +} + +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + const int na = 3, ns = 2; + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->np = parser->pna + na + ns; + + parser->range = of_get_property(node, "dma-ranges", &rlen); + if (!parser->range) + return -ENOENT; + parser->end = parser->range + rlen / sizeof(__be32); + + return 0; +} + +static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p, + struct device_node *np) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = p->dev; + u32 confreg[3] = { + FARADAY_PCI_MEM1_BASE_SIZE, + FARADAY_PCI_MEM2_BASE_SIZE, + FARADAY_PCI_MEM3_BASE_SIZE, + }; + int i = 0; + u32 val; + + if (pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* + * Get the dma-ranges from the device tree + */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.pci_addr + range.size - 1; + int ret; + + ret = 
faraday_res_to_memcfg(range.pci_addr, range.size, &val); + if (ret) { + dev_err(dev, + "DMA range %d: illegal MEM resource size\n", i); + return -EINVAL; + } + + dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", + i + 1, range.pci_addr, end, val); + if (i <= 2) { + faraday_pci_write_config(p->bus, 0, confreg[i], + 4, val); + } else { + dev_err(dev, "ignore extraneous dma-range %d\n", i); + break; + } + + i++; + } + + return 0; +} + +static int faraday_pci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct faraday_pci_variant *variant = + of_device_get_match_data(dev); + struct resource *regs; + resource_size_t io_base; + struct resource_entry *win; + struct faraday_pci *p; + struct resource *mem; + struct resource *io; + struct pci_host_bridge *host; + int ret; + u32 val; + LIST_HEAD(res); + + host = pci_alloc_host_bridge(sizeof(*p)); + if (!host) + return -ENOMEM; + + host->dev.parent = dev; + host->ops = &faraday_pci_ops; + host->busnr = 0; + host->msi = NULL; + p = pci_host_bridge_priv(host); + host->sysdata = p; + p->dev = dev; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + p->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(p->base)) + return PTR_ERR(p->base); + + ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, + &res, &io_base); + if (ret) + return ret; + + ret = devm_request_pci_bus_resources(dev, &res); + if (ret) + return ret; + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry(win, &res) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + io = win->res; + io->name = "Gemini PCI I/O"; + if (!faraday_res_to_memcfg(io->start - win->offset, + resource_size(io), &val)) { + /* setup I/O space size */ + writel(val, p->base + PCI_IOSIZE); + } else { + dev_err(dev, "illegal IO mem size\n"); + return -EINVAL; + } + ret = pci_remap_iospace(io, io_base); + if (ret) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + ret, io); + continue; + } + break; + case IORESOURCE_MEM: + mem = win->res; + mem->name = "Gemini PCI MEM"; + break; + case IORESOURCE_BUS: + break; + default: + break; + } + } + + /* Setup hostbridge */ + val = readl(p->base + PCI_CTRL); + val |= PCI_COMMAND_IO; + val |= PCI_COMMAND_MEMORY; + val |= PCI_COMMAND_MASTER; + writel(val, p->base + PCI_CTRL); + + list_splice_init(&res, &host->windows); + ret = pci_register_host_bridge(host); + if (ret) { + dev_err(dev, "failed to register host: %d\n", ret); + return ret; + } + p->bus = host->bus; + + /* Mask and clear all interrupts */ + faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); + if (variant->cascaded_irq) { + ret = faraday_pci_setup_cascaded_irq(p); + if (ret) { + dev_err(dev, "failed to setup cascaded IRQ\n"); + return ret; + } + } + + ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); + if (ret) + return ret; + + pci_scan_child_bus(p->bus); + pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + pci_bus_assign_resources(p->bus); + pci_bus_add_devices(p->bus); + pci_free_resource_list(&res); + + return 0; +} + +/* + * We encode bridge variants here, we have at least two so it doesn't + * hurt to have infrastructure to encompass future variants as well. 
+ */ +const struct faraday_pci_variant faraday_regular = { + .cascaded_irq = true, +}; + +const struct faraday_pci_variant faraday_dual = { + .cascaded_irq = false, +}; + +static const struct of_device_id faraday_pci_of_match[] = { + { + .compatible = "faraday,ftpci100", + .data = &faraday_regular, + }, + { + .compatible = "faraday,ftpci100-dual", + .data = &faraday_dual, + }, + {}, +}; + +static struct platform_driver faraday_pci_driver = { + .driver = { + .name = "ftpci100", + .of_match_table = of_match_ptr(faraday_pci_of_match), + .suppress_bind_attrs = true, + }, + .probe = faraday_pci_probe, +}; +builtin_platform_driver(faraday_pci_driver); diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c index c05ea9d72f69..7d709a7e0aa8 100644 --- a/drivers/pci/host/pci-host-generic.c +++ b/drivers/pci/host/pci-host-generic.c @@ -60,6 +60,7 @@ static struct platform_driver gen_pci_driver = { .driver = { .name = "pci-host-generic", .of_match_table = gen_pci_of_match, + .suppress_bind_attrs = true, }, .probe = gen_pci_probe, }; diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index ada98569b78e..84936383e269 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -56,6 +56,7 @@ #include <asm/apic.h> #include <linux/msi.h> #include <linux/hyperv.h> +#include <linux/refcount.h> #include <asm/mshyperv.h> /* @@ -72,6 +73,7 @@ enum { PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 }; +#define CPU_AFFINITY_ALL -1ULL #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) @@ -350,6 +352,7 @@ enum hv_pcibus_state { hv_pcibus_init = 0, hv_pcibus_probed, hv_pcibus_installed, + hv_pcibus_removed, hv_pcibus_maximum }; @@ -421,7 +424,7 @@ enum hv_pcidev_ref_reason { struct hv_pci_dev { /* List protected by pci_rescan_remove_lock */ struct list_head list_entry; - atomic_t refs; + refcount_t refs; enum hv_pcichild_state state; struct pci_function_description desc; bool reported_missing; @@ -876,7 +879,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) hv_int_desc_free(hpdev, int_desc); } - int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL); + int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference; @@ -897,9 +900,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * processors because Hyper-V only supports 64 in a guest. 
*/ affinity = irq_data_get_affinity_mask(data); - for_each_cpu_and(cpu, affinity, cpu_online_mask) { - int_pkt->int_desc.cpu_mask |= - (1ULL << vmbus_cpu_number_to_vp_number(cpu)); + if (cpumask_weight(affinity) >= 32) { + int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; + } else { + for_each_cpu_and(cpu, affinity, cpu_online_mask) { + int_pkt->int_desc.cpu_mask |= + (1ULL << vmbus_cpu_number_to_vp_number(cpu)); + } } ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, @@ -1208,9 +1215,11 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) hbus->pci_bus->msi = &hbus->msi_chip; hbus->pci_bus->msi->dev = &hbus->hdev->device; + pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); pci_bus_assign_resources(hbus->pci_bus); pci_bus_add_devices(hbus->pci_bus); + pci_unlock_rescan_remove(); hbus->state = hv_pcibus_installed; return 0; } @@ -1254,13 +1263,13 @@ static void q_resource_requirements(void *context, struct pci_response *resp, static void get_pcichild(struct hv_pci_dev *hpdev, enum hv_pcidev_ref_reason reason) { - atomic_inc(&hpdev->refs); + refcount_inc(&hpdev->refs); } static void put_pcichild(struct hv_pci_dev *hpdev, enum hv_pcidev_ref_reason reason) { - if (atomic_dec_and_test(&hpdev->refs)) + if (refcount_dec_and_test(&hpdev->refs)) kfree(hpdev); } @@ -1314,7 +1323,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, wait_for_completion(&comp_pkt.host_event); hpdev->desc = *desc; - get_pcichild(hpdev, hv_pcidev_ref_initial); + refcount_set(&hpdev->refs, 1); get_pcichild(hpdev, hv_pcidev_ref_childlist); spin_lock_irqsave(&hbus->device_list_lock, flags); @@ -1504,13 +1513,24 @@ static void pci_devices_present_work(struct work_struct *work) put_pcichild(hpdev, hv_pcidev_ref_initial); } - /* Tell the core to rescan bus because there may have been changes. */ - if (hbus->state == hv_pcibus_installed) { + switch(hbus->state) { + case hv_pcibus_installed: + /* + * Tell the core to rescan bus + * because there may have been changes. + */ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); pci_unlock_rescan_remove(); - } else { + break; + + case hv_pcibus_init: + case hv_pcibus_probed: survey_child_resources(hbus); + break; + + default: + break; } up(&hbus->enum_sem); @@ -1600,8 +1620,10 @@ static void hv_eject_device_work(struct work_struct *work) pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, wslot); if (pdev) { + pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pdev); pci_dev_put(pdev); + pci_unlock_rescan_remove(); } spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); @@ -2185,6 +2207,7 @@ static int hv_pci_probe(struct hv_device *hdev, hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); if (!hbus) return -ENOMEM; + hbus->state = hv_pcibus_init; /* * The PCI bus "domain" is what is called "segment" in ACPI and @@ -2348,6 +2371,7 @@ static int hv_pci_remove(struct hv_device *hdev) pci_stop_root_bus(hbus->pci_bus); pci_remove_root_bus(hbus->pci_bus); pci_unlock_rescan_remove(); + hbus->state = hv_pcibus_removed; } hv_pci_bus_exit(hdev); diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index cd7d51988738..f353a6eb2f01 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -752,10 +752,11 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, * If the mask is 0xffff0000, then we only want to write * the link control register, rather than clearing the * RW1C bits in the link status register. Mask out the - * status register bits. 
+ * RW1C status register bits. */ if (mask == 0xffff0000) - value &= 0xffff; + value &= ~((PCI_EXP_LNKSTA_LABS | + PCI_EXP_LNKSTA_LBMS) << 16); mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); break; @@ -1005,22 +1006,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn, return -ENOENT; } -static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie) -{ - struct device_node *msi_node; - - msi_node = of_parse_phandle(pcie->pdev->dev.of_node, - "msi-parent", 0); - if (!msi_node) - return; - - pcie->msi = of_pci_find_msi_chip_by_node(msi_node); - of_node_put(msi_node); - - if (pcie->msi) - pcie->msi->dev = &pcie->pdev->dev; -} - #ifdef CONFIG_PM_SLEEP static int mvebu_pcie_suspend(struct device *dev) { @@ -1298,7 +1283,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev) for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) pci_ioremap_io(i, pcie->io.start + i); - mvebu_pcie_msi_enable(pcie); mvebu_pcie_enable(pcie); platform_set_drvdata(pdev, pcie); diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index ed8a93f2bfb5..2618f875a600 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -380,7 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, unsigned int busnr) { struct device *dev = pcie->dev; - pgprot_t prot = pgprot_device(PAGE_KERNEL); + pgprot_t prot = pgprot_noncached(PAGE_KERNEL); phys_addr_t cs = pcie->cs->start; struct tegra_pcie_bus *bus; unsigned int i; @@ -1962,7 +1962,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) rp->pcie = pcie; rp->np = port; - rp->base = devm_ioremap_resource(dev, &rp->regs); + rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); if (IS_ERR(rp->base)) return PTR_ERR(rp->base); diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c index 3f54a43bbbea..fc0ca03f280e 100644 --- a/drivers/pci/host/pci-thunder-ecam.c +++ b/drivers/pci/host/pci-thunder-ecam.c @@ -373,6 +373,7 @@ static struct platform_driver thunder_ecam_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = thunder_ecam_of_match, + .suppress_bind_attrs = true, }, .probe = thunder_ecam_probe, }; diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index 6e031b522529..6e066f8b74df 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c @@ -474,6 +474,7 @@ static struct platform_driver thunder_pem_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = thunder_pem_of_match, + .suppress_bind_attrs = true, }, .probe = thunder_pem_probe, }; diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 5ebee7d37ff5..9281eee2d000 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c @@ -138,7 +138,8 @@ static int versatile_pci_probe(struct platform_device *pdev) return PTR_ERR(versatile_cfg_base[0]); res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res); + versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev, + res); if (IS_ERR(versatile_cfg_base[1])) return PTR_ERR(versatile_cfg_base[1]); @@ -221,6 +222,7 @@ static struct platform_driver versatile_pci_driver = { .driver = { .name = "versatile-pci", .of_match_table = versatile_pci_of_match, + .suppress_bind_attrs = true, }, .probe = versatile_pci_probe, }; diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 1a6108788f6f..8cae013e7188 100644 --- 
a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -248,7 +248,7 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) dev_err(dev, "can't get CSR resource\n"); return ret; } - port->csr_base = devm_ioremap_resource(dev, &csr); + port->csr_base = devm_pci_remap_cfg_resource(dev, &csr); if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); @@ -359,7 +359,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port, struct resource *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); - port->csr_base = devm_ioremap_resource(dev, res); + port->csr_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); @@ -697,6 +697,7 @@ static struct platform_driver xgene_pcie_driver = { .driver = { .name = "xgene-pcie", .of_match_table = of_match_ptr(xgene_pcie_match_table), + .suppress_bind_attrs = true, }, .probe = xgene_pcie_probe_bridge, }; diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index 8c6a327ca6cd..90d2bdd94e41 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -67,7 +67,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return ret; } - pcie->base = devm_ioremap(dev, reg.start, resource_size(®)); + pcie->base = devm_pci_remap_cfgspace(dev, reg.start, + resource_size(®)); if (!pcie->base) { dev_err(dev, "unable to map controller registers\n"); return -ENOMEM; diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 26ddd3535272..0e020b6e0943 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ -26,6 +26,7 @@ #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_pci.h> @@ -223,9 +224,11 @@ struct rockchip_pcie { int link_gen; struct device *dev; struct irq_domain *irq_domain; - u32 io_size; int offset; + struct pci_bus *root_bus; + struct resource *io; phys_addr_t io_bus_addr; + u32 io_size; void __iomem *msg_region; u32 mem_size; phys_addr_t msg_bus_addr; @@ -425,7 +428,8 @@ static struct pci_ops rockchip_pcie_ops = { static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) { - u32 status, curr, scale, power; + int curr; + u32 status, scale, power; if (IS_ERR(rockchip->vpcie3v3)) return; @@ -437,24 +441,25 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) * to the actual power supply. 
*/ curr = regulator_get_current_limit(rockchip->vpcie3v3); - if (curr > 0) { - scale = 3; /* 0.001x */ - curr = curr / 1000; /* convert to mA */ - power = (curr * 3300) / 1000; /* milliwatt */ - while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { - if (!scale) { - dev_warn(rockchip->dev, "invalid power supply\n"); - return; - } - scale--; - power = power / 10; - } + if (curr <= 0) + return; - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); - status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | - (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); + scale = 3; /* 0.001x */ + curr = curr / 1000; /* convert to mA */ + power = (curr * 3300) / 1000; /* milliwatt */ + while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { + if (!scale) { + dev_warn(rockchip->dev, "invalid power supply\n"); + return; + } + scale--; + power = power / 10; } + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); + status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | + (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); } /** @@ -596,7 +601,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) /* Set RC's clock architecture as common clock */ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCI_EXP_LNKCTL_CCC; + status |= PCI_EXP_LNKSTA_SLC << 16; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + /* Set RC's RCB to 128 */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_RCB; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); /* Enable Gen1 training */ @@ -822,7 +832,7 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "axi-base"); - rockchip->reg_base = devm_ioremap_resource(dev, regs); + rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); if (IS_ERR(rockchip->reg_base)) return PTR_ERR(rockchip->reg_base); @@ -1359,6 +1369,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) err, io); continue; } + rockchip->io = io; break; case IORESOURCE_MEM: mem = win->res; @@ -1390,6 +1401,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) err = -ENOMEM; goto err_free_res; } + rockchip->root_bus = bus; pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); @@ -1397,7 +1409,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) pcie_bus_configure_settings(child); pci_bus_add_devices(bus); - return err; + return 0; err_free_res: pci_free_resource_list(&res); @@ -1420,6 +1432,34 @@ err_aclk_pcie: return err; } +static int rockchip_pcie_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + + pci_stop_root_bus(rockchip->root_bus); + pci_remove_root_bus(rockchip->root_bus); + pci_unmap_iospace(rockchip->io); + irq_domain_remove(rockchip->irq_domain); + + phy_power_off(rockchip->phy); + phy_exit(rockchip->phy); + + clk_disable_unprepare(rockchip->clk_pcie_pm); + clk_disable_unprepare(rockchip->hclk_pcie); + clk_disable_unprepare(rockchip->aclk_perf_pcie); + clk_disable_unprepare(rockchip->aclk_pcie); + + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + + return 0; +} + static const struct dev_pm_ops rockchip_pcie_pm_ops = { 
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, rockchip_pcie_resume_noirq) @@ -1429,6 +1469,7 @@ static const struct of_device_id rockchip_pcie_of_match[] = { { .compatible = "rockchip,rk3399-pcie", }, {} }; +MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); static struct platform_driver rockchip_pcie_driver = { .driver = { @@ -1437,6 +1478,10 @@ static struct platform_driver rockchip_pcie_driver = { .pm = &rockchip_pcie_pm_ops, }, .probe = rockchip_pcie_probe, - + .remove = rockchip_pcie_remove, }; -builtin_platform_driver(rockchip_pcie_driver); +module_platform_driver(rockchip_pcie_driver); + +MODULE_AUTHOR("Rockchip Inc"); +MODULE_DESCRIPTION("Rockchip AXI PCIe driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 4c3e0ab35496..4b16b26ae909 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c @@ -761,7 +761,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, pcie->phys_pcie_reg_base = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - pcie->ecam_base = devm_ioremap_resource(dev, res); + pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->ecam_base)) return PTR_ERR(pcie->ecam_base); pcie->phys_ecam_base = res->start; diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index 7f030f5d750b..2fe2df51f9f8 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -606,7 +606,7 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) return err; } - port->reg_base = devm_ioremap_resource(dev, ®s); + port->reg_base = devm_pci_remap_cfg_resource(dev, ®s); if (IS_ERR(port->reg_base)) return PTR_ERR(port->reg_base); diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 9e69403be632..19f30a9f461d 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -109,6 +109,12 @@ int pciehp_unconfigure_device(struct slot *p_slot) break; } } + if (!presence) { + pci_dev_set_disconnected(dev, NULL); + if (pci_has_subordinate(dev)) + pci_walk_bus(dev->subordinate, + pci_dev_set_disconnected, NULL); + } pci_stop_and_remove_bus_device(dev); /* * Ensure that no new Requests will be generated from diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 2479ae876482..d9dc7363ac77 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -450,6 +450,7 @@ found: iov->total_VFs = total; iov->pgsz = pgsz; iov->self = dev; + iov->drivers_autoprobe = true; pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index f9f2a0324ecc..83d30953ce19 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -1,7 +1,8 @@ /* - * PCI IRQ failure handing code + * PCI IRQ handling code * * Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> + * Copyright (C) 2017 Christoph Hellwig. */ #include <linux/acpi.h> @@ -59,3 +60,61 @@ enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev) return PCI_LOST_IRQ_NO_INFORMATION; } EXPORT_SYMBOL(pci_lost_interrupt); + +/** + * pci_request_irq - allocate an interrupt line for a PCI device + * @dev: PCI device to operate on + * @nr: device-relative interrupt vector index (0-based). + * @handler: Function to be called when the IRQ occurs. + * Primary handler for threaded interrupts. 
+ *           If NULL and thread_fn != NULL the default primary handler is
+ *           installed.
+ * @thread_fn: Function called from the IRQ handler thread. If NULL, no IRQ
+ *           thread is created.
+ * @dev_id: Cookie passed back to the handler function
+ * @fmt: Printf-like format string naming the handler
+ *
+ * This call allocates interrupt resources and enables the interrupt line and
+ * IRQ handling. From the point this call is made @handler and @thread_fn may
+ * be invoked. All interrupts requested using this function might be shared.
+ *
+ * @dev_id must not be NULL and must be globally unique.
+ */
+int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
+                irq_handler_t thread_fn, void *dev_id, const char *fmt, ...)
+{
+        va_list ap;
+        int ret;
+        char *devname;
+
+        va_start(ap, fmt);
+        devname = kvasprintf(GFP_KERNEL, fmt, ap);
+        va_end(ap);
+
+        ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
+                                   IRQF_SHARED, devname, dev_id);
+        if (ret)
+                kfree(devname);
+        return ret;
+}
+EXPORT_SYMBOL(pci_request_irq);
+
+/**
+ * pci_free_irq - free an interrupt allocated with pci_request_irq()
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and, if the interrupt
+ * line is no longer in use by any driver, it is disabled. The caller must
+ * ensure the interrupt is disabled on the device before calling this function.
+ * The function does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
+{
+        kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
+}
+EXPORT_SYMBOL(pci_free_irq);
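As a usage sketch for the two helpers above (the device and handler names are hypothetical; a real driver would have allocated its vectors with pci_alloc_irq_vectors() first, as assumed here):

#include <linux/interrupt.h>
#include <linux/pci.h>

struct example_dev {
        struct pci_dev *pdev;           /* hypothetical per-device state */
};

static irqreturn_t example_isr(int irq, void *data)
{
        /* acknowledge the device interrupt here */
        return IRQ_HANDLED;
}

static int example_setup_irq(struct example_dev *edev)
{
        struct pci_dev *pdev = edev->pdev;
        int ret;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                return ret;

        /* vector 0; the format string becomes the /proc/interrupts name */
        ret = pci_request_irq(pdev, 0, example_isr, NULL, edev,
                              "example[%s]", pci_name(pdev));
        if (ret)
                pci_free_irq_vectors(pdev);
        return ret;
}

static void example_teardown_irq(struct example_dev *edev)
{
        pci_free_irq(edev->pdev, 0, edev);
        pci_free_irq_vectors(edev->pdev);
}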
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
new file mode 100644
index 000000000000..9a5e5a9055eb
--- /dev/null
+++ b/drivers/pci/mmap.c
@@ -0,0 +1,99 @@
+/*
+ * mmap.c — generic PCI resource mmap helper
+ *
+ * Copyright © 2017 Amazon.com, Inc. or its affiliates.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+/*
+ * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
+ * pci_mmap_page_range() (if needed) as a wrapper round it.
+ */
+
+#ifdef HAVE_PCI_MMAP
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+                        struct vm_area_struct *vma,
+                        enum pci_mmap_state mmap_state, int write_combine)
+{
+        resource_size_t start, end;
+
+        pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
+
+        /* Adjust vm_pgoff to be the offset within the resource */
+        vma->vm_pgoff -= start >> PAGE_SHIFT;
+        return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
+                                       write_combine);
+}
+#endif
+
+static const struct vm_operations_struct pci_phys_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+        .access = generic_access_phys,
+#endif
+};
+
+int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
+                            struct vm_area_struct *vma,
+                            enum pci_mmap_state mmap_state, int write_combine)
+{
+        unsigned long size;
+        int ret;
+
+        size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
+        if (vma->vm_pgoff + vma_pages(vma) > size)
+                return -EINVAL;
+
+        if (write_combine)
+                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+        else
+                vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+
+        if (mmap_state == pci_mmap_io) {
+                ret = pci_iobar_pfn(pdev, bar, vma);
+                if (ret)
+                        return ret;
+        } else
+                vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
+
+        vma->vm_ops = &pci_phys_vm_ops;
+
+        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                                  vma->vm_end - vma->vm_start,
+                                  vma->vm_page_prot);
+}
+
+#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
+
+/*
+ * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
+ * the architecture's pci_mmap_page_range(), converting to "user visible"
+ * addresses as necessary.
+ */
+
+int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
+                            struct vm_area_struct *vma,
+                            enum pci_mmap_state mmap_state, int write_combine)
+{
+        resource_size_t start, end;
+
+        /*
+         * pci_mmap_page_range() expects the same kind of entry as coming
+         * from /proc/bus/pci/ which is a "user visible" value. If this is
+         * different from the resource itself, arch will do necessary fixup.
+ */ + pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end); + vma->vm_pgoff += start >> PAGE_SHIFT; + return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine); +} +#endif diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 0042c365b29b..ba44fdfda66b 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -298,7 +298,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { struct pci_dev *dev = msi_desc_to_pci_dev(entry); - if (dev->current_state != PCI_D0) { + if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { /* Don't touch the hardware now */ } else if (entry->msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); @@ -541,7 +541,8 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) if (affd) { masks = irq_create_affinity_masks(nvec, affd); if (!masks) - pr_err("Unable to allocate affinity masks, ignoring\n"); + dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n", + nvec); } /* MSI Entry Initialization */ @@ -681,7 +682,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, if (affd) { masks = irq_create_affinity_masks(nvec, affd); if (!masks) - pr_err("Unable to allocate affinity masks, ignoring\n"); + dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n", + nvec); } for (i = 0, curmsk = masks; i < nvec; i++) { @@ -882,7 +884,7 @@ int pci_msi_vec_count(struct pci_dev *dev) } EXPORT_SYMBOL(pci_msi_vec_count); -void pci_msi_shutdown(struct pci_dev *dev) +static void pci_msi_shutdown(struct pci_dev *dev) { struct msi_desc *desc; u32 mask; @@ -973,13 +975,18 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, return msix_capability_init(dev, entries, nvec, affd); } -void pci_msix_shutdown(struct pci_dev *dev) +static void pci_msix_shutdown(struct pci_dev *dev) { struct msi_desc *entry; if (!pci_msi_enable || !dev || !dev->msix_enabled) return; + if (pci_dev_is_disconnected(dev)) { + dev->msix_enabled = 0; + return; + } + /* Return the device with MSI-X masked as initial states */ for_each_pci_msi_entry(entry, dev) { /* Keep cached states to be restored */ diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index afa72717a979..192e7b681b96 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -394,6 +394,18 @@ void __weak pcibios_free_irq(struct pci_dev *dev) { } +#ifdef CONFIG_PCI_IOV +static inline bool pci_device_can_probe(struct pci_dev *pdev) +{ + return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe); +} +#else +static inline bool pci_device_can_probe(struct pci_dev *pdev) +{ + return true; +} +#endif + static int pci_device_probe(struct device *dev) { int error; @@ -405,10 +417,12 @@ static int pci_device_probe(struct device *dev) return error; pci_dev_get(pci_dev); - error = __pci_device_probe(drv, pci_dev); - if (error) { - pcibios_free_irq(pci_dev); - pci_dev_put(pci_dev); + if (pci_device_can_probe(pci_dev)) { + error = __pci_device_probe(drv, pci_dev); + if (error) { + pcibios_free_irq(pci_dev); + pci_dev_put(pci_dev); + } } return error; @@ -461,8 +475,6 @@ static void pci_device_shutdown(struct device *dev) if (drv && drv->shutdown) drv->shutdown(pci_dev); - pci_msi_shutdown(pci_dev); - pci_msix_shutdown(pci_dev); /* * If this is a kexec reboot, turn off Bus Master bit on the diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 25d010d449a3..31e99613a12e 100644 --- a/drivers/pci/pci-sysfs.c +++ 
b/drivers/pci/pci-sysfs.c @@ -526,10 +526,37 @@ exit: return count; } +static ssize_t sriov_drivers_autoprobe_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe); +} + +static ssize_t sriov_drivers_autoprobe_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + bool drivers_autoprobe; + + if (kstrtobool(buf, &drivers_autoprobe) < 0) + return -EINVAL; + + pdev->sriov->drivers_autoprobe = drivers_autoprobe; + + return count; +} + static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs); static struct device_attribute sriov_numvfs_attr = __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP), sriov_numvfs_show, sriov_numvfs_store); +static struct device_attribute sriov_drivers_autoprobe_attr = + __ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP), + sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store); #endif /* CONFIG_PCI_IOV */ static ssize_t driver_override_store(struct device *dev, @@ -980,20 +1007,24 @@ void pci_remove_legacy_files(struct pci_bus *b) } #endif /* HAVE_PCI_LEGACY */ -#ifdef HAVE_PCI_MMAP +#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE) int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, enum pci_mmap_api mmap_api) { - unsigned long nr, start, size, pci_start; + unsigned long nr, start, size; + resource_size_t pci_start = 0, pci_end; if (pci_resource_len(pdev, resno) == 0) return 0; nr = vma_pages(vma); start = vma->vm_pgoff; size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; - pci_start = (mmap_api == PCI_MMAP_PROCFS) ? - pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; + if (mmap_api == PCI_MMAP_PROCFS) { + pci_resource_to_user(pdev, resno, &pdev->resource[resno], + &pci_start, &pci_end); + pci_start >>= PAGE_SHIFT; + } if (start >= pci_start && start < pci_start + size && start + nr <= pci_start + size) return 1; @@ -1013,37 +1044,24 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma, int write_combine) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); - struct resource *res = attr->private; + int bar = (unsigned long)attr->private; enum pci_mmap_state mmap_type; - resource_size_t start, end; - int i; - - for (i = 0; i < PCI_ROM_RESOURCE; i++) - if (res == &pdev->resource[i]) - break; - if (i >= PCI_ROM_RESOURCE) - return -ENODEV; + struct resource *res = &pdev->resource[bar]; if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) return -EINVAL; - if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { + if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) { WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, - pci_name(pdev), i, - (u64)pci_resource_start(pdev, i), - (u64)pci_resource_len(pdev, i)); + pci_name(pdev), bar, + (u64)pci_resource_start(pdev, bar), + (u64)pci_resource_len(pdev, bar)); return -EINVAL; } - - /* pci_mmap_page_range() expects the same kind of entry as coming - * from /proc/bus/pci/ which is a "user visible" value. If this is - * different from the resource itself, arch will do necessary fixup. - */ - pci_resource_to_user(pdev, i, res, &start, &end); - vma->vm_pgoff += start >> PAGE_SHIFT; mmap_type = res->flags & IORESOURCE_MEM ? 
pci_mmap_mem : pci_mmap_io; - return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); + + return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine); } static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj, @@ -1065,22 +1083,18 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj, loff_t off, size_t count, bool write) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); - struct resource *res = attr->private; + int bar = (unsigned long)attr->private; + struct resource *res; unsigned long port = off; - int i; - for (i = 0; i < PCI_ROM_RESOURCE; i++) - if (res == &pdev->resource[i]) - break; - if (i >= PCI_ROM_RESOURCE) - return -ENODEV; + res = &pdev->resource[bar]; - port += pci_resource_start(pdev, i); + port += pci_resource_start(pdev, bar); - if (port > pci_resource_end(pdev, i)) + if (port > pci_resource_end(pdev, bar)) return 0; - if (port + count - 1 > pci_resource_end(pdev, i)) + if (port + count - 1 > pci_resource_end(pdev, bar)) return -EINVAL; switch (count) { @@ -1170,16 +1184,19 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) } else { pdev->res_attr[num] = res_attr; sprintf(res_attr_name, "resource%d", num); - res_attr->mmap = pci_mmap_resource_uc; - } - if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { - res_attr->read = pci_read_resource_io; - res_attr->write = pci_write_resource_io; + if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { + res_attr->read = pci_read_resource_io; + res_attr->write = pci_write_resource_io; + if (arch_can_pci_mmap_io()) + res_attr->mmap = pci_mmap_resource_uc; + } else { + res_attr->mmap = pci_mmap_resource_uc; + } } res_attr->attr.name = res_attr_name; res_attr->attr.mode = S_IRUSR | S_IWUSR; res_attr->size = pci_resource_len(pdev, num); - res_attr->private = &pdev->resource[num]; + res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); if (retval) kfree(res_attr); @@ -1207,9 +1224,9 @@ static int pci_create_resource_files(struct pci_dev *pdev) retval = pci_create_attr(pdev, i, 0); /* for prefetchable resources, create a WC mappable file */ - if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH) + if (!retval && arch_can_pci_mmap_wc() && + pdev->resource[i].flags & IORESOURCE_PREFETCH) retval = pci_create_attr(pdev, i, 1); - if (retval) { pci_remove_resource_files(pdev); return retval; @@ -1549,6 +1566,7 @@ static struct attribute_group pci_dev_hp_attr_group = { static struct attribute *sriov_dev_attrs[] = { &sriov_totalvfs_attr.attr, &sriov_numvfs_attr.attr, + &sriov_drivers_autoprobe_attr.attr, NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7904d02ffdb9..b01bd5bba8e6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -66,7 +66,8 @@ static void pci_dev_d3_sleep(struct pci_dev *dev) if (delay < pci_pm_d3_delay) delay = pci_pm_d3_delay; - msleep(delay); + if (delay) + msleep(delay); } #ifdef CONFIG_PCI_DOMAINS @@ -827,7 +828,8 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) * because have already delayed for the bridge. 
*/ if (dev->runtime_d3cold) { - msleep(dev->d3cold_delay); + if (dev->d3cold_delay) + msleep(dev->d3cold_delay); /* * When powering on a bridge from D3cold, the * whole hierarchy may be powered on into @@ -1782,8 +1784,8 @@ static void pci_pme_list_scan(struct work_struct *work) } } if (!list_empty(&pci_pme_list)) - schedule_delayed_work(&pci_pme_work, - msecs_to_jiffies(PME_TIMEOUT)); + queue_delayed_work(system_freezable_wq, &pci_pme_work, + msecs_to_jiffies(PME_TIMEOUT)); mutex_unlock(&pci_pme_list_mutex); } @@ -1848,8 +1850,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable) mutex_lock(&pci_pme_list_mutex); list_add(&pme_dev->list, &pci_pme_list); if (list_is_singular(&pci_pme_list)) - schedule_delayed_work(&pci_pme_work, - msecs_to_jiffies(PME_TIMEOUT)); + queue_delayed_work(system_freezable_wq, + &pci_pme_work, + msecs_to_jiffies(PME_TIMEOUT)); mutex_unlock(&pci_pme_list_mutex); } else { mutex_lock(&pci_pme_list_mutex); @@ -3363,7 +3366,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) * Only architectures that have memory mapped IO functions defined * (and the PCI_IOBASE value defined) should call this function. */ -int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) +int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) { #if defined(PCI_IOBASE) && defined(CONFIG_MMU) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; @@ -3383,6 +3386,7 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) return -ENODEV; #endif } +EXPORT_SYMBOL(pci_remap_iospace); /** * pci_unmap_iospace - Unmap the memory mapped I/O space @@ -3400,6 +3404,89 @@ void pci_unmap_iospace(struct resource *res) unmap_kernel_range(vaddr, resource_size(res)); #endif } +EXPORT_SYMBOL(pci_unmap_iospace); + +/** + * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() + * @dev: Generic device to remap IO address for + * @offset: Resource address to map + * @size: Size of map + * + * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver + * detach. + */ +void __iomem *devm_pci_remap_cfgspace(struct device *dev, + resource_size_t offset, + resource_size_t size) +{ + void __iomem **ptr, *addr; + + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return NULL; + + addr = pci_remap_cfgspace(offset, size); + if (addr) { + *ptr = addr; + devres_add(dev, ptr); + } else + devres_free(ptr); + + return addr; +} +EXPORT_SYMBOL(devm_pci_remap_cfgspace); + +/** + * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource + * @dev: generic device to handle the resource for + * @res: configuration space resource to be handled + * + * Checks that a resource is a valid memory region, requests the memory + * region and ioremaps with pci_remap_cfgspace() API that ensures the + * proper PCI configuration space memory attributes are guaranteed. + * + * All operations are managed and will be undone on driver detach. + * + * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code + * on failure. 
Usage example: + * + * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + * base = devm_pci_remap_cfg_resource(&pdev->dev, res); + * if (IS_ERR(base)) + * return PTR_ERR(base); + */ +void __iomem *devm_pci_remap_cfg_resource(struct device *dev, + struct resource *res) +{ + resource_size_t size; + const char *name; + void __iomem *dest_ptr; + + BUG_ON(!dev); + + if (!res || resource_type(res) != IORESOURCE_MEM) { + dev_err(dev, "invalid resource\n"); + return IOMEM_ERR_PTR(-EINVAL); + } + + size = resource_size(res); + name = res->name ?: dev_name(dev); + + if (!devm_request_mem_region(dev, res->start, size, name)) { + dev_err(dev, "can't request region for resource %pR\n", res); + return IOMEM_ERR_PTR(-EBUSY); + } + + dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); + if (!dest_ptr) { + dev_err(dev, "ioremap failed for resource %pR\n", res); + devm_release_mem_region(dev, res->start, size); + dest_ptr = IOMEM_ERR_PTR(-ENOMEM); + } + + return dest_ptr; +} +EXPORT_SYMBOL(devm_pci_remap_cfg_resource); static void __pci_set_master(struct pci_dev *dev, bool enable) { @@ -3773,24 +3860,41 @@ static void pci_flr_wait(struct pci_dev *dev) (i - 1) * 100); } -static int pcie_flr(struct pci_dev *dev, int probe) +/** + * pcie_has_flr - check if a device supports function level resets + * @dev: device to check + * + * Returns true if the device advertises support for PCIe function level + * resets. + */ +static bool pcie_has_flr(struct pci_dev *dev) { u32 cap; - pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return -ENOTTY; + if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) + return false; - if (probe) - return 0; + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); + return cap & PCI_EXP_DEVCAP_FLR; +} +/** + * pcie_flr - initiate a PCIe function level reset + * @dev: device to reset + * + * Initiate a function level reset on @dev. The caller should ensure the + * device supports FLR before calling this function, e.g. by using the + * pcie_has_flr() helper. 
+ */ +void pcie_flr(struct pci_dev *dev) +{ if (!pci_wait_for_pending_transaction(dev)) dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); pci_flr_wait(dev); - return 0; } +EXPORT_SYMBOL_GPL(pcie_flr); static int pci_af_flr(struct pci_dev *dev, int probe) { @@ -3801,6 +3905,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe) if (!pos) return -ENOTTY; + if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) + return -ENOTTY; + pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) return -ENOTTY; @@ -3971,9 +4078,12 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe) if (rc != -ENOTTY) goto done; - rc = pcie_flr(dev, probe); - if (rc != -ENOTTY) + if (pcie_has_flr(dev)) { + if (!probe) + pcie_flr(dev); + rc = 0; goto done; + } rc = pci_af_flr(dev, probe); if (rc != -ENOTTY) @@ -4932,6 +5042,8 @@ bool pci_device_is_present(struct pci_dev *pdev) { u32 v; + if (pci_dev_is_disconnected(pdev)) + return false; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); } EXPORT_SYMBOL_GPL(pci_device_is_present); @@ -4947,6 +5059,11 @@ void pci_ignore_hotplug(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_ignore_hotplug); +resource_size_t __weak pcibios_default_alignment(void) +{ + return 0; +} + #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; static DEFINE_SPINLOCK(resource_alignment_lock); @@ -4954,22 +5071,25 @@ static DEFINE_SPINLOCK(resource_alignment_lock); /** * pci_specified_resource_alignment - get resource alignment specified by user. * @dev: the PCI device to get + * @resize: whether or not to change resources' size when reassigning alignment * * RETURNS: Resource alignment if it is specified. * Zero if it is not specified. 
*/ -static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) +static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, + bool *resize) { int seg, bus, slot, func, align_order, count; unsigned short vendor, device, subsystem_vendor, subsystem_device; - resource_size_t align = 0; + resource_size_t align = pcibios_default_alignment(); char *p; spin_lock(&resource_alignment_lock); p = resource_alignment_param; - if (!*p) + if (!*p && !align) goto out; if (pci_has_flag(PCI_PROBE_ONLY)) { + align = 0; pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n"); goto out; } @@ -4999,6 +5119,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) (!device || (device == dev->device)) && (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) && (!subsystem_device || (subsystem_device == dev->subsystem_device))) { + *resize = true; if (align_order == -1) align = PAGE_SIZE; else @@ -5024,6 +5145,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) bus == dev->bus->number && slot == PCI_SLOT(dev->devfn) && func == PCI_FUNC(dev->devfn)) { + *resize = true; if (align_order == -1) align = PAGE_SIZE; else @@ -5043,6 +5165,68 @@ out: return align; } +static void pci_request_resource_alignment(struct pci_dev *dev, int bar, + resource_size_t align, bool resize) +{ + struct resource *r = &dev->resource[bar]; + resource_size_t size; + + if (!(r->flags & IORESOURCE_MEM)) + return; + + if (r->flags & IORESOURCE_PCI_FIXED) { + dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n", + bar, r, (unsigned long long)align); + return; + } + + size = resource_size(r); + if (size >= align) + return; + + /* + * Increase the alignment of the resource. There are two ways we + * can do this: + * + * 1) Increase the size of the resource. BARs are aligned on their + * size, so when we reallocate space for this resource, we'll + * allocate it with the larger alignment. This also prevents + * assignment of any other BARs inside the alignment region, so + * if we're requesting page alignment, this means no other BARs + * will share the page. + * + * The disadvantage is that this makes the resource larger than + * the hardware BAR, which may break drivers that compute things + * based on the resource size, e.g., to find registers at a + * fixed offset before the end of the BAR. + * + * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and + * set r->start to the desired alignment. By itself this + * doesn't prevent other BARs being put inside the alignment + * region, but if we realign *every* resource of every device in + * the system, none of them will share an alignment region. + * + * When the user has requested alignment for only some devices via + * the "pci=resource_alignment" argument, "resize" is true and we + * use the first method. Otherwise we assume we're aligning all + * devices and we use the second. + */ + + dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n", + bar, r, (unsigned long long)align); + + if (resize) { + r->start = 0; + r->end = align - 1; + } else { + r->flags &= ~IORESOURCE_SIZEALIGN; + r->flags |= IORESOURCE_STARTALIGN; + r->start = align; + r->end = r->start + size - 1; + } + r->flags |= IORESOURCE_UNSET; +} + /* * This function disables memory decoding and releases memory resources * of the device specified by kernel's boot parameter 'pci=resource_alignment='. 
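With pci_specified_resource_alignment() now seeded from the weak pcibios_default_alignment() hook above, an architecture can request alignment for every memory BAR without any "pci=resource_alignment=" argument; in that case "resize" stays false, so BAR sizes are preserved and only IORESOURCE_STARTALIGN is applied. A minimal sketch of such an arch override (hypothetical, not taken from this patch) would be:

	resource_size_t pcibios_default_alignment(void)
	{
		/* Keep each BAR in its own page, e.g. for safer device passthrough. */
		return PAGE_SIZE;
	}
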
@@ -5054,8 +5238,9 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) { int i; struct resource *r; - resource_size_t align, size; + resource_size_t align; u16 command; + bool resize = false; /* * VF BARs are read-only zero according to SR-IOV spec r1.1, sec @@ -5067,7 +5252,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) return; /* check if specified PCI is target device to reassign */ - align = pci_specified_resource_alignment(dev); + align = pci_specified_resource_alignment(dev, &resize); if (!align) return; @@ -5084,28 +5269,11 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) command &= ~PCI_COMMAND_MEMORY; pci_write_config_word(dev, PCI_COMMAND, command); - for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { - r = &dev->resource[i]; - if (!(r->flags & IORESOURCE_MEM)) - continue; - if (r->flags & IORESOURCE_PCI_FIXED) { - dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n", - i, r); - continue; - } + for (i = 0; i <= PCI_ROM_RESOURCE; i++) + pci_request_resource_alignment(dev, i, align, resize); - size = resource_size(r); - if (size < align) { - size = align; - dev_info(&dev->dev, - "Rounding up size of resource #%d to %#llx.\n", - i, (unsigned long long)size); - } - r->flags |= IORESOURCE_UNSET; - r->end = size - 1; - r->start = 0; - } - /* Need to disable bridge's resource window, + /* + * Need to disable bridge's resource window, * to enable the kernel to reassign new resource * window later on. */ diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 4dbf9f96ae5b..f8113e5b9812 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -23,14 +23,14 @@ void pci_create_firmware_label_files(struct pci_dev *pdev); void pci_remove_firmware_label_files(struct pci_dev *pdev); #endif void pci_cleanup_rom(struct pci_dev *dev); -#ifdef HAVE_PCI_MMAP + enum pci_mmap_api { PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ }; int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, enum pci_mmap_api mmap_api); -#endif + int pci_probe_reset_function(struct pci_dev *dev); /** @@ -274,8 +274,23 @@ struct pci_sriov { struct pci_dev *self; /* this PF */ struct mutex lock; /* lock for setting sriov_numvfs in sysfs */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ + bool drivers_autoprobe; /* auto probing of VFs by driver */ }; +/* pci_dev priv_flags */ +#define PCI_DEV_DISCONNECTED 0 + +static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) +{ + set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); + return 0; +} + +static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) +{ + return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); +} + #ifdef CONFIG_PCI_ATS void pci_restore_ats_state(struct pci_dev *dev); #else diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c index d4d70ef4a2d7..77d2ca99d2ec 100644 --- a/drivers/pci/pcie/pcie-dpc.c +++ b/drivers/pci/pcie/pcie-dpc.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/pci.h> #include <linux/pcieport_if.h> +#include "../pci.h" struct dpc_dev { struct pcie_device *dev; @@ -66,6 +67,10 @@ static void interrupt_event_handler(struct work_struct *work) list_for_each_entry_safe_reverse(dev, temp, &parent->devices, bus_list) { pci_dev_get(dev); + pci_dev_set_disconnected(dev, NULL); + if (pci_has_subordinate(dev)) + pci_walk_bus(dev->subordinate, + pci_dev_set_disconnected, NULL); pci_stop_and_remove_bus_device(dev); pci_dev_put(dev); }
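The PCI_DEV_DISCONNECTED bit defined in pci.h above, and set across the whole sub-tree by the DPC handler, lets hot paths bail out before touching surprise-removed hardware, as pci_device_is_present() and the MSI write/shutdown paths now do. A hedged sketch of the kind of early-exit guard this enables (the function name is made up for illustration):

	static int example_cfg_read(struct pci_dev *dev, int where, u32 *val)
	{
		if (pci_dev_is_disconnected(dev)) {
			*val = ~0;	/* what a failed config read returns */
			return PCIBIOS_DEVICE_NOT_FOUND;
		}
		return pci_read_config_dword(dev, where, val);
	}
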
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 90592d424e9b..01eb8038fceb 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -175,7 +175,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int pos) { - u32 l, sz, mask; + u32 l = 0, sz = 0, mask; u64 l64, sz64, mask64; u16 orig_cmd; struct pci_bus_region region, inverted_region; @@ -231,7 +231,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_ROM_ENABLE; l64 = l & PCI_ROM_ADDRESS_MASK; sz64 = sz & PCI_ROM_ADDRESS_MASK; - mask64 = (u32)PCI_ROM_ADDRESS_MASK; + mask64 = PCI_ROM_ADDRESS_MASK; } if (res->flags & IORESOURCE_MEM_64) { diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index f82710a8694d..098360d7ff81 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -202,6 +202,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, #ifdef HAVE_PCI_MMAP case PCIIOC_MMAP_IS_IO: + if (!arch_can_pci_mmap_io()) + return -EINVAL; fpriv->mmap_state = pci_mmap_io; break; @@ -210,14 +212,15 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, break; case PCIIOC_WRITE_COMBINE: - if (arg) - fpriv->write_combine = 1; - else - fpriv->write_combine = 0; - break; - + if (arch_can_pci_mmap_wc()) { + if (arg) + fpriv->write_combine = 1; + else + fpriv->write_combine = 0; + break; + } + /* If arch decided it can't, fall through... */ #endif /* HAVE_PCI_MMAP */ - default: ret = -EINVAL; break; @@ -231,25 +234,35 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) { struct pci_dev *dev = PDE_DATA(file_inode(file)); struct pci_filp_private *fpriv = file->private_data; - int i, ret, write_combine; + int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM; if (!capable(CAP_SYS_RAWIO)) return -EPERM; + if (fpriv->mmap_state == pci_mmap_io) { + if (!arch_can_pci_mmap_io()) + return -EINVAL; + res_bit = IORESOURCE_IO; + } + /* Make sure the caller is mapping a real resource for this device */ for (i = 0; i < PCI_ROM_RESOURCE; i++) { - if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) + if (dev->resource[i].flags & res_bit && + pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) break; } if (i >= PCI_ROM_RESOURCE) return -ENODEV; - if (fpriv->mmap_state == pci_mmap_mem) - write_combine = fpriv->write_combine; - else - write_combine = 0; - ret = pci_mmap_page_range(dev, vma, + if (fpriv->mmap_state == pci_mmap_mem && + fpriv->write_combine) { + if (dev->resource[i].flags & IORESOURCE_PREFETCH) + write_combine = 1; + else + return -EINVAL; + } + ret = pci_mmap_page_range(dev, i, vma, fpriv->mmap_state, write_combine); if (ret < 0) return ret; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 673683660b5c..085fb787aa9e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1685,6 +1685,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); #ifdef CONFIG_X86_IO_APIC +static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) +{ + noioapicreroute = 1; + pr_info("%s detected: disable boot interrupt reroute\n", d->ident); + + return 0; +} + +static struct dmi_system_id boot_interrupt_dmi_table[] = { + /* + * Systems to exclude from boot interrupt reroute quirks + */ + { + .callback = dmi_disable_ioapicreroute, + .ident = "ASUSTek Computer INC. 
M2N-LR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"), + }, + }, + {} +}; + /* * Boot interrupts on some chipsets cannot be turned off. For these chipsets, * remap the original interrupt in the linux kernel to the boot interrupt, so @@ -1693,6 +1716,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); */ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) { + dmi_check_system(boot_interrupt_dmi_table); if (noioapicquirk || noioapicreroute) return; @@ -3642,19 +3666,11 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) * * The 82599 supports FLR on VFs, but FLR support is reported only * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5). - * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP. + * Thus we must call pcie_flr() directly without first checking if it is + * supported. */ - - if (probe) - return 0; - - if (!pci_wait_for_pending_transaction(dev)) - dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); - - pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); - - msleep(100); - + if (!probe) + pcie_flr(dev); return 0; } @@ -3759,20 +3775,7 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe) PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); - /* - * Start of pcie_flr() code sequence. This reset code is a copy of - * the guts of pcie_flr() because that's not an exported function. - */ - - if (!pci_wait_for_pending_transaction(dev)) - dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); - - pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); - msleep(100); - - /* - * End of pcie_flr() code sequence. - */ + pcie_flr(dev); /* * Restore the configuration information (BAR values, etc.) including @@ -3939,6 +3942,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080, DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias); /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); +/* ITE 8893 has the same problem as the 8892 */ +DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias); /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); @@ -3958,6 +3963,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); /* + * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are + * associated not at the root bus, but at a bridge below. This quirk avoids + * generating invalid DMA aliases. + */ +static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, + quirk_bridge_cavm_thrx2_pcie_root); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, + quirk_bridge_cavm_thrx2_pcie_root); + +/* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) * class code. Fix it. 
*/ @@ -4095,6 +4114,9 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); + if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff))) + return -ENOTTY; + return acs_flags ? 0 : 1; } @@ -4634,3 +4656,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); + +/* FLR may cause some 82579 devices to hang. */ +static void quirk_intel_no_flr(struct pci_dev *dev) +{ + dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 33e0f033a48e..4c6044ad7368 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -60,6 +60,10 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, tmp = bus->self; + /* stop at bridge where translation unit is associated */ + if (tmp->dev_flags & PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT) + return ret; + /* * PCIe-to-PCI/X bridges alias transactions from downstream * devices using the subordinate bus number (PCI Express to diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index cb389277df41..958da7db9033 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1066,10 +1066,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, r->flags = 0; continue; } - size += r_size; + size += max(r_size, align); /* Exclude ranges with size > align from calculation of the alignment. */ - if (r_size == align) + if (r_size <= align) aligns[order] += align; if (order > max_order) max_order = order; diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 4bc589ee78d0..85774b7a316a 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) mask = (u32)PCI_BASE_ADDRESS_IO_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; } else if (resno == PCI_ROM_RESOURCE) { - mask = (u32)PCI_ROM_ADDRESS_MASK; + mask = PCI_ROM_ADDRESS_MASK; } else { mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; diff --git a/drivers/pci/switch/Kconfig b/drivers/pci/switch/Kconfig new file mode 100644 index 000000000000..4c49648e0646 --- /dev/null +++ b/drivers/pci/switch/Kconfig @@ -0,0 +1,13 @@ +menu "PCI switch controller drivers" + depends on PCI + +config PCI_SW_SWITCHTEC + tristate "MicroSemi Switchtec PCIe Switch Management Driver" + help + Enables support for the management interface for the MicroSemi + Switchtec series of PCIe switches. Supports userspace access + to submit MRPC commands to the switch via /dev/switchtecX + devices. See <file:Documentation/switchtec.txt> for more + information. 
+ +endmenu diff --git a/drivers/pci/switch/Makefile b/drivers/pci/switch/Makefile new file mode 100644 index 000000000000..37d8cfb03f3f --- /dev/null +++ b/drivers/pci/switch/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PCI_SW_SWITCHTEC) += switchtec.o diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c new file mode 100644 index 000000000000..cc6e085008fb --- /dev/null +++ b/drivers/pci/switch/switchtec.c @@ -0,0 +1,1600 @@ +/* + * Microsemi Switchtec(tm) PCIe Management Driver + * Copyright (c) 2017, Microsemi Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/switchtec_ioctl.h> + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <linux/poll.h> +#include <linux/pci.h> +#include <linux/cdev.h> +#include <linux/wait.h> + +MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); +MODULE_VERSION("0.1"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Microsemi Corporation"); + +static int max_devices = 16; +module_param(max_devices, int, 0644); +MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); + +static dev_t switchtec_devt; +static struct class *switchtec_class; +static DEFINE_IDA(switchtec_minor_ida); + +#define MICROSEMI_VENDOR_ID 0x11f8 +#define MICROSEMI_NTB_CLASSCODE 0x068000 +#define MICROSEMI_MGMT_CLASSCODE 0x058000 + +#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 +#define SWITCHTEC_MAX_PFF_CSR 48 + +#define SWITCHTEC_EVENT_OCCURRED BIT(0) +#define SWITCHTEC_EVENT_CLEAR BIT(0) +#define SWITCHTEC_EVENT_EN_LOG BIT(1) +#define SWITCHTEC_EVENT_EN_CLI BIT(2) +#define SWITCHTEC_EVENT_EN_IRQ BIT(3) +#define SWITCHTEC_EVENT_FATAL BIT(4) + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200, + SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000, + SWITCHTEC_GAS_NTB_OFFSET = 0x10000, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000, +}; + +struct mrpc_regs { + u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; + u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; + u32 cmd; + u32 status; + u32 ret_value; +} __packed; + +enum mrpc_status { + SWITCHTEC_MRPC_STATUS_INPROGRESS = 1, + SWITCHTEC_MRPC_STATUS_DONE = 2, + SWITCHTEC_MRPC_STATUS_ERROR = 0xFF, + SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100, +}; + +struct sw_event_regs { + u64 event_report_ctrl; + u64 reserved1; + u64 part_event_bitmap; + u64 reserved2; + u32 global_summary; + u32 reserved3[3]; + u32 stack_error_event_hdr; + u32 stack_error_event_data; + u32 reserved4[4]; + u32 ppu_error_event_hdr; + u32 ppu_error_event_data; + u32 reserved5[4]; + u32 isp_error_event_hdr; + u32 isp_error_event_data; + u32 reserved6[4]; + u32 sys_reset_event_hdr; + u32 reserved7[5]; + u32 fw_exception_hdr; + u32 reserved8[5]; + u32 fw_nmi_hdr; + u32 reserved9[5]; + u32 fw_non_fatal_hdr; + u32 reserved10[5]; + u32 fw_fatal_hdr; + u32 reserved11[5]; + u32 twi_mrpc_comp_hdr; + u32 twi_mrpc_comp_data; + u32 reserved12[4]; + u32 twi_mrpc_comp_async_hdr; + u32 twi_mrpc_comp_async_data; + u32 
reserved13[4]; + u32 cli_mrpc_comp_hdr; + u32 cli_mrpc_comp_data; + u32 reserved14[4]; + u32 cli_mrpc_comp_async_hdr; + u32 cli_mrpc_comp_async_data; + u32 reserved15[4]; + u32 gpio_interrupt_hdr; + u32 gpio_interrupt_data; + u32 reserved16[4]; +} __packed; + +struct sys_info_regs { + u32 device_id; + u32 device_version; + u32 firmware_version; + u32 reserved1; + u32 vendor_table_revision; + u32 table_format_version; + u32 partition_id; + u32 cfg_file_fmt_version; + u32 reserved2[58]; + char vendor_id[8]; + char product_id[16]; + char product_revision[4]; + char component_vendor[8]; + u16 component_id; + u8 component_revision; +} __packed; + +struct flash_info_regs { + u32 flash_part_map_upd_idx; + + struct active_partition_info { + u32 address; + u32 build_version; + u32 build_string; + } active_img; + + struct active_partition_info active_cfg; + struct active_partition_info inactive_img; + struct active_partition_info inactive_cfg; + + u32 flash_length; + + struct partition_info { + u32 address; + u32 length; + } cfg0; + + struct partition_info cfg1; + struct partition_info img0; + struct partition_info img1; + struct partition_info nvlog; + struct partition_info vendor[8]; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; +} __packed; + +struct part_cfg_regs { + u32 status; + u32 state; + u32 port_cnt; + u32 usp_port_mode; + u32 usp_pff_inst_id; + u32 vep_pff_inst_id; + u32 dsp_pff_inst_id[47]; + u32 reserved1[11]; + u16 vep_vector_number; + u16 usp_vector_number; + u32 port_event_bitmap; + u32 reserved2[3]; + u32 part_event_summary; + u32 reserved3[3]; + u32 part_reset_hdr; + u32 part_reset_data[5]; + u32 mrpc_comp_hdr; + u32 mrpc_comp_data[5]; + u32 mrpc_comp_async_hdr; + u32 mrpc_comp_async_data[5]; + u32 dyn_binding_hdr; + u32 dyn_binding_data[5]; + u32 reserved4[159]; +} __packed; + +enum { + SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0, + SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1, + SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2, + SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3, +}; + +struct pff_csr_regs { + u16 vendor_id; + u16 device_id; + u32 pci_cfg_header[15]; + u32 pci_cap_region[48]; + u32 pcie_cap_region[448]; + u32 indirect_gas_window[128]; + u32 indirect_gas_window_off; + u32 reserved[127]; + u32 pff_event_summary; + u32 reserved2[3]; + u32 aer_in_p2p_hdr; + u32 aer_in_p2p_data[5]; + u32 aer_in_vep_hdr; + u32 aer_in_vep_data[5]; + u32 dpc_hdr; + u32 dpc_data[5]; + u32 cts_hdr; + u32 cts_data[5]; + u32 reserved3[6]; + u32 hotplug_hdr; + u32 hotplug_data[5]; + u32 ier_hdr; + u32 ier_data[5]; + u32 threshold_hdr; + u32 threshold_data[5]; + u32 power_mgmt_hdr; + u32 power_mgmt_data[5]; + u32 tlp_throttling_hdr; + u32 tlp_throttling_data[5]; + u32 force_speed_hdr; + u32 force_speed_data[5]; + u32 credit_timeout_hdr; + u32 credit_timeout_data[5]; + u32 link_state_hdr; + u32 link_state_data[5]; + u32 reserved4[174]; +} __packed; + +struct switchtec_dev { + struct pci_dev *pdev; + struct device dev; + struct cdev cdev; + + int partition; + int partition_count; + int pff_csr_count; + char pff_local[SWITCHTEC_MAX_PFF_CSR]; + + void __iomem *mmio; + struct mrpc_regs __iomem *mmio_mrpc; + struct sw_event_regs __iomem *mmio_sw_event; + struct sys_info_regs __iomem *mmio_sys_info; + struct flash_info_regs __iomem *mmio_flash_info; + struct ntb_info_regs __iomem *mmio_ntb; + struct part_cfg_regs __iomem *mmio_part_cfg; + struct part_cfg_regs __iomem *mmio_part_cfg_all; + struct pff_csr_regs __iomem *mmio_pff_csr; + 
+ /* + * The mrpc mutex must be held when accessing the other + * mrpc_ fields, alive flag and stuser->state field + */ + struct mutex mrpc_mutex; + struct list_head mrpc_queue; + int mrpc_busy; + struct work_struct mrpc_work; + struct delayed_work mrpc_timeout; + bool alive; + + wait_queue_head_t event_wq; + atomic_t event_cnt; +}; + +static struct switchtec_dev *to_stdev(struct device *dev) +{ + return container_of(dev, struct switchtec_dev, dev); +} + +enum mrpc_state { + MRPC_IDLE = 0, + MRPC_QUEUED, + MRPC_RUNNING, + MRPC_DONE, +}; + +struct switchtec_user { + struct switchtec_dev *stdev; + + enum mrpc_state state; + + struct completion comp; + struct kref kref; + struct list_head list; + + u32 cmd; + u32 status; + u32 return_code; + size_t data_len; + size_t read_len; + unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; + int event_cnt; +}; + +static struct switchtec_user *stuser_create(struct switchtec_dev *stdev) +{ + struct switchtec_user *stuser; + + stuser = kzalloc(sizeof(*stuser), GFP_KERNEL); + if (!stuser) + return ERR_PTR(-ENOMEM); + + get_device(&stdev->dev); + stuser->stdev = stdev; + kref_init(&stuser->kref); + INIT_LIST_HEAD(&stuser->list); + init_completion(&stuser->comp); + stuser->event_cnt = atomic_read(&stdev->event_cnt); + + dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); + + return stuser; +} + +static void stuser_free(struct kref *kref) +{ + struct switchtec_user *stuser; + + stuser = container_of(kref, struct switchtec_user, kref); + + dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser); + + put_device(&stuser->stdev->dev); + kfree(stuser); +} + +static void stuser_put(struct switchtec_user *stuser) +{ + kref_put(&stuser->kref, stuser_free); +} + +static void stuser_set_state(struct switchtec_user *stuser, + enum mrpc_state state) +{ + /* requires the mrpc_mutex to already be held when called */ + + const char * const state_names[] = { + [MRPC_IDLE] = "IDLE", + [MRPC_QUEUED] = "QUEUED", + [MRPC_RUNNING] = "RUNNING", + [MRPC_DONE] = "DONE", + }; + + stuser->state = state; + + dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s", + stuser, state_names[state]); +} + +static void mrpc_complete_cmd(struct switchtec_dev *stdev); + +static void mrpc_cmd_submit(struct switchtec_dev *stdev) +{ + /* requires the mrpc_mutex to already be held when called */ + + struct switchtec_user *stuser; + + if (stdev->mrpc_busy) + return; + + if (list_empty(&stdev->mrpc_queue)) + return; + + stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user, + list); + + stuser_set_state(stuser, MRPC_RUNNING); + stdev->mrpc_busy = 1; + memcpy_toio(&stdev->mmio_mrpc->input_data, + stuser->data, stuser->data_len); + iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd); + + stuser->status = ioread32(&stdev->mmio_mrpc->status); + if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS) + mrpc_complete_cmd(stdev); + + schedule_delayed_work(&stdev->mrpc_timeout, + msecs_to_jiffies(500)); +} + +static int mrpc_queue_cmd(struct switchtec_user *stuser) +{ + /* requires the mrpc_mutex to already be held when called */ + + struct switchtec_dev *stdev = stuser->stdev; + + kref_get(&stuser->kref); + stuser->read_len = sizeof(stuser->data); + stuser_set_state(stuser, MRPC_QUEUED); + init_completion(&stuser->comp); + list_add_tail(&stuser->list, &stdev->mrpc_queue); + + mrpc_cmd_submit(stdev); + + return 0; +} + +static void mrpc_complete_cmd(struct switchtec_dev *stdev) +{ + /* requires the mrpc_mutex to already be held when called */ + struct switchtec_user *stuser; + + if 
(list_empty(&stdev->mrpc_queue)) + return; + + stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user, + list); + + stuser->status = ioread32(&stdev->mmio_mrpc->status); + if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS) + return; + + stuser_set_state(stuser, MRPC_DONE); + stuser->return_code = 0; + + if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE) + goto out; + + stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value); + if (stuser->return_code != 0) + goto out; + + memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data, + stuser->read_len); + +out: + complete_all(&stuser->comp); + list_del_init(&stuser->list); + stuser_put(stuser); + stdev->mrpc_busy = 0; + + mrpc_cmd_submit(stdev); +} + +static void mrpc_event_work(struct work_struct *work) +{ + struct switchtec_dev *stdev; + + stdev = container_of(work, struct switchtec_dev, mrpc_work); + + dev_dbg(&stdev->dev, "%s\n", __func__); + + mutex_lock(&stdev->mrpc_mutex); + cancel_delayed_work(&stdev->mrpc_timeout); + mrpc_complete_cmd(stdev); + mutex_unlock(&stdev->mrpc_mutex); +} + +static void mrpc_timeout_work(struct work_struct *work) +{ + struct switchtec_dev *stdev; + u32 status; + + stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work); + + dev_dbg(&stdev->dev, "%s\n", __func__); + + mutex_lock(&stdev->mrpc_mutex); + + status = ioread32(&stdev->mmio_mrpc->status); + if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) { + schedule_delayed_work(&stdev->mrpc_timeout, + msecs_to_jiffies(500)); + goto out; + } + + mrpc_complete_cmd(stdev); + +out: + mutex_unlock(&stdev->mrpc_mutex); +} + +static ssize_t device_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct switchtec_dev *stdev = to_stdev(dev); + u32 ver; + + ver = ioread32(&stdev->mmio_sys_info->device_version); + + return sprintf(buf, "%x\n", ver); +} +static DEVICE_ATTR_RO(device_version); + +static ssize_t fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct switchtec_dev *stdev = to_stdev(dev); + u32 ver; + + ver = ioread32(&stdev->mmio_sys_info->firmware_version); + + return sprintf(buf, "%08x\n", ver); +} +static DEVICE_ATTR_RO(fw_version); + +static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len) +{ + int i; + + memcpy_fromio(buf, attr, len); + buf[len] = '\n'; + buf[len + 1] = 0; + + for (i = len - 1; i > 0; i--) { + if (buf[i] != ' ') + break; + buf[i] = '\n'; + buf[i + 1] = 0; + } + + return strlen(buf); +} + +#define DEVICE_ATTR_SYS_INFO_STR(field) \ +static ssize_t field ## _show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct switchtec_dev *stdev = to_stdev(dev); \ + return io_string_show(buf, &stdev->mmio_sys_info->field, \ + sizeof(stdev->mmio_sys_info->field)); \ +} \ +\ +static DEVICE_ATTR_RO(field) + +DEVICE_ATTR_SYS_INFO_STR(vendor_id); +DEVICE_ATTR_SYS_INFO_STR(product_id); +DEVICE_ATTR_SYS_INFO_STR(product_revision); +DEVICE_ATTR_SYS_INFO_STR(component_vendor); + +static ssize_t component_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct switchtec_dev *stdev = to_stdev(dev); + int id = ioread16(&stdev->mmio_sys_info->component_id); + + return sprintf(buf, "PM%04X\n", id); +} +static DEVICE_ATTR_RO(component_id); + +static ssize_t component_revision_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct switchtec_dev *stdev = to_stdev(dev); + int rev = ioread8(&stdev->mmio_sys_info->component_revision); + + return sprintf(buf, "%d\n", rev); 
+} +static DEVICE_ATTR_RO(component_revision); + +static ssize_t partition_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct switchtec_dev *stdev = to_stdev(dev); + + return sprintf(buf, "%d\n", stdev->partition); +} +static DEVICE_ATTR_RO(partition); + +static ssize_t partition_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct switchtec_dev *stdev = to_stdev(dev); + + return sprintf(buf, "%d\n", stdev->partition_count); +} +static DEVICE_ATTR_RO(partition_count); + +static struct attribute *switchtec_device_attrs[] = { + &dev_attr_device_version.attr, + &dev_attr_fw_version.attr, + &dev_attr_vendor_id.attr, + &dev_attr_product_id.attr, + &dev_attr_product_revision.attr, + &dev_attr_component_vendor.attr, + &dev_attr_component_id.attr, + &dev_attr_component_revision.attr, + &dev_attr_partition.attr, + &dev_attr_partition_count.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(switchtec_device); + +static int switchtec_dev_open(struct inode *inode, struct file *filp) +{ + struct switchtec_dev *stdev; + struct switchtec_user *stuser; + + stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev); + + stuser = stuser_create(stdev); + if (IS_ERR(stuser)) + return PTR_ERR(stuser); + + filp->private_data = stuser; + nonseekable_open(inode, filp); + + dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); + + return 0; +} + +static int switchtec_dev_release(struct inode *inode, struct file *filp) +{ + struct switchtec_user *stuser = filp->private_data; + + stuser_put(stuser); + + return 0; +} + +static int lock_mutex_and_test_alive(struct switchtec_dev *stdev) +{ + if (mutex_lock_interruptible(&stdev->mrpc_mutex)) + return -EINTR; + + if (!stdev->alive) { + mutex_unlock(&stdev->mrpc_mutex); + return -ENODEV; + } + + return 0; +} + +static ssize_t switchtec_dev_write(struct file *filp, const char __user *data, + size_t size, loff_t *off) +{ + struct switchtec_user *stuser = filp->private_data; + struct switchtec_dev *stdev = stuser->stdev; + int rc; + + if (size < sizeof(stuser->cmd) || + size > sizeof(stuser->cmd) + sizeof(stuser->data)) + return -EINVAL; + + stuser->data_len = size - sizeof(stuser->cmd); + + rc = lock_mutex_and_test_alive(stdev); + if (rc) + return rc; + + if (stuser->state != MRPC_IDLE) { + rc = -EBADE; + goto out; + } + + rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd)); + if (rc) { + rc = -EFAULT; + goto out; + } + + data += sizeof(stuser->cmd); + rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd)); + if (rc) { + rc = -EFAULT; + goto out; + } + + rc = mrpc_queue_cmd(stuser); + +out: + mutex_unlock(&stdev->mrpc_mutex); + + if (rc) + return rc; + + return size; +} + +static ssize_t switchtec_dev_read(struct file *filp, char __user *data, + size_t size, loff_t *off) +{ + struct switchtec_user *stuser = filp->private_data; + struct switchtec_dev *stdev = stuser->stdev; + int rc; + + if (size < sizeof(stuser->cmd) || + size > sizeof(stuser->cmd) + sizeof(stuser->data)) + return -EINVAL; + + rc = lock_mutex_and_test_alive(stdev); + if (rc) + return rc; + + if (stuser->state == MRPC_IDLE) { + mutex_unlock(&stdev->mrpc_mutex); + return -EBADE; + } + + stuser->read_len = size - sizeof(stuser->return_code); + + mutex_unlock(&stdev->mrpc_mutex); + + if (filp->f_flags & O_NONBLOCK) { + if (!try_wait_for_completion(&stuser->comp)) + return -EAGAIN; + } else { + rc = wait_for_completion_interruptible(&stuser->comp); + if (rc < 0) + return rc; + } + + rc = lock_mutex_and_test_alive(stdev); + if (rc) + 
return rc; + + if (stuser->state != MRPC_DONE) { + mutex_unlock(&stdev->mrpc_mutex); + return -EBADE; + } + + rc = copy_to_user(data, &stuser->return_code, + sizeof(stuser->return_code)); + if (rc) { + rc = -EFAULT; + goto out; + } + + data += sizeof(stuser->return_code); + rc = copy_to_user(data, &stuser->data, + size - sizeof(stuser->return_code)); + if (rc) { + rc = -EFAULT; + goto out; + } + + stuser_set_state(stuser, MRPC_IDLE); + +out: + mutex_unlock(&stdev->mrpc_mutex); + + if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE) + return size; + else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED) + return -ENXIO; + else + return -EBADMSG; +} + +static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait) +{ + struct switchtec_user *stuser = filp->private_data; + struct switchtec_dev *stdev = stuser->stdev; + int ret = 0; + + poll_wait(filp, &stuser->comp.wait, wait); + poll_wait(filp, &stdev->event_wq, wait); + + if (lock_mutex_and_test_alive(stdev)) + return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP; + + mutex_unlock(&stdev->mrpc_mutex); + + if (try_wait_for_completion(&stuser->comp)) + ret |= POLLIN | POLLRDNORM; + + if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) + ret |= POLLPRI | POLLRDBAND; + + return ret; +} + +static int ioctl_flash_info(struct switchtec_dev *stdev, + struct switchtec_ioctl_flash_info __user *uinfo) +{ + struct switchtec_ioctl_flash_info info = {0}; + struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; + + info.flash_length = ioread32(&fi->flash_length); + info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS; + + if (copy_to_user(uinfo, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info, + struct partition_info __iomem *pi) +{ + info->address = ioread32(&pi->address); + info->length = ioread32(&pi->length); +} + +static int ioctl_flash_part_info(struct switchtec_dev *stdev, + struct switchtec_ioctl_flash_part_info __user *uinfo) +{ + struct switchtec_ioctl_flash_part_info info = {0}; + struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; + u32 active_addr = -1; + + if (copy_from_user(&info, uinfo, sizeof(info))) + return -EFAULT; + + switch (info.flash_partition) { + case SWITCHTEC_IOCTL_PART_CFG0: + active_addr = ioread32(&fi->active_cfg); + set_fw_info_part(&info, &fi->cfg0); + break; + case SWITCHTEC_IOCTL_PART_CFG1: + active_addr = ioread32(&fi->active_cfg); + set_fw_info_part(&info, &fi->cfg1); + break; + case SWITCHTEC_IOCTL_PART_IMG0: + active_addr = ioread32(&fi->active_img); + set_fw_info_part(&info, &fi->img0); + break; + case SWITCHTEC_IOCTL_PART_IMG1: + active_addr = ioread32(&fi->active_img); + set_fw_info_part(&info, &fi->img1); + break; + case SWITCHTEC_IOCTL_PART_NVLOG: + set_fw_info_part(&info, &fi->nvlog); + break; + case SWITCHTEC_IOCTL_PART_VENDOR0: + set_fw_info_part(&info, &fi->vendor[0]); + break; + case SWITCHTEC_IOCTL_PART_VENDOR1: + set_fw_info_part(&info, &fi->vendor[1]); + break; + case SWITCHTEC_IOCTL_PART_VENDOR2: + set_fw_info_part(&info, &fi->vendor[2]); + break; + case SWITCHTEC_IOCTL_PART_VENDOR3: + set_fw_info_part(&info, &fi->vendor[3]); + break; + case SWITCHTEC_IOCTL_PART_VENDOR4: + set_fw_info_part(&info, &fi->vendor[4]); + break; + case SWITCHTEC_IOCTL_PART_VENDOR5: + set_fw_info_part(&info, &fi->vendor[5]); + break; + case SWITCHTEC_IOCTL_PART_VENDOR6: + set_fw_info_part(&info, &fi->vendor[6]); + break; + case SWITCHTEC_IOCTL_PART_VENDOR7: + set_fw_info_part(&info, &fi->vendor[7]); + 
break; + default: + return -EINVAL; + } + + if (info.address == active_addr) + info.active = 1; + + if (copy_to_user(uinfo, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +static int ioctl_event_summary(struct switchtec_dev *stdev, + struct switchtec_user *stuser, + struct switchtec_ioctl_event_summary __user *usum) +{ + struct switchtec_ioctl_event_summary s = {0}; + int i; + u32 reg; + + s.global = ioread32(&stdev->mmio_sw_event->global_summary); + s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap); + s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary); + + for (i = 0; i < stdev->partition_count; i++) { + reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary); + s.part[i] = reg; + } + + for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) { + reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id); + if (reg != MICROSEMI_VENDOR_ID) + break; + + reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary); + s.pff[i] = reg; + } + + if (copy_to_user(usum, &s, sizeof(s))) + return -EFAULT; + + stuser->event_cnt = atomic_read(&stdev->event_cnt); + + return 0; +} + +static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev, + size_t offset, int index) +{ + return (void __iomem *)stdev->mmio_sw_event + offset; +} + +static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev, + size_t offset, int index) +{ + return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset; +} + +static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev, + size_t offset, int index) +{ + return (void __iomem *)&stdev->mmio_pff_csr[index] + offset; +} + +#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg} +#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg} +#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg} + +const struct event_reg { + size_t offset; + u32 __iomem *(*map_reg)(struct switchtec_dev *stdev, + size_t offset, int index); +} event_regs[] = { + EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC, + twi_mrpc_comp_async_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC, + cli_mrpc_comp_async_hdr), + EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr), + EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr), + EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr), + EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr), + EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, 
tlp_throttling_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr), + EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr), +}; + +static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev, + int event_id, int index) +{ + size_t off; + + if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS) + return ERR_PTR(-EINVAL); + + off = event_regs[event_id].offset; + + if (event_regs[event_id].map_reg == part_ev_reg) { + if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX) + index = stdev->partition; + else if (index < 0 || index >= stdev->partition_count) + return ERR_PTR(-EINVAL); + } else if (event_regs[event_id].map_reg == pff_ev_reg) { + if (index < 0 || index >= stdev->pff_csr_count) + return ERR_PTR(-EINVAL); + } + + return event_regs[event_id].map_reg(stdev, off, index); +} + +static int event_ctl(struct switchtec_dev *stdev, + struct switchtec_ioctl_event_ctl *ctl) +{ + int i; + u32 __iomem *reg; + u32 hdr; + + reg = event_hdr_addr(stdev, ctl->event_id, ctl->index); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + hdr = ioread32(reg); + for (i = 0; i < ARRAY_SIZE(ctl->data); i++) + ctl->data[i] = ioread32(®[i + 1]); + + ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED; + ctl->count = (hdr >> 5) & 0xFF; + + if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR)) + hdr &= ~SWITCHTEC_EVENT_CLEAR; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL) + hdr |= SWITCHTEC_EVENT_EN_IRQ; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL) + hdr &= ~SWITCHTEC_EVENT_EN_IRQ; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG) + hdr |= SWITCHTEC_EVENT_EN_LOG; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG) + hdr &= ~SWITCHTEC_EVENT_EN_LOG; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI) + hdr |= SWITCHTEC_EVENT_EN_CLI; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI) + hdr &= ~SWITCHTEC_EVENT_EN_CLI; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL) + hdr |= SWITCHTEC_EVENT_FATAL; + if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL) + hdr &= ~SWITCHTEC_EVENT_FATAL; + + if (ctl->flags) + iowrite32(hdr, reg); + + ctl->flags = 0; + if (hdr & SWITCHTEC_EVENT_EN_IRQ) + ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL; + if (hdr & SWITCHTEC_EVENT_EN_LOG) + ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG; + if (hdr & SWITCHTEC_EVENT_EN_CLI) + ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI; + if (hdr & SWITCHTEC_EVENT_FATAL) + ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL; + + return 0; +} + +static int ioctl_event_ctl(struct switchtec_dev *stdev, + struct switchtec_ioctl_event_ctl __user *uctl) +{ + int ret; + int nr_idxs; + struct switchtec_ioctl_event_ctl ctl; + + if (copy_from_user(&ctl, uctl, sizeof(ctl))) + return -EFAULT; + + if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS) + return -EINVAL; + + if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED) + return -EINVAL; + + if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) { + if (event_regs[ctl.event_id].map_reg == global_ev_reg) + nr_idxs = 1; + else if (event_regs[ctl.event_id].map_reg == part_ev_reg) + nr_idxs = stdev->partition_count; + else if (event_regs[ctl.event_id].map_reg == pff_ev_reg) + nr_idxs = stdev->pff_csr_count; + else + return -EINVAL; + + for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) { + ret = event_ctl(stdev, &ctl); + if (ret < 0) + return ret; + } + } else { + ret = event_ctl(stdev, &ctl); + if (ret < 0) + return ret; + } + + if (copy_to_user(uctl, &ctl, sizeof(ctl))) + return -EFAULT; + + return 0; +} + 
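/*
 * Illustrative userspace consumer of the event interface above (a sketch,
 * not part of this patch; assumes the uapi header <linux/switchtec_ioctl.h>
 * added by this series and a /dev/switchtec0 node, error handling trimmed):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/switchtec_ioctl.h>
 *
 *	int main(void)
 *	{
 *		struct switchtec_ioctl_event_summary s;
 *		int fd = open("/dev/switchtec0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY, &s))
 *			return 1;
 *		printf("global event summary: %#llx\n",
 *		       (unsigned long long)s.global);
 *		return 0;
 *	}
 */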
+static int ioctl_pff_to_port(struct switchtec_dev *stdev,
+			     struct switchtec_ioctl_pff_port *up)
+{
+	int i, part;
+	u32 reg;
+	struct part_cfg_regs *pcfg;
+	struct switchtec_ioctl_pff_port p;
+
+	if (copy_from_user(&p, up, sizeof(p)))
+		return -EFAULT;
+
+	p.port = -1;
+	for (part = 0; part < stdev->partition_count; part++) {
+		pcfg = &stdev->mmio_part_cfg_all[part];
+		p.partition = part;
+
+		reg = ioread32(&pcfg->usp_pff_inst_id);
+		if (reg == p.pff) {
+			p.port = 0;
+			break;
+		}
+
+		reg = ioread32(&pcfg->vep_pff_inst_id);
+		if (reg == p.pff) {
+			p.port = SWITCHTEC_IOCTL_PFF_VEP;
+			break;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
+			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
+			if (reg != p.pff)
+				continue;
+
+			p.port = i + 1;
+			break;
+		}
+
+		if (p.port != -1)
+			break;
+	}
+
+	if (copy_to_user(up, &p, sizeof(p)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ioctl_port_to_pff(struct switchtec_dev *stdev,
+			     struct switchtec_ioctl_pff_port *up)
+{
+	struct switchtec_ioctl_pff_port p;
+	struct part_cfg_regs *pcfg;
+
+	if (copy_from_user(&p, up, sizeof(p)))
+		return -EFAULT;
+
+	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
+		pcfg = stdev->mmio_part_cfg;
+	else if (p.partition < stdev->partition_count)
+		pcfg = &stdev->mmio_part_cfg_all[p.partition];
+	else
+		return -EINVAL;
+
+	switch (p.port) {
+	case 0:
+		p.pff = ioread32(&pcfg->usp_pff_inst_id);
+		break;
+	case SWITCHTEC_IOCTL_PFF_VEP:
+		p.pff = ioread32(&pcfg->vep_pff_inst_id);
+		break;
+	default:
+		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
+			return -EINVAL;
+		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
+		break;
+	}
+
+	if (copy_to_user(up, &p, sizeof(p)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	struct switchtec_user *stuser = filp->private_data;
+	struct switchtec_dev *stdev = stuser->stdev;
+	int rc;
+	void __user *argp = (void __user *)arg;
+
+	rc = lock_mutex_and_test_alive(stdev);
+	if (rc)
+		return rc;
+
+	switch (cmd) {
+	case SWITCHTEC_IOCTL_FLASH_INFO:
+		rc = ioctl_flash_info(stdev, argp);
+		break;
+	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
+		rc = ioctl_flash_part_info(stdev, argp);
+		break;
+	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
+		rc = ioctl_event_summary(stdev, stuser, argp);
+		break;
+	case SWITCHTEC_IOCTL_EVENT_CTL:
+		rc = ioctl_event_ctl(stdev, argp);
+		break;
+	case SWITCHTEC_IOCTL_PFF_TO_PORT:
+		rc = ioctl_pff_to_port(stdev, argp);
+		break;
+	case SWITCHTEC_IOCTL_PORT_TO_PFF:
+		rc = ioctl_port_to_pff(stdev, argp);
+		break;
+	default:
+		rc = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(&stdev->mrpc_mutex);
+	return rc;
+}
+
+static const struct file_operations switchtec_fops = {
+	.owner = THIS_MODULE,
+	.open = switchtec_dev_open,
+	.release = switchtec_dev_release,
+	.write = switchtec_dev_write,
+	.read = switchtec_dev_read,
+	.poll = switchtec_dev_poll,
+	.unlocked_ioctl = switchtec_dev_ioctl,
+	.compat_ioctl = switchtec_dev_ioctl,
+};
+
+static void stdev_release(struct device *dev)
+{
+	struct switchtec_dev *stdev = to_stdev(dev);
+
+	kfree(stdev);
+}
+
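+/*
+ * Descriptive comment (not in the original submission): stdev_kill()
+ * detaches the hardware from any remaining users.  It marks the device
+ * dead under the MRPC mutex, completes every queued MRPC request so
+ * blocked writers return, and wakes any poll()ers so they can observe
+ * the dead device and fail out.
+ */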
+static void stdev_kill(struct switchtec_dev *stdev)
+{
+	struct switchtec_user *stuser, *tmpuser;
+
+	pci_clear_master(stdev->pdev);
+
+	cancel_delayed_work_sync(&stdev->mrpc_timeout);
+
+	/* Mark the hardware as unavailable and complete all completions */
+	mutex_lock(&stdev->mrpc_mutex);
+	stdev->alive = false;
+
+	/* Wake up and kill any users waiting on an MRPC request */
+	list_for_each_entry_safe(stuser, tmpuser,
+				 &stdev->mrpc_queue, list) {
+		complete_all(&stuser->comp);
+		list_del_init(&stuser->list);
+		stuser_put(stuser);
+	}
+
+	mutex_unlock(&stdev->mrpc_mutex);
+
+	/* Wake up any users waiting on event_wq */
+	wake_up_interruptible(&stdev->event_wq);
+}
+
+static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
+{
+	struct switchtec_dev *stdev;
+	int minor;
+	struct device *dev;
+	struct cdev *cdev;
+	int rc;
+
+	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
+			     dev_to_node(&pdev->dev));
+	if (!stdev)
+		return ERR_PTR(-ENOMEM);
+
+	stdev->alive = true;
+	stdev->pdev = pdev;
+	INIT_LIST_HEAD(&stdev->mrpc_queue);
+	mutex_init(&stdev->mrpc_mutex);
+	stdev->mrpc_busy = 0;
+	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
+	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
+	init_waitqueue_head(&stdev->event_wq);
+	atomic_set(&stdev->event_cnt, 0);
+
+	dev = &stdev->dev;
+	device_initialize(dev);
+	dev->class = switchtec_class;
+	dev->parent = &pdev->dev;
+	dev->groups = switchtec_device_groups;
+	dev->release = stdev_release;
+
+	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
+			       GFP_KERNEL);
+	if (minor < 0) {
+		rc = minor;
+		goto err_put;
+	}
+
+	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
+	dev_set_name(dev, "switchtec%d", minor);
+
+	cdev = &stdev->cdev;
+	cdev_init(cdev, &switchtec_fops);
+	cdev->owner = THIS_MODULE;
+	cdev->kobj.parent = &dev->kobj;
+
+	return stdev;
+
+err_put:
+	put_device(&stdev->dev);
+	return ERR_PTR(rc);
+}
+
+static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
+{
+	size_t off = event_regs[eid].offset;
+	u32 __iomem *hdr_reg;
+	u32 hdr;
+
+	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
+	hdr = ioread32(hdr_reg);
+
+	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
+		return 0;
+
+	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
+	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
+	iowrite32(hdr, hdr_reg);
+
+	return 1;
+}
+
+static int mask_all_events(struct switchtec_dev *stdev, int eid)
+{
+	int idx;
+	int count = 0;
+
+	if (event_regs[eid].map_reg == part_ev_reg) {
+		for (idx = 0; idx < stdev->partition_count; idx++)
+			count += mask_event(stdev, eid, idx);
+	} else if (event_regs[eid].map_reg == pff_ev_reg) {
+		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+			if (!stdev->pff_local[idx])
+				continue;
+			count += mask_event(stdev, eid, idx);
+		}
+	} else {
+		count += mask_event(stdev, eid, 0);
+	}
+
+	return count;
+}
+
+static irqreturn_t switchtec_event_isr(int irq, void *dev)
+{
+	struct switchtec_dev *stdev = dev;
+	u32 reg;
+	irqreturn_t ret = IRQ_NONE;
+	int eid, event_count = 0;
+
+	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
+	if (reg & SWITCHTEC_EVENT_OCCURRED) {
+		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
+		ret = IRQ_HANDLED;
+		schedule_work(&stdev->mrpc_work);
+		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
+	}
+
+	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
+		event_count += mask_all_events(stdev, eid);
+
+	if (event_count) {
+		atomic_inc(&stdev->event_cnt);
+		wake_up_interruptible(&stdev->event_wq);
+		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
+			event_count);
+		return IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
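+/*
+ * Descriptive comment (not in the original submission): all event
+ * sources share a single interrupt vector.  The ISR above masks each
+ * asserted event (clearing its EN_IRQ bit) and leaves re-arming to
+ * userspace via the EVENT_CTL ioctl.  switchtec_init_isr() below reads
+ * from the hardware which MSI/MSI-X vector events are routed to and
+ * requests only that vector.
+ */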
+static int switchtec_init_isr(struct switchtec_dev *stdev)
+{
+	int nvecs;
+	int event_irq;
+
+	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
+				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
+	if (nvecs < 0)
+		return nvecs;
+
+	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
+	if (event_irq < 0 || event_irq >= nvecs)
+		return -EFAULT;
+
+	event_irq = pci_irq_vector(stdev->pdev, event_irq);
+	if (event_irq < 0)
+		return event_irq;
+
+	return devm_request_irq(&stdev->pdev->dev, event_irq,
+				switchtec_event_isr, 0,
+				KBUILD_MODNAME, stdev);
+}
+
+static void init_pff(struct switchtec_dev *stdev)
+{
+	int i;
+	u32 reg;
+	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
+
+	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
+		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
+		if (reg != MICROSEMI_VENDOR_ID)
+			break;
+	}
+
+	stdev->pff_csr_count = i;
+
+	reg = ioread32(&pcfg->usp_pff_inst_id);
+	if (reg < SWITCHTEC_MAX_PFF_CSR)
+		stdev->pff_local[reg] = 1;
+
+	reg = ioread32(&pcfg->vep_pff_inst_id);
+	if (reg < SWITCHTEC_MAX_PFF_CSR)
+		stdev->pff_local[reg] = 1;
+
+	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
+		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
+		if (reg < SWITCHTEC_MAX_PFF_CSR)
+			stdev->pff_local[reg] = 1;
+	}
+}
+
+static int switchtec_init_pci(struct switchtec_dev *stdev,
+			      struct pci_dev *pdev)
+{
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
+	if (rc)
+		return rc;
+
+	pci_set_master(pdev);
+
+	stdev->mmio = pcim_iomap_table(pdev)[0];
+	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
+	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
+	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
+	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
+	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
+	stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
+	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
+	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
+	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
+	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
+
+	init_pff(stdev);
+
+	pci_set_drvdata(pdev, stdev);
+
+	return 0;
+}
+
+static int switchtec_pci_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *id)
+{
+	struct switchtec_dev *stdev;
+	int rc;
+
+	stdev = stdev_create(pdev);
+	if (IS_ERR(stdev))
+		return PTR_ERR(stdev);
+
+	rc = switchtec_init_pci(stdev, pdev);
+	if (rc)
+		goto err_put;
+
+	rc = switchtec_init_isr(stdev);
+	if (rc) {
+		dev_err(&stdev->dev, "failed to init isr.\n");
+		goto err_put;
+	}
+
+	iowrite32(SWITCHTEC_EVENT_CLEAR |
+		  SWITCHTEC_EVENT_EN_IRQ,
+		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
+
+	rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
+	if (rc)
+		goto err_put;
+
+	rc = device_add(&stdev->dev);
+	if (rc)
+		goto err_devadd;
+
+	dev_info(&stdev->dev, "Management device registered.\n");
+
+	return 0;
+
+err_devadd:
+	cdev_del(&stdev->cdev);
+	stdev_kill(stdev);
+err_put:
+	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+	put_device(&stdev->dev);
+	return rc;
+}
+
+static void switchtec_pci_remove(struct pci_dev *pdev)
+{
+	struct switchtec_dev *stdev = pci_get_drvdata(pdev);
+
+	pci_set_drvdata(pdev, NULL);
+
+	device_del(&stdev->dev);
+	cdev_del(&stdev->cdev);
+	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+	dev_info(&stdev->dev, "unregistered.\n");
+
+	stdev_kill(stdev);
+	put_device(&stdev->dev);
+}
+
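+/*
+ * Descriptive comment (not in the original submission): each supported
+ * part is matched twice, once by the management-endpoint class code and
+ * once by the NTB class code, presumably so the management driver can
+ * bind regardless of which personality the function reports.
+ */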
+#define SWITCHTEC_PCI_DEVICE(device_id) \
+	{ \
+		.vendor = MICROSEMI_VENDOR_ID, \
+		.device = device_id, \
+		.subvendor = PCI_ANY_ID, \
+		.subdevice = PCI_ANY_ID, \
+		.class = MICROSEMI_MGMT_CLASSCODE, \
+		.class_mask = 0xFFFFFFFF, \
+	}, \
+	{ \
+		.vendor = MICROSEMI_VENDOR_ID, \
+		.device = device_id, \
+		.subvendor = PCI_ANY_ID, \
+		.subdevice = PCI_ANY_ID, \
+		.class = MICROSEMI_NTB_CLASSCODE, \
+		.class_mask = 0xFFFFFFFF, \
+	}
+
+static const struct pci_device_id switchtec_pci_tbl[] = {
+	SWITCHTEC_PCI_DEVICE(0x8531), //PFX 24xG3
+	SWITCHTEC_PCI_DEVICE(0x8532), //PFX 32xG3
+	SWITCHTEC_PCI_DEVICE(0x8533), //PFX 48xG3
+	SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3
+	SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3
+	SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3
+	SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3
+	SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
+	SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
+	SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
+
+static struct pci_driver switchtec_pci_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = switchtec_pci_tbl,
+	.probe = switchtec_pci_probe,
+	.remove = switchtec_pci_remove,
+};
+
+static int __init switchtec_init(void)
+{
+	int rc;
+
+	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
+				 "switchtec");
+	if (rc)
+		return rc;
+
+	switchtec_class = class_create(THIS_MODULE, "switchtec");
+	if (IS_ERR(switchtec_class)) {
+		rc = PTR_ERR(switchtec_class);
+		goto err_create_class;
+	}
+
+	rc = pci_register_driver(&switchtec_pci_driver);
+	if (rc)
+		goto err_pci_register;
+
+	pr_info(KBUILD_MODNAME ": loaded.\n");
+
+	return 0;
+
+err_pci_register:
+	class_destroy(switchtec_class);
+
+err_create_class:
+	unregister_chrdev_region(switchtec_devt, max_devices);
+
+	return rc;
+}
+module_init(switchtec_init);
+
+static void __exit switchtec_exit(void)
+{
+	pci_unregister_driver(&switchtec_pci_driver);
+	class_destroy(switchtec_class);
+	unregister_chrdev_region(switchtec_devt, max_devices);
+	ida_destroy(&switchtec_minor_ida);
+
+	pr_info(KBUILD_MODNAME ": unloaded.\n");
+}
+module_exit(switchtec_exit);