From 584ec22759c06cdfc189c03a727f20038526245b Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Tue, 28 Jul 2009 14:32:12 -0700
Subject: ioat: move to drivers/dma/ioat/

When first created the ioat driver was the only inhabitant of
drivers/dma/. Now, it is the only multi-file (more than a .c and a .h)
driver in the directory. Moving it to an ioat/ subdirectory allows the
naming convention to be cleaned up, and allows for future splitting of
the source files by hardware version (v1, v2, and v3).

Signed-off-by: Maciej Sosnowski
Signed-off-by: Dan Williams
---
 drivers/dma/Makefile            |    3 +-
 drivers/dma/ioat.c              |  202 -----
 drivers/dma/ioat/Makefile       |    2 +
 drivers/dma/ioat/dca.c          |  681 +++++++++++++++
 drivers/dma/ioat/dma.c          | 1741 +++++++++++++++++++++++++++++++++++++++
 drivers/dma/ioat/dma.h          |  165 ++++
 drivers/dma/ioat/hw.h           |   70 ++
 drivers/dma/ioat/pci.c          |  202 +++++
 drivers/dma/ioat/registers.h    |  226 +++++
 drivers/dma/ioat_dca.c          |  681 ---------------
 drivers/dma/ioat_dma.c          | 1741 ---------------------------------------
 drivers/dma/ioatdma.h           |  165 ----
 drivers/dma/ioatdma_hw.h        |   70 --
 drivers/dma/ioatdma_registers.h |  226 -----
 14 files changed, 3088 insertions(+), 3087 deletions(-)
 delete mode 100644 drivers/dma/ioat.c
 create mode 100644 drivers/dma/ioat/Makefile
 create mode 100644 drivers/dma/ioat/dca.c
 create mode 100644 drivers/dma/ioat/dma.c
 create mode 100644 drivers/dma/ioat/dma.h
 create mode 100644 drivers/dma/ioat/hw.h
 create mode 100644 drivers/dma/ioat/pci.c
 create mode 100644 drivers/dma/ioat/registers.h
 delete mode 100644 drivers/dma/ioat_dca.c
 delete mode 100644 drivers/dma/ioat_dma.c
 delete mode 100644 drivers/dma/ioatdma.h
 delete mode 100644 drivers/dma/ioatdma_hw.h
 delete mode 100644 drivers/dma/ioatdma_registers.h

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2e5dc96700d2..a1cb2857bba6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,8 +1,7 @@
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
deleted file mode 100644
index 2225bb6ba3d1..000000000000
--- a/drivers/dma/ioat.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */ - -#include -#include -#include -#include -#include -#include "ioatdma.h" -#include "ioatdma_registers.h" -#include "ioatdma_hw.h" - -MODULE_VERSION(IOAT_DMA_VERSION); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Intel Corporation"); - -static struct pci_device_id ioat_pci_tbl[] = { - /* I/OAT v1 platforms */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, - { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, - - /* I/OAT v2 platforms */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, - - /* I/OAT v3 platforms */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, - { 0, } -}; - -struct ioat_device { - struct pci_dev *pdev; - void __iomem *iobase; - struct ioatdma_device *dma; - struct dca_provider *dca; -}; - -static int __devinit ioat_probe(struct pci_dev *pdev, - const struct pci_device_id *id); -static void __devexit ioat_remove(struct pci_dev *pdev); - -static int ioat_dca_enabled = 1; -module_param(ioat_dca_enabled, int, 0644); -MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); - -static struct pci_driver ioat_pci_driver = { - .name = "ioatdma", - .id_table = ioat_pci_tbl, - .probe = ioat_probe, - .remove = __devexit_p(ioat_remove), -}; - -static int __devinit ioat_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - void __iomem *iobase; - struct ioat_device *device; - unsigned long mmio_start, mmio_len; - int err; - - err = pci_enable_device(pdev); - if (err) - goto err_enable_device; - - err = pci_request_regions(pdev, ioat_pci_driver.name); - if (err) - goto err_request_regions; - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) - goto err_set_dma_mask; - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) - goto err_set_dma_mask; - - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - iobase = ioremap(mmio_start, mmio_len); - if (!iobase) { - err = -ENOMEM; - goto err_ioremap; - } - - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) { - err = -ENOMEM; - goto err_kzalloc; - } - device->pdev = pdev; - pci_set_drvdata(pdev, device); - device->iobase = iobase; - - pci_set_master(pdev); - - switch (readb(iobase + IOAT_VER_OFFSET)) { - case IOAT_VER_1_2: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat_dca_init(pdev, iobase); - break; - case IOAT_VER_2_0: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat2_dca_init(pdev, iobase); - break; - case IOAT_VER_3_0: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat3_dca_init(pdev, iobase); - break; - default: - err = -ENODEV; - 
break; - } - if (!device->dma) - err = -ENODEV; - - if (err) - goto err_version; - - return 0; - -err_version: - kfree(device); -err_kzalloc: - iounmap(iobase); -err_ioremap: -err_set_dma_mask: - pci_release_regions(pdev); - pci_disable_device(pdev); -err_request_regions: -err_enable_device: - return err; -} - -static void __devexit ioat_remove(struct pci_dev *pdev) -{ - struct ioat_device *device = pci_get_drvdata(pdev); - - dev_err(&pdev->dev, "Removing dma and dca services\n"); - if (device->dca) { - unregister_dca_provider(device->dca); - free_dca_provider(device->dca); - device->dca = NULL; - } - - if (device->dma) { - ioat_dma_remove(device->dma); - device->dma = NULL; - } - - kfree(device); -} - -static int __init ioat_init_module(void) -{ - return pci_register_driver(&ioat_pci_driver); -} -module_init(ioat_init_module); - -static void __exit ioat_exit_module(void) -{ - pci_unregister_driver(&ioat_pci_driver); -} -module_exit(ioat_exit_module); diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile new file mode 100644 index 000000000000..2ce3d3a4270b --- /dev/null +++ b/drivers/dma/ioat/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o +ioatdma-objs := pci.o dma.o dca.o diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c new file mode 100644 index 000000000000..af1c762dd9d0 --- /dev/null +++ b/drivers/dma/ioat/dca.c @@ -0,0 +1,681 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2007 - 2009 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include +#include +#include +#include +#include + +/* either a kernel change is needed, or we need something like this in kernel */ +#ifndef CONFIG_SMP +#include +#undef cpu_physical_id +#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) +#endif + +#include "dma.h" +#include "registers.h" + +/* + * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 + * contain the bit number of the APIC ID to map into the DCA tag. If the valid + * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 
+ */ +#define DCA_TAG_MAP_VALID 0x80 + +#define DCA3_TAG_MAP_BIT_TO_INV 0x80 +#define DCA3_TAG_MAP_BIT_TO_SEL 0x40 +#define DCA3_TAG_MAP_LITERAL_VAL 0x1 + +#define DCA_TAG_MAP_MASK 0xDF + +/* expected tag map bytes for I/OAT ver.2 */ +#define DCA2_TAG_MAP_BYTE0 0x80 +#define DCA2_TAG_MAP_BYTE1 0x0 +#define DCA2_TAG_MAP_BYTE2 0x81 +#define DCA2_TAG_MAP_BYTE3 0x82 +#define DCA2_TAG_MAP_BYTE4 0x82 + +/* verify if tag map matches expected values */ +static inline int dca2_tag_map_valid(u8 *tag_map) +{ + return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) && + (tag_map[1] == DCA2_TAG_MAP_BYTE1) && + (tag_map[2] == DCA2_TAG_MAP_BYTE2) && + (tag_map[3] == DCA2_TAG_MAP_BYTE3) && + (tag_map[4] == DCA2_TAG_MAP_BYTE4)); +} + +/* + * "Legacy" DCA systems do not implement the DCA register set in the + * I/OAT device. Software needs direct support for their tag mappings. + */ + +#define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x)) +#define IOAT_TAG_MAP_LEN 8 + +static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = { + 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; +static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = { + 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; +static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = { + 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), }; +static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 }; + +/* pack PCI B/D/F into a u16 */ +static inline u16 dcaid_from_pcidev(struct pci_dev *pci) +{ + return (pci->bus->number << 8) | pci->devfn; +} + +static int dca_enabled_in_bios(struct pci_dev *pdev) +{ + /* CPUID level 9 returns DCA configuration */ + /* Bit 0 indicates DCA enabled by the BIOS */ + unsigned long cpuid_level_9; + int res; + + cpuid_level_9 = cpuid_eax(9); + res = test_bit(0, &cpuid_level_9); + if (!res) + dev_err(&pdev->dev, "DCA is disabled in BIOS\n"); + + return res; +} + +static int system_has_dca_enabled(struct pci_dev *pdev) +{ + if (boot_cpu_has(X86_FEATURE_DCA)) + return dca_enabled_in_bios(pdev); + + dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n"); + return 0; +} + +struct ioat_dca_slot { + struct pci_dev *pdev; /* requester device */ + u16 rid; /* requester id, as used by IOAT */ +}; + +#define IOAT_DCA_MAX_REQ 6 +#define IOAT3_DCA_MAX_REQ 2 + +struct ioat_dca_priv { + void __iomem *iobase; + void __iomem *dca_base; + int max_requesters; + int requester_count; + u8 tag_map[IOAT_TAG_MAP_LEN]; + struct ioat_dca_slot req_slots[0]; +}; + +/* 5000 series chipset DCA Port Requester ID Table Entry Format + * [15:8] PCI-Express Bus Number + * [7:3] PCI-Express Device Number + * [2:0] PCI-Express Function Number + * + * 5000 series chipset DCA control register format + * [7:1] Reserved (0) + * [0] Ignore Function Number + */ + +static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + u16 id; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + id = dcaid_from_pcidev(pdev); + + if (ioatdca->requester_count == ioatdca->max_requesters) + return -ENODEV; + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == NULL) { + /* found an empty slot */ + ioatdca->requester_count++; + ioatdca->req_slots[i].pdev = pdev; + ioatdca->req_slots[i].rid = id; + writew(id, ioatdca->dca_base + (i * 4)); + /* make sure the ignore function bit is off */ + writeb(0, ioatdca->dca_base + (i * 4) + 2); + return i; + } + } + /* Error, ioatdma->requester_count 
is out of whack */ + return -EFAULT; +} + +static int ioat_dca_remove_requester(struct dca_provider *dca, + struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == pdev) { + writew(0, ioatdca->dca_base + (i * 4)); + ioatdca->req_slots[i].pdev = NULL; + ioatdca->req_slots[i].rid = 0; + ioatdca->requester_count--; + return i; + } + } + return -ENODEV; +} + +static u8 ioat_dca_get_tag(struct dca_provider *dca, + struct device *dev, + int cpu) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + int i, apic_id, bit, value; + u8 entry, tag; + + tag = 0; + apic_id = cpu_physical_id(cpu); + + for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { + entry = ioatdca->tag_map[i]; + if (entry & DCA_TAG_MAP_VALID) { + bit = entry & ~DCA_TAG_MAP_VALID; + value = (apic_id & (1 << bit)) ? 1 : 0; + } else { + value = entry ? 1 : 0; + } + tag |= (value << i); + } + return tag; +} + +static int ioat_dca_dev_managed(struct dca_provider *dca, + struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + + pdev = to_pci_dev(dev); + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == pdev) + return 1; + } + return 0; +} + +static struct dca_ops ioat_dca_ops = { + .add_requester = ioat_dca_add_requester, + .remove_requester = ioat_dca_remove_requester, + .get_tag = ioat_dca_get_tag, + .dev_managed = ioat_dca_dev_managed, +}; + + +struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) +{ + struct dca_provider *dca; + struct ioat_dca_priv *ioatdca; + u8 *tag_map = NULL; + int i; + int err; + u8 version; + u8 max_requesters; + + if (!system_has_dca_enabled(pdev)) + return NULL; + + /* I/OAT v1 systems must have a known tag_map to support DCA */ + switch (pdev->vendor) { + case PCI_VENDOR_ID_INTEL: + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT: + tag_map = ioat_tag_map_BNB; + break; + case PCI_DEVICE_ID_INTEL_IOAT_CNB: + tag_map = ioat_tag_map_CNB; + break; + case PCI_DEVICE_ID_INTEL_IOAT_SCNB: + tag_map = ioat_tag_map_SCNB; + break; + } + break; + case PCI_VENDOR_ID_UNISYS: + switch (pdev->device) { + case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR: + tag_map = ioat_tag_map_UNISYS; + break; + } + break; + } + if (tag_map == NULL) + return NULL; + + version = readb(iobase + IOAT_VER_OFFSET); + if (version == IOAT_VER_3_0) + max_requesters = IOAT3_DCA_MAX_REQ; + else + max_requesters = IOAT_DCA_MAX_REQ; + + dca = alloc_dca_provider(&ioat_dca_ops, + sizeof(*ioatdca) + + (sizeof(struct ioat_dca_slot) * max_requesters)); + if (!dca) + return NULL; + + ioatdca = dca_priv(dca); + ioatdca->max_requesters = max_requesters; + ioatdca->dca_base = iobase + 0x54; + + /* copy over the APIC ID to DCA tag mapping */ + for (i = 0; i < IOAT_TAG_MAP_LEN; i++) + ioatdca->tag_map[i] = tag_map[i]; + + err = register_dca_provider(dca, &pdev->dev); + if (err) { + free_dca_provider(dca); + return NULL; + } + + return dca; +} + + +static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + u16 id; + u16 global_req_table; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + id = 
dcaid_from_pcidev(pdev); + + if (ioatdca->requester_count == ioatdca->max_requesters) + return -ENODEV; + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == NULL) { + /* found an empty slot */ + ioatdca->requester_count++; + ioatdca->req_slots[i].pdev = pdev; + ioatdca->req_slots[i].rid = id; + global_req_table = + readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); + writel(id | IOAT_DCA_GREQID_VALID, + ioatdca->iobase + global_req_table + (i * 4)); + return i; + } + } + /* Error, ioatdma->requester_count is out of whack */ + return -EFAULT; +} + +static int ioat2_dca_remove_requester(struct dca_provider *dca, + struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + u16 global_req_table; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == pdev) { + global_req_table = + readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); + writel(0, ioatdca->iobase + global_req_table + (i * 4)); + ioatdca->req_slots[i].pdev = NULL; + ioatdca->req_slots[i].rid = 0; + ioatdca->requester_count--; + return i; + } + } + return -ENODEV; +} + +static u8 ioat2_dca_get_tag(struct dca_provider *dca, + struct device *dev, + int cpu) +{ + u8 tag; + + tag = ioat_dca_get_tag(dca, dev, cpu); + tag = (~tag) & 0x1F; + return tag; +} + +static struct dca_ops ioat2_dca_ops = { + .add_requester = ioat2_dca_add_requester, + .remove_requester = ioat2_dca_remove_requester, + .get_tag = ioat2_dca_get_tag, + .dev_managed = ioat_dca_dev_managed, +}; + +static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) +{ + int slots = 0; + u32 req; + u16 global_req_table; + + global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET); + if (global_req_table == 0) + return 0; + do { + req = readl(iobase + global_req_table + (slots * sizeof(u32))); + slots++; + } while ((req & IOAT_DCA_GREQID_LASTID) == 0); + + return slots; +} + +struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) +{ + struct dca_provider *dca; + struct ioat_dca_priv *ioatdca; + int slots; + int i; + int err; + u32 tag_map; + u16 dca_offset; + u16 csi_fsb_control; + u16 pcie_control; + u8 bit; + + if (!system_has_dca_enabled(pdev)) + return NULL; + + dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); + if (dca_offset == 0) + return NULL; + + slots = ioat2_dca_count_dca_slots(iobase, dca_offset); + if (slots == 0) + return NULL; + + dca = alloc_dca_provider(&ioat2_dca_ops, + sizeof(*ioatdca) + + (sizeof(struct ioat_dca_slot) * slots)); + if (!dca) + return NULL; + + ioatdca = dca_priv(dca); + ioatdca->iobase = iobase; + ioatdca->dca_base = iobase + dca_offset; + ioatdca->max_requesters = slots; + + /* some bios might not know to turn these on */ + csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); + if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) { + csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH; + writew(csi_fsb_control, + ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); + } + pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); + if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) { + pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR; + writew(pcie_control, + ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); + } + + + /* TODO version, compatibility and configuration checks */ + + /* copy out the APIC to DCA tag map */ + 
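/*
 * A minimal standalone sketch (hypothetical register value and APIC ID) of
 * the tag-map decode performed just below and of the ioat2_dca_get_tag()
 * lookup that consumes it:
 *
 *	u32 reg = 0x00043210;	// hypothetical BIOS-programmed APICID_TAG_MAP
 *	int apic_id = 0x12;	// hypothetical APIC ID of the requesting CPU
 *	u8 map[5], tag = 0;
 *	int i;
 *
 *	// each low nibble selects the APIC ID bit that drives one tag bit
 *	for (i = 0; i < 5; i++) {
 *		u8 nib = (reg >> (4 * i)) & 0xf;
 *		map[i] = nib < 8 ? (nib | DCA_TAG_MAP_VALID) : 0;
 *	}
 *
 *	// build the tag from the selected APIC ID bits, then take the
 *	// 5-bit complement as ioat2_dca_get_tag() does
 *	for (i = 0; i < 5; i++)
 *		if (map[i] & DCA_TAG_MAP_VALID)
 *			tag |= !!(apic_id & (1 << (map[i] & ~DCA_TAG_MAP_VALID))) << i;
 *	tag = (~tag) & 0x1f;
 */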
tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET); + for (i = 0; i < 5; i++) { + bit = (tag_map >> (4 * i)) & 0x0f; + if (bit < 8) + ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID; + else + ioatdca->tag_map[i] = 0; + } + + if (!dca2_tag_map_valid(ioatdca->tag_map)) { + dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, " + "disabling DCA\n"); + free_dca_provider(dca); + return NULL; + } + + err = register_dca_provider(dca, &pdev->dev); + if (err) { + free_dca_provider(dca); + return NULL; + } + + return dca; +} + +static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + u16 id; + u16 global_req_table; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + id = dcaid_from_pcidev(pdev); + + if (ioatdca->requester_count == ioatdca->max_requesters) + return -ENODEV; + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == NULL) { + /* found an empty slot */ + ioatdca->requester_count++; + ioatdca->req_slots[i].pdev = pdev; + ioatdca->req_slots[i].rid = id; + global_req_table = + readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); + writel(id | IOAT_DCA_GREQID_VALID, + ioatdca->iobase + global_req_table + (i * 4)); + return i; + } + } + /* Error, ioatdma->requester_count is out of whack */ + return -EFAULT; +} + +static int ioat3_dca_remove_requester(struct dca_provider *dca, + struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + u16 global_req_table; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == pdev) { + global_req_table = + readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); + writel(0, ioatdca->iobase + global_req_table + (i * 4)); + ioatdca->req_slots[i].pdev = NULL; + ioatdca->req_slots[i].rid = 0; + ioatdca->requester_count--; + return i; + } + } + return -ENODEV; +} + +static u8 ioat3_dca_get_tag(struct dca_provider *dca, + struct device *dev, + int cpu) +{ + u8 tag; + + struct ioat_dca_priv *ioatdca = dca_priv(dca); + int i, apic_id, bit, value; + u8 entry; + + tag = 0; + apic_id = cpu_physical_id(cpu); + + for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { + entry = ioatdca->tag_map[i]; + if (entry & DCA3_TAG_MAP_BIT_TO_SEL) { + bit = entry & + ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV); + value = (apic_id & (1 << bit)) ? 1 : 0; + } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) { + bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV; + value = (apic_id & (1 << bit)) ? 0 : 1; + } else { + value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 
1 : 0; + } + tag |= (value << i); + } + + return tag; +} + +static struct dca_ops ioat3_dca_ops = { + .add_requester = ioat3_dca_add_requester, + .remove_requester = ioat3_dca_remove_requester, + .get_tag = ioat3_dca_get_tag, + .dev_managed = ioat_dca_dev_managed, +}; + +static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) +{ + int slots = 0; + u32 req; + u16 global_req_table; + + global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET); + if (global_req_table == 0) + return 0; + + do { + req = readl(iobase + global_req_table + (slots * sizeof(u32))); + slots++; + } while ((req & IOAT_DCA_GREQID_LASTID) == 0); + + return slots; +} + +struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) +{ + struct dca_provider *dca; + struct ioat_dca_priv *ioatdca; + int slots; + int i; + int err; + u16 dca_offset; + u16 csi_fsb_control; + u16 pcie_control; + u8 bit; + + union { + u64 full; + struct { + u32 low; + u32 high; + }; + } tag_map; + + if (!system_has_dca_enabled(pdev)) + return NULL; + + dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); + if (dca_offset == 0) + return NULL; + + slots = ioat3_dca_count_dca_slots(iobase, dca_offset); + if (slots == 0) + return NULL; + + dca = alloc_dca_provider(&ioat3_dca_ops, + sizeof(*ioatdca) + + (sizeof(struct ioat_dca_slot) * slots)); + if (!dca) + return NULL; + + ioatdca = dca_priv(dca); + ioatdca->iobase = iobase; + ioatdca->dca_base = iobase + dca_offset; + ioatdca->max_requesters = slots; + + /* some bios might not know to turn these on */ + csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); + if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) { + csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH; + writew(csi_fsb_control, + ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); + } + pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); + if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) { + pcie_control |= IOAT3_PCI_CONTROL_MEMWR; + writew(pcie_control, + ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); + } + + + /* TODO version, compatibility and configuration checks */ + + /* copy out the APIC to DCA tag map */ + tag_map.low = + readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW); + tag_map.high = + readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH); + for (i = 0; i < 8; i++) { + bit = tag_map.full >> (8 * i); + ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; + } + + err = register_dca_provider(dca, &pdev->dev); + if (err) { + free_dca_provider(dca); + return NULL; + } + + return dca; +} diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c new file mode 100644 index 000000000000..648797e83295 --- /dev/null +++ b/drivers/dma/ioat/dma.c @@ -0,0 +1,1741 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2009 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +/* + * This driver supports an Intel I/OAT DMA engine, which does asynchronous + * copy operations. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dma.h" +#include "registers.h" +#include "hw.h" + +#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) +#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) +#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) +#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) + +#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) +static int ioat_pending_level = 4; +module_param(ioat_pending_level, int, 0644); +MODULE_PARM_DESC(ioat_pending_level, + "high-water mark for pushing ioat descriptors (default: 4)"); + +#define RESET_DELAY msecs_to_jiffies(100) +#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) +static void ioat_dma_chan_reset_part2(struct work_struct *work); +static void ioat_dma_chan_watchdog(struct work_struct *work); + +/* + * workaround for IOAT ver.3.0 null descriptor issue + * (channel returns error when size is 0) + */ +#define NULL_DESC_BUFFER_SIZE 1 + +/* internal functions */ +static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); +static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); + +static struct ioat_desc_sw * +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); +static struct ioat_desc_sw * +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); + +static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( + struct ioatdma_device *device, + int index) +{ + return device->idx[index]; +} + +/** + * ioat_dma_do_interrupt - handler used for single vector interrupt mode + * @irq: interrupt id + * @data: interrupt data + */ +static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) +{ + struct ioatdma_device *instance = data; + struct ioat_dma_chan *ioat_chan; + unsigned long attnstatus; + int bit; + u8 intrctrl; + + intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); + + if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) + return IRQ_NONE; + + if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { + writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); + return IRQ_NONE; + } + + attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); + for_each_bit(bit, &attnstatus, BITS_PER_LONG) { + ioat_chan = ioat_lookup_chan_by_index(instance, bit); + tasklet_schedule(&ioat_chan->cleanup_task); + } + + writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); + return IRQ_HANDLED; +} + +/** + * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode + * @irq: interrupt id + * @data: interrupt data + */ +static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) +{ + struct ioat_dma_chan *ioat_chan = data; + + tasklet_schedule(&ioat_chan->cleanup_task); + + return IRQ_HANDLED; +} + +static void ioat_dma_cleanup_tasklet(unsigned long data); + +/** + * ioat_dma_enumerate_channels - find and initialize the device's channels + * @device: the device to be enumerated + */ +static int ioat_dma_enumerate_channels(struct ioatdma_device *device) +{ + u8 xfercap_scale; + u32 xfercap; + int i; + struct ioat_dma_chan *ioat_chan; + + /* + * IOAT ver.3 workarounds + */ + if (device->version == IOAT_VER_3_0) { + u32 chan_err_mask; + u16 dev_id; + u32 
dmauncerrsts; + + /* + * Write CHANERRMSK_INT with 3E07h to mask out the errors + * that can cause stability issues for IOAT ver.3 + */ + chan_err_mask = 0x3E07; + pci_write_config_dword(device->pdev, + IOAT_PCI_CHANERRMASK_INT_OFFSET, + chan_err_mask); + + /* + * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(device->pdev, + IOAT_PCI_DEVICE_ID_OFFSET, + &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { + dmauncerrsts = 0x10; + pci_write_config_dword(device->pdev, + IOAT_PCI_DMAUNCERRSTS_OFFSET, + dmauncerrsts); + } + } + + device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); + xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); + +#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL + if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) { + device->common.chancnt--; + } +#endif + for (i = 0; i < device->common.chancnt; i++) { + ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); + if (!ioat_chan) { + device->common.chancnt = i; + break; + } + + ioat_chan->device = device; + ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); + ioat_chan->xfercap = xfercap; + ioat_chan->desccount = 0; + INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); + if (ioat_chan->device->version == IOAT_VER_2_0) + writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | + IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + else if (ioat_chan->device->version == IOAT_VER_3_0) + writel(IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + spin_lock_init(&ioat_chan->cleanup_lock); + spin_lock_init(&ioat_chan->desc_lock); + INIT_LIST_HEAD(&ioat_chan->free_desc); + INIT_LIST_HEAD(&ioat_chan->used_desc); + /* This should be made common somewhere in dmaengine.c */ + ioat_chan->common.device = &device->common; + list_add_tail(&ioat_chan->common.device_node, + &device->common.channels); + device->idx[i] = ioat_chan; + tasklet_init(&ioat_chan->cleanup_task, + ioat_dma_cleanup_tasklet, + (unsigned long) ioat_chan); + tasklet_disable(&ioat_chan->cleanup_task); + } + return device->common.chancnt; +} + +/** + * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended + * descriptors to hw + * @chan: DMA channel handle + */ +static inline void __ioat1_dma_memcpy_issue_pending( + struct ioat_dma_chan *ioat_chan) +{ + ioat_chan->pending = 0; + writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); +} + +static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + + if (ioat_chan->pending > 0) { + spin_lock_bh(&ioat_chan->desc_lock); + __ioat1_dma_memcpy_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); + } +} + +static inline void __ioat2_dma_memcpy_issue_pending( + struct ioat_dma_chan *ioat_chan) +{ + ioat_chan->pending = 0; + writew(ioat_chan->dmacount, + ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); +} + +static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + + if (ioat_chan->pending > 0) { + spin_lock_bh(&ioat_chan->desc_lock); + __ioat2_dma_memcpy_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); + } +} + + +/** + * ioat_dma_chan_reset_part2 - reinit the channel after a reset + */ +static void ioat_dma_chan_reset_part2(struct work_struct *work) +{ + struct ioat_dma_chan *ioat_chan = + container_of(work, 
struct ioat_dma_chan, work.work); + struct ioat_desc_sw *desc; + + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->desc_lock); + + ioat_chan->completion_virt->low = 0; + ioat_chan->completion_virt->high = 0; + ioat_chan->pending = 0; + + /* + * count the descriptors waiting, and be sure to do it + * right for both the CB1 line and the CB2 ring + */ + ioat_chan->dmacount = 0; + if (ioat_chan->used_desc.prev) { + desc = to_ioat_desc(ioat_chan->used_desc.prev); + do { + ioat_chan->dmacount++; + desc = to_ioat_desc(desc->node.next); + } while (&desc->node != ioat_chan->used_desc.next); + } + + /* + * write the new starting descriptor address + * this puts channel engine into ARMED state + */ + desc = to_ioat_desc(ioat_chan->used_desc.prev); + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->async_tx.phys) >> 32, + ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + + writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + break; + case IOAT_VER_2_0: + writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->async_tx.phys) >> 32, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + + /* tell the engine to go with what's left to be done */ + writew(ioat_chan->dmacount, + ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + + break; + } + dev_err(&ioat_chan->device->pdev->dev, + "chan%d reset - %d descs waiting, %d total desc\n", + chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + + spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); +} + +/** + * ioat_dma_reset_channel - restart a channel + * @ioat_chan: IOAT DMA channel handle + */ +static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) +{ + u32 chansts, chanerr; + + if (!ioat_chan->used_desc.prev) + return; + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + chansts = (ioat_chan->completion_virt->low + & IOAT_CHANSTS_DMA_TRANSFER_STATUS); + if (chanerr) { + dev_err(&ioat_chan->device->pdev->dev, + "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", + chan_num(ioat_chan), chansts, chanerr); + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + } + + /* + * whack it upside the head with a reset + * and wait for things to settle out. + * force the pending count to a really big negative + * to make sure no one forces an issue_pending + * while we're waiting. 
+ */ + + spin_lock_bh(&ioat_chan->desc_lock); + ioat_chan->pending = INT_MIN; + writeb(IOAT_CHANCMD_RESET, + ioat_chan->reg_base + + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + spin_unlock_bh(&ioat_chan->desc_lock); + + /* schedule the 2nd half instead of sleeping a long time */ + schedule_delayed_work(&ioat_chan->work, RESET_DELAY); +} + +/** + * ioat_dma_chan_watchdog - watch for stuck channels + */ +static void ioat_dma_chan_watchdog(struct work_struct *work) +{ + struct ioatdma_device *device = + container_of(work, struct ioatdma_device, work.work); + struct ioat_dma_chan *ioat_chan; + int i; + + union { + u64 full; + struct { + u32 low; + u32 high; + }; + } completion_hw; + unsigned long compl_desc_addr_hw; + + for (i = 0; i < device->common.chancnt; i++) { + ioat_chan = ioat_lookup_chan_by_index(device, i); + + if (ioat_chan->device->version == IOAT_VER_1_2 + /* have we started processing anything yet */ + && ioat_chan->last_completion + /* have we completed any since last watchdog cycle? */ + && (ioat_chan->last_completion == + ioat_chan->watchdog_completion) + /* has TCP stuck on one cookie since last watchdog? */ + && (ioat_chan->watchdog_tcp_cookie == + ioat_chan->watchdog_last_tcp_cookie) + && (ioat_chan->watchdog_tcp_cookie != + ioat_chan->completed_cookie) + /* is there something in the chain to be processed? */ + /* CB1 chain always has at least the last one processed */ + && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) + && ioat_chan->pending == 0) { + + /* + * check CHANSTS register for completed + * descriptor address. + * if it is different than completion writeback, + * it is not zero + * and it has changed since the last watchdog + * we can assume that channel + * is still working correctly + * and the problem is in completion writeback. 
+ * update completion writeback + * with actual CHANSTS value + * else + * try resetting the channel + */ + + completion_hw.low = readl(ioat_chan->reg_base + + IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); + completion_hw.high = readl(ioat_chan->reg_base + + IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); +#if (BITS_PER_LONG == 64) + compl_desc_addr_hw = + completion_hw.full + & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; +#else + compl_desc_addr_hw = + completion_hw.low & IOAT_LOW_COMPLETION_MASK; +#endif + + if ((compl_desc_addr_hw != 0) + && (compl_desc_addr_hw != ioat_chan->watchdog_completion) + && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { + ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; + ioat_chan->completion_virt->low = completion_hw.low; + ioat_chan->completion_virt->high = completion_hw.high; + } else { + ioat_dma_reset_channel(ioat_chan); + ioat_chan->watchdog_completion = 0; + ioat_chan->last_compl_desc_addr_hw = 0; + } + + /* + * for version 2.0 if there are descriptors yet to be processed + * and the last completed hasn't changed since the last watchdog + * if they haven't hit the pending level + * issue the pending to push them through + * else + * try resetting the channel + */ + } else if (ioat_chan->device->version == IOAT_VER_2_0 + && ioat_chan->used_desc.prev + && ioat_chan->last_completion + && ioat_chan->last_completion == ioat_chan->watchdog_completion) { + + if (ioat_chan->pending < ioat_pending_level) + ioat2_dma_memcpy_issue_pending(&ioat_chan->common); + else { + ioat_dma_reset_channel(ioat_chan); + ioat_chan->watchdog_completion = 0; + } + } else { + ioat_chan->last_compl_desc_addr_hw = 0; + ioat_chan->watchdog_completion + = ioat_chan->last_completion; + } + + ioat_chan->watchdog_last_tcp_cookie = + ioat_chan->watchdog_tcp_cookie; + } + + schedule_delayed_work(&device->work, WATCHDOG_DELAY); +} + +static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct ioat_desc_sw *first = tx_to_ioat_desc(tx); + struct ioat_desc_sw *prev, *new; + struct ioat_dma_descriptor *hw; + dma_cookie_t cookie; + LIST_HEAD(new_chain); + u32 copy; + size_t len; + dma_addr_t src, dst; + unsigned long orig_flags; + unsigned int desc_count = 0; + + /* src and dest and len are stored in the initial descriptor */ + len = first->len; + src = first->src; + dst = first->dst; + orig_flags = first->async_tx.flags; + new = first; + + spin_lock_bh(&ioat_chan->desc_lock); + prev = to_ioat_desc(ioat_chan->used_desc.prev); + prefetch(prev->hw); + do { + copy = min_t(size_t, len, ioat_chan->xfercap); + + async_tx_ack(&new->async_tx); + + hw = new->hw; + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dst; + hw->next = 0; + + /* chain together the physical address list for the HW */ + wmb(); + prev->hw->next = (u64) new->async_tx.phys; + + len -= copy; + dst += copy; + src += copy; + + list_add_tail(&new->node, &new_chain); + desc_count++; + prev = new; + } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); + + if (!new) { + dev_err(&ioat_chan->device->pdev->dev, + "tx submit failed\n"); + spin_unlock_bh(&ioat_chan->desc_lock); + return -ENOMEM; + } + + hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + if (first->async_tx.callback) { + hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; + if (first != new) { + /* move callback into to last desc */ + new->async_tx.callback = first->async_tx.callback; + new->async_tx.callback_param + = 
first->async_tx.callback_param; + first->async_tx.callback = NULL; + first->async_tx.callback_param = NULL; + } + } + + new->tx_cnt = desc_count; + new->async_tx.flags = orig_flags; /* client is in control of this ack */ + + /* store the original values for use in later cleanup */ + if (new != first) { + new->src = first->src; + new->dst = first->dst; + new->len = first->len; + } + + /* cookie incr and addition to used_list must be atomic */ + cookie = ioat_chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + ioat_chan->common.cookie = new->async_tx.cookie = cookie; + + /* write address into NextDescriptor field of last desc in chain */ + to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = + first->async_tx.phys; + list_splice_tail(&new_chain, &ioat_chan->used_desc); + + ioat_chan->dmacount += desc_count; + ioat_chan->pending += desc_count; + if (ioat_chan->pending >= ioat_pending_level) + __ioat1_dma_memcpy_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); + + return cookie; +} + +static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct ioat_desc_sw *first = tx_to_ioat_desc(tx); + struct ioat_desc_sw *new; + struct ioat_dma_descriptor *hw; + dma_cookie_t cookie; + u32 copy; + size_t len; + dma_addr_t src, dst; + unsigned long orig_flags; + unsigned int desc_count = 0; + + /* src and dest and len are stored in the initial descriptor */ + len = first->len; + src = first->src; + dst = first->dst; + orig_flags = first->async_tx.flags; + new = first; + + /* + * ioat_chan->desc_lock is still in force in version 2 path + * it gets unlocked at end of this function + */ + do { + copy = min_t(size_t, len, ioat_chan->xfercap); + + async_tx_ack(&new->async_tx); + + hw = new->hw; + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dst; + + len -= copy; + dst += copy; + src += copy; + desc_count++; + } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); + + if (!new) { + dev_err(&ioat_chan->device->pdev->dev, + "tx submit failed\n"); + spin_unlock_bh(&ioat_chan->desc_lock); + return -ENOMEM; + } + + hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + if (first->async_tx.callback) { + hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; + if (first != new) { + /* move callback into to last desc */ + new->async_tx.callback = first->async_tx.callback; + new->async_tx.callback_param + = first->async_tx.callback_param; + first->async_tx.callback = NULL; + first->async_tx.callback_param = NULL; + } + } + + new->tx_cnt = desc_count; + new->async_tx.flags = orig_flags; /* client is in control of this ack */ + + /* store the original values for use in later cleanup */ + if (new != first) { + new->src = first->src; + new->dst = first->dst; + new->len = first->len; + } + + /* cookie incr and addition to used_list must be atomic */ + cookie = ioat_chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + ioat_chan->common.cookie = new->async_tx.cookie = cookie; + + ioat_chan->dmacount += desc_count; + ioat_chan->pending += desc_count; + if (ioat_chan->pending >= ioat_pending_level) + __ioat2_dma_memcpy_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); + + return cookie; +} + +/** + * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair + * @ioat_chan: the channel supplying the memory pool for the descriptors + * @flags: allocation flags + */ +static struct ioat_desc_sw *ioat_dma_alloc_descriptor( + struct ioat_dma_chan *ioat_chan, + 
gfp_t flags) +{ + struct ioat_dma_descriptor *desc; + struct ioat_desc_sw *desc_sw; + struct ioatdma_device *ioatdma_device; + dma_addr_t phys; + + ioatdma_device = to_ioatdma_device(ioat_chan->common.device); + desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); + if (unlikely(!desc)) + return NULL; + + desc_sw = kzalloc(sizeof(*desc_sw), flags); + if (unlikely(!desc_sw)) { + pci_pool_free(ioatdma_device->dma_pool, desc, phys); + return NULL; + } + + memset(desc, 0, sizeof(*desc)); + dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + desc_sw->async_tx.tx_submit = ioat1_tx_submit; + break; + case IOAT_VER_2_0: + case IOAT_VER_3_0: + desc_sw->async_tx.tx_submit = ioat2_tx_submit; + break; + } + + desc_sw->hw = desc; + desc_sw->async_tx.phys = phys; + + return desc_sw; +} + +static int ioat_initial_desc_count = 256; +module_param(ioat_initial_desc_count, int, 0644); +MODULE_PARM_DESC(ioat_initial_desc_count, + "initial descriptors per channel (default: 256)"); + +/** + * ioat2_dma_massage_chan_desc - link the descriptors into a circle + * @ioat_chan: the channel to be massaged + */ +static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) +{ + struct ioat_desc_sw *desc, *_desc; + + /* setup used_desc */ + ioat_chan->used_desc.next = ioat_chan->free_desc.next; + ioat_chan->used_desc.prev = NULL; + + /* pull free_desc out of the circle so that every node is a hw + * descriptor, but leave it pointing to the list + */ + ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; + ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; + + /* circle link the hw descriptors */ + desc = to_ioat_desc(ioat_chan->free_desc.next); + desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { + desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + } +} + +/** + * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors + * @chan: the channel to be filled out + */ +static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_desc_sw *desc; + u16 chanctrl; + u32 chanerr; + int i; + LIST_HEAD(tmp_list); + + /* have we already been set up? 
*/ + if (!list_empty(&ioat_chan->free_desc)) + return ioat_chan->desccount; + + /* Setup register to interrupt and write completion status on error */ + chanctrl = IOAT_CHANCTRL_ERR_INT_EN | + IOAT_CHANCTRL_ANY_ERR_ABORT_EN | + IOAT_CHANCTRL_ERR_COMPLETION_EN; + writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + if (chanerr) { + dev_err(&ioat_chan->device->pdev->dev, + "CHANERR = %x, clearing\n", chanerr); + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + } + + /* Allocate descriptors */ + for (i = 0; i < ioat_initial_desc_count; i++) { + desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); + if (!desc) { + dev_err(&ioat_chan->device->pdev->dev, + "Only %d initial descriptors\n", i); + break; + } + list_add_tail(&desc->node, &tmp_list); + } + spin_lock_bh(&ioat_chan->desc_lock); + ioat_chan->desccount = i; + list_splice(&tmp_list, &ioat_chan->free_desc); + if (ioat_chan->device->version != IOAT_VER_1_2) + ioat2_dma_massage_chan_desc(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); + + /* allocate a completion writeback area */ + /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ + ioat_chan->completion_virt = + pci_pool_alloc(ioat_chan->device->completion_pool, + GFP_KERNEL, + &ioat_chan->completion_addr); + memset(ioat_chan->completion_virt, 0, + sizeof(*ioat_chan->completion_virt)); + writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64) ioat_chan->completion_addr) >> 32, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); + + tasklet_enable(&ioat_chan->cleanup_task); + ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ + return ioat_chan->desccount; +} + +/** + * ioat_dma_free_chan_resources - release all the descriptors + * @chan: the channel to be cleaned + */ +static void ioat_dma_free_chan_resources(struct dma_chan *chan) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); + struct ioat_desc_sw *desc, *_desc; + int in_use_descs = 0; + + /* Before freeing channel resources first check + * if they have been previously allocated for this channel. + */ + if (ioat_chan->desccount == 0) + return; + + tasklet_disable(&ioat_chan->cleanup_task); + ioat_dma_memcpy_cleanup(ioat_chan); + + /* Delay 100ms after reset to allow internal DMA logic to quiesce + * before removing DMA descriptor resources. 
+ */ + writeb(IOAT_CHANCMD_RESET, + ioat_chan->reg_base + + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + mdelay(100); + + spin_lock_bh(&ioat_chan->desc_lock); + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + list_for_each_entry_safe(desc, _desc, + &ioat_chan->used_desc, node) { + in_use_descs++; + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->async_tx.phys); + kfree(desc); + } + list_for_each_entry_safe(desc, _desc, + &ioat_chan->free_desc, node) { + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->async_tx.phys); + kfree(desc); + } + break; + case IOAT_VER_2_0: + case IOAT_VER_3_0: + list_for_each_entry_safe(desc, _desc, + ioat_chan->free_desc.next, node) { + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->async_tx.phys); + kfree(desc); + } + desc = to_ioat_desc(ioat_chan->free_desc.next); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->async_tx.phys); + kfree(desc); + INIT_LIST_HEAD(&ioat_chan->free_desc); + INIT_LIST_HEAD(&ioat_chan->used_desc); + break; + } + spin_unlock_bh(&ioat_chan->desc_lock); + + pci_pool_free(ioatdma_device->completion_pool, + ioat_chan->completion_virt, + ioat_chan->completion_addr); + + /* one is ok since we left it on there on purpose */ + if (in_use_descs > 1) + dev_err(&ioat_chan->device->pdev->dev, + "Freeing %d in use descriptors!\n", + in_use_descs - 1); + + ioat_chan->last_completion = ioat_chan->completion_addr = 0; + ioat_chan->pending = 0; + ioat_chan->dmacount = 0; + ioat_chan->desccount = 0; + ioat_chan->watchdog_completion = 0; + ioat_chan->last_compl_desc_addr_hw = 0; + ioat_chan->watchdog_tcp_cookie = + ioat_chan->watchdog_last_tcp_cookie = 0; +} + +/** + * ioat_dma_get_next_descriptor - return the next available descriptor + * @ioat_chan: IOAT DMA channel handle + * + * Gets the next descriptor from the chain, and must be called with the + * channel's desc_lock held. Allocates more descriptors if the channel + * has run out. 
+ */ +static struct ioat_desc_sw * +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +{ + struct ioat_desc_sw *new; + + if (!list_empty(&ioat_chan->free_desc)) { + new = to_ioat_desc(ioat_chan->free_desc.next); + list_del(&new->node); + } else { + /* try to get another desc */ + new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); + if (!new) { + dev_err(&ioat_chan->device->pdev->dev, + "alloc failed\n"); + return NULL; + } + } + + prefetch(new->hw); + return new; +} + +static struct ioat_desc_sw * +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +{ + struct ioat_desc_sw *new; + + /* + * used.prev points to where to start processing + * used.next points to next free descriptor + * if used.prev == NULL, there are none waiting to be processed + * if used.next == used.prev.prev, there is only one free descriptor, + * and we need to use it to as a noop descriptor before + * linking in a new set of descriptors, since the device + * has probably already read the pointer to it + */ + if (ioat_chan->used_desc.prev && + ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { + + struct ioat_desc_sw *desc; + struct ioat_desc_sw *noop_desc; + int i; + + /* set up the noop descriptor */ + noop_desc = to_ioat_desc(ioat_chan->used_desc.next); + /* set size to non-zero value (channel returns error when size is 0) */ + noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; + noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; + noop_desc->hw->src_addr = 0; + noop_desc->hw->dst_addr = 0; + + ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; + ioat_chan->pending++; + ioat_chan->dmacount++; + + /* try to get a few more descriptors */ + for (i = 16; i; i--) { + desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); + if (!desc) { + dev_err(&ioat_chan->device->pdev->dev, + "alloc failed\n"); + break; + } + list_add_tail(&desc->node, ioat_chan->used_desc.next); + + desc->hw->next + = to_ioat_desc(desc->node.next)->async_tx.phys; + to_ioat_desc(desc->node.prev)->hw->next + = desc->async_tx.phys; + ioat_chan->desccount++; + } + + ioat_chan->used_desc.next = noop_desc->node.next; + } + new = to_ioat_desc(ioat_chan->used_desc.next); + prefetch(new); + ioat_chan->used_desc.next = new->node.next; + + if (ioat_chan->used_desc.prev == NULL) + ioat_chan->used_desc.prev = &new->node; + + prefetch(new->hw); + return new; +} + +static struct ioat_desc_sw *ioat_dma_get_next_descriptor( + struct ioat_dma_chan *ioat_chan) +{ + if (!ioat_chan) + return NULL; + + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + return ioat1_dma_get_next_descriptor(ioat_chan); + case IOAT_VER_2_0: + case IOAT_VER_3_0: + return ioat2_dma_get_next_descriptor(ioat_chan); + } + return NULL; +} + +static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( + struct dma_chan *chan, + dma_addr_t dma_dest, + dma_addr_t dma_src, + size_t len, + unsigned long flags) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_desc_sw *new; + + spin_lock_bh(&ioat_chan->desc_lock); + new = ioat_dma_get_next_descriptor(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); + + if (new) { + new->len = len; + new->dst = dma_dest; + new->src = dma_src; + new->async_tx.flags = flags; + return &new->async_tx; + } else { + dev_err(&ioat_chan->device->pdev->dev, + "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", + chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + return NULL; + } +} + +static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( + struct dma_chan 
*chan, + dma_addr_t dma_dest, + dma_addr_t dma_src, + size_t len, + unsigned long flags) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_desc_sw *new; + + spin_lock_bh(&ioat_chan->desc_lock); + new = ioat2_dma_get_next_descriptor(ioat_chan); + + /* + * leave ioat_chan->desc_lock set in ioat 2 path + * it will get unlocked at end of tx_submit + */ + + if (new) { + new->len = len; + new->dst = dma_dest; + new->src = dma_src; + new->async_tx.flags = flags; + return &new->async_tx; + } else { + spin_unlock_bh(&ioat_chan->desc_lock); + dev_err(&ioat_chan->device->pdev->dev, + "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", + chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + return NULL; + } +} + +static void ioat_dma_cleanup_tasklet(unsigned long data) +{ + struct ioat_dma_chan *chan = (void *)data; + ioat_dma_memcpy_cleanup(chan); + writew(IOAT_CHANCTRL_INT_DISABLE, + chan->reg_base + IOAT_CHANCTRL_OFFSET); +} + +static void +ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) +{ + if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_addr(desc, dst), + pci_unmap_len(desc, len), + PCI_DMA_FROMDEVICE); + else + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, dst), + pci_unmap_len(desc, len), + PCI_DMA_FROMDEVICE); + } + + if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_addr(desc, src), + pci_unmap_len(desc, len), + PCI_DMA_TODEVICE); + else + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, src), + pci_unmap_len(desc, len), + PCI_DMA_TODEVICE); + } +} + +/** + * ioat_dma_memcpy_cleanup - cleanup up finished descriptors + * @chan: ioat channel to be cleaned up + */ +static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) +{ + unsigned long phys_complete; + struct ioat_desc_sw *desc, *_desc; + dma_cookie_t cookie = 0; + unsigned long desc_phys; + struct ioat_desc_sw *latest_desc; + + prefetch(ioat_chan->completion_virt); + + if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) + return; + + /* The completion writeback can happen at any time, + so reads by the driver need to be atomic operations + The descriptor physical addresses are limited to 32-bits + when the CPU can only do a 32-bit mov */ + +#if (BITS_PER_LONG == 64) + phys_complete = + ioat_chan->completion_virt->full + & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; +#else + phys_complete = + ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; +#endif + + if ((ioat_chan->completion_virt->full + & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == + IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { + dev_err(&ioat_chan->device->pdev->dev, + "Channel halted, chanerr = %x\n", + readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); + + /* TODO do something to salvage the situation */ + } + + if (phys_complete == ioat_chan->last_completion) { + spin_unlock_bh(&ioat_chan->cleanup_lock); + /* + * perhaps we're stuck so hard that the watchdog can't go off? 
+ * try to catch it after 2 seconds + */ + if (ioat_chan->device->version != IOAT_VER_3_0) { + if (time_after(jiffies, + ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { + ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); + ioat_chan->last_completion_time = jiffies; + } + } + return; + } + ioat_chan->last_completion_time = jiffies; + + cookie = 0; + if (!spin_trylock_bh(&ioat_chan->desc_lock)) { + spin_unlock_bh(&ioat_chan->cleanup_lock); + return; + } + + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + list_for_each_entry_safe(desc, _desc, + &ioat_chan->used_desc, node) { + + /* + * Incoming DMA requests may use multiple descriptors, + * due to exceeding xfercap, perhaps. If so, only the + * last one will have a cookie, and require unmapping. + */ + if (desc->async_tx.cookie) { + cookie = desc->async_tx.cookie; + ioat_dma_unmap(ioat_chan, desc); + if (desc->async_tx.callback) { + desc->async_tx.callback(desc->async_tx.callback_param); + desc->async_tx.callback = NULL; + } + } + + if (desc->async_tx.phys != phys_complete) { + /* + * a completed entry, but not the last, so clean + * up if the client is done with the descriptor + */ + if (async_tx_test_ack(&desc->async_tx)) { + list_move_tail(&desc->node, + &ioat_chan->free_desc); + } else + desc->async_tx.cookie = 0; + } else { + /* + * last used desc. Do not remove, so we can + * append from it, but don't look at it next + * time, either + */ + desc->async_tx.cookie = 0; + + /* TODO check status bits? */ + break; + } + } + break; + case IOAT_VER_2_0: + case IOAT_VER_3_0: + /* has some other thread has already cleaned up? */ + if (ioat_chan->used_desc.prev == NULL) + break; + + /* work backwards to find latest finished desc */ + desc = to_ioat_desc(ioat_chan->used_desc.next); + latest_desc = NULL; + do { + desc = to_ioat_desc(desc->node.prev); + desc_phys = (unsigned long)desc->async_tx.phys + & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; + if (desc_phys == phys_complete) { + latest_desc = desc; + break; + } + } while (&desc->node != ioat_chan->used_desc.prev); + + if (latest_desc != NULL) { + + /* work forwards to clear finished descriptors */ + for (desc = to_ioat_desc(ioat_chan->used_desc.prev); + &desc->node != latest_desc->node.next && + &desc->node != ioat_chan->used_desc.next; + desc = to_ioat_desc(desc->node.next)) { + if (desc->async_tx.cookie) { + cookie = desc->async_tx.cookie; + desc->async_tx.cookie = 0; + ioat_dma_unmap(ioat_chan, desc); + if (desc->async_tx.callback) { + desc->async_tx.callback(desc->async_tx.callback_param); + desc->async_tx.callback = NULL; + } + } + } + + /* move used.prev up beyond those that are finished */ + if (&desc->node == ioat_chan->used_desc.next) + ioat_chan->used_desc.prev = NULL; + else + ioat_chan->used_desc.prev = &desc->node; + } + break; + } + + spin_unlock_bh(&ioat_chan->desc_lock); + + ioat_chan->last_completion = phys_complete; + if (cookie != 0) + ioat_chan->completed_cookie = cookie; + + spin_unlock_bh(&ioat_chan->cleanup_lock); +} + +/** + * ioat_dma_is_complete - poll the status of a IOAT DMA transaction + * @chan: IOAT DMA channel handle + * @cookie: DMA transaction identifier + * @done: if not %NULL, updated with last completed transaction + * @used: if not %NULL, updated with last used transaction + */ +static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, + dma_cookie_t cookie, + dma_cookie_t *done, + dma_cookie_t *used) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + enum 
dma_status ret; + + last_used = chan->cookie; + last_complete = ioat_chan->completed_cookie; + ioat_chan->watchdog_tcp_cookie = cookie; + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret == DMA_SUCCESS) + return ret; + + ioat_dma_memcpy_cleanup(ioat_chan); + + last_used = chan->cookie; + last_complete = ioat_chan->completed_cookie; + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + +static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) +{ + struct ioat_desc_sw *desc; + + spin_lock_bh(&ioat_chan->desc_lock); + + desc = ioat_dma_get_next_descriptor(ioat_chan); + + if (!desc) { + dev_err(&ioat_chan->device->pdev->dev, + "Unable to start null desc - get next desc failed\n"); + spin_unlock_bh(&ioat_chan->desc_lock); + return; + } + + desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL + | IOAT_DMA_DESCRIPTOR_CTL_INT_GN + | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + /* set size to non-zero value (channel returns error when size is 0) */ + desc->hw->size = NULL_DESC_BUFFER_SIZE; + desc->hw->src_addr = 0; + desc->hw->dst_addr = 0; + async_tx_ack(&desc->async_tx); + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + desc->hw->next = 0; + list_add_tail(&desc->node, &ioat_chan->used_desc); + + writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->async_tx.phys) >> 32, + ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + + writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + break; + case IOAT_VER_2_0: + case IOAT_VER_3_0: + writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->async_tx.phys) >> 32, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + + ioat_chan->dmacount++; + __ioat2_dma_memcpy_issue_pending(ioat_chan); + break; + } + spin_unlock_bh(&ioat_chan->desc_lock); +} + +/* + * Perform a IOAT transaction to verify the HW works. + */ +#define IOAT_TEST_SIZE 2000 + +static void ioat_dma_test_callback(void *dma_async_param) +{ + struct completion *cmp = dma_async_param; + + complete(cmp); +} + +/** + * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 
+ * @device: device to be tested + */ +static int ioat_dma_self_test(struct ioatdma_device *device) +{ + int i; + u8 *src; + u8 *dest; + struct dma_chan *dma_chan; + struct dma_async_tx_descriptor *tx; + dma_addr_t dma_dest, dma_src; + dma_cookie_t cookie; + int err = 0; + struct completion cmp; + unsigned long tmo; + unsigned long flags; + + src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + if (!src) + return -ENOMEM; + dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + if (!dest) { + kfree(src); + return -ENOMEM; + } + + /* Fill in src buffer */ + for (i = 0; i < IOAT_TEST_SIZE; i++) + src[i] = (u8)i; + + /* Start copy, using first DMA channel */ + dma_chan = container_of(device->common.channels.next, + struct dma_chan, + device_node); + if (device->common.device_alloc_chan_resources(dma_chan) < 1) { + dev_err(&device->pdev->dev, + "selftest cannot allocate chan resource\n"); + err = -ENODEV; + goto out; + } + + dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, + DMA_TO_DEVICE); + dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, + DMA_FROM_DEVICE); + flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; + tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, + IOAT_TEST_SIZE, flags); + if (!tx) { + dev_err(&device->pdev->dev, + "Self-test prep failed, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(&device->pdev->dev, + "Self-test setup failed, disabling\n"); + err = -ENODEV; + goto free_resources; + } + device->common.device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) + != DMA_SUCCESS) { + dev_err(&device->pdev->dev, + "Self-test copy timed out, disabling\n"); + err = -ENODEV; + goto free_resources; + } + if (memcmp(src, dest, IOAT_TEST_SIZE)) { + dev_err(&device->pdev->dev, + "Self-test copy failed compare, disabling\n"); + err = -ENODEV; + goto free_resources; + } + +free_resources: + device->common.device_free_chan_resources(dma_chan); +out: + kfree(src); + kfree(dest); + return err; +} + +static char ioat_interrupt_style[32] = "msix"; +module_param_string(ioat_interrupt_style, ioat_interrupt_style, + sizeof(ioat_interrupt_style), 0644); +MODULE_PARM_DESC(ioat_interrupt_style, + "set ioat interrupt style: msix (default), " + "msix-single-vector, msi, intx)"); + +/** + * ioat_dma_setup_interrupts - setup interrupt handler + * @device: ioat device + */ +static int ioat_dma_setup_interrupts(struct ioatdma_device *device) +{ + struct ioat_dma_chan *ioat_chan; + int err, i, j, msixcnt; + u8 intrctrl = 0; + + if (!strcmp(ioat_interrupt_style, "msix")) + goto msix; + if (!strcmp(ioat_interrupt_style, "msix-single-vector")) + goto msix_single_vector; + if (!strcmp(ioat_interrupt_style, "msi")) + goto msi; + if (!strcmp(ioat_interrupt_style, "intx")) + goto intx; + dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", + ioat_interrupt_style); + goto err_no_irq; + +msix: + /* The number of MSI-X vectors should equal the number of channels */ + msixcnt = device->common.chancnt; + for (i = 0; i < msixcnt; i++) + device->msix_entries[i].entry = i; + + err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); + if (err < 0) + goto msi; + if (err > 0) + goto 
msix_single_vector; + + for (i = 0; i < msixcnt; i++) { + ioat_chan = ioat_lookup_chan_by_index(device, i); + err = request_irq(device->msix_entries[i].vector, + ioat_dma_do_interrupt_msix, + 0, "ioat-msix", ioat_chan); + if (err) { + for (j = 0; j < i; j++) { + ioat_chan = + ioat_lookup_chan_by_index(device, j); + free_irq(device->msix_entries[j].vector, + ioat_chan); + } + goto msix_single_vector; + } + } + intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; + device->irq_mode = msix_multi_vector; + goto done; + +msix_single_vector: + device->msix_entries[0].entry = 0; + err = pci_enable_msix(device->pdev, device->msix_entries, 1); + if (err) + goto msi; + + err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, + 0, "ioat-msix", device); + if (err) { + pci_disable_msix(device->pdev); + goto msi; + } + device->irq_mode = msix_single_vector; + goto done; + +msi: + err = pci_enable_msi(device->pdev); + if (err) + goto intx; + + err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, + 0, "ioat-msi", device); + if (err) { + pci_disable_msi(device->pdev); + goto intx; + } + /* + * CB 1.2 devices need a bit set in configuration space to enable MSI + */ + if (device->version == IOAT_VER_1_2) { + u32 dmactrl; + pci_read_config_dword(device->pdev, + IOAT_PCI_DMACTRL_OFFSET, &dmactrl); + dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; + pci_write_config_dword(device->pdev, + IOAT_PCI_DMACTRL_OFFSET, dmactrl); + } + device->irq_mode = msi; + goto done; + +intx: + err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, + IRQF_SHARED, "ioat-intx", device); + if (err) + goto err_no_irq; + device->irq_mode = intx; + +done: + intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; + writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); + return 0; + +err_no_irq: + /* Disable all interrupt generation */ + writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); + dev_err(&device->pdev->dev, "no usable interrupts\n"); + device->irq_mode = none; + return -1; +} + +/** + * ioat_dma_remove_interrupts - remove whatever interrupts were set + * @device: ioat device + */ +static void ioat_dma_remove_interrupts(struct ioatdma_device *device) +{ + struct ioat_dma_chan *ioat_chan; + int i; + + /* Disable all interrupt generation */ + writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); + + switch (device->irq_mode) { + case msix_multi_vector: + for (i = 0; i < device->common.chancnt; i++) { + ioat_chan = ioat_lookup_chan_by_index(device, i); + free_irq(device->msix_entries[i].vector, ioat_chan); + } + pci_disable_msix(device->pdev); + break; + case msix_single_vector: + free_irq(device->msix_entries[0].vector, device); + pci_disable_msix(device->pdev); + break; + case msi: + free_irq(device->pdev->irq, device); + pci_disable_msi(device->pdev); + break; + case intx: + free_irq(device->pdev->irq, device); + break; + case none: + dev_warn(&device->pdev->dev, + "call to %s without interrupts setup\n", __func__); + } + device->irq_mode = none; +} + +struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, + void __iomem *iobase) +{ + int err; + struct ioatdma_device *device; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + err = -ENOMEM; + goto err_kzalloc; + } + device->pdev = pdev; + device->reg_base = iobase; + device->version = readb(device->reg_base + IOAT_VER_OFFSET); + + /* DMA coherent memory pool for DMA descriptor allocations */ + device->dma_pool = pci_pool_create("dma_desc_pool", pdev, + sizeof(struct ioat_dma_descriptor), + 64, 0); + if (!device->dma_pool) { + err = -ENOMEM; + goto 
err_dma_pool; + } + + device->completion_pool = pci_pool_create("completion_pool", pdev, + sizeof(u64), SMP_CACHE_BYTES, + SMP_CACHE_BYTES); + if (!device->completion_pool) { + err = -ENOMEM; + goto err_completion_pool; + } + + INIT_LIST_HEAD(&device->common.channels); + ioat_dma_enumerate_channels(device); + + device->common.device_alloc_chan_resources = + ioat_dma_alloc_chan_resources; + device->common.device_free_chan_resources = + ioat_dma_free_chan_resources; + device->common.dev = &pdev->dev; + + dma_cap_set(DMA_MEMCPY, device->common.cap_mask); + device->common.device_is_tx_complete = ioat_dma_is_complete; + switch (device->version) { + case IOAT_VER_1_2: + device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; + device->common.device_issue_pending = + ioat1_dma_memcpy_issue_pending; + break; + case IOAT_VER_2_0: + case IOAT_VER_3_0: + device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + device->common.device_issue_pending = + ioat2_dma_memcpy_issue_pending; + break; + } + + dev_err(&device->pdev->dev, + "Intel(R) I/OAT DMA Engine found," + " %d channels, device version 0x%02x, driver version %s\n", + device->common.chancnt, device->version, IOAT_DMA_VERSION); + + if (!device->common.chancnt) { + dev_err(&device->pdev->dev, + "Intel(R) I/OAT DMA Engine problem found: " + "zero channels detected\n"); + goto err_setup_interrupts; + } + + err = ioat_dma_setup_interrupts(device); + if (err) + goto err_setup_interrupts; + + err = ioat_dma_self_test(device); + if (err) + goto err_self_test; + + ioat_set_tcp_copy_break(device); + + dma_async_device_register(&device->common); + + if (device->version != IOAT_VER_3_0) { + INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); + schedule_delayed_work(&device->work, + WATCHDOG_DELAY); + } + + return device; + +err_self_test: + ioat_dma_remove_interrupts(device); +err_setup_interrupts: + pci_pool_destroy(device->completion_pool); +err_completion_pool: + pci_pool_destroy(device->dma_pool); +err_dma_pool: + kfree(device); +err_kzalloc: + dev_err(&pdev->dev, + "Intel(R) I/OAT DMA Engine initialization failed\n"); + return NULL; +} + +void ioat_dma_remove(struct ioatdma_device *device) +{ + struct dma_chan *chan, *_chan; + struct ioat_dma_chan *ioat_chan; + + if (device->version != IOAT_VER_3_0) + cancel_delayed_work(&device->work); + + ioat_dma_remove_interrupts(device); + + dma_async_device_unregister(&device->common); + + pci_pool_destroy(device->dma_pool); + pci_pool_destroy(device->completion_pool); + + iounmap(device->reg_base); + pci_release_regions(device->pdev); + pci_disable_device(device->pdev); + + list_for_each_entry_safe(chan, _chan, + &device->common.channels, device_node) { + ioat_chan = to_ioat_chan(chan); + list_del(&chan->device_node); + kfree(ioat_chan); + } + kfree(device); +} + diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h new file mode 100644 index 000000000000..e80e787fe64f --- /dev/null +++ b/drivers/dma/ioat/dma.h @@ -0,0 +1,165 @@ +/* + * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef IOATDMA_H +#define IOATDMA_H + +#include +#include "hw.h" +#include +#include +#include +#include +#include + +#define IOAT_DMA_VERSION "3.64" + +enum ioat_interrupt { + none = 0, + msix_multi_vector = 1, + msix_single_vector = 2, + msi = 3, + intx = 4, +}; + +#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 +#define IOAT_DMA_DCA_ANY_CPU ~0 +#define IOAT_WATCHDOG_PERIOD (2 * HZ) + + +/** + * struct ioatdma_device - internal representation of a IOAT device + * @pdev: PCI-Express device + * @reg_base: MMIO register space base address + * @dma_pool: for allocating DMA descriptors + * @common: embedded struct dma_device + * @version: version of ioatdma device + * @irq_mode: which style irq to use + * @msix_entries: irq handlers + * @idx: per channel data + */ + +struct ioatdma_device { + struct pci_dev *pdev; + void __iomem *reg_base; + struct pci_pool *dma_pool; + struct pci_pool *completion_pool; + struct dma_device common; + u8 version; + enum ioat_interrupt irq_mode; + struct delayed_work work; + struct msix_entry msix_entries[4]; + struct ioat_dma_chan *idx[4]; +}; + +/** + * struct ioat_dma_chan - internal representation of a DMA channel + */ +struct ioat_dma_chan { + + void __iomem *reg_base; + + dma_cookie_t completed_cookie; + unsigned long last_completion; + unsigned long last_completion_time; + + size_t xfercap; /* XFERCAP register value expanded out */ + + spinlock_t cleanup_lock; + spinlock_t desc_lock; + struct list_head free_desc; + struct list_head used_desc; + unsigned long watchdog_completion; + int watchdog_tcp_cookie; + u32 watchdog_last_tcp_cookie; + struct delayed_work work; + + int pending; + int dmacount; + int desccount; + + struct ioatdma_device *device; + struct dma_chan common; + + dma_addr_t completion_addr; + union { + u64 full; /* HW completion writeback */ + struct { + u32 low; + u32 high; + }; + } *completion_virt; + unsigned long last_compl_desc_addr_hw; + struct tasklet_struct cleanup_task; +}; + +/* wrapper around hardware descriptor format + additional software fields */ + +/** + * struct ioat_desc_sw - wrapper around hardware descriptor + * @hw: hardware DMA descriptor + * @node: this descriptor will either be on the free list, + * or attached to a transaction list (async_tx.tx_list) + * @tx_cnt: number of descriptors required to complete the transaction + * @async_tx: the generic software descriptor for all engines + */ +struct ioat_desc_sw { + struct ioat_dma_descriptor *hw; + struct list_head node; + int tx_cnt; + size_t len; + dma_addr_t src; + dma_addr_t dst; + struct dma_async_tx_descriptor async_tx; +}; + +static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) +{ + #ifdef CONFIG_NET_DMA + switch (dev->version) { + case IOAT_VER_1_2: + sysctl_tcp_dma_copybreak = 4096; + break; + case IOAT_VER_2_0: + sysctl_tcp_dma_copybreak = 2048; + break; + case IOAT_VER_3_0: + sysctl_tcp_dma_copybreak = 262144; + break; + } + #endif +} + +#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) +struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, + void __iomem *iobase); +void ioat_dma_remove(struct ioatdma_device *device); +struct 
dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); +struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); +struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +#else +#define ioat_dma_probe(pdev, iobase) NULL +#define ioat_dma_remove(device) do { } while (0) +#define ioat_dca_init(pdev, iobase) NULL +#define ioat2_dca_init(pdev, iobase) NULL +#define ioat3_dca_init(pdev, iobase) NULL +#endif + +#endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h new file mode 100644 index 000000000000..afa57eef86c9 --- /dev/null +++ b/drivers/dma/ioat/hw.h @@ -0,0 +1,70 @@ +/* + * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef _IOAT_HW_H_ +#define _IOAT_HW_H_ + +/* PCI Configuration Space Values */ +#define IOAT_PCI_VID 0x8086 + +/* CB device ID's */ +#define IOAT_PCI_DID_5000 0x1A38 +#define IOAT_PCI_DID_CNB 0x360B +#define IOAT_PCI_DID_SCNB 0x65FF +#define IOAT_PCI_DID_SNB 0x402F + +#define IOAT_PCI_RID 0x00 +#define IOAT_PCI_SVID 0x8086 +#define IOAT_PCI_SID 0x8086 +#define IOAT_VER_1_2 0x12 /* Version 1.2 */ +#define IOAT_VER_2_0 0x20 /* Version 2.0 */ +#define IOAT_VER_3_0 0x30 /* Version 3.0 */ + +struct ioat_dma_descriptor { + uint32_t size; + uint32_t ctl; + uint64_t src_addr; + uint64_t dst_addr; + uint64_t next; + uint64_t rsv1; + uint64_t rsv2; + uint64_t user1; + uint64_t user2; +}; + +#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001 +#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002 +#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004 +#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 +#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 +#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 +#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040 +#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080 +#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100 +#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200 +#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400 + +#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000 +#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000 + +#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001 +#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000 + +#endif diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c new file mode 100644 index 000000000000..d7948bfd8fba --- /dev/null +++ b/drivers/dma/ioat/pci.c @@ -0,0 +1,202 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2007 - 2009 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +/* + * This driver supports an Intel I/OAT DMA engine, which does asynchronous + * copy operations. + */ + +#include +#include +#include +#include +#include +#include "dma.h" +#include "registers.h" +#include "hw.h" + +MODULE_VERSION(IOAT_DMA_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel Corporation"); + +static struct pci_device_id ioat_pci_tbl[] = { + /* I/OAT v1 platforms */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, + { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, + + /* I/OAT v2 platforms */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, + + /* I/OAT v3 platforms */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, + { 0, } +}; + +struct ioat_device { + struct pci_dev *pdev; + void __iomem *iobase; + struct ioatdma_device *dma; + struct dca_provider *dca; +}; + +static int __devinit ioat_probe(struct pci_dev *pdev, + const struct pci_device_id *id); +static void __devexit ioat_remove(struct pci_dev *pdev); + +static int ioat_dca_enabled = 1; +module_param(ioat_dca_enabled, int, 0644); +MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); + +static struct pci_driver ioat_pci_driver = { + .name = "ioatdma", + .id_table = ioat_pci_tbl, + .probe = ioat_probe, + .remove = __devexit_p(ioat_remove), +}; + +static int __devinit ioat_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + void __iomem *iobase; + struct ioat_device *device; + unsigned long mmio_start, mmio_len; + int err; + + err = pci_enable_device(pdev); + if (err) + goto err_enable_device; + + err = pci_request_regions(pdev, ioat_pci_driver.name); + if (err) + goto err_request_regions; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + goto err_set_dma_mask; + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + goto err_set_dma_mask; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + iobase = ioremap(mmio_start, mmio_len); + if (!iobase) { + err = -ENOMEM; + goto err_ioremap; + } + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + err = -ENOMEM; + goto err_kzalloc; + } 
+ device->pdev = pdev; + pci_set_drvdata(pdev, device); + device->iobase = iobase; + + pci_set_master(pdev); + + switch (readb(iobase + IOAT_VER_OFFSET)) { + case IOAT_VER_1_2: + device->dma = ioat_dma_probe(pdev, iobase); + if (device->dma && ioat_dca_enabled) + device->dca = ioat_dca_init(pdev, iobase); + break; + case IOAT_VER_2_0: + device->dma = ioat_dma_probe(pdev, iobase); + if (device->dma && ioat_dca_enabled) + device->dca = ioat2_dca_init(pdev, iobase); + break; + case IOAT_VER_3_0: + device->dma = ioat_dma_probe(pdev, iobase); + if (device->dma && ioat_dca_enabled) + device->dca = ioat3_dca_init(pdev, iobase); + break; + default: + err = -ENODEV; + break; + } + if (!device->dma) + err = -ENODEV; + + if (err) + goto err_version; + + return 0; + +err_version: + kfree(device); +err_kzalloc: + iounmap(iobase); +err_ioremap: +err_set_dma_mask: + pci_release_regions(pdev); + pci_disable_device(pdev); +err_request_regions: +err_enable_device: + return err; +} + +static void __devexit ioat_remove(struct pci_dev *pdev) +{ + struct ioat_device *device = pci_get_drvdata(pdev); + + dev_err(&pdev->dev, "Removing dma and dca services\n"); + if (device->dca) { + unregister_dca_provider(device->dca); + free_dca_provider(device->dca); + device->dca = NULL; + } + + if (device->dma) { + ioat_dma_remove(device->dma); + device->dma = NULL; + } + + kfree(device); +} + +static int __init ioat_init_module(void) +{ + return pci_register_driver(&ioat_pci_driver); +} +module_init(ioat_init_module); + +static void __exit ioat_exit_module(void) +{ + pci_unregister_driver(&ioat_pci_driver); +} +module_exit(ioat_exit_module); diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h new file mode 100644 index 000000000000..49bc277424f8 --- /dev/null +++ b/drivers/dma/ioat/registers.h @@ -0,0 +1,226 @@ +/* + * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. 
+ */ +#ifndef _IOAT_REGISTERS_H_ +#define _IOAT_REGISTERS_H_ + +#define IOAT_PCI_DMACTRL_OFFSET 0x48 +#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 +#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 + +#define IOAT_PCI_DEVICE_ID_OFFSET 0x02 +#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 +#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 + +/* MMIO Device Registers */ +#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ + +#define IOAT_XFERCAP_OFFSET 0x01 /* 8-bit */ +#define IOAT_XFERCAP_4KB 12 +#define IOAT_XFERCAP_8KB 13 +#define IOAT_XFERCAP_16KB 14 +#define IOAT_XFERCAP_32KB 15 +#define IOAT_XFERCAP_32GB 0 + +#define IOAT_GENCTRL_OFFSET 0x02 /* 8-bit */ +#define IOAT_GENCTRL_DEBUG_EN 0x01 + +#define IOAT_INTRCTRL_OFFSET 0x03 /* 8-bit */ +#define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */ +#define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */ +#define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */ +#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */ + +#define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */ + +#define IOAT_VER_OFFSET 0x08 /* 8-bit */ +#define IOAT_VER_MAJOR_MASK 0xF0 +#define IOAT_VER_MINOR_MASK 0x0F +#define GET_IOAT_VER_MAJOR(x) (((x) & IOAT_VER_MAJOR_MASK) >> 4) +#define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK) + +#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ + +#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ +#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ +#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ + +#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ +#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 + +#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ + +/* DMA Channel Registers */ +#define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */ +#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000 +#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100 +#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020 +#define IOAT_CHANCTRL_ERR_INT_EN 0x0010 +#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 +#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 +#define IOAT_CHANCTRL_INT_DISABLE 0x0001 + +#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ +#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ +#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */ + + +#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ +#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */ +#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET) +#define IOAT1_CHANSTS_OFFSET_LOW 0x04 +#define IOAT2_CHANSTS_OFFSET_LOW 0x08 +#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW) +#define IOAT1_CHANSTS_OFFSET_HIGH 0x08 +#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C +#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ + ? 
IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) +#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F +#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 +#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 +#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 +#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 +#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 +#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 +#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 + + + +#define IOAT_CHAN_DMACOUNT_OFFSET 0x06 /* 16-bit DMA Count register */ + +#define IOAT_DCACTRL_OFFSET 0x30 /* 32 bit Direct Cache Access Control Register */ +#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000 +#define IOAT_DCACTRL_TARGET_CPU_MASK 0xFFFF /* APIC ID */ + +/* CB DCA Memory Space Registers */ +#define IOAT_DCAOFFSET_OFFSET 0x14 +/* CB_BAR + IOAT_DCAOFFSET value */ +#define IOAT_DCA_VER_OFFSET 0x00 +#define IOAT_DCA_VER_MAJOR_MASK 0xF0 +#define IOAT_DCA_VER_MINOR_MASK 0x0F + +#define IOAT_DCA_COMP_OFFSET 0x02 +#define IOAT_DCA_COMP_V1 0x1 + +#define IOAT_FSB_CAPABILITY_OFFSET 0x04 +#define IOAT_FSB_CAPABILITY_PREFETCH 0x1 + +#define IOAT_PCI_CAPABILITY_OFFSET 0x06 +#define IOAT_PCI_CAPABILITY_MEMWR 0x1 + +#define IOAT_FSB_CAP_ENABLE_OFFSET 0x08 +#define IOAT_FSB_CAP_ENABLE_PREFETCH 0x1 + +#define IOAT_PCI_CAP_ENABLE_OFFSET 0x0A +#define IOAT_PCI_CAP_ENABLE_MEMWR 0x1 + +#define IOAT_APICID_TAG_MAP_OFFSET 0x0C +#define IOAT_APICID_TAG_MAP_TAG0 0x0000000F +#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0 +#define IOAT_APICID_TAG_MAP_TAG1 0x000000F0 +#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4 +#define IOAT_APICID_TAG_MAP_TAG2 0x00000F00 +#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8 +#define IOAT_APICID_TAG_MAP_TAG3 0x0000F000 +#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12 +#define IOAT_APICID_TAG_MAP_TAG4 0x000F0000 +#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16 +#define IOAT_APICID_TAG_CB2_VALID 0x8080808080 + +#define IOAT_DCA_GREQID_OFFSET 0x10 +#define IOAT_DCA_GREQID_SIZE 0x04 +#define IOAT_DCA_GREQID_MASK 0xFFFF +#define IOAT_DCA_GREQID_IGNOREFUN 0x10000000 +#define IOAT_DCA_GREQID_VALID 0x20000000 +#define IOAT_DCA_GREQID_LASTID 0x80000000 + +#define IOAT3_CSI_CAPABILITY_OFFSET 0x08 +#define IOAT3_CSI_CAPABILITY_PREFETCH 0x1 + +#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A +#define IOAT3_PCI_CAPABILITY_MEMWR 0x1 + +#define IOAT3_CSI_CONTROL_OFFSET 0x0C +#define IOAT3_CSI_CONTROL_PREFETCH 0x1 + +#define IOAT3_PCI_CONTROL_OFFSET 0x0E +#define IOAT3_PCI_CONTROL_MEMWR 0x1 + +#define IOAT3_APICID_TAG_MAP_OFFSET 0x10 +#define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10 +#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14 + +#define IOAT3_DCA_GREQID_OFFSET 0x02 + +#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ +#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ +#define IOAT_CHAINADDR_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET) +#define IOAT1_CHAINADDR_OFFSET_LOW 0x0C +#define IOAT2_CHAINADDR_OFFSET_LOW 0x10 +#define IOAT_CHAINADDR_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW) +#define IOAT1_CHAINADDR_OFFSET_HIGH 0x10 +#define IOAT2_CHAINADDR_OFFSET_HIGH 0x14 +#define IOAT_CHAINADDR_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ + ? 
IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH) + +#define IOAT1_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */ +#define IOAT2_CHANCMD_OFFSET 0x04 /* 8-bit DMA Channel Command Register */ +#define IOAT_CHANCMD_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET) +#define IOAT_CHANCMD_RESET 0x20 +#define IOAT_CHANCMD_RESUME 0x10 +#define IOAT_CHANCMD_ABORT 0x08 +#define IOAT_CHANCMD_SUSPEND 0x04 +#define IOAT_CHANCMD_APPEND 0x02 +#define IOAT_CHANCMD_START 0x01 + +#define IOAT_CHANCMP_OFFSET 0x18 /* 64-bit Channel Completion Address Register */ +#define IOAT_CHANCMP_OFFSET_LOW 0x18 +#define IOAT_CHANCMP_OFFSET_HIGH 0x1C + +#define IOAT_CDAR_OFFSET 0x20 /* 64-bit Current Descriptor Address Register */ +#define IOAT_CDAR_OFFSET_LOW 0x20 +#define IOAT_CDAR_OFFSET_HIGH 0x24 + +#define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ +#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 +#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 +#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 +#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 +#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 +#define IOAT_CHANERR_CHANCMD_ERR 0x0020 +#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 +#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 +#define IOAT_CHANERR_READ_DATA_ERR 0x0100 +#define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 +#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 +#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 +#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 +#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 +#define IOAT_CHANERR_SOFT_ERR 0x4000 +#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000 + +#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ + +#endif /* _IOAT_REGISTERS_H_ */ diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c deleted file mode 100644 index c012a1e15043..000000000000 --- a/drivers/dma/ioat_dca.c +++ /dev/null @@ -1,681 +0,0 @@ -/* - * Intel I/OAT DMA Linux driver - * Copyright(c) 2007 - 2009 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - */ - -#include -#include -#include -#include -#include - -/* either a kernel change is needed, or we need something like this in kernel */ -#ifndef CONFIG_SMP -#include -#undef cpu_physical_id -#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) -#endif - -#include "ioatdma.h" -#include "ioatdma_registers.h" - -/* - * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 - * contain the bit number of the APIC ID to map into the DCA tag. If the valid - * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 
- */ -#define DCA_TAG_MAP_VALID 0x80 - -#define DCA3_TAG_MAP_BIT_TO_INV 0x80 -#define DCA3_TAG_MAP_BIT_TO_SEL 0x40 -#define DCA3_TAG_MAP_LITERAL_VAL 0x1 - -#define DCA_TAG_MAP_MASK 0xDF - -/* expected tag map bytes for I/OAT ver.2 */ -#define DCA2_TAG_MAP_BYTE0 0x80 -#define DCA2_TAG_MAP_BYTE1 0x0 -#define DCA2_TAG_MAP_BYTE2 0x81 -#define DCA2_TAG_MAP_BYTE3 0x82 -#define DCA2_TAG_MAP_BYTE4 0x82 - -/* verify if tag map matches expected values */ -static inline int dca2_tag_map_valid(u8 *tag_map) -{ - return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) && - (tag_map[1] == DCA2_TAG_MAP_BYTE1) && - (tag_map[2] == DCA2_TAG_MAP_BYTE2) && - (tag_map[3] == DCA2_TAG_MAP_BYTE3) && - (tag_map[4] == DCA2_TAG_MAP_BYTE4)); -} - -/* - * "Legacy" DCA systems do not implement the DCA register set in the - * I/OAT device. Software needs direct support for their tag mappings. - */ - -#define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x)) -#define IOAT_TAG_MAP_LEN 8 - -static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; -static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; -static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), }; -static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 }; - -/* pack PCI B/D/F into a u16 */ -static inline u16 dcaid_from_pcidev(struct pci_dev *pci) -{ - return (pci->bus->number << 8) | pci->devfn; -} - -static int dca_enabled_in_bios(struct pci_dev *pdev) -{ - /* CPUID level 9 returns DCA configuration */ - /* Bit 0 indicates DCA enabled by the BIOS */ - unsigned long cpuid_level_9; - int res; - - cpuid_level_9 = cpuid_eax(9); - res = test_bit(0, &cpuid_level_9); - if (!res) - dev_err(&pdev->dev, "DCA is disabled in BIOS\n"); - - return res; -} - -static int system_has_dca_enabled(struct pci_dev *pdev) -{ - if (boot_cpu_has(X86_FEATURE_DCA)) - return dca_enabled_in_bios(pdev); - - dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n"); - return 0; -} - -struct ioat_dca_slot { - struct pci_dev *pdev; /* requester device */ - u16 rid; /* requester id, as used by IOAT */ -}; - -#define IOAT_DCA_MAX_REQ 6 -#define IOAT3_DCA_MAX_REQ 2 - -struct ioat_dca_priv { - void __iomem *iobase; - void __iomem *dca_base; - int max_requesters; - int requester_count; - u8 tag_map[IOAT_TAG_MAP_LEN]; - struct ioat_dca_slot req_slots[0]; -}; - -/* 5000 series chipset DCA Port Requester ID Table Entry Format - * [15:8] PCI-Express Bus Number - * [7:3] PCI-Express Device Number - * [2:0] PCI-Express Function Number - * - * 5000 series chipset DCA control register format - * [7:1] Reserved (0) - * [0] Ignore Function Number - */ - -static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 id; - - /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) - return -ENODEV; - pdev = to_pci_dev(dev); - id = dcaid_from_pcidev(pdev); - - if (ioatdca->requester_count == ioatdca->max_requesters) - return -ENODEV; - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == NULL) { - /* found an empty slot */ - ioatdca->requester_count++; - ioatdca->req_slots[i].pdev = pdev; - ioatdca->req_slots[i].rid = id; - writew(id, ioatdca->dca_base + (i * 4)); - /* make sure the ignore function bit is off */ - writeb(0, ioatdca->dca_base + (i * 4) + 2); - return i; - } - } - /* Error, ioatdma->requester_count 
is out of whack */ - return -EFAULT; -} - -static int ioat_dca_remove_requester(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - - /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) - return -ENODEV; - pdev = to_pci_dev(dev); - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) { - writew(0, ioatdca->dca_base + (i * 4)); - ioatdca->req_slots[i].pdev = NULL; - ioatdca->req_slots[i].rid = 0; - ioatdca->requester_count--; - return i; - } - } - return -ENODEV; -} - -static u8 ioat_dca_get_tag(struct dca_provider *dca, - struct device *dev, - int cpu) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - int i, apic_id, bit, value; - u8 entry, tag; - - tag = 0; - apic_id = cpu_physical_id(cpu); - - for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { - entry = ioatdca->tag_map[i]; - if (entry & DCA_TAG_MAP_VALID) { - bit = entry & ~DCA_TAG_MAP_VALID; - value = (apic_id & (1 << bit)) ? 1 : 0; - } else { - value = entry ? 1 : 0; - } - tag |= (value << i); - } - return tag; -} - -static int ioat_dca_dev_managed(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - - pdev = to_pci_dev(dev); - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) - return 1; - } - return 0; -} - -static struct dca_ops ioat_dca_ops = { - .add_requester = ioat_dca_add_requester, - .remove_requester = ioat_dca_remove_requester, - .get_tag = ioat_dca_get_tag, - .dev_managed = ioat_dca_dev_managed, -}; - - -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) -{ - struct dca_provider *dca; - struct ioat_dca_priv *ioatdca; - u8 *tag_map = NULL; - int i; - int err; - u8 version; - u8 max_requesters; - - if (!system_has_dca_enabled(pdev)) - return NULL; - - /* I/OAT v1 systems must have a known tag_map to support DCA */ - switch (pdev->vendor) { - case PCI_VENDOR_ID_INTEL: - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT: - tag_map = ioat_tag_map_BNB; - break; - case PCI_DEVICE_ID_INTEL_IOAT_CNB: - tag_map = ioat_tag_map_CNB; - break; - case PCI_DEVICE_ID_INTEL_IOAT_SCNB: - tag_map = ioat_tag_map_SCNB; - break; - } - break; - case PCI_VENDOR_ID_UNISYS: - switch (pdev->device) { - case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR: - tag_map = ioat_tag_map_UNISYS; - break; - } - break; - } - if (tag_map == NULL) - return NULL; - - version = readb(iobase + IOAT_VER_OFFSET); - if (version == IOAT_VER_3_0) - max_requesters = IOAT3_DCA_MAX_REQ; - else - max_requesters = IOAT_DCA_MAX_REQ; - - dca = alloc_dca_provider(&ioat_dca_ops, - sizeof(*ioatdca) + - (sizeof(struct ioat_dca_slot) * max_requesters)); - if (!dca) - return NULL; - - ioatdca = dca_priv(dca); - ioatdca->max_requesters = max_requesters; - ioatdca->dca_base = iobase + 0x54; - - /* copy over the APIC ID to DCA tag mapping */ - for (i = 0; i < IOAT_TAG_MAP_LEN; i++) - ioatdca->tag_map[i] = tag_map[i]; - - err = register_dca_provider(dca, &pdev->dev); - if (err) { - free_dca_provider(dca); - return NULL; - } - - return dca; -} - - -static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 id; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) - return -ENODEV; - pdev = to_pci_dev(dev); - id = 
dcaid_from_pcidev(pdev); - - if (ioatdca->requester_count == ioatdca->max_requesters) - return -ENODEV; - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == NULL) { - /* found an empty slot */ - ioatdca->requester_count++; - ioatdca->req_slots[i].pdev = pdev; - ioatdca->req_slots[i].rid = id; - global_req_table = - readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); - writel(id | IOAT_DCA_GREQID_VALID, - ioatdca->iobase + global_req_table + (i * 4)); - return i; - } - } - /* Error, ioatdma->requester_count is out of whack */ - return -EFAULT; -} - -static int ioat2_dca_remove_requester(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) - return -ENODEV; - pdev = to_pci_dev(dev); - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) { - global_req_table = - readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); - writel(0, ioatdca->iobase + global_req_table + (i * 4)); - ioatdca->req_slots[i].pdev = NULL; - ioatdca->req_slots[i].rid = 0; - ioatdca->requester_count--; - return i; - } - } - return -ENODEV; -} - -static u8 ioat2_dca_get_tag(struct dca_provider *dca, - struct device *dev, - int cpu) -{ - u8 tag; - - tag = ioat_dca_get_tag(dca, dev, cpu); - tag = (~tag) & 0x1F; - return tag; -} - -static struct dca_ops ioat2_dca_ops = { - .add_requester = ioat2_dca_add_requester, - .remove_requester = ioat2_dca_remove_requester, - .get_tag = ioat2_dca_get_tag, - .dev_managed = ioat_dca_dev_managed, -}; - -static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) -{ - int slots = 0; - u32 req; - u16 global_req_table; - - global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET); - if (global_req_table == 0) - return 0; - do { - req = readl(iobase + global_req_table + (slots * sizeof(u32))); - slots++; - } while ((req & IOAT_DCA_GREQID_LASTID) == 0); - - return slots; -} - -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) -{ - struct dca_provider *dca; - struct ioat_dca_priv *ioatdca; - int slots; - int i; - int err; - u32 tag_map; - u16 dca_offset; - u16 csi_fsb_control; - u16 pcie_control; - u8 bit; - - if (!system_has_dca_enabled(pdev)) - return NULL; - - dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); - if (dca_offset == 0) - return NULL; - - slots = ioat2_dca_count_dca_slots(iobase, dca_offset); - if (slots == 0) - return NULL; - - dca = alloc_dca_provider(&ioat2_dca_ops, - sizeof(*ioatdca) - + (sizeof(struct ioat_dca_slot) * slots)); - if (!dca) - return NULL; - - ioatdca = dca_priv(dca); - ioatdca->iobase = iobase; - ioatdca->dca_base = iobase + dca_offset; - ioatdca->max_requesters = slots; - - /* some bios might not know to turn these on */ - csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); - if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) { - csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH; - writew(csi_fsb_control, - ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); - } - pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); - if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) { - pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR; - writew(pcie_control, - ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); - } - - - /* TODO version, compatibility and configuration checks */ - - /* copy out the APIC to DCA tag map */ - 
tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET); - for (i = 0; i < 5; i++) { - bit = (tag_map >> (4 * i)) & 0x0f; - if (bit < 8) - ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID; - else - ioatdca->tag_map[i] = 0; - } - - if (!dca2_tag_map_valid(ioatdca->tag_map)) { - dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, " - "disabling DCA\n"); - free_dca_provider(dca); - return NULL; - } - - err = register_dca_provider(dca, &pdev->dev); - if (err) { - free_dca_provider(dca); - return NULL; - } - - return dca; -} - -static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 id; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) - return -ENODEV; - pdev = to_pci_dev(dev); - id = dcaid_from_pcidev(pdev); - - if (ioatdca->requester_count == ioatdca->max_requesters) - return -ENODEV; - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == NULL) { - /* found an empty slot */ - ioatdca->requester_count++; - ioatdca->req_slots[i].pdev = pdev; - ioatdca->req_slots[i].rid = id; - global_req_table = - readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); - writel(id | IOAT_DCA_GREQID_VALID, - ioatdca->iobase + global_req_table + (i * 4)); - return i; - } - } - /* Error, ioatdma->requester_count is out of whack */ - return -EFAULT; -} - -static int ioat3_dca_remove_requester(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) - return -ENODEV; - pdev = to_pci_dev(dev); - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) { - global_req_table = - readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); - writel(0, ioatdca->iobase + global_req_table + (i * 4)); - ioatdca->req_slots[i].pdev = NULL; - ioatdca->req_slots[i].rid = 0; - ioatdca->requester_count--; - return i; - } - } - return -ENODEV; -} - -static u8 ioat3_dca_get_tag(struct dca_provider *dca, - struct device *dev, - int cpu) -{ - u8 tag; - - struct ioat_dca_priv *ioatdca = dca_priv(dca); - int i, apic_id, bit, value; - u8 entry; - - tag = 0; - apic_id = cpu_physical_id(cpu); - - for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { - entry = ioatdca->tag_map[i]; - if (entry & DCA3_TAG_MAP_BIT_TO_SEL) { - bit = entry & - ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV); - value = (apic_id & (1 << bit)) ? 1 : 0; - } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) { - bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV; - value = (apic_id & (1 << bit)) ? 0 : 1; - } else { - value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 
1 : 0; - } - tag |= (value << i); - } - - return tag; -} - -static struct dca_ops ioat3_dca_ops = { - .add_requester = ioat3_dca_add_requester, - .remove_requester = ioat3_dca_remove_requester, - .get_tag = ioat3_dca_get_tag, - .dev_managed = ioat_dca_dev_managed, -}; - -static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) -{ - int slots = 0; - u32 req; - u16 global_req_table; - - global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET); - if (global_req_table == 0) - return 0; - - do { - req = readl(iobase + global_req_table + (slots * sizeof(u32))); - slots++; - } while ((req & IOAT_DCA_GREQID_LASTID) == 0); - - return slots; -} - -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) -{ - struct dca_provider *dca; - struct ioat_dca_priv *ioatdca; - int slots; - int i; - int err; - u16 dca_offset; - u16 csi_fsb_control; - u16 pcie_control; - u8 bit; - - union { - u64 full; - struct { - u32 low; - u32 high; - }; - } tag_map; - - if (!system_has_dca_enabled(pdev)) - return NULL; - - dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); - if (dca_offset == 0) - return NULL; - - slots = ioat3_dca_count_dca_slots(iobase, dca_offset); - if (slots == 0) - return NULL; - - dca = alloc_dca_provider(&ioat3_dca_ops, - sizeof(*ioatdca) - + (sizeof(struct ioat_dca_slot) * slots)); - if (!dca) - return NULL; - - ioatdca = dca_priv(dca); - ioatdca->iobase = iobase; - ioatdca->dca_base = iobase + dca_offset; - ioatdca->max_requesters = slots; - - /* some bios might not know to turn these on */ - csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); - if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) { - csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH; - writew(csi_fsb_control, - ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); - } - pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); - if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) { - pcie_control |= IOAT3_PCI_CONTROL_MEMWR; - writew(pcie_control, - ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); - } - - - /* TODO version, compatibility and configuration checks */ - - /* copy out the APIC to DCA tag map */ - tag_map.low = - readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW); - tag_map.high = - readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH); - for (i = 0; i < 8; i++) { - bit = tag_map.full >> (8 * i); - ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; - } - - err = register_dca_provider(dca, &pdev->dev); - if (err) { - free_dca_provider(dca); - return NULL; - } - - return dca; -} diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c deleted file mode 100644 index a600fc0f7962..000000000000 --- a/drivers/dma/ioat_dma.c +++ /dev/null @@ -1,1741 +0,0 @@ -/* - * Intel I/OAT DMA Linux driver - * Copyright(c) 2004 - 2009 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - */ - -/* - * This driver supports an Intel I/OAT DMA engine, which does asynchronous - * copy operations. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "ioatdma.h" -#include "ioatdma_registers.h" -#include "ioatdma_hw.h" - -#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) -#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) -#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) -#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) - -#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) -static int ioat_pending_level = 4; -module_param(ioat_pending_level, int, 0644); -MODULE_PARM_DESC(ioat_pending_level, - "high-water mark for pushing ioat descriptors (default: 4)"); - -#define RESET_DELAY msecs_to_jiffies(100) -#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) -static void ioat_dma_chan_reset_part2(struct work_struct *work); -static void ioat_dma_chan_watchdog(struct work_struct *work); - -/* - * workaround for IOAT ver.3.0 null descriptor issue - * (channel returns error when size is 0) - */ -#define NULL_DESC_BUFFER_SIZE 1 - -/* internal functions */ -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); - -static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); -static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); - -static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( - struct ioatdma_device *device, - int index) -{ - return device->idx[index]; -} - -/** - * ioat_dma_do_interrupt - handler used for single vector interrupt mode - * @irq: interrupt id - * @data: interrupt data - */ -static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) -{ - struct ioatdma_device *instance = data; - struct ioat_dma_chan *ioat_chan; - unsigned long attnstatus; - int bit; - u8 intrctrl; - - intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); - - if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) - return IRQ_NONE; - - if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { - writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); - return IRQ_NONE; - } - - attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); - for_each_bit(bit, &attnstatus, BITS_PER_LONG) { - ioat_chan = ioat_lookup_chan_by_index(instance, bit); - tasklet_schedule(&ioat_chan->cleanup_task); - } - - writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); - return IRQ_HANDLED; -} - -/** - * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode - * @irq: interrupt id - * @data: interrupt data - */ -static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) -{ - struct ioat_dma_chan *ioat_chan = data; - - tasklet_schedule(&ioat_chan->cleanup_task); - - return IRQ_HANDLED; -} - -static void ioat_dma_cleanup_tasklet(unsigned long data); - -/** - * ioat_dma_enumerate_channels - find and initialize the device's channels - * @device: the device to be enumerated - */ -static int ioat_dma_enumerate_channels(struct ioatdma_device *device) -{ - u8 xfercap_scale; - u32 xfercap; - int i; - struct ioat_dma_chan *ioat_chan; - - /* - * IOAT ver.3 workarounds - */ - if (device->version == IOAT_VER_3_0) { - u32 chan_err_mask; - 
u16 dev_id; - u32 dmauncerrsts; - - /* - * Write CHANERRMSK_INT with 3E07h to mask out the errors - * that can cause stability issues for IOAT ver.3 - */ - chan_err_mask = 0x3E07; - pci_write_config_dword(device->pdev, - IOAT_PCI_CHANERRMASK_INT_OFFSET, - chan_err_mask); - - /* - * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(device->pdev, - IOAT_PCI_DEVICE_ID_OFFSET, - &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { - dmauncerrsts = 0x10; - pci_write_config_dword(device->pdev, - IOAT_PCI_DMAUNCERRSTS_OFFSET, - dmauncerrsts); - } - } - - device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); - xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); - xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); - -#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL - if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) { - device->common.chancnt--; - } -#endif - for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); - if (!ioat_chan) { - device->common.chancnt = i; - break; - } - - ioat_chan->device = device; - ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); - ioat_chan->xfercap = xfercap; - ioat_chan->desccount = 0; - INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); - if (ioat_chan->device->version == IOAT_VER_2_0) - writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | - IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); - else if (ioat_chan->device->version == IOAT_VER_3_0) - writel(IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); - spin_lock_init(&ioat_chan->cleanup_lock); - spin_lock_init(&ioat_chan->desc_lock); - INIT_LIST_HEAD(&ioat_chan->free_desc); - INIT_LIST_HEAD(&ioat_chan->used_desc); - /* This should be made common somewhere in dmaengine.c */ - ioat_chan->common.device = &device->common; - list_add_tail(&ioat_chan->common.device_node, - &device->common.channels); - device->idx[i] = ioat_chan; - tasklet_init(&ioat_chan->cleanup_task, - ioat_dma_cleanup_tasklet, - (unsigned long) ioat_chan); - tasklet_disable(&ioat_chan->cleanup_task); - } - return device->common.chancnt; -} - -/** - * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended - * descriptors to hw - * @chan: DMA channel handle - */ -static inline void __ioat1_dma_memcpy_issue_pending( - struct ioat_dma_chan *ioat_chan) -{ - ioat_chan->pending = 0; - writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); -} - -static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - - if (ioat_chan->pending > 0) { - spin_lock_bh(&ioat_chan->desc_lock); - __ioat1_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); - } -} - -static inline void __ioat2_dma_memcpy_issue_pending( - struct ioat_dma_chan *ioat_chan) -{ - ioat_chan->pending = 0; - writew(ioat_chan->dmacount, - ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); -} - -static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - - if (ioat_chan->pending > 0) { - spin_lock_bh(&ioat_chan->desc_lock); - __ioat2_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); - } -} - - -/** - * ioat_dma_chan_reset_part2 - reinit the channel after a reset - */ -static void ioat_dma_chan_reset_part2(struct work_struct *work) -{ - struct ioat_dma_chan *ioat_chan = - 
container_of(work, struct ioat_dma_chan, work.work); - struct ioat_desc_sw *desc; - - spin_lock_bh(&ioat_chan->cleanup_lock); - spin_lock_bh(&ioat_chan->desc_lock); - - ioat_chan->completion_virt->low = 0; - ioat_chan->completion_virt->high = 0; - ioat_chan->pending = 0; - - /* - * count the descriptors waiting, and be sure to do it - * right for both the CB1 line and the CB2 ring - */ - ioat_chan->dmacount = 0; - if (ioat_chan->used_desc.prev) { - desc = to_ioat_desc(ioat_chan->used_desc.prev); - do { - ioat_chan->dmacount++; - desc = to_ioat_desc(desc->node.next); - } while (&desc->node != ioat_chan->used_desc.next); - } - - /* - * write the new starting descriptor address - * this puts channel engine into ARMED state - */ - desc = to_ioat_desc(ioat_chan->used_desc.prev); - switch (ioat_chan->device->version) { - case IOAT_VER_1_2: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); - break; - case IOAT_VER_2_0: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - - /* tell the engine to go with what's left to be done */ - writew(ioat_chan->dmacount, - ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - - break; - } - dev_err(&ioat_chan->device->pdev->dev, - "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); - - spin_unlock_bh(&ioat_chan->desc_lock); - spin_unlock_bh(&ioat_chan->cleanup_lock); -} - -/** - * ioat_dma_reset_channel - restart a channel - * @ioat_chan: IOAT DMA channel handle - */ -static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) -{ - u32 chansts, chanerr; - - if (!ioat_chan->used_desc.prev) - return; - - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - chansts = (ioat_chan->completion_virt->low - & IOAT_CHANSTS_DMA_TRANSFER_STATUS); - if (chanerr) { - dev_err(&ioat_chan->device->pdev->dev, - "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", - chan_num(ioat_chan), chansts, chanerr); - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - } - - /* - * whack it upside the head with a reset - * and wait for things to settle out. - * force the pending count to a really big negative - * to make sure no one forces an issue_pending - * while we're waiting. 
- */ - - spin_lock_bh(&ioat_chan->desc_lock); - ioat_chan->pending = INT_MIN; - writeb(IOAT_CHANCMD_RESET, - ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); - spin_unlock_bh(&ioat_chan->desc_lock); - - /* schedule the 2nd half instead of sleeping a long time */ - schedule_delayed_work(&ioat_chan->work, RESET_DELAY); -} - -/** - * ioat_dma_chan_watchdog - watch for stuck channels - */ -static void ioat_dma_chan_watchdog(struct work_struct *work) -{ - struct ioatdma_device *device = - container_of(work, struct ioatdma_device, work.work); - struct ioat_dma_chan *ioat_chan; - int i; - - union { - u64 full; - struct { - u32 low; - u32 high; - }; - } completion_hw; - unsigned long compl_desc_addr_hw; - - for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_lookup_chan_by_index(device, i); - - if (ioat_chan->device->version == IOAT_VER_1_2 - /* have we started processing anything yet */ - && ioat_chan->last_completion - /* have we completed any since last watchdog cycle? */ - && (ioat_chan->last_completion == - ioat_chan->watchdog_completion) - /* has TCP stuck on one cookie since last watchdog? */ - && (ioat_chan->watchdog_tcp_cookie == - ioat_chan->watchdog_last_tcp_cookie) - && (ioat_chan->watchdog_tcp_cookie != - ioat_chan->completed_cookie) - /* is there something in the chain to be processed? */ - /* CB1 chain always has at least the last one processed */ - && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) - && ioat_chan->pending == 0) { - - /* - * check CHANSTS register for completed - * descriptor address. - * if it is different than completion writeback, - * it is not zero - * and it has changed since the last watchdog - * we can assume that channel - * is still working correctly - * and the problem is in completion writeback. 
- * update completion writeback - * with actual CHANSTS value - * else - * try resetting the channel - */ - - completion_hw.low = readl(ioat_chan->reg_base + - IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); - completion_hw.high = readl(ioat_chan->reg_base + - IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); -#if (BITS_PER_LONG == 64) - compl_desc_addr_hw = - completion_hw.full - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; -#else - compl_desc_addr_hw = - completion_hw.low & IOAT_LOW_COMPLETION_MASK; -#endif - - if ((compl_desc_addr_hw != 0) - && (compl_desc_addr_hw != ioat_chan->watchdog_completion) - && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { - ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; - ioat_chan->completion_virt->low = completion_hw.low; - ioat_chan->completion_virt->high = completion_hw.high; - } else { - ioat_dma_reset_channel(ioat_chan); - ioat_chan->watchdog_completion = 0; - ioat_chan->last_compl_desc_addr_hw = 0; - } - - /* - * for version 2.0 if there are descriptors yet to be processed - * and the last completed hasn't changed since the last watchdog - * if they haven't hit the pending level - * issue the pending to push them through - * else - * try resetting the channel - */ - } else if (ioat_chan->device->version == IOAT_VER_2_0 - && ioat_chan->used_desc.prev - && ioat_chan->last_completion - && ioat_chan->last_completion == ioat_chan->watchdog_completion) { - - if (ioat_chan->pending < ioat_pending_level) - ioat2_dma_memcpy_issue_pending(&ioat_chan->common); - else { - ioat_dma_reset_channel(ioat_chan); - ioat_chan->watchdog_completion = 0; - } - } else { - ioat_chan->last_compl_desc_addr_hw = 0; - ioat_chan->watchdog_completion - = ioat_chan->last_completion; - } - - ioat_chan->watchdog_last_tcp_cookie = - ioat_chan->watchdog_tcp_cookie; - } - - schedule_delayed_work(&device->work, WATCHDOG_DELAY); -} - -static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); - struct ioat_desc_sw *first = tx_to_ioat_desc(tx); - struct ioat_desc_sw *prev, *new; - struct ioat_dma_descriptor *hw; - dma_cookie_t cookie; - LIST_HEAD(new_chain); - u32 copy; - size_t len; - dma_addr_t src, dst; - unsigned long orig_flags; - unsigned int desc_count = 0; - - /* src and dest and len are stored in the initial descriptor */ - len = first->len; - src = first->src; - dst = first->dst; - orig_flags = first->async_tx.flags; - new = first; - - spin_lock_bh(&ioat_chan->desc_lock); - prev = to_ioat_desc(ioat_chan->used_desc.prev); - prefetch(prev->hw); - do { - copy = min_t(size_t, len, ioat_chan->xfercap); - - async_tx_ack(&new->async_tx); - - hw = new->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - hw->next = 0; - - /* chain together the physical address list for the HW */ - wmb(); - prev->hw->next = (u64) new->async_tx.phys; - - len -= copy; - dst += copy; - src += copy; - - list_add_tail(&new->node, &new_chain); - desc_count++; - prev = new; - } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); - - if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "tx submit failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); - return -ENOMEM; - } - - hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - if (first->async_tx.callback) { - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; - if (first != new) { - /* move callback into to last desc */ - new->async_tx.callback = first->async_tx.callback; - new->async_tx.callback_param - = 
first->async_tx.callback_param; - first->async_tx.callback = NULL; - first->async_tx.callback_param = NULL; - } - } - - new->tx_cnt = desc_count; - new->async_tx.flags = orig_flags; /* client is in control of this ack */ - - /* store the original values for use in later cleanup */ - if (new != first) { - new->src = first->src; - new->dst = first->dst; - new->len = first->len; - } - - /* cookie incr and addition to used_list must be atomic */ - cookie = ioat_chan->common.cookie; - cookie++; - if (cookie < 0) - cookie = 1; - ioat_chan->common.cookie = new->async_tx.cookie = cookie; - - /* write address into NextDescriptor field of last desc in chain */ - to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = - first->async_tx.phys; - list_splice_tail(&new_chain, &ioat_chan->used_desc); - - ioat_chan->dmacount += desc_count; - ioat_chan->pending += desc_count; - if (ioat_chan->pending >= ioat_pending_level) - __ioat1_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); - - return cookie; -} - -static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); - struct ioat_desc_sw *first = tx_to_ioat_desc(tx); - struct ioat_desc_sw *new; - struct ioat_dma_descriptor *hw; - dma_cookie_t cookie; - u32 copy; - size_t len; - dma_addr_t src, dst; - unsigned long orig_flags; - unsigned int desc_count = 0; - - /* src and dest and len are stored in the initial descriptor */ - len = first->len; - src = first->src; - dst = first->dst; - orig_flags = first->async_tx.flags; - new = first; - - /* - * ioat_chan->desc_lock is still in force in version 2 path - * it gets unlocked at end of this function - */ - do { - copy = min_t(size_t, len, ioat_chan->xfercap); - - async_tx_ack(&new->async_tx); - - hw = new->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - - len -= copy; - dst += copy; - src += copy; - desc_count++; - } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); - - if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "tx submit failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); - return -ENOMEM; - } - - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - if (first->async_tx.callback) { - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; - if (first != new) { - /* move callback into to last desc */ - new->async_tx.callback = first->async_tx.callback; - new->async_tx.callback_param - = first->async_tx.callback_param; - first->async_tx.callback = NULL; - first->async_tx.callback_param = NULL; - } - } - - new->tx_cnt = desc_count; - new->async_tx.flags = orig_flags; /* client is in control of this ack */ - - /* store the original values for use in later cleanup */ - if (new != first) { - new->src = first->src; - new->dst = first->dst; - new->len = first->len; - } - - /* cookie incr and addition to used_list must be atomic */ - cookie = ioat_chan->common.cookie; - cookie++; - if (cookie < 0) - cookie = 1; - ioat_chan->common.cookie = new->async_tx.cookie = cookie; - - ioat_chan->dmacount += desc_count; - ioat_chan->pending += desc_count; - if (ioat_chan->pending >= ioat_pending_level) - __ioat2_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); - - return cookie; -} - -/** - * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair - * @ioat_chan: the channel supplying the memory pool for the descriptors - * @flags: allocation flags - */ -static struct ioat_desc_sw *ioat_dma_alloc_descriptor( - struct ioat_dma_chan *ioat_chan, - 
gfp_t flags) -{ - struct ioat_dma_descriptor *desc; - struct ioat_desc_sw *desc_sw; - struct ioatdma_device *ioatdma_device; - dma_addr_t phys; - - ioatdma_device = to_ioatdma_device(ioat_chan->common.device); - desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); - if (unlikely(!desc)) - return NULL; - - desc_sw = kzalloc(sizeof(*desc_sw), flags); - if (unlikely(!desc_sw)) { - pci_pool_free(ioatdma_device->dma_pool, desc, phys); - return NULL; - } - - memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); - switch (ioat_chan->device->version) { - case IOAT_VER_1_2: - desc_sw->async_tx.tx_submit = ioat1_tx_submit; - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - desc_sw->async_tx.tx_submit = ioat2_tx_submit; - break; - } - - desc_sw->hw = desc; - desc_sw->async_tx.phys = phys; - - return desc_sw; -} - -static int ioat_initial_desc_count = 256; -module_param(ioat_initial_desc_count, int, 0644); -MODULE_PARM_DESC(ioat_initial_desc_count, - "initial descriptors per channel (default: 256)"); - -/** - * ioat2_dma_massage_chan_desc - link the descriptors into a circle - * @ioat_chan: the channel to be massaged - */ -static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) -{ - struct ioat_desc_sw *desc, *_desc; - - /* setup used_desc */ - ioat_chan->used_desc.next = ioat_chan->free_desc.next; - ioat_chan->used_desc.prev = NULL; - - /* pull free_desc out of the circle so that every node is a hw - * descriptor, but leave it pointing to the list - */ - ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; - ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; - - /* circle link the hw descriptors */ - desc = to_ioat_desc(ioat_chan->free_desc.next); - desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; - list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { - desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; - } -} - -/** - * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors - * @chan: the channel to be filled out - */ -static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioat_desc_sw *desc; - u16 chanctrl; - u32 chanerr; - int i; - LIST_HEAD(tmp_list); - - /* have we already been set up? 
*/ - if (!list_empty(&ioat_chan->free_desc)) - return ioat_chan->desccount; - - /* Setup register to interrupt and write completion status on error */ - chanctrl = IOAT_CHANCTRL_ERR_INT_EN | - IOAT_CHANCTRL_ANY_ERR_ABORT_EN | - IOAT_CHANCTRL_ERR_COMPLETION_EN; - writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); - - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - if (chanerr) { - dev_err(&ioat_chan->device->pdev->dev, - "CHANERR = %x, clearing\n", chanerr); - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - } - - /* Allocate descriptors */ - for (i = 0; i < ioat_initial_desc_count; i++) { - desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); - if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, - "Only %d initial descriptors\n", i); - break; - } - list_add_tail(&desc->node, &tmp_list); - } - spin_lock_bh(&ioat_chan->desc_lock); - ioat_chan->desccount = i; - list_splice(&tmp_list, &ioat_chan->free_desc); - if (ioat_chan->device->version != IOAT_VER_1_2) - ioat2_dma_massage_chan_desc(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); - - /* allocate a completion writeback area */ - /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - ioat_chan->completion_virt = - pci_pool_alloc(ioat_chan->device->completion_pool, - GFP_KERNEL, - &ioat_chan->completion_addr); - memset(ioat_chan->completion_virt, 0, - sizeof(*ioat_chan->completion_virt)); - writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) ioat_chan->completion_addr) >> 32, - ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - - tasklet_enable(&ioat_chan->cleanup_task); - ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ - return ioat_chan->desccount; -} - -/** - * ioat_dma_free_chan_resources - release all the descriptors - * @chan: the channel to be cleaned - */ -static void ioat_dma_free_chan_resources(struct dma_chan *chan) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); - struct ioat_desc_sw *desc, *_desc; - int in_use_descs = 0; - - /* Before freeing channel resources first check - * if they have been previously allocated for this channel. - */ - if (ioat_chan->desccount == 0) - return; - - tasklet_disable(&ioat_chan->cleanup_task); - ioat_dma_memcpy_cleanup(ioat_chan); - - /* Delay 100ms after reset to allow internal DMA logic to quiesce - * before removing DMA descriptor resources. 
- */ - writeb(IOAT_CHANCMD_RESET, - ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); - mdelay(100); - - spin_lock_bh(&ioat_chan->desc_lock); - switch (ioat_chan->device->version) { - case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, - &ioat_chan->used_desc, node) { - in_use_descs++; - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); - kfree(desc); - } - list_for_each_entry_safe(desc, _desc, - &ioat_chan->free_desc, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); - kfree(desc); - } - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - list_for_each_entry_safe(desc, _desc, - ioat_chan->free_desc.next, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); - kfree(desc); - } - desc = to_ioat_desc(ioat_chan->free_desc.next); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); - kfree(desc); - INIT_LIST_HEAD(&ioat_chan->free_desc); - INIT_LIST_HEAD(&ioat_chan->used_desc); - break; - } - spin_unlock_bh(&ioat_chan->desc_lock); - - pci_pool_free(ioatdma_device->completion_pool, - ioat_chan->completion_virt, - ioat_chan->completion_addr); - - /* one is ok since we left it on there on purpose */ - if (in_use_descs > 1) - dev_err(&ioat_chan->device->pdev->dev, - "Freeing %d in use descriptors!\n", - in_use_descs - 1); - - ioat_chan->last_completion = ioat_chan->completion_addr = 0; - ioat_chan->pending = 0; - ioat_chan->dmacount = 0; - ioat_chan->desccount = 0; - ioat_chan->watchdog_completion = 0; - ioat_chan->last_compl_desc_addr_hw = 0; - ioat_chan->watchdog_tcp_cookie = - ioat_chan->watchdog_last_tcp_cookie = 0; -} - -/** - * ioat_dma_get_next_descriptor - return the next available descriptor - * @ioat_chan: IOAT DMA channel handle - * - * Gets the next descriptor from the chain, and must be called with the - * channel's desc_lock held. Allocates more descriptors if the channel - * has run out. 
- */ -static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) -{ - struct ioat_desc_sw *new; - - if (!list_empty(&ioat_chan->free_desc)) { - new = to_ioat_desc(ioat_chan->free_desc.next); - list_del(&new->node); - } else { - /* try to get another desc */ - new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); - if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "alloc failed\n"); - return NULL; - } - } - - prefetch(new->hw); - return new; -} - -static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) -{ - struct ioat_desc_sw *new; - - /* - * used.prev points to where to start processing - * used.next points to next free descriptor - * if used.prev == NULL, there are none waiting to be processed - * if used.next == used.prev.prev, there is only one free descriptor, - * and we need to use it to as a noop descriptor before - * linking in a new set of descriptors, since the device - * has probably already read the pointer to it - */ - if (ioat_chan->used_desc.prev && - ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { - - struct ioat_desc_sw *desc; - struct ioat_desc_sw *noop_desc; - int i; - - /* set up the noop descriptor */ - noop_desc = to_ioat_desc(ioat_chan->used_desc.next); - /* set size to non-zero value (channel returns error when size is 0) */ - noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; - noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; - noop_desc->hw->src_addr = 0; - noop_desc->hw->dst_addr = 0; - - ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; - ioat_chan->pending++; - ioat_chan->dmacount++; - - /* try to get a few more descriptors */ - for (i = 16; i; i--) { - desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); - if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, - "alloc failed\n"); - break; - } - list_add_tail(&desc->node, ioat_chan->used_desc.next); - - desc->hw->next - = to_ioat_desc(desc->node.next)->async_tx.phys; - to_ioat_desc(desc->node.prev)->hw->next - = desc->async_tx.phys; - ioat_chan->desccount++; - } - - ioat_chan->used_desc.next = noop_desc->node.next; - } - new = to_ioat_desc(ioat_chan->used_desc.next); - prefetch(new); - ioat_chan->used_desc.next = new->node.next; - - if (ioat_chan->used_desc.prev == NULL) - ioat_chan->used_desc.prev = &new->node; - - prefetch(new->hw); - return new; -} - -static struct ioat_desc_sw *ioat_dma_get_next_descriptor( - struct ioat_dma_chan *ioat_chan) -{ - if (!ioat_chan) - return NULL; - - switch (ioat_chan->device->version) { - case IOAT_VER_1_2: - return ioat1_dma_get_next_descriptor(ioat_chan); - case IOAT_VER_2_0: - case IOAT_VER_3_0: - return ioat2_dma_get_next_descriptor(ioat_chan); - } - return NULL; -} - -static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( - struct dma_chan *chan, - dma_addr_t dma_dest, - dma_addr_t dma_src, - size_t len, - unsigned long flags) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioat_desc_sw *new; - - spin_lock_bh(&ioat_chan->desc_lock); - new = ioat_dma_get_next_descriptor(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); - - if (new) { - new->len = len; - new->dst = dma_dest; - new->src = dma_src; - new->async_tx.flags = flags; - return &new->async_tx; - } else { - dev_err(&ioat_chan->device->pdev->dev, - "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); - return NULL; - } -} - -static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( - struct dma_chan 
*chan, - dma_addr_t dma_dest, - dma_addr_t dma_src, - size_t len, - unsigned long flags) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioat_desc_sw *new; - - spin_lock_bh(&ioat_chan->desc_lock); - new = ioat2_dma_get_next_descriptor(ioat_chan); - - /* - * leave ioat_chan->desc_lock set in ioat 2 path - * it will get unlocked at end of tx_submit - */ - - if (new) { - new->len = len; - new->dst = dma_dest; - new->src = dma_src; - new->async_tx.flags = flags; - return &new->async_tx; - } else { - spin_unlock_bh(&ioat_chan->desc_lock); - dev_err(&ioat_chan->device->pdev->dev, - "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); - return NULL; - } -} - -static void ioat_dma_cleanup_tasklet(unsigned long data) -{ - struct ioat_dma_chan *chan = (void *)data; - ioat_dma_memcpy_cleanup(chan); - writew(IOAT_CHANCTRL_INT_DISABLE, - chan->reg_base + IOAT_CHANCTRL_OFFSET); -} - -static void -ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) -{ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - pci_unmap_single(ioat_chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - else - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - } - - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - pci_unmap_single(ioat_chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - else - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - } -} - -/** - * ioat_dma_memcpy_cleanup - cleanup up finished descriptors - * @chan: ioat channel to be cleaned up - */ -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) -{ - unsigned long phys_complete; - struct ioat_desc_sw *desc, *_desc; - dma_cookie_t cookie = 0; - unsigned long desc_phys; - struct ioat_desc_sw *latest_desc; - - prefetch(ioat_chan->completion_virt); - - if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) - return; - - /* The completion writeback can happen at any time, - so reads by the driver need to be atomic operations - The descriptor physical addresses are limited to 32-bits - when the CPU can only do a 32-bit mov */ - -#if (BITS_PER_LONG == 64) - phys_complete = - ioat_chan->completion_virt->full - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; -#else - phys_complete = - ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; -#endif - - if ((ioat_chan->completion_virt->full - & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == - IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { - dev_err(&ioat_chan->device->pdev->dev, - "Channel halted, chanerr = %x\n", - readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); - - /* TODO do something to salvage the situation */ - } - - if (phys_complete == ioat_chan->last_completion) { - spin_unlock_bh(&ioat_chan->cleanup_lock); - /* - * perhaps we're stuck so hard that the watchdog can't go off? 
- * try to catch it after 2 seconds - */ - if (ioat_chan->device->version != IOAT_VER_3_0) { - if (time_after(jiffies, - ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { - ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); - ioat_chan->last_completion_time = jiffies; - } - } - return; - } - ioat_chan->last_completion_time = jiffies; - - cookie = 0; - if (!spin_trylock_bh(&ioat_chan->desc_lock)) { - spin_unlock_bh(&ioat_chan->cleanup_lock); - return; - } - - switch (ioat_chan->device->version) { - case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, - &ioat_chan->used_desc, node) { - - /* - * Incoming DMA requests may use multiple descriptors, - * due to exceeding xfercap, perhaps. If so, only the - * last one will have a cookie, and require unmapping. - */ - if (desc->async_tx.cookie) { - cookie = desc->async_tx.cookie; - ioat_dma_unmap(ioat_chan, desc); - if (desc->async_tx.callback) { - desc->async_tx.callback(desc->async_tx.callback_param); - desc->async_tx.callback = NULL; - } - } - - if (desc->async_tx.phys != phys_complete) { - /* - * a completed entry, but not the last, so clean - * up if the client is done with the descriptor - */ - if (async_tx_test_ack(&desc->async_tx)) { - list_move_tail(&desc->node, - &ioat_chan->free_desc); - } else - desc->async_tx.cookie = 0; - } else { - /* - * last used desc. Do not remove, so we can - * append from it, but don't look at it next - * time, either - */ - desc->async_tx.cookie = 0; - - /* TODO check status bits? */ - break; - } - } - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - /* has some other thread has already cleaned up? */ - if (ioat_chan->used_desc.prev == NULL) - break; - - /* work backwards to find latest finished desc */ - desc = to_ioat_desc(ioat_chan->used_desc.next); - latest_desc = NULL; - do { - desc = to_ioat_desc(desc->node.prev); - desc_phys = (unsigned long)desc->async_tx.phys - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; - if (desc_phys == phys_complete) { - latest_desc = desc; - break; - } - } while (&desc->node != ioat_chan->used_desc.prev); - - if (latest_desc != NULL) { - - /* work forwards to clear finished descriptors */ - for (desc = to_ioat_desc(ioat_chan->used_desc.prev); - &desc->node != latest_desc->node.next && - &desc->node != ioat_chan->used_desc.next; - desc = to_ioat_desc(desc->node.next)) { - if (desc->async_tx.cookie) { - cookie = desc->async_tx.cookie; - desc->async_tx.cookie = 0; - ioat_dma_unmap(ioat_chan, desc); - if (desc->async_tx.callback) { - desc->async_tx.callback(desc->async_tx.callback_param); - desc->async_tx.callback = NULL; - } - } - } - - /* move used.prev up beyond those that are finished */ - if (&desc->node == ioat_chan->used_desc.next) - ioat_chan->used_desc.prev = NULL; - else - ioat_chan->used_desc.prev = &desc->node; - } - break; - } - - spin_unlock_bh(&ioat_chan->desc_lock); - - ioat_chan->last_completion = phys_complete; - if (cookie != 0) - ioat_chan->completed_cookie = cookie; - - spin_unlock_bh(&ioat_chan->cleanup_lock); -} - -/** - * ioat_dma_is_complete - poll the status of a IOAT DMA transaction - * @chan: IOAT DMA channel handle - * @cookie: DMA transaction identifier - * @done: if not %NULL, updated with last completed transaction - * @used: if not %NULL, updated with last used transaction - */ -static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, - dma_cookie_t cookie, - dma_cookie_t *done, - dma_cookie_t *used) -{ - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - dma_cookie_t last_used; - dma_cookie_t last_complete; - enum 
dma_status ret; - - last_used = chan->cookie; - last_complete = ioat_chan->completed_cookie; - ioat_chan->watchdog_tcp_cookie = cookie; - - if (done) - *done = last_complete; - if (used) - *used = last_used; - - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) - return ret; - - ioat_dma_memcpy_cleanup(ioat_chan); - - last_used = chan->cookie; - last_complete = ioat_chan->completed_cookie; - - if (done) - *done = last_complete; - if (used) - *used = last_used; - - return dma_async_is_complete(cookie, last_complete, last_used); -} - -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) -{ - struct ioat_desc_sw *desc; - - spin_lock_bh(&ioat_chan->desc_lock); - - desc = ioat_dma_get_next_descriptor(ioat_chan); - - if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, - "Unable to start null desc - get next desc failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); - return; - } - - desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL - | IOAT_DMA_DESCRIPTOR_CTL_INT_GN - | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - /* set size to non-zero value (channel returns error when size is 0) */ - desc->hw->size = NULL_DESC_BUFFER_SIZE; - desc->hw->src_addr = 0; - desc->hw->dst_addr = 0; - async_tx_ack(&desc->async_tx); - switch (ioat_chan->device->version) { - case IOAT_VER_1_2: - desc->hw->next = 0; - list_add_tail(&desc->node, &ioat_chan->used_desc); - - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - - ioat_chan->dmacount++; - __ioat2_dma_memcpy_issue_pending(ioat_chan); - break; - } - spin_unlock_bh(&ioat_chan->desc_lock); -} - -/* - * Perform a IOAT transaction to verify the HW works. - */ -#define IOAT_TEST_SIZE 2000 - -static void ioat_dma_test_callback(void *dma_async_param) -{ - struct completion *cmp = dma_async_param; - - complete(cmp); -} - -/** - * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 
- * @device: device to be tested - */ -static int ioat_dma_self_test(struct ioatdma_device *device) -{ - int i; - u8 *src; - u8 *dest; - struct dma_chan *dma_chan; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - int err = 0; - struct completion cmp; - unsigned long tmo; - unsigned long flags; - - src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); - if (!src) - return -ENOMEM; - dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); - if (!dest) { - kfree(src); - return -ENOMEM; - } - - /* Fill in src buffer */ - for (i = 0; i < IOAT_TEST_SIZE; i++) - src[i] = (u8)i; - - /* Start copy, using first DMA channel */ - dma_chan = container_of(device->common.channels.next, - struct dma_chan, - device_node); - if (device->common.device_alloc_chan_resources(dma_chan) < 1) { - dev_err(&device->pdev->dev, - "selftest cannot allocate chan resource\n"); - err = -ENODEV; - goto out; - } - - dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, - DMA_TO_DEVICE); - dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, - DMA_FROM_DEVICE); - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; - tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, - IOAT_TEST_SIZE, flags); - if (!tx) { - dev_err(&device->pdev->dev, - "Self-test prep failed, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(&device->pdev->dev, - "Self-test setup failed, disabling\n"); - err = -ENODEV; - goto free_resources; - } - device->common.device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (tmo == 0 || - device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) - != DMA_SUCCESS) { - dev_err(&device->pdev->dev, - "Self-test copy timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - if (memcmp(src, dest, IOAT_TEST_SIZE)) { - dev_err(&device->pdev->dev, - "Self-test copy failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - -free_resources: - device->common.device_free_chan_resources(dma_chan); -out: - kfree(src); - kfree(dest); - return err; -} - -static char ioat_interrupt_style[32] = "msix"; -module_param_string(ioat_interrupt_style, ioat_interrupt_style, - sizeof(ioat_interrupt_style), 0644); -MODULE_PARM_DESC(ioat_interrupt_style, - "set ioat interrupt style: msix (default), " - "msix-single-vector, msi, intx)"); - -/** - * ioat_dma_setup_interrupts - setup interrupt handler - * @device: ioat device - */ -static int ioat_dma_setup_interrupts(struct ioatdma_device *device) -{ - struct ioat_dma_chan *ioat_chan; - int err, i, j, msixcnt; - u8 intrctrl = 0; - - if (!strcmp(ioat_interrupt_style, "msix")) - goto msix; - if (!strcmp(ioat_interrupt_style, "msix-single-vector")) - goto msix_single_vector; - if (!strcmp(ioat_interrupt_style, "msi")) - goto msi; - if (!strcmp(ioat_interrupt_style, "intx")) - goto intx; - dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", - ioat_interrupt_style); - goto err_no_irq; - -msix: - /* The number of MSI-X vectors should equal the number of channels */ - msixcnt = device->common.chancnt; - for (i = 0; i < msixcnt; i++) - device->msix_entries[i].entry = i; - - err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); - if (err < 0) - goto msi; - if (err > 0) - goto 
msix_single_vector; - - for (i = 0; i < msixcnt; i++) { - ioat_chan = ioat_lookup_chan_by_index(device, i); - err = request_irq(device->msix_entries[i].vector, - ioat_dma_do_interrupt_msix, - 0, "ioat-msix", ioat_chan); - if (err) { - for (j = 0; j < i; j++) { - ioat_chan = - ioat_lookup_chan_by_index(device, j); - free_irq(device->msix_entries[j].vector, - ioat_chan); - } - goto msix_single_vector; - } - } - intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; - device->irq_mode = msix_multi_vector; - goto done; - -msix_single_vector: - device->msix_entries[0].entry = 0; - err = pci_enable_msix(device->pdev, device->msix_entries, 1); - if (err) - goto msi; - - err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, - 0, "ioat-msix", device); - if (err) { - pci_disable_msix(device->pdev); - goto msi; - } - device->irq_mode = msix_single_vector; - goto done; - -msi: - err = pci_enable_msi(device->pdev); - if (err) - goto intx; - - err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, - 0, "ioat-msi", device); - if (err) { - pci_disable_msi(device->pdev); - goto intx; - } - /* - * CB 1.2 devices need a bit set in configuration space to enable MSI - */ - if (device->version == IOAT_VER_1_2) { - u32 dmactrl; - pci_read_config_dword(device->pdev, - IOAT_PCI_DMACTRL_OFFSET, &dmactrl); - dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; - pci_write_config_dword(device->pdev, - IOAT_PCI_DMACTRL_OFFSET, dmactrl); - } - device->irq_mode = msi; - goto done; - -intx: - err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, - IRQF_SHARED, "ioat-intx", device); - if (err) - goto err_no_irq; - device->irq_mode = intx; - -done: - intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; - writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); - return 0; - -err_no_irq: - /* Disable all interrupt generation */ - writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - dev_err(&device->pdev->dev, "no usable interrupts\n"); - device->irq_mode = none; - return -1; -} - -/** - * ioat_dma_remove_interrupts - remove whatever interrupts were set - * @device: ioat device - */ -static void ioat_dma_remove_interrupts(struct ioatdma_device *device) -{ - struct ioat_dma_chan *ioat_chan; - int i; - - /* Disable all interrupt generation */ - writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - - switch (device->irq_mode) { - case msix_multi_vector: - for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_lookup_chan_by_index(device, i); - free_irq(device->msix_entries[i].vector, ioat_chan); - } - pci_disable_msix(device->pdev); - break; - case msix_single_vector: - free_irq(device->msix_entries[0].vector, device); - pci_disable_msix(device->pdev); - break; - case msi: - free_irq(device->pdev->irq, device); - pci_disable_msi(device->pdev); - break; - case intx: - free_irq(device->pdev->irq, device); - break; - case none: - dev_warn(&device->pdev->dev, - "call to %s without interrupts setup\n", __func__); - } - device->irq_mode = none; -} - -struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, - void __iomem *iobase) -{ - int err; - struct ioatdma_device *device; - - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) { - err = -ENOMEM; - goto err_kzalloc; - } - device->pdev = pdev; - device->reg_base = iobase; - device->version = readb(device->reg_base + IOAT_VER_OFFSET); - - /* DMA coherent memory pool for DMA descriptor allocations */ - device->dma_pool = pci_pool_create("dma_desc_pool", pdev, - sizeof(struct ioat_dma_descriptor), - 64, 0); - if (!device->dma_pool) { - err = -ENOMEM; - goto 
err_dma_pool; - } - - device->completion_pool = pci_pool_create("completion_pool", pdev, - sizeof(u64), SMP_CACHE_BYTES, - SMP_CACHE_BYTES); - if (!device->completion_pool) { - err = -ENOMEM; - goto err_completion_pool; - } - - INIT_LIST_HEAD(&device->common.channels); - ioat_dma_enumerate_channels(device); - - device->common.device_alloc_chan_resources = - ioat_dma_alloc_chan_resources; - device->common.device_free_chan_resources = - ioat_dma_free_chan_resources; - device->common.dev = &pdev->dev; - - dma_cap_set(DMA_MEMCPY, device->common.cap_mask); - device->common.device_is_tx_complete = ioat_dma_is_complete; - switch (device->version) { - case IOAT_VER_1_2: - device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; - device->common.device_issue_pending = - ioat1_dma_memcpy_issue_pending; - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - device->common.device_issue_pending = - ioat2_dma_memcpy_issue_pending; - break; - } - - dev_err(&device->pdev->dev, - "Intel(R) I/OAT DMA Engine found," - " %d channels, device version 0x%02x, driver version %s\n", - device->common.chancnt, device->version, IOAT_DMA_VERSION); - - if (!device->common.chancnt) { - dev_err(&device->pdev->dev, - "Intel(R) I/OAT DMA Engine problem found: " - "zero channels detected\n"); - goto err_setup_interrupts; - } - - err = ioat_dma_setup_interrupts(device); - if (err) - goto err_setup_interrupts; - - err = ioat_dma_self_test(device); - if (err) - goto err_self_test; - - ioat_set_tcp_copy_break(device); - - dma_async_device_register(&device->common); - - if (device->version != IOAT_VER_3_0) { - INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); - schedule_delayed_work(&device->work, - WATCHDOG_DELAY); - } - - return device; - -err_self_test: - ioat_dma_remove_interrupts(device); -err_setup_interrupts: - pci_pool_destroy(device->completion_pool); -err_completion_pool: - pci_pool_destroy(device->dma_pool); -err_dma_pool: - kfree(device); -err_kzalloc: - dev_err(&pdev->dev, - "Intel(R) I/OAT DMA Engine initialization failed\n"); - return NULL; -} - -void ioat_dma_remove(struct ioatdma_device *device) -{ - struct dma_chan *chan, *_chan; - struct ioat_dma_chan *ioat_chan; - - if (device->version != IOAT_VER_3_0) - cancel_delayed_work(&device->work); - - ioat_dma_remove_interrupts(device); - - dma_async_device_unregister(&device->common); - - pci_pool_destroy(device->dma_pool); - pci_pool_destroy(device->completion_pool); - - iounmap(device->reg_base); - pci_release_regions(device->pdev); - pci_disable_device(device->pdev); - - list_for_each_entry_safe(chan, _chan, - &device->common.channels, device_node) { - ioat_chan = to_ioat_chan(chan); - list_del(&chan->device_node); - kfree(ioat_chan); - } - kfree(device); -} - diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h deleted file mode 100644 index a52ff4bd4601..000000000000 --- a/drivers/dma/ioatdma.h +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - */ -#ifndef IOATDMA_H -#define IOATDMA_H - -#include -#include "ioatdma_hw.h" -#include -#include -#include -#include -#include - -#define IOAT_DMA_VERSION "3.64" - -enum ioat_interrupt { - none = 0, - msix_multi_vector = 1, - msix_single_vector = 2, - msi = 3, - intx = 4, -}; - -#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 -#define IOAT_DMA_DCA_ANY_CPU ~0 -#define IOAT_WATCHDOG_PERIOD (2 * HZ) - - -/** - * struct ioatdma_device - internal representation of a IOAT device - * @pdev: PCI-Express device - * @reg_base: MMIO register space base address - * @dma_pool: for allocating DMA descriptors - * @common: embedded struct dma_device - * @version: version of ioatdma device - * @irq_mode: which style irq to use - * @msix_entries: irq handlers - * @idx: per channel data - */ - -struct ioatdma_device { - struct pci_dev *pdev; - void __iomem *reg_base; - struct pci_pool *dma_pool; - struct pci_pool *completion_pool; - struct dma_device common; - u8 version; - enum ioat_interrupt irq_mode; - struct delayed_work work; - struct msix_entry msix_entries[4]; - struct ioat_dma_chan *idx[4]; -}; - -/** - * struct ioat_dma_chan - internal representation of a DMA channel - */ -struct ioat_dma_chan { - - void __iomem *reg_base; - - dma_cookie_t completed_cookie; - unsigned long last_completion; - unsigned long last_completion_time; - - size_t xfercap; /* XFERCAP register value expanded out */ - - spinlock_t cleanup_lock; - spinlock_t desc_lock; - struct list_head free_desc; - struct list_head used_desc; - unsigned long watchdog_completion; - int watchdog_tcp_cookie; - u32 watchdog_last_tcp_cookie; - struct delayed_work work; - - int pending; - int dmacount; - int desccount; - - struct ioatdma_device *device; - struct dma_chan common; - - dma_addr_t completion_addr; - union { - u64 full; /* HW completion writeback */ - struct { - u32 low; - u32 high; - }; - } *completion_virt; - unsigned long last_compl_desc_addr_hw; - struct tasklet_struct cleanup_task; -}; - -/* wrapper around hardware descriptor format + additional software fields */ - -/** - * struct ioat_desc_sw - wrapper around hardware descriptor - * @hw: hardware DMA descriptor - * @node: this descriptor will either be on the free list, - * or attached to a transaction list (async_tx.tx_list) - * @tx_cnt: number of descriptors required to complete the transaction - * @async_tx: the generic software descriptor for all engines - */ -struct ioat_desc_sw { - struct ioat_dma_descriptor *hw; - struct list_head node; - int tx_cnt; - size_t len; - dma_addr_t src; - dma_addr_t dst; - struct dma_async_tx_descriptor async_tx; -}; - -static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) -{ - #ifdef CONFIG_NET_DMA - switch (dev->version) { - case IOAT_VER_1_2: - sysctl_tcp_dma_copybreak = 4096; - break; - case IOAT_VER_2_0: - sysctl_tcp_dma_copybreak = 2048; - break; - case IOAT_VER_3_0: - sysctl_tcp_dma_copybreak = 262144; - break; - } - #endif -} - -#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) -struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, - void __iomem *iobase); -void ioat_dma_remove(struct ioatdma_device *device); -struct 
dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); -#else -#define ioat_dma_probe(pdev, iobase) NULL -#define ioat_dma_remove(device) do { } while (0) -#define ioat_dca_init(pdev, iobase) NULL -#define ioat2_dca_init(pdev, iobase) NULL -#define ioat3_dca_init(pdev, iobase) NULL -#endif - -#endif /* IOATDMA_H */ diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h deleted file mode 100644 index afa57eef86c9..000000000000 --- a/drivers/dma/ioatdma_hw.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - */ -#ifndef _IOAT_HW_H_ -#define _IOAT_HW_H_ - -/* PCI Configuration Space Values */ -#define IOAT_PCI_VID 0x8086 - -/* CB device ID's */ -#define IOAT_PCI_DID_5000 0x1A38 -#define IOAT_PCI_DID_CNB 0x360B -#define IOAT_PCI_DID_SCNB 0x65FF -#define IOAT_PCI_DID_SNB 0x402F - -#define IOAT_PCI_RID 0x00 -#define IOAT_PCI_SVID 0x8086 -#define IOAT_PCI_SID 0x8086 -#define IOAT_VER_1_2 0x12 /* Version 1.2 */ -#define IOAT_VER_2_0 0x20 /* Version 2.0 */ -#define IOAT_VER_3_0 0x30 /* Version 3.0 */ - -struct ioat_dma_descriptor { - uint32_t size; - uint32_t ctl; - uint64_t src_addr; - uint64_t dst_addr; - uint64_t next; - uint64_t rsv1; - uint64_t rsv2; - uint64_t user1; - uint64_t user2; -}; - -#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001 -#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002 -#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004 -#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 -#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 -#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 -#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040 -#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080 -#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100 -#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200 -#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400 - -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000 -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000 - -#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001 -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000 - -#endif diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h deleted file mode 100644 index 49bc277424f8..000000000000 --- a/drivers/dma/ioatdma_registers.h +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - */ -#ifndef _IOAT_REGISTERS_H_ -#define _IOAT_REGISTERS_H_ - -#define IOAT_PCI_DMACTRL_OFFSET 0x48 -#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 -#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 - -#define IOAT_PCI_DEVICE_ID_OFFSET 0x02 -#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 -#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 - -/* MMIO Device Registers */ -#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ - -#define IOAT_XFERCAP_OFFSET 0x01 /* 8-bit */ -#define IOAT_XFERCAP_4KB 12 -#define IOAT_XFERCAP_8KB 13 -#define IOAT_XFERCAP_16KB 14 -#define IOAT_XFERCAP_32KB 15 -#define IOAT_XFERCAP_32GB 0 - -#define IOAT_GENCTRL_OFFSET 0x02 /* 8-bit */ -#define IOAT_GENCTRL_DEBUG_EN 0x01 - -#define IOAT_INTRCTRL_OFFSET 0x03 /* 8-bit */ -#define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */ -#define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */ -#define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */ -#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */ - -#define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */ - -#define IOAT_VER_OFFSET 0x08 /* 8-bit */ -#define IOAT_VER_MAJOR_MASK 0xF0 -#define IOAT_VER_MINOR_MASK 0x0F -#define GET_IOAT_VER_MAJOR(x) (((x) & IOAT_VER_MAJOR_MASK) >> 4) -#define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK) - -#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ - -#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ -#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ -#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ - -#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ -#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 - -#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ - -/* DMA Channel Registers */ -#define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */ -#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000 -#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100 -#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020 -#define IOAT_CHANCTRL_ERR_INT_EN 0x0010 -#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 -#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 -#define IOAT_CHANCTRL_INT_DISABLE 0x0001 - -#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ -#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ -#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */ - - -#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ -#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */ -#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ - ? 
IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET) -#define IOAT1_CHANSTS_OFFSET_LOW 0x04 -#define IOAT2_CHANSTS_OFFSET_LOW 0x08 -#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \ - ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW) -#define IOAT1_CHANSTS_OFFSET_HIGH 0x08 -#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C -#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ - ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) -#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F -#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 -#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 - - - -#define IOAT_CHAN_DMACOUNT_OFFSET 0x06 /* 16-bit DMA Count register */ - -#define IOAT_DCACTRL_OFFSET 0x30 /* 32 bit Direct Cache Access Control Register */ -#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000 -#define IOAT_DCACTRL_TARGET_CPU_MASK 0xFFFF /* APIC ID */ - -/* CB DCA Memory Space Registers */ -#define IOAT_DCAOFFSET_OFFSET 0x14 -/* CB_BAR + IOAT_DCAOFFSET value */ -#define IOAT_DCA_VER_OFFSET 0x00 -#define IOAT_DCA_VER_MAJOR_MASK 0xF0 -#define IOAT_DCA_VER_MINOR_MASK 0x0F - -#define IOAT_DCA_COMP_OFFSET 0x02 -#define IOAT_DCA_COMP_V1 0x1 - -#define IOAT_FSB_CAPABILITY_OFFSET 0x04 -#define IOAT_FSB_CAPABILITY_PREFETCH 0x1 - -#define IOAT_PCI_CAPABILITY_OFFSET 0x06 -#define IOAT_PCI_CAPABILITY_MEMWR 0x1 - -#define IOAT_FSB_CAP_ENABLE_OFFSET 0x08 -#define IOAT_FSB_CAP_ENABLE_PREFETCH 0x1 - -#define IOAT_PCI_CAP_ENABLE_OFFSET 0x0A -#define IOAT_PCI_CAP_ENABLE_MEMWR 0x1 - -#define IOAT_APICID_TAG_MAP_OFFSET 0x0C -#define IOAT_APICID_TAG_MAP_TAG0 0x0000000F -#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0 -#define IOAT_APICID_TAG_MAP_TAG1 0x000000F0 -#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4 -#define IOAT_APICID_TAG_MAP_TAG2 0x00000F00 -#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8 -#define IOAT_APICID_TAG_MAP_TAG3 0x0000F000 -#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12 -#define IOAT_APICID_TAG_MAP_TAG4 0x000F0000 -#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16 -#define IOAT_APICID_TAG_CB2_VALID 0x8080808080 - -#define IOAT_DCA_GREQID_OFFSET 0x10 -#define IOAT_DCA_GREQID_SIZE 0x04 -#define IOAT_DCA_GREQID_MASK 0xFFFF -#define IOAT_DCA_GREQID_IGNOREFUN 0x10000000 -#define IOAT_DCA_GREQID_VALID 0x20000000 -#define IOAT_DCA_GREQID_LASTID 0x80000000 - -#define IOAT3_CSI_CAPABILITY_OFFSET 0x08 -#define IOAT3_CSI_CAPABILITY_PREFETCH 0x1 - -#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A -#define IOAT3_PCI_CAPABILITY_MEMWR 0x1 - -#define IOAT3_CSI_CONTROL_OFFSET 0x0C -#define IOAT3_CSI_CONTROL_PREFETCH 0x1 - -#define IOAT3_PCI_CONTROL_OFFSET 0x0E -#define IOAT3_PCI_CONTROL_MEMWR 0x1 - -#define IOAT3_APICID_TAG_MAP_OFFSET 0x10 -#define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10 -#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14 - -#define IOAT3_DCA_GREQID_OFFSET 0x02 - -#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ -#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ -#define IOAT_CHAINADDR_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ - ? IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET) -#define IOAT1_CHAINADDR_OFFSET_LOW 0x0C -#define IOAT2_CHAINADDR_OFFSET_LOW 0x10 -#define IOAT_CHAINADDR_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \ - ? 
IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW) -#define IOAT1_CHAINADDR_OFFSET_HIGH 0x10 -#define IOAT2_CHAINADDR_OFFSET_HIGH 0x14 -#define IOAT_CHAINADDR_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ - ? IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH) - -#define IOAT1_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */ -#define IOAT2_CHANCMD_OFFSET 0x04 /* 8-bit DMA Channel Command Register */ -#define IOAT_CHANCMD_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ - ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET) -#define IOAT_CHANCMD_RESET 0x20 -#define IOAT_CHANCMD_RESUME 0x10 -#define IOAT_CHANCMD_ABORT 0x08 -#define IOAT_CHANCMD_SUSPEND 0x04 -#define IOAT_CHANCMD_APPEND 0x02 -#define IOAT_CHANCMD_START 0x01 - -#define IOAT_CHANCMP_OFFSET 0x18 /* 64-bit Channel Completion Address Register */ -#define IOAT_CHANCMP_OFFSET_LOW 0x18 -#define IOAT_CHANCMP_OFFSET_HIGH 0x1C - -#define IOAT_CDAR_OFFSET 0x20 /* 64-bit Current Descriptor Address Register */ -#define IOAT_CDAR_OFFSET_LOW 0x20 -#define IOAT_CDAR_OFFSET_HIGH 0x24 - -#define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ -#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 -#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 -#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 -#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 -#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 -#define IOAT_CHANERR_CHANCMD_ERR 0x0020 -#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 -#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 -#define IOAT_CHANERR_READ_DATA_ERR 0x0100 -#define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 -#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 -#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 -#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 -#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 -#define IOAT_CHANERR_SOFT_ERR 0x4000 -#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000 - -#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ - -#endif /* _IOAT_REGISTERS_H_ */ -- cgit v1.2.3 From 1f27adc2f050836c12deb4d99afe507636537a0b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:29:02 -0700 Subject: ioat: move definitions to dma.h Some of these defines may be useful outside of dma.c and the header is private so there are no namespace pollution concerns. 
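Most of the promoted defines are container_of() conversions (to_ioat_chan() and friends) that recover the driver-private structure wrapping an embedded generic object, plus the chan_num() helper and a few delay/workaround constants. As a rough illustration of that idiom only, the stand-alone sketch below uses made-up stand-in types (generic_chan and outer_chan are not ioat structures) to show how a pointer to an embedded member gets back to its enclosing object:

/* Minimal model of the container_of() idiom behind to_ioat_chan() and
 * friends; generic_chan and outer_chan are made-up stand-ins, not ioat
 * structures.  Plain user-space C so it compiles and runs as-is. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_chan {			/* stands in for struct dma_chan */
	int cookie;
};

struct outer_chan {			/* stands in for struct ioat_dma_chan */
	unsigned long reg_base;
	struct generic_chan common;	/* embedded generic object */
};

#define to_outer_chan(c) container_of(c, struct outer_chan, common)

int main(void)
{
	struct outer_chan oc = { .reg_base = 0x1000, .common = { .cookie = 1 } };
	struct generic_chan *gc = &oc.common;	/* what core code hands around */

	/* recover the wrapping, driver-private structure from the member */
	printf("reg_base = 0x%lx\n", to_outer_chan(gc)->reg_base);
	return 0;
}

Because any file that is handed a struct dma_chan or dma_async_tx_descriptor needs these conversions, keeping them in the private dma.h lets the other ioat/ source files share them without exporting anything.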
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 14 -------------- drivers/dma/ioat/dma.h | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 14 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 648797e83295..16c080786a65 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -38,28 +38,14 @@ #include "registers.h" #include "hw.h" -#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) -#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) -#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) -#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) - -#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) static int ioat_pending_level = 4; module_param(ioat_pending_level, int, 0644); MODULE_PARM_DESC(ioat_pending_level, "high-water mark for pushing ioat descriptors (default: 4)"); -#define RESET_DELAY msecs_to_jiffies(100) -#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) static void ioat_dma_chan_reset_part2(struct work_struct *work); static void ioat_dma_chan_watchdog(struct work_struct *work); -/* - * workaround for IOAT ver.3.0 null descriptor issue - * (channel returns error when size is 0) - */ -#define NULL_DESC_BUFFER_SIZE 1 - /* internal functions */ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index e80e787fe64f..ccb400f5e279 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -43,6 +43,22 @@ enum ioat_interrupt { #define IOAT_DMA_DCA_ANY_CPU ~0 #define IOAT_WATCHDOG_PERIOD (2 * HZ) +#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) +#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) +#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) +#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) + +#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) + +#define RESET_DELAY msecs_to_jiffies(100) +#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) + +/* + * workaround for IOAT ver.3.0 null descriptor issue + * (channel returns error when size is 0) + */ +#define NULL_DESC_BUFFER_SIZE 1 + /** * struct ioatdma_device - internal representation of a IOAT device -- cgit v1.2.3 From e6c0b69a43150c1a37cf342ce5faedf12583bf79 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:29:44 -0700 Subject: ioat: convert ioat_probe to pcim/devm The driver currently duplicates much of what these routines offer, so just use the common code. For example ->irq_mode tracks what interrupt mode was initialized, which duplicates the ->msix_enabled and ->msi_enabled handling in pcim_release. This also adds a check to the return value of dma_async_device_register, which can fail. 
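The conversion leans on the managed (devm/pcim) resource API: anything acquired through pcim_enable_device(), pcim_iomap_regions(), devm_kzalloc() or devm_request_irq() is released by the driver core when probe fails or the device is unbound, which is what lets the hand-rolled unwind labels disappear. A rough, generic sketch of that probe shape follows; my_probe, my_priv, MY_BAR and the "my_driver" region name are illustrative placeholders, not ioat code:

/*
 * Rough sketch of the managed PCI probe pattern (illustrative only):
 * every resource below is released automatically by the driver core on
 * probe failure or device unbind, so no unwind labels are needed.
 */
#include <linux/pci.h>
#include <linux/slab.h>

#define MY_BAR 0

struct my_priv {
	void __iomem *regs;		/* MMIO registers from the mapped BAR */
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct my_priv *priv;
	int err;

	err = pcim_enable_device(pdev);		/* managed pci_enable_device() */
	if (err)
		return err;

	/* request + ioremap BAR 0 in one managed call */
	err = pcim_iomap_regions(pdev, 1 << MY_BAR, "my_driver");
	if (err)
		return err;

	iomap = pcim_iomap_table(pdev);		/* table of the mapped BARs */
	if (!iomap)
		return -ENOMEM;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)				/* freed with the device */
		return -ENOMEM;

	priv->regs = iomap[MY_BAR];
	pci_set_drvdata(pdev, priv);
	pci_set_master(pdev);

	return 0;
}

Compared with the open-coded version, any mid-probe failure can simply return the error code, which is exactly the simplification visible in the pci.c hunk of this patch below.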
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 130 ++++++++++++++++--------------------------------- drivers/dma/ioat/dma.h | 11 ----- drivers/dma/ioat/hw.h | 1 + drivers/dma/ioat/pci.c | 67 +++++++++---------------- 4 files changed, 68 insertions(+), 141 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 16c080786a65..65f8b7492a4d 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -121,6 +121,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) u32 xfercap; int i; struct ioat_dma_chan *ioat_chan; + struct device *dev = &device->pdev->dev; /* * IOAT ver.3 workarounds @@ -164,7 +165,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) } #endif for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); + ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) { device->common.chancnt = i; break; @@ -1450,7 +1451,11 @@ MODULE_PARM_DESC(ioat_interrupt_style, static int ioat_dma_setup_interrupts(struct ioatdma_device *device) { struct ioat_dma_chan *ioat_chan; - int err, i, j, msixcnt; + struct pci_dev *pdev = device->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + int i, j, msixcnt; + int err = -EINVAL; u8 intrctrl = 0; if (!strcmp(ioat_interrupt_style, "msix")) @@ -1461,8 +1466,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) goto msi; if (!strcmp(ioat_interrupt_style, "intx")) goto intx; - dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", - ioat_interrupt_style); + dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); goto err_no_irq; msix: @@ -1471,55 +1475,55 @@ msix: for (i = 0; i < msixcnt; i++) device->msix_entries[i].entry = i; - err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); + err = pci_enable_msix(pdev, device->msix_entries, msixcnt); if (err < 0) goto msi; if (err > 0) goto msix_single_vector; for (i = 0; i < msixcnt; i++) { + msix = &device->msix_entries[i]; ioat_chan = ioat_lookup_chan_by_index(device, i); - err = request_irq(device->msix_entries[i].vector, - ioat_dma_do_interrupt_msix, - 0, "ioat-msix", ioat_chan); + err = devm_request_irq(dev, msix->vector, + ioat_dma_do_interrupt_msix, 0, + "ioat-msix", ioat_chan); if (err) { for (j = 0; j < i; j++) { + msix = &device->msix_entries[j]; ioat_chan = ioat_lookup_chan_by_index(device, j); - free_irq(device->msix_entries[j].vector, - ioat_chan); + devm_free_irq(dev, msix->vector, ioat_chan); } goto msix_single_vector; } } intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; - device->irq_mode = msix_multi_vector; goto done; msix_single_vector: - device->msix_entries[0].entry = 0; - err = pci_enable_msix(device->pdev, device->msix_entries, 1); + msix = &device->msix_entries[0]; + msix->entry = 0; + err = pci_enable_msix(pdev, device->msix_entries, 1); if (err) goto msi; - err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, - 0, "ioat-msix", device); + err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, + "ioat-msix", device); if (err) { - pci_disable_msix(device->pdev); + pci_disable_msix(pdev); goto msi; } - device->irq_mode = msix_single_vector; goto done; msi: - err = pci_enable_msi(device->pdev); + err = pci_enable_msi(pdev); if (err) goto intx; - err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, - 0, "ioat-msi", device); + err = devm_request_irq(dev, pdev->irq, 
ioat_dma_do_interrupt, 0, + "ioat-msi", device); if (err) { - pci_disable_msi(device->pdev); + pci_disable_msi(pdev); goto intx; } /* @@ -1527,21 +1531,17 @@ msi: */ if (device->version == IOAT_VER_1_2) { u32 dmactrl; - pci_read_config_dword(device->pdev, - IOAT_PCI_DMACTRL_OFFSET, &dmactrl); + pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; - pci_write_config_dword(device->pdev, - IOAT_PCI_DMACTRL_OFFSET, dmactrl); + pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); } - device->irq_mode = msi; goto done; intx: - err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, - IRQF_SHARED, "ioat-intx", device); + err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, + IRQF_SHARED, "ioat-intx", device); if (err) goto err_no_irq; - device->irq_mode = intx; done: intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; @@ -1551,60 +1551,26 @@ done: err_no_irq: /* Disable all interrupt generation */ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - dev_err(&device->pdev->dev, "no usable interrupts\n"); - device->irq_mode = none; - return -1; + dev_err(dev, "no usable interrupts\n"); + return err; } -/** - * ioat_dma_remove_interrupts - remove whatever interrupts were set - * @device: ioat device - */ -static void ioat_dma_remove_interrupts(struct ioatdma_device *device) +static void ioat_disable_interrupts(struct ioatdma_device *device) { - struct ioat_dma_chan *ioat_chan; - int i; - /* Disable all interrupt generation */ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - - switch (device->irq_mode) { - case msix_multi_vector: - for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_lookup_chan_by_index(device, i); - free_irq(device->msix_entries[i].vector, ioat_chan); - } - pci_disable_msix(device->pdev); - break; - case msix_single_vector: - free_irq(device->msix_entries[0].vector, device); - pci_disable_msix(device->pdev); - break; - case msi: - free_irq(device->pdev->irq, device); - pci_disable_msi(device->pdev); - break; - case intx: - free_irq(device->pdev->irq, device); - break; - case none: - dev_warn(&device->pdev->dev, - "call to %s without interrupts setup\n", __func__); - } - device->irq_mode = none; } struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) { int err; + struct device *dev = &pdev->dev; struct ioatdma_device *device; - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) { + device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); + if (!device) err = -ENOMEM; - goto err_kzalloc; - } device->pdev = pdev; device->reg_base = iobase; device->version = readb(device->reg_base + IOAT_VER_OFFSET); @@ -1651,14 +1617,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, break; } - dev_err(&device->pdev->dev, - "Intel(R) I/OAT DMA Engine found," + dev_err(dev, "Intel(R) I/OAT DMA Engine found," " %d channels, device version 0x%02x, driver version %s\n", device->common.chancnt, device->version, IOAT_DMA_VERSION); if (!device->common.chancnt) { - dev_err(&device->pdev->dev, - "Intel(R) I/OAT DMA Engine problem found: " + dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: " "zero channels detected\n"); goto err_setup_interrupts; } @@ -1671,9 +1635,11 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, if (err) goto err_self_test; - ioat_set_tcp_copy_break(device); + err = dma_async_device_register(&device->common); + if (err) + goto err_self_test; - dma_async_device_register(&device->common); + ioat_set_tcp_copy_break(device); if (device->version 
!= IOAT_VER_3_0) { INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); @@ -1684,16 +1650,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, return device; err_self_test: - ioat_dma_remove_interrupts(device); + ioat_disable_interrupts(device); err_setup_interrupts: pci_pool_destroy(device->completion_pool); err_completion_pool: pci_pool_destroy(device->dma_pool); err_dma_pool: - kfree(device); -err_kzalloc: - dev_err(&pdev->dev, - "Intel(R) I/OAT DMA Engine initialization failed\n"); return NULL; } @@ -1705,23 +1667,17 @@ void ioat_dma_remove(struct ioatdma_device *device) if (device->version != IOAT_VER_3_0) cancel_delayed_work(&device->work); - ioat_dma_remove_interrupts(device); + ioat_disable_interrupts(device); dma_async_device_unregister(&device->common); pci_pool_destroy(device->dma_pool); pci_pool_destroy(device->completion_pool); - iounmap(device->reg_base); - pci_release_regions(device->pdev); - pci_disable_device(device->pdev); - list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) { ioat_chan = to_ioat_chan(chan); list_del(&chan->device_node); - kfree(ioat_chan); } - kfree(device); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index ccb400f5e279..5e8d7cfabc21 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -31,14 +31,6 @@ #define IOAT_DMA_VERSION "3.64" -enum ioat_interrupt { - none = 0, - msix_multi_vector = 1, - msix_single_vector = 2, - msi = 3, - intx = 4, -}; - #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 #define IOAT_DMA_DCA_ANY_CPU ~0 #define IOAT_WATCHDOG_PERIOD (2 * HZ) @@ -59,7 +51,6 @@ enum ioat_interrupt { */ #define NULL_DESC_BUFFER_SIZE 1 - /** * struct ioatdma_device - internal representation of a IOAT device * @pdev: PCI-Express device @@ -67,7 +58,6 @@ enum ioat_interrupt { * @dma_pool: for allocating DMA descriptors * @common: embedded struct dma_device * @version: version of ioatdma device - * @irq_mode: which style irq to use * @msix_entries: irq handlers * @idx: per channel data */ @@ -79,7 +69,6 @@ struct ioatdma_device { struct pci_pool *completion_pool; struct dma_device common; u8 version; - enum ioat_interrupt irq_mode; struct delayed_work work; struct msix_entry msix_entries[4]; struct ioat_dma_chan *idx[4]; diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index afa57eef86c9..1438fa5c4d1a 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -23,6 +23,7 @@ /* PCI Configuration Space Values */ #define IOAT_PCI_VID 0x8086 +#define IOAT_MMIO_BAR 0 /* CB device ID's */ #define IOAT_PCI_DID_5000 0x1A38 diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index d7948bfd8fba..982e38fd177c 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -62,7 +62,6 @@ static struct pci_device_id ioat_pci_tbl[] = { struct ioat_device { struct pci_dev *pdev; - void __iomem *iobase; struct ioatdma_device *dma; struct dca_provider *dca; }; @@ -75,8 +74,10 @@ static int ioat_dca_enabled = 1; module_param(ioat_dca_enabled, int, 0644); MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); +#define DRV_NAME "ioatdma" + static struct pci_driver ioat_pci_driver = { - .name = "ioatdma", + .name = DRV_NAME, .id_table = ioat_pci_tbl, .probe = ioat_probe, .remove = __devexit_p(ioat_remove), @@ -85,47 +86,42 @@ static struct pci_driver ioat_pci_driver = { static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + void __iomem * const *iomap; void __iomem *iobase; + struct device *dev = &pdev->dev; struct 
ioat_device *device; - unsigned long mmio_start, mmio_len; int err; - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) - goto err_enable_device; + return err; - err = pci_request_regions(pdev, ioat_pci_driver.name); + err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); if (err) - goto err_request_regions; + return err; + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) - goto err_set_dma_mask; + return err; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) - goto err_set_dma_mask; - - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - iobase = ioremap(mmio_start, mmio_len); - if (!iobase) { - err = -ENOMEM; - goto err_ioremap; - } + return err; + + device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) { - err = -ENOMEM; - goto err_kzalloc; - } device->pdev = pdev; pci_set_drvdata(pdev, device); - device->iobase = iobase; + iobase = iomap[IOAT_MMIO_BAR]; pci_set_master(pdev); @@ -146,28 +142,15 @@ static int __devinit ioat_probe(struct pci_dev *pdev, device->dca = ioat3_dca_init(pdev, iobase); break; default: - err = -ENODEV; - break; + return -ENODEV; } - if (!device->dma) - err = -ENODEV; - if (err) - goto err_version; + if (!device->dma) { + dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); + return -ENODEV; + } return 0; - -err_version: - kfree(device); -err_kzalloc: - iounmap(iobase); -err_ioremap: -err_set_dma_mask: - pci_release_regions(pdev); - pci_disable_device(pdev); -err_request_regions: -err_enable_device: - return err; } static void __devexit ioat_remove(struct pci_dev *pdev) @@ -185,8 +168,6 @@ static void __devexit ioat_remove(struct pci_dev *pdev) ioat_dma_remove(device->dma); device->dma = NULL; } - - kfree(device); } static int __init ioat_init_module(void) -- cgit v1.2.3 From bc3c70258526a635325f1f15138a96297879bc1a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:33:42 -0700 Subject: ioat: cleanup some long deref chains and 80 column collisions * reduce device->common. to dma-> in ioat_dma_{probe,remove,selftest} * ioat_lookup_chan_by_index to ioat_chan_by_index * multi-line function definitions * ioat_desc_sw.async_tx to ioat_desc_sw.txd * desc->txd. 
to tx-> in cleanup routine Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 304 +++++++++++++++++++++++-------------------------- drivers/dma/ioat/dma.h | 7 +- 2 files changed, 144 insertions(+), 167 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 65f8b7492a4d..462dae627191 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -55,9 +55,8 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); static struct ioat_desc_sw * ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); -static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( - struct ioatdma_device *device, - int index) +static inline struct ioat_dma_chan * +ioat_chan_by_index(struct ioatdma_device *device, int index) { return device->idx[index]; } @@ -87,7 +86,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_bit(bit, &attnstatus, BITS_PER_LONG) { - ioat_chan = ioat_lookup_chan_by_index(instance, bit); + ioat_chan = ioat_chan_by_index(instance, bit); tasklet_schedule(&ioat_chan->cleanup_task); } @@ -205,8 +204,8 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) * descriptors to hw * @chan: DMA channel handle */ -static inline void __ioat1_dma_memcpy_issue_pending( - struct ioat_dma_chan *ioat_chan) +static inline void +__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) { ioat_chan->pending = 0; writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); @@ -223,8 +222,8 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) } } -static inline void __ioat2_dma_memcpy_issue_pending( - struct ioat_dma_chan *ioat_chan) +static inline void +__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) { ioat_chan->pending = 0; writew(ioat_chan->dmacount, @@ -279,18 +278,18 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) desc = to_ioat_desc(ioat_chan->used_desc.prev); switch (ioat_chan->device->version) { case IOAT_VER_1_2: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); break; case IOAT_VER_2_0: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); /* tell the engine to go with what's left to be done */ @@ -299,7 +298,7 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) break; } - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d reset - %d descs waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); @@ -322,7 +321,7 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) chansts = (ioat_chan->completion_virt->low & IOAT_CHANSTS_DMA_TRANSFER_STATUS); if (chanerr) { - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", chan_num(ioat_chan), chansts, chanerr); writel(chanerr, 
ioat_chan->reg_base + IOAT_CHANERR_OFFSET); @@ -367,7 +366,7 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) unsigned long compl_desc_addr_hw; for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_lookup_chan_by_index(device, i); + ioat_chan = ioat_chan_by_index(device, i); if (ioat_chan->device->version == IOAT_VER_1_2 /* have we started processing anything yet */ @@ -475,7 +474,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) len = first->len; src = first->src; dst = first->dst; - orig_flags = first->async_tx.flags; + orig_flags = first->txd.flags; new = first; spin_lock_bh(&ioat_chan->desc_lock); @@ -484,7 +483,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) do { copy = min_t(size_t, len, ioat_chan->xfercap); - async_tx_ack(&new->async_tx); + async_tx_ack(&new->txd); hw = new->hw; hw->size = copy; @@ -495,7 +494,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) /* chain together the physical address list for the HW */ wmb(); - prev->hw->next = (u64) new->async_tx.phys; + prev->hw->next = (u64) new->txd.phys; len -= copy; dst += copy; @@ -507,27 +506,26 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "tx submit failed\n"); + dev_err(to_dev(ioat_chan), "tx submit failed\n"); spin_unlock_bh(&ioat_chan->desc_lock); return -ENOMEM; } hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - if (first->async_tx.callback) { + if (first->txd.callback) { hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; if (first != new) { /* move callback into to last desc */ - new->async_tx.callback = first->async_tx.callback; - new->async_tx.callback_param - = first->async_tx.callback_param; - first->async_tx.callback = NULL; - first->async_tx.callback_param = NULL; + new->txd.callback = first->txd.callback; + new->txd.callback_param + = first->txd.callback_param; + first->txd.callback = NULL; + first->txd.callback_param = NULL; } } new->tx_cnt = desc_count; - new->async_tx.flags = orig_flags; /* client is in control of this ack */ + new->txd.flags = orig_flags; /* client is in control of this ack */ /* store the original values for use in later cleanup */ if (new != first) { @@ -541,11 +539,11 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->async_tx.cookie = cookie; + ioat_chan->common.cookie = new->txd.cookie = cookie; /* write address into NextDescriptor field of last desc in chain */ to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = - first->async_tx.phys; + first->txd.phys; list_splice_tail(&new_chain, &ioat_chan->used_desc); ioat_chan->dmacount += desc_count; @@ -574,7 +572,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) len = first->len; src = first->src; dst = first->dst; - orig_flags = first->async_tx.flags; + orig_flags = first->txd.flags; new = first; /* @@ -584,7 +582,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) do { copy = min_t(size_t, len, ioat_chan->xfercap); - async_tx_ack(&new->async_tx); + async_tx_ack(&new->txd); hw = new->hw; hw->size = copy; @@ -599,27 +597,26 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "tx submit failed\n"); + 
dev_err(to_dev(ioat_chan), "tx submit failed\n"); spin_unlock_bh(&ioat_chan->desc_lock); return -ENOMEM; } hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - if (first->async_tx.callback) { + if (first->txd.callback) { hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; if (first != new) { /* move callback into to last desc */ - new->async_tx.callback = first->async_tx.callback; - new->async_tx.callback_param - = first->async_tx.callback_param; - first->async_tx.callback = NULL; - first->async_tx.callback_param = NULL; + new->txd.callback = first->txd.callback; + new->txd.callback_param + = first->txd.callback_param; + first->txd.callback = NULL; + first->txd.callback_param = NULL; } } new->tx_cnt = desc_count; - new->async_tx.flags = orig_flags; /* client is in control of this ack */ + new->txd.flags = orig_flags; /* client is in control of this ack */ /* store the original values for use in later cleanup */ if (new != first) { @@ -633,7 +630,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->async_tx.cookie = cookie; + ioat_chan->common.cookie = new->txd.cookie = cookie; ioat_chan->dmacount += desc_count; ioat_chan->pending += desc_count; @@ -649,9 +646,8 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) * @ioat_chan: the channel supplying the memory pool for the descriptors * @flags: allocation flags */ -static struct ioat_desc_sw *ioat_dma_alloc_descriptor( - struct ioat_dma_chan *ioat_chan, - gfp_t flags) +static struct ioat_desc_sw * +ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags) { struct ioat_dma_descriptor *desc; struct ioat_desc_sw *desc_sw; @@ -670,19 +666,19 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( } memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); + dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common); switch (ioat_chan->device->version) { case IOAT_VER_1_2: - desc_sw->async_tx.tx_submit = ioat1_tx_submit; + desc_sw->txd.tx_submit = ioat1_tx_submit; break; case IOAT_VER_2_0: case IOAT_VER_3_0: - desc_sw->async_tx.tx_submit = ioat2_tx_submit; + desc_sw->txd.tx_submit = ioat2_tx_submit; break; } desc_sw->hw = desc; - desc_sw->async_tx.phys = phys; + desc_sw->txd.phys = phys; return desc_sw; } @@ -712,9 +708,9 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) /* circle link the hw descriptors */ desc = to_ioat_desc(ioat_chan->free_desc.next); - desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { - desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; } } @@ -743,8 +739,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { - dev_err(&ioat_chan->device->pdev->dev, - "CHANERR = %x, clearing\n", chanerr); + dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr); writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); } @@ -752,7 +747,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) for (i = 0; i < ioat_initial_desc_count; i++) { desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "Only %d initial descriptors\n", i); break; } @@ 
-819,14 +814,14 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) in_use_descs++; list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); } list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); } break; @@ -836,12 +831,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) ioat_chan->free_desc.next, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); } desc = to_ioat_desc(ioat_chan->free_desc.next); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); INIT_LIST_HEAD(&ioat_chan->free_desc); INIT_LIST_HEAD(&ioat_chan->used_desc); @@ -855,8 +850,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) /* one is ok since we left it on there on purpose */ if (in_use_descs > 1) - dev_err(&ioat_chan->device->pdev->dev, - "Freeing %d in use descriptors!\n", + dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", in_use_descs - 1); ioat_chan->last_completion = ioat_chan->completion_addr = 0; @@ -889,8 +883,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) /* try to get another desc */ new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "alloc failed\n"); + dev_err(to_dev(ioat_chan), "alloc failed\n"); return NULL; } } @@ -936,16 +929,15 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) for (i = 16; i; i--) { desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, - "alloc failed\n"); + dev_err(to_dev(ioat_chan), "alloc failed\n"); break; } list_add_tail(&desc->node, ioat_chan->used_desc.next); desc->hw->next - = to_ioat_desc(desc->node.next)->async_tx.phys; + = to_ioat_desc(desc->node.next)->txd.phys; to_ioat_desc(desc->node.prev)->hw->next - = desc->async_tx.phys; + = desc->txd.phys; ioat_chan->desccount++; } @@ -962,8 +954,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) return new; } -static struct ioat_desc_sw *ioat_dma_get_next_descriptor( - struct ioat_dma_chan *ioat_chan) +static struct ioat_desc_sw * +ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) { if (!ioat_chan) return NULL; @@ -978,12 +970,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor( return NULL; } -static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( - struct dma_chan *chan, - dma_addr_t dma_dest, - dma_addr_t dma_src, - size_t len, - unsigned long flags) +static struct dma_async_tx_descriptor * +ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); struct ioat_desc_sw *new; @@ -996,22 +985,19 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( new->len = len; new->dst = dma_dest; new->src = dma_src; - new->async_tx.flags = flags; - return &new->async_tx; + new->txd.flags = flags; + return &new->txd; } else { - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); return NULL; } } -static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( - struct 
dma_chan *chan, - dma_addr_t dma_dest, - dma_addr_t dma_src, - size_t len, - unsigned long flags) +static struct dma_async_tx_descriptor * +ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); struct ioat_desc_sw *new; @@ -1028,11 +1014,11 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( new->len = len; new->dst = dma_dest; new->src = dma_src; - new->async_tx.flags = flags; - return &new->async_tx; + new->txd.flags = flags; + return &new->txd; } else { spin_unlock_bh(&ioat_chan->desc_lock); - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); return NULL; @@ -1050,8 +1036,8 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) static void ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) { - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) pci_unmap_single(ioat_chan->device->pdev, pci_unmap_addr(desc, dst), pci_unmap_len(desc, len), @@ -1063,8 +1049,8 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) PCI_DMA_FROMDEVICE); } - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) pci_unmap_single(ioat_chan->device->pdev, pci_unmap_addr(desc, src), pci_unmap_len(desc, len), @@ -1088,6 +1074,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) dma_cookie_t cookie = 0; unsigned long desc_phys; struct ioat_desc_sw *latest_desc; + struct dma_async_tx_descriptor *tx; prefetch(ioat_chan->completion_virt); @@ -1111,8 +1098,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { - dev_err(&ioat_chan->device->pdev->dev, - "Channel halted, chanerr = %x\n", + dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n", readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); /* TODO do something to salvage the situation */ @@ -1145,38 +1131,38 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) case IOAT_VER_1_2: list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) { - + tx = &desc->txd; /* * Incoming DMA requests may use multiple descriptors, * due to exceeding xfercap, perhaps. If so, only the * last one will have a cookie, and require unmapping. 
*/ - if (desc->async_tx.cookie) { - cookie = desc->async_tx.cookie; + if (tx->cookie) { + cookie = tx->cookie; ioat_dma_unmap(ioat_chan, desc); - if (desc->async_tx.callback) { - desc->async_tx.callback(desc->async_tx.callback_param); - desc->async_tx.callback = NULL; + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } } - if (desc->async_tx.phys != phys_complete) { + if (tx->phys != phys_complete) { /* * a completed entry, but not the last, so clean * up if the client is done with the descriptor */ - if (async_tx_test_ack(&desc->async_tx)) { + if (async_tx_test_ack(tx)) { list_move_tail(&desc->node, &ioat_chan->free_desc); } else - desc->async_tx.cookie = 0; + tx->cookie = 0; } else { /* * last used desc. Do not remove, so we can * append from it, but don't look at it next * time, either */ - desc->async_tx.cookie = 0; + tx->cookie = 0; /* TODO check status bits? */ break; @@ -1191,10 +1177,11 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) /* work backwards to find latest finished desc */ desc = to_ioat_desc(ioat_chan->used_desc.next); + tx = &desc->txd; latest_desc = NULL; do { desc = to_ioat_desc(desc->node.prev); - desc_phys = (unsigned long)desc->async_tx.phys + desc_phys = (unsigned long)tx->phys & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; if (desc_phys == phys_complete) { latest_desc = desc; @@ -1203,19 +1190,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) } while (&desc->node != ioat_chan->used_desc.prev); if (latest_desc != NULL) { - /* work forwards to clear finished descriptors */ for (desc = to_ioat_desc(ioat_chan->used_desc.prev); &desc->node != latest_desc->node.next && &desc->node != ioat_chan->used_desc.next; desc = to_ioat_desc(desc->node.next)) { - if (desc->async_tx.cookie) { - cookie = desc->async_tx.cookie; - desc->async_tx.cookie = 0; + if (tx->cookie) { + cookie = tx->cookie; + tx->cookie = 0; ioat_dma_unmap(ioat_chan, desc); - if (desc->async_tx.callback) { - desc->async_tx.callback(desc->async_tx.callback_param); - desc->async_tx.callback = NULL; + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } } } @@ -1245,10 +1231,9 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) * @done: if not %NULL, updated with last completed transaction * @used: if not %NULL, updated with last used transaction */ -static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, - dma_cookie_t cookie, - dma_cookie_t *done, - dma_cookie_t *used) +static enum dma_status +ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); dma_cookie_t last_used; @@ -1290,7 +1275,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) desc = ioat_dma_get_next_descriptor(ioat_chan); if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "Unable to start null desc - get next desc failed\n"); spin_unlock_bh(&ioat_chan->desc_lock); return; @@ -1303,15 +1288,15 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) desc->hw->size = NULL_DESC_BUFFER_SIZE; desc->hw->src_addr = 0; desc->hw->dst_addr = 0; - async_tx_ack(&desc->async_tx); + async_tx_ack(&desc->txd); switch (ioat_chan->device->version) { case IOAT_VER_1_2: desc->hw->next = 0; list_add_tail(&desc->node, &ioat_chan->used_desc); - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 
ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); writeb(IOAT_CHANCMD_START, ioat_chan->reg_base @@ -1319,9 +1304,9 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) break; case IOAT_VER_2_0: case IOAT_VER_3_0: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); ioat_chan->dmacount++; @@ -1352,6 +1337,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device) int i; u8 *src; u8 *dest; + struct dma_device *dma = &device->common; + struct device *dev = &device->pdev->dev; struct dma_chan *dma_chan; struct dma_async_tx_descriptor *tx; dma_addr_t dma_dest, dma_src; @@ -1375,26 +1362,21 @@ static int ioat_dma_self_test(struct ioatdma_device *device) src[i] = (u8)i; /* Start copy, using first DMA channel */ - dma_chan = container_of(device->common.channels.next, - struct dma_chan, + dma_chan = container_of(dma->channels.next, struct dma_chan, device_node); - if (device->common.device_alloc_chan_resources(dma_chan) < 1) { - dev_err(&device->pdev->dev, - "selftest cannot allocate chan resource\n"); + if (dma->device_alloc_chan_resources(dma_chan) < 1) { + dev_err(dev, "selftest cannot allocate chan resource\n"); err = -ENODEV; goto out; } - dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, - DMA_TO_DEVICE); - dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, - DMA_FROM_DEVICE); + dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); + dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { - dev_err(&device->pdev->dev, - "Self-test prep failed, disabling\n"); + dev_err(dev, "Self-test prep failed, disabling\n"); err = -ENODEV; goto free_resources; } @@ -1405,32 +1387,29 @@ static int ioat_dma_self_test(struct ioatdma_device *device) tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { - dev_err(&device->pdev->dev, - "Self-test setup failed, disabling\n"); + dev_err(dev, "Self-test setup failed, disabling\n"); err = -ENODEV; goto free_resources; } - device->common.device_issue_pending(dma_chan); + dma->device_issue_pending(dma_chan); tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (tmo == 0 || - device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) + dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { - dev_err(&device->pdev->dev, - "Self-test copy timed out, disabling\n"); + dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto free_resources; } if (memcmp(src, dest, IOAT_TEST_SIZE)) { - dev_err(&device->pdev->dev, - "Self-test copy failed compare, disabling\n"); + dev_err(dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; goto free_resources; } free_resources: - device->common.device_free_chan_resources(dma_chan); + dma->device_free_chan_resources(dma_chan); out: kfree(src); kfree(dest); @@ -1483,15 +1462,14 @@ msix: for (i = 0; i < msixcnt; i++) { msix = &device->msix_entries[i]; - ioat_chan = ioat_lookup_chan_by_index(device, i); + ioat_chan 
= ioat_chan_by_index(device, i); err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt_msix, 0, "ioat-msix", ioat_chan); if (err) { for (j = 0; j < i; j++) { msix = &device->msix_entries[j]; - ioat_chan = - ioat_lookup_chan_by_index(device, j); + ioat_chan = ioat_chan_by_index(device, j); devm_free_irq(dev, msix->vector, ioat_chan); } goto msix_single_vector; @@ -1561,12 +1539,13 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, - void __iomem *iobase) +struct ioatdma_device * +ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) { int err; struct device *dev = &pdev->dev; struct ioatdma_device *device; + struct dma_device *dma; device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); if (!device) @@ -1574,6 +1553,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, device->pdev = pdev; device->reg_base = iobase; device->version = readb(device->reg_base + IOAT_VER_OFFSET); + dma = &device->common; /* DMA coherent memory pool for DMA descriptor allocations */ device->dma_pool = pci_pool_create("dma_desc_pool", pdev, @@ -1592,36 +1572,32 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, goto err_completion_pool; } - INIT_LIST_HEAD(&device->common.channels); + INIT_LIST_HEAD(&dma->channels); ioat_dma_enumerate_channels(device); - device->common.device_alloc_chan_resources = - ioat_dma_alloc_chan_resources; - device->common.device_free_chan_resources = - ioat_dma_free_chan_resources; - device->common.dev = &pdev->dev; + dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources; + dma->device_free_chan_resources = ioat_dma_free_chan_resources; + dma->dev = &pdev->dev; - dma_cap_set(DMA_MEMCPY, device->common.cap_mask); - device->common.device_is_tx_complete = ioat_dma_is_complete; + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->device_is_tx_complete = ioat_dma_is_complete; switch (device->version) { case IOAT_VER_1_2: - device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; - device->common.device_issue_pending = - ioat1_dma_memcpy_issue_pending; + dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; + dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; break; case IOAT_VER_2_0: case IOAT_VER_3_0: - device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - device->common.device_issue_pending = - ioat2_dma_memcpy_issue_pending; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; break; } dev_err(dev, "Intel(R) I/OAT DMA Engine found," " %d channels, device version 0x%02x, driver version %s\n", - device->common.chancnt, device->version, IOAT_DMA_VERSION); + dma->chancnt, device->version, IOAT_DMA_VERSION); - if (!device->common.chancnt) { + if (!dma->chancnt) { dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: " "zero channels detected\n"); goto err_setup_interrupts; @@ -1635,7 +1611,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, if (err) goto err_self_test; - err = dma_async_device_register(&device->common); + err = dma_async_device_register(dma); if (err) goto err_self_test; @@ -1663,19 +1639,19 @@ void ioat_dma_remove(struct ioatdma_device *device) { struct dma_chan *chan, *_chan; struct ioat_dma_chan *ioat_chan; + struct dma_device *dma = &device->common; if (device->version != IOAT_VER_3_0) cancel_delayed_work(&device->work); ioat_disable_interrupts(device); - 
dma_async_device_unregister(&device->common); + dma_async_device_unregister(dma); pci_pool_destroy(device->dma_pool); pci_pool_destroy(device->completion_pool); - list_for_each_entry_safe(chan, _chan, - &device->common.channels, device_node) { + list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) { ioat_chan = to_ioat_chan(chan); list_del(&chan->device_node); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5e8d7cfabc21..c5eabae4c1b9 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -38,7 +38,8 @@ #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) -#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) +#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) +#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) @@ -123,7 +124,7 @@ struct ioat_dma_chan { * @node: this descriptor will either be on the free list, * or attached to a transaction list (async_tx.tx_list) * @tx_cnt: number of descriptors required to complete the transaction - * @async_tx: the generic software descriptor for all engines + * @txd: the generic software descriptor for all engines */ struct ioat_desc_sw { struct ioat_dma_descriptor *hw; @@ -132,7 +133,7 @@ struct ioat_desc_sw { size_t len; dma_addr_t src; dma_addr_t dst; - struct dma_async_tx_descriptor async_tx; + struct dma_async_tx_descriptor txd; }; static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) -- cgit v1.2.3 From b31b78f1ab7806759622b703357e39a21f757281 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:42:32 -0700 Subject: ioat: kill function prototype ifdef guards The only .c files that utilize these protected prototypes depend on CONFIG_INTEL_IOATDMA=y, so there is no value gained in providing empty prototypes. [ Impact: pure cleanup ] Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index c5eabae4c1b9..6e27ddb1e98a 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -153,19 +153,10 @@ static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) #endif } -#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase); void ioat_dma_remove(struct ioatdma_device *device); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); -#else -#define ioat_dma_probe(pdev, iobase) NULL -#define ioat_dma_remove(device) do { } while (0) -#define ioat_dca_init(pdev, iobase) NULL -#define ioat2_dca_init(pdev, iobase) NULL -#define ioat3_dca_init(pdev, iobase) NULL -#endif - #endif /* IOATDMA_H */ -- cgit v1.2.3 From f2427e276ffec5ce599c6bc116e0927269a360ef Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:42:38 -0700 Subject: ioat: split ioat_dma_probe into core/version-specific routines Towards the removal of ioatdma_device.version split the initialization path into distinct versions. 
This conversion: 1/ moves version specific probe code to version specific routines 2/ removes the need for ioat_device 3/ turns off the ioat1 msi quirk if the device is reinitialized for intx Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 253 ++++++++++++++++++++++++++++++------------------- drivers/dma/ioat/dma.h | 23 ++--- drivers/dma/ioat/pci.c | 79 ++++++++------- 3 files changed, 200 insertions(+), 155 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 462dae627191..b7508041c6d7 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -121,52 +121,21 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) int i; struct ioat_dma_chan *ioat_chan; struct device *dev = &device->pdev->dev; + struct dma_device *dma = &device->common; - /* - * IOAT ver.3 workarounds - */ - if (device->version == IOAT_VER_3_0) { - u32 chan_err_mask; - u16 dev_id; - u32 dmauncerrsts; - - /* - * Write CHANERRMSK_INT with 3E07h to mask out the errors - * that can cause stability issues for IOAT ver.3 - */ - chan_err_mask = 0x3E07; - pci_write_config_dword(device->pdev, - IOAT_PCI_CHANERRMASK_INT_OFFSET, - chan_err_mask); - - /* - * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(device->pdev, - IOAT_PCI_DEVICE_ID_OFFSET, - &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { - dmauncerrsts = 0x10; - pci_write_config_dword(device->pdev, - IOAT_PCI_DMAUNCERRSTS_OFFSET, - dmauncerrsts); - } - } - - device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + INIT_LIST_HEAD(&dma->channels); + dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); xfercap = (xfercap_scale == 0 ? 
-1 : (1UL << xfercap_scale)); #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL - if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) { - device->common.chancnt--; - } + if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) + dma->chancnt--; #endif - for (i = 0; i < device->common.chancnt; i++) { + for (i = 0; i < dma->chancnt; i++) { ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) { - device->common.chancnt = i; + dma->chancnt = i; break; } @@ -175,28 +144,20 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) ioat_chan->xfercap = xfercap; ioat_chan->desccount = 0; INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); - if (ioat_chan->device->version == IOAT_VER_2_0) - writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | - IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); - else if (ioat_chan->device->version == IOAT_VER_3_0) - writel(IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); spin_lock_init(&ioat_chan->cleanup_lock); spin_lock_init(&ioat_chan->desc_lock); INIT_LIST_HEAD(&ioat_chan->free_desc); INIT_LIST_HEAD(&ioat_chan->used_desc); /* This should be made common somewhere in dmaengine.c */ ioat_chan->common.device = &device->common; - list_add_tail(&ioat_chan->common.device_node, - &device->common.channels); + list_add_tail(&ioat_chan->common.device_node, &dma->channels); device->idx[i] = ioat_chan; tasklet_init(&ioat_chan->cleanup_task, ioat_dma_cleanup_tasklet, (unsigned long) ioat_chan); tasklet_disable(&ioat_chan->cleanup_task); } - return device->common.chancnt; + return dma->chancnt; } /** @@ -1504,15 +1465,6 @@ msi: pci_disable_msi(pdev); goto intx; } - /* - * CB 1.2 devices need a bit set in configuration space to enable MSI - */ - if (device->version == IOAT_VER_1_2) { - u32 dmactrl; - pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); - dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; - pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); - } goto done; intx: @@ -1522,6 +1474,8 @@ intx: goto err_no_irq; done: + if (device->intr_quirk) + device->intr_quirk(device); intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); return 0; @@ -1539,21 +1493,12 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -struct ioatdma_device * -ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) +static int ioat_probe(struct ioatdma_device *device) { - int err; + int err = -ENODEV; + struct dma_device *dma = &device->common; + struct pci_dev *pdev = device->pdev; struct device *dev = &pdev->dev; - struct ioatdma_device *device; - struct dma_device *dma; - - device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); - if (!device) - err = -ENOMEM; - device->pdev = pdev; - device->reg_base = iobase; - device->version = readb(device->reg_base + IOAT_VER_OFFSET); - dma = &device->common; /* DMA coherent memory pool for DMA descriptor allocations */ device->dma_pool = pci_pool_create("dma_desc_pool", pdev, @@ -1572,26 +1517,13 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) goto err_completion_pool; } - INIT_LIST_HEAD(&dma->channels); ioat_dma_enumerate_channels(device); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources; dma->device_free_chan_resources = ioat_dma_free_chan_resources; - dma->dev = &pdev->dev; - - dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_is_tx_complete = ioat_dma_is_complete; - switch (device->version) 
{ - case IOAT_VER_1_2: - dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; - dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; - break; - } + dma->dev = &pdev->dev; dev_err(dev, "Intel(R) I/OAT DMA Engine found," " %d channels, device version 0x%02x, driver version %s\n", @@ -1611,19 +1543,7 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) if (err) goto err_self_test; - err = dma_async_device_register(dma); - if (err) - goto err_self_test; - - ioat_set_tcp_copy_break(device); - - if (device->version != IOAT_VER_3_0) { - INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); - schedule_delayed_work(&device->work, - WATCHDOG_DELAY); - } - - return device; + return 0; err_self_test: ioat_disable_interrupts(device); @@ -1632,7 +1552,142 @@ err_setup_interrupts: err_completion_pool: pci_pool_destroy(device->dma_pool); err_dma_pool: - return NULL; + return err; +} + +static int ioat_register(struct ioatdma_device *device) +{ + int err = dma_async_device_register(&device->common); + + if (err) { + ioat_disable_interrupts(device); + pci_pool_destroy(device->completion_pool); + pci_pool_destroy(device->dma_pool); + } + + return err; +} + +/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */ +static void ioat1_intr_quirk(struct ioatdma_device *device) +{ + struct pci_dev *pdev = device->pdev; + u32 dmactrl; + + pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); + if (pdev->msi_enabled) + dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; + else + dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN; + pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); +} + +int ioat1_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + int err; + + device->intr_quirk = ioat1_intr_quirk; + dma = &device->common; + dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; + dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(4096); + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat_dca_init(pdev, device->reg_base); + + INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); + schedule_delayed_work(&device->work, WATCHDOG_DELAY); + + return err; +} + +int ioat2_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *chan; + struct ioat_dma_chan *ioat_chan; + int err; + + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(2048); + + list_for_each_entry(chan, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(chan); + writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat2_dca_init(pdev, device->reg_base); + + INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); + schedule_delayed_work(&device->work, WATCHDOG_DELAY); + + return err; +} + +int ioat3_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *chan; + struct ioat_dma_chan *ioat_chan; + 
int err; + u16 dev_id; + + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; + + /* -= IOAT ver.3 workarounds =- */ + /* Write CHANERRMSK_INT with 3E07h to mask out the errors + * that can cause stability issues for IOAT ver.3 + */ + pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) + pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(262144); + + list_for_each_entry(chan, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(chan); + writel(IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat3_dca_init(pdev, device->reg_base); + + return err; } void ioat_dma_remove(struct ioatdma_device *device) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 6e27ddb1e98a..1226e35f2709 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -61,6 +61,8 @@ * @version: version of ioatdma device * @msix_entries: irq handlers * @idx: per channel data + * @dca: direct cache access context + * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) */ struct ioatdma_device { @@ -73,6 +75,8 @@ struct ioatdma_device { struct delayed_work work; struct msix_entry msix_entries[4]; struct ioat_dma_chan *idx[4]; + struct dca_provider *dca; + void (*intr_quirk)(struct ioatdma_device *device); }; /** @@ -136,25 +140,16 @@ struct ioat_desc_sw { struct dma_async_tx_descriptor txd; }; -static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) +static inline void ioat_set_tcp_copy_break(unsigned long copybreak) { #ifdef CONFIG_NET_DMA - switch (dev->version) { - case IOAT_VER_1_2: - sysctl_tcp_dma_copybreak = 4096; - break; - case IOAT_VER_2_0: - sysctl_tcp_dma_copybreak = 2048; - break; - case IOAT_VER_3_0: - sysctl_tcp_dma_copybreak = 262144; - break; - } + sysctl_tcp_dma_copybreak = copybreak; #endif } -struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, - void __iomem *iobase); +int ioat1_dma_probe(struct ioatdma_device *dev, int dca); +int ioat2_dma_probe(struct ioatdma_device *dev, int dca); +int ioat3_dma_probe(struct ioatdma_device *dev, int dca); void ioat_dma_remove(struct ioatdma_device *device); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 982e38fd177c..55414d88ac1b 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -60,14 +60,8 @@ static struct pci_device_id ioat_pci_tbl[] = { { 0, } }; -struct ioat_device { - struct pci_dev *pdev; - struct ioatdma_device *dma; - struct dca_provider *dca; -}; - -static int __devinit ioat_probe(struct pci_dev *pdev, - const struct pci_device_id *id); +static int __devinit ioat_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id); static void __devexit ioat_remove(struct pci_dev *pdev); static int ioat_dca_enabled = 1; @@ -79,17 +73,28 @@ MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)" static struct pci_driver ioat_pci_driver = { .name = 
DRV_NAME, .id_table = ioat_pci_tbl, - .probe = ioat_probe, + .probe = ioat_pci_probe, .remove = __devexit_p(ioat_remove), }; -static int __devinit ioat_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static struct ioatdma_device * +alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) +{ + struct device *dev = &pdev->dev; + struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + + if (!d) + return NULL; + d->pdev = pdev; + d->reg_base = iobase; + return d; +} + +static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { void __iomem * const *iomap; - void __iomem *iobase; struct device *dev = &pdev->dev; - struct ioat_device *device; + struct ioatdma_device *device; int err; err = pcim_enable_device(pdev); @@ -119,33 +124,24 @@ static int __devinit ioat_probe(struct pci_dev *pdev, if (!device) return -ENOMEM; - device->pdev = pdev; - pci_set_drvdata(pdev, device); - iobase = iomap[IOAT_MMIO_BAR]; - pci_set_master(pdev); - switch (readb(iobase + IOAT_VER_OFFSET)) { - case IOAT_VER_1_2: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat_dca_init(pdev, iobase); - break; - case IOAT_VER_2_0: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat2_dca_init(pdev, iobase); - break; - case IOAT_VER_3_0: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat3_dca_init(pdev, iobase); - break; - default: + device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); + if (!device) + return -ENOMEM; + pci_set_drvdata(pdev, device); + + device->version = readb(device->reg_base + IOAT_VER_OFFSET); + if (device->version == IOAT_VER_1_2) + err = ioat1_dma_probe(device, ioat_dca_enabled); + else if (device->version == IOAT_VER_2_0) + err = ioat2_dma_probe(device, ioat_dca_enabled); + else if (device->version >= IOAT_VER_3_0) + err = ioat3_dma_probe(device, ioat_dca_enabled); + else return -ENODEV; - } - if (!device->dma) { + if (err) { dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); return -ENODEV; } @@ -155,7 +151,10 @@ static int __devinit ioat_probe(struct pci_dev *pdev, static void __devexit ioat_remove(struct pci_dev *pdev) { - struct ioat_device *device = pci_get_drvdata(pdev); + struct ioatdma_device *device = pci_get_drvdata(pdev); + + if (!device) + return; dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { @@ -163,11 +162,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev) free_dca_provider(device->dca); device->dca = NULL; } - - if (device->dma) { - ioat_dma_remove(device->dma); - device->dma = NULL; - } + ioat_dma_remove(device); } static int __init ioat_init_module(void) -- cgit v1.2.3 From 77867fff033ea549096c49d863c564ad7d8be36f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:04 -0700 Subject: ioat: fix type mismatch for ->dmacount ->dmacount tracks the sequence number of active descriptors. It is written to the DMACOUNT register to update the channel's view of pending descriptors in the chain. The register is 16-bits so ->dmacount should be unsigned and 16-bit as well. Also modify ->desccount to maintain alignment. This was never a problem in practice because we never compared dmacount values, but this is a bug waiting to happen. 
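As an aside, the width mismatch is easy to demonstrate outside the kernel. The following is a standalone illustration only, not part of the patch; uint16_t stands in for the kernel's u16 and a plain variable stands in for the DMACOUNT register. A counter that wraps at the register's width always agrees with what writew() would latch, while an int silently diverges once more than 65535 descriptors have been issued.

/* Illustration only: keep the software counter the same width as the
 * 16-bit DMACOUNT register so both wrap together. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t dmacount = 0;		/* proposed type: wraps at 65536 */
	int wide_count = 0;		/* old type: keeps growing */
	uint16_t dmacount_reg = 0;	/* what the hardware would latch */
	int i;

	for (i = 0; i < 70000; i++) {
		dmacount++;
		wide_count++;
		dmacount_reg = (uint16_t)wide_count; /* writew() truncates */
	}

	/* The u16 counter matches the register; the int does not. */
	printf("u16: %u reg: %u int: %d\n",
	       dmacount, dmacount_reg, wide_count);
	return 0;
}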
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 1226e35f2709..9f0c853b6a77 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -102,8 +102,8 @@ struct ioat_dma_chan { struct delayed_work work; int pending; - int dmacount; - int desccount; + u16 dmacount; + u16 desccount; struct ioatdma_device *device; struct dma_chan common; -- cgit v1.2.3 From c7984f4e4e3af3bf8027d636283ea8658c7f80b9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:04 -0700 Subject: ioat: define descriptor control bit-field This cleans up a mess of and'ing and or'ing bit definitions, and allows simple assignments from the specified dma_ctrl_flags parameter. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 28 ++++++++++++++++------------ drivers/dma/ioat/hw.h | 38 ++++++++++++++++++-------------------- 2 files changed, 34 insertions(+), 32 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index b7508041c6d7..4840d4805d8c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -472,9 +472,9 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) return -ENOMEM; } - hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + hw->ctl_f.compl_write = 1; if (first->txd.callback) { - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; + hw->ctl_f.int_en = 1; if (first != new) { /* move callback into to last desc */ new->txd.callback = first->txd.callback; @@ -563,9 +563,9 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) return -ENOMEM; } - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + hw->ctl_f.compl_write = 1; if (first->txd.callback) { - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; + hw->ctl_f.int_en = 1; if (first != new) { /* move callback into to last desc */ new->txd.callback = first->txd.callback; @@ -878,7 +878,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) noop_desc = to_ioat_desc(ioat_chan->used_desc.next); /* set size to non-zero value (channel returns error when size is 0) */ noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; - noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; + noop_desc->hw->ctl = 0; + noop_desc->hw->ctl_f.null = 1; noop_desc->hw->src_addr = 0; noop_desc->hw->dst_addr = 0; @@ -1230,6 +1231,7 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) { struct ioat_desc_sw *desc; + struct ioat_dma_descriptor *hw; spin_lock_bh(&ioat_chan->desc_lock); @@ -1242,17 +1244,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) return; } - desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL - | IOAT_DMA_DESCRIPTOR_CTL_INT_GN - | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.compl_write = 1; /* set size to non-zero value (channel returns error when size is 0) */ - desc->hw->size = NULL_DESC_BUFFER_SIZE; - desc->hw->src_addr = 0; - desc->hw->dst_addr = 0; + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; async_tx_ack(&desc->txd); switch (ioat_chan->device->version) { case IOAT_VER_1_2: - desc->hw->next = 0; + hw->next = 0; list_add_tail(&desc->node, &ioat_chan->used_desc); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, diff --git a/drivers/dma/ioat/hw.h 
b/drivers/dma/ioat/hw.h index 1438fa5c4d1a..e13f3ed47763 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -40,7 +40,24 @@ struct ioat_dma_descriptor { uint32_t size; - uint32_t ctl; + union { + uint32_t ctl; + struct { + unsigned int int_en:1; + unsigned int src_snoop_dis:1; + unsigned int dest_snoop_dis:1; + unsigned int compl_write:1; + unsigned int fence:1; + unsigned int null:1; + unsigned int src_brk:1; + unsigned int dest_brk:1; + unsigned int bundle:1; + unsigned int dest_dca:1; + unsigned int hint:1; + unsigned int rsvd2:13; + unsigned int op:8; + } ctl_f; + }; uint64_t src_addr; uint64_t dst_addr; uint64_t next; @@ -49,23 +66,4 @@ struct ioat_dma_descriptor { uint64_t user1; uint64_t user2; }; - -#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001 -#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002 -#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004 -#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 -#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 -#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 -#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040 -#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080 -#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100 -#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200 -#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400 - -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000 -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000 - -#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001 -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000 - #endif -- cgit v1.2.3 From a0587bcf3e64029a4da2a5666cad18df38db0d56 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:04 -0700 Subject: ioat1: move descriptor allocation from submit to prep The async_tx api assumes that after a successful ->prep a subsequent ->submit will not fail due to a lack of resources. This also fixes a bug in the allocation failure case. Previously the descriptors allocated prior to the allocation failure would not be returned to the free list. 
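To make that contract concrete, here is a standalone sketch of the rule this patch enforces, for illustration only: either every descriptor the copy needs is taken at prep time, or everything taken so far goes back to the free list and prep fails, so a later submit cannot run out of resources. The pool, get_desc() and put_desc() helpers below are invented for the example; the driver itself uses the channel's free_desc list and ioat_dma_get_next_descriptor(), and performs the same splice-back step with list_splice(&chain, &ioat_chan->free_desc) before dropping the lock.

#include <stdio.h>

struct desc { int id; };

#define POOL_SIZE 4
static struct desc pool[POOL_SIZE];
static struct desc *free_list[POOL_SIZE];
static int free_top;

static struct desc *get_desc(void)
{
	return free_top ? free_list[--free_top] : NULL;
}

static void put_desc(struct desc *d)
{
	free_list[free_top++] = d;
}

/* "prep": take n descriptors, or return every one taken so far. */
static int prep_chain(struct desc **chain, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		chain[i] = get_desc();
		if (!chain[i]) {
			while (i--)
				put_desc(chain[i]);	/* splice back */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct desc *chain[8];
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		pool[i].id = i;
		put_desc(&pool[i]);
	}

	printf("prep of 3: %s (free now %d)\n",
	       prep_chain(chain, 3) ? "fail" : "ok", free_top);
	for (i = 0; i < 3; i++)		/* give them back for the next try */
		put_desc(chain[i]);
	printf("prep of 8: %s (free now %d)\n",
	       prep_chain(chain, 8) ? "fail" : "ok", free_top);
	return 0;
}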
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 154 +++++++++++++++++++++---------------------------- 1 file changed, 65 insertions(+), 89 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 4840d4805d8c..c4333be07608 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -420,95 +420,29 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); - struct ioat_desc_sw *first = tx_to_ioat_desc(tx); - struct ioat_desc_sw *prev, *new; - struct ioat_dma_descriptor *hw; + struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); + struct ioat_desc_sw *first; + struct ioat_desc_sw *chain_tail; dma_cookie_t cookie; - LIST_HEAD(new_chain); - u32 copy; - size_t len; - dma_addr_t src, dst; - unsigned long orig_flags; - unsigned int desc_count = 0; - - /* src and dest and len are stored in the initial descriptor */ - len = first->len; - src = first->src; - dst = first->dst; - orig_flags = first->txd.flags; - new = first; spin_lock_bh(&ioat_chan->desc_lock); - prev = to_ioat_desc(ioat_chan->used_desc.prev); - prefetch(prev->hw); - do { - copy = min_t(size_t, len, ioat_chan->xfercap); - - async_tx_ack(&new->txd); - - hw = new->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - hw->next = 0; - - /* chain together the physical address list for the HW */ - wmb(); - prev->hw->next = (u64) new->txd.phys; - - len -= copy; - dst += copy; - src += copy; - - list_add_tail(&new->node, &new_chain); - desc_count++; - prev = new; - } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); - - if (!new) { - dev_err(to_dev(ioat_chan), "tx submit failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); - return -ENOMEM; - } - - hw->ctl_f.compl_write = 1; - if (first->txd.callback) { - hw->ctl_f.int_en = 1; - if (first != new) { - /* move callback into to last desc */ - new->txd.callback = first->txd.callback; - new->txd.callback_param - = first->txd.callback_param; - first->txd.callback = NULL; - first->txd.callback_param = NULL; - } - } - - new->tx_cnt = desc_count; - new->txd.flags = orig_flags; /* client is in control of this ack */ - - /* store the original values for use in later cleanup */ - if (new != first) { - new->src = first->src; - new->dst = first->dst; - new->len = first->len; - } - /* cookie incr and addition to used_list must be atomic */ cookie = ioat_chan->common.cookie; cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->txd.cookie = cookie; + ioat_chan->common.cookie = tx->cookie = cookie; /* write address into NextDescriptor field of last desc in chain */ - to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = - first->txd.phys; - list_splice_tail(&new_chain, &ioat_chan->used_desc); - - ioat_chan->dmacount += desc_count; - ioat_chan->pending += desc_count; + first = to_ioat_desc(tx->tx_list.next); + chain_tail = to_ioat_desc(ioat_chan->used_desc.prev); + /* make descriptor updates globally visible before chaining */ + wmb(); + chain_tail->hw->next = first->txd.phys; + list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc); + + ioat_chan->dmacount += desc->tx_cnt; + ioat_chan->pending += desc->tx_cnt; if (ioat_chan->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat_chan); spin_unlock_bh(&ioat_chan->desc_lock); @@ -937,24 +871,66 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t 
dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioat_desc_sw *new; + struct ioat_desc_sw *desc; + size_t copy; + LIST_HEAD(chain); + dma_addr_t src = dma_src; + dma_addr_t dest = dma_dest; + size_t total_len = len; + struct ioat_dma_descriptor *hw = NULL; + int tx_cnt = 0; spin_lock_bh(&ioat_chan->desc_lock); - new = ioat_dma_get_next_descriptor(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + desc = ioat_dma_get_next_descriptor(ioat_chan); + do { + if (!desc) + break; - if (new) { - new->len = len; - new->dst = dma_dest; - new->src = dma_src; - new->txd.flags = flags; - return &new->txd; - } else { + tx_cnt++; + copy = min_t(size_t, len, ioat_chan->xfercap); + + hw = desc->hw; + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dest; + + list_add_tail(&desc->node, &chain); + + len -= copy; + dest += copy; + src += copy; + if (len) { + struct ioat_desc_sw *next; + + async_tx_ack(&desc->txd); + next = ioat_dma_get_next_descriptor(ioat_chan); + hw->next = next ? next->txd.phys : 0; + desc = next; + } else + hw->next = 0; + } while (len); + + if (!desc) { dev_err(to_dev(ioat_chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + list_splice(&chain, &ioat_chan->free_desc); + spin_unlock_bh(&ioat_chan->desc_lock); return NULL; } + spin_unlock_bh(&ioat_chan->desc_lock); + + desc->txd.flags = flags; + desc->tx_cnt = tx_cnt; + desc->src = dma_src; + desc->dst = dma_dest; + desc->len = total_len; + list_splice(&chain, &desc->txd.tx_list); + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + + return &desc->txd; } static struct dma_async_tx_descriptor * -- cgit v1.2.3 From a6a39ca1badbeafc16941fcf2c1010c8c65c8ddc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:05 -0700 Subject: ioat: fix self test interrupts If a callback is to be attached to a descriptor the channel needs to know at ->prep time so it can set the interrupt enable bit. This is in preparation for moving descriptor ioat2 descriptor preparation from ->submit to ->prep. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index c4333be07608..cc5c557ddc83 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -1313,7 +1313,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device) dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; + flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE | + DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { -- cgit v1.2.3 From dcbc853af6f0c056088e4df0794d9bf36184809e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:50 -0700 Subject: ioat: prepare the code for ioat[12]_dma_chan split Prepare the code for the conversion of the ioat2 linked-list-ring into a native ring buffer. After this conversion ioat2 channels will share less of the ioat1 infrastructure, but there will still be places where sharing is possible. 
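The rest of this message describes the split in terms of a struct ioat_chan_common embedded inside struct ioat_dma_chan. A standalone sketch of that pattern follows, for illustration only: the field names are trimmed-down assumptions, since the real layout lives in dma.h and its hunk is not shown here, but the &ioat->base and container_of(chan, struct ioat_dma_chan, base) usage matches the diff below.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ioat_chan_common {		/* version-common state */
	void *reg_base;
	unsigned long last_completion;
};

struct ioat_dma_chan {			/* hardware/version specific */
	struct ioat_chan_common base;
	int pending;
	unsigned short dmacount;
};

int main(void)
{
	struct ioat_dma_chan ioat = { .pending = 3, .dmacount = 7 };
	struct ioat_chan_common *chan = &ioat.base;	/* 'chan' view */
	struct ioat_dma_chan *again;

	/* code holding only the common pointer can still reach the
	 * version-specific fields, as the converted routines do: */
	again = container_of(chan, struct ioat_dma_chan, base);
	printf("pending=%d dmacount=%u\n", again->pending, again->dmacount);
	return 0;
}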
struct ioat_chan_common is created to house the channel attributes that will remain common between ioat1 and ioat2 channels. For every routine that accesses both common and hardware specific fields the old unified 'ioat_chan' pointer is split into an 'ioat' and 'chan' pointer. Where 'chan' references common fields and 'ioat' the hardware/version specific. [ Impact: pure structure member movement/variable renames, no logic changes ] Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 711 +++++++++++++++++++++++++------------------------ drivers/dma/ioat/dma.h | 49 ++-- 2 files changed, 390 insertions(+), 370 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index cc5c557ddc83..2e81e0c76e61 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -47,15 +47,15 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work); static void ioat_dma_chan_watchdog(struct work_struct *work); /* internal functions */ -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); +static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat); +static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat); static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat); static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat); -static inline struct ioat_dma_chan * +static inline struct ioat_chan_common * ioat_chan_by_index(struct ioatdma_device *device, int index) { return device->idx[index]; @@ -69,7 +69,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) { struct ioatdma_device *instance = data; - struct ioat_dma_chan *ioat_chan; + struct ioat_chan_common *chan; unsigned long attnstatus; int bit; u8 intrctrl; @@ -86,8 +86,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_bit(bit, &attnstatus, BITS_PER_LONG) { - ioat_chan = ioat_chan_by_index(instance, bit); - tasklet_schedule(&ioat_chan->cleanup_task); + chan = ioat_chan_by_index(instance, bit); + tasklet_schedule(&chan->cleanup_task); } writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); @@ -101,9 +101,9 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) */ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) { - struct ioat_dma_chan *ioat_chan = data; + struct ioat_chan_common *chan = data; - tasklet_schedule(&ioat_chan->cleanup_task); + tasklet_schedule(&chan->cleanup_task); return IRQ_HANDLED; } @@ -119,7 +119,8 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) u8 xfercap_scale; u32 xfercap; int i; - struct ioat_dma_chan *ioat_chan; + struct ioat_chan_common *chan; + struct ioat_dma_chan *ioat; struct device *dev = &device->pdev->dev; struct dma_device *dma = &device->common; @@ -133,29 +134,30 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) dma->chancnt--; #endif for (i = 0; i < dma->chancnt; i++) { - ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); - if (!ioat_chan) { + ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); + if (!ioat) { dma->chancnt = i; break; } - ioat_chan->device = device; - ioat_chan->reg_base = 
device->reg_base + (0x80 * (i + 1)); - ioat_chan->xfercap = xfercap; - ioat_chan->desccount = 0; - INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); - spin_lock_init(&ioat_chan->cleanup_lock); - spin_lock_init(&ioat_chan->desc_lock); - INIT_LIST_HEAD(&ioat_chan->free_desc); - INIT_LIST_HEAD(&ioat_chan->used_desc); + chan = &ioat->base; + chan->device = device; + chan->reg_base = device->reg_base + (0x80 * (i + 1)); + ioat->xfercap = xfercap; + ioat->desccount = 0; + INIT_DELAYED_WORK(&chan->work, ioat_dma_chan_reset_part2); + spin_lock_init(&chan->cleanup_lock); + spin_lock_init(&ioat->desc_lock); + INIT_LIST_HEAD(&ioat->free_desc); + INIT_LIST_HEAD(&ioat->used_desc); /* This should be made common somewhere in dmaengine.c */ - ioat_chan->common.device = &device->common; - list_add_tail(&ioat_chan->common.device_node, &dma->channels); - device->idx[i] = ioat_chan; - tasklet_init(&ioat_chan->cleanup_task, + chan->common.device = &device->common; + list_add_tail(&chan->common.device_node, &dma->channels); + device->idx[i] = chan; + tasklet_init(&chan->cleanup_task, ioat_dma_cleanup_tasklet, - (unsigned long) ioat_chan); - tasklet_disable(&ioat_chan->cleanup_task); + (unsigned long) ioat); + tasklet_disable(&chan->cleanup_task); } return dma->chancnt; } @@ -166,39 +168,42 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) * @chan: DMA channel handle */ static inline void -__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) +__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) { - ioat_chan->pending = 0; - writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); + void __iomem *reg_base = ioat->base.reg_base; + + ioat->pending = 0; + writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); } static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(chan); - if (ioat_chan->pending > 0) { - spin_lock_bh(&ioat_chan->desc_lock); - __ioat1_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + if (ioat->pending > 0) { + spin_lock_bh(&ioat->desc_lock); + __ioat1_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); } } static inline void -__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) +__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) { - ioat_chan->pending = 0; - writew(ioat_chan->dmacount, - ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + void __iomem *reg_base = ioat->base.reg_base; + + ioat->pending = 0; + writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); } static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(chan); - if (ioat_chan->pending > 0) { - spin_lock_bh(&ioat_chan->desc_lock); - __ioat2_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + if (ioat->pending > 0) { + spin_lock_bh(&ioat->desc_lock); + __ioat2_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); } } @@ -208,84 +213,88 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) */ static void ioat_dma_chan_reset_part2(struct work_struct *work) { - struct ioat_dma_chan *ioat_chan = - container_of(work, struct ioat_dma_chan, work.work); + struct ioat_chan_common *chan; + struct ioat_dma_chan *ioat; struct ioat_desc_sw *desc; - spin_lock_bh(&ioat_chan->cleanup_lock); - 
spin_lock_bh(&ioat_chan->desc_lock); + chan = container_of(work, struct ioat_chan_common, work.work); + ioat = container_of(chan, struct ioat_dma_chan, base); + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->desc_lock); - ioat_chan->completion_virt->low = 0; - ioat_chan->completion_virt->high = 0; - ioat_chan->pending = 0; + chan->completion_virt->low = 0; + chan->completion_virt->high = 0; + ioat->pending = 0; /* * count the descriptors waiting, and be sure to do it * right for both the CB1 line and the CB2 ring */ - ioat_chan->dmacount = 0; - if (ioat_chan->used_desc.prev) { - desc = to_ioat_desc(ioat_chan->used_desc.prev); + ioat->dmacount = 0; + if (ioat->used_desc.prev) { + desc = to_ioat_desc(ioat->used_desc.prev); do { - ioat_chan->dmacount++; + ioat->dmacount++; desc = to_ioat_desc(desc->node.next); - } while (&desc->node != ioat_chan->used_desc.next); + } while (&desc->node != ioat->used_desc.next); } /* * write the new starting descriptor address * this puts channel engine into ARMED state */ - desc = to_ioat_desc(ioat_chan->used_desc.prev); - switch (ioat_chan->device->version) { + desc = to_ioat_desc(ioat->used_desc.prev); + switch (chan->device->version) { case IOAT_VER_1_2: writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - writeb(IOAT_CHANCMD_START, ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + writeb(IOAT_CHANCMD_START, chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); break; case IOAT_VER_2_0: writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); /* tell the engine to go with what's left to be done */ - writew(ioat_chan->dmacount, - ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + writew(ioat->dmacount, + chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); break; } - dev_err(to_dev(ioat_chan), + dev_err(to_dev(chan), "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + chan_num(chan), ioat->dmacount, ioat->desccount); - spin_unlock_bh(&ioat_chan->desc_lock); - spin_unlock_bh(&ioat_chan->cleanup_lock); + spin_unlock_bh(&ioat->desc_lock); + spin_unlock_bh(&chan->cleanup_lock); } /** * ioat_dma_reset_channel - restart a channel - * @ioat_chan: IOAT DMA channel handle + * @ioat: IOAT DMA channel handle */ -static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) +static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat) { + struct ioat_chan_common *chan = &ioat->base; + void __iomem *reg_base = chan->reg_base; u32 chansts, chanerr; - if (!ioat_chan->used_desc.prev) + if (!ioat->used_desc.prev) return; - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - chansts = (ioat_chan->completion_virt->low + chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); + chansts = (chan->completion_virt->low & IOAT_CHANSTS_DMA_TRANSFER_STATUS); if (chanerr) { - dev_err(to_dev(ioat_chan), + dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", - chan_num(ioat_chan), chansts, chanerr); - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + 
chan_num(chan), chansts, chanerr); + writel(chanerr, reg_base + IOAT_CHANERR_OFFSET); } /* @@ -296,15 +305,14 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) * while we're waiting. */ - spin_lock_bh(&ioat_chan->desc_lock); - ioat_chan->pending = INT_MIN; + spin_lock_bh(&ioat->desc_lock); + ioat->pending = INT_MIN; writeb(IOAT_CHANCMD_RESET, - ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); - spin_unlock_bh(&ioat_chan->desc_lock); + reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); + spin_unlock_bh(&ioat->desc_lock); /* schedule the 2nd half instead of sleeping a long time */ - schedule_delayed_work(&ioat_chan->work, RESET_DELAY); + schedule_delayed_work(&chan->work, RESET_DELAY); } /** @@ -314,7 +322,8 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) { struct ioatdma_device *device = container_of(work, struct ioatdma_device, work.work); - struct ioat_dma_chan *ioat_chan; + struct ioat_dma_chan *ioat; + struct ioat_chan_common *chan; int i; union { @@ -327,23 +336,21 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) unsigned long compl_desc_addr_hw; for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_chan_by_index(device, i); + chan = ioat_chan_by_index(device, i); + ioat = container_of(chan, struct ioat_dma_chan, base); - if (ioat_chan->device->version == IOAT_VER_1_2 + if (chan->device->version == IOAT_VER_1_2 /* have we started processing anything yet */ - && ioat_chan->last_completion + && chan->last_completion /* have we completed any since last watchdog cycle? */ - && (ioat_chan->last_completion == - ioat_chan->watchdog_completion) + && (chan->last_completion == chan->watchdog_completion) /* has TCP stuck on one cookie since last watchdog? */ - && (ioat_chan->watchdog_tcp_cookie == - ioat_chan->watchdog_last_tcp_cookie) - && (ioat_chan->watchdog_tcp_cookie != - ioat_chan->completed_cookie) + && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) + && (chan->watchdog_tcp_cookie != chan->completed_cookie) /* is there something in the chain to be processed? 
*/ /* CB1 chain always has at least the last one processed */ - && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) - && ioat_chan->pending == 0) { + && (ioat->used_desc.prev != ioat->used_desc.next) + && ioat->pending == 0) { /* * check CHANSTS register for completed @@ -360,10 +367,10 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) * try resetting the channel */ - completion_hw.low = readl(ioat_chan->reg_base + - IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); - completion_hw.high = readl(ioat_chan->reg_base + - IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); + completion_hw.low = readl(chan->reg_base + + IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); + completion_hw.high = readl(chan->reg_base + + IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); #if (BITS_PER_LONG == 64) compl_desc_addr_hw = completion_hw.full @@ -374,15 +381,15 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) #endif if ((compl_desc_addr_hw != 0) - && (compl_desc_addr_hw != ioat_chan->watchdog_completion) - && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { - ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; - ioat_chan->completion_virt->low = completion_hw.low; - ioat_chan->completion_virt->high = completion_hw.high; + && (compl_desc_addr_hw != chan->watchdog_completion) + && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { + chan->last_compl_desc_addr_hw = compl_desc_addr_hw; + chan->completion_virt->low = completion_hw.low; + chan->completion_virt->high = completion_hw.high; } else { - ioat_dma_reset_channel(ioat_chan); - ioat_chan->watchdog_completion = 0; - ioat_chan->last_compl_desc_addr_hw = 0; + ioat_dma_reset_channel(ioat); + chan->watchdog_completion = 0; + chan->last_compl_desc_addr_hw = 0; } /* @@ -393,25 +400,22 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) * else * try resetting the channel */ - } else if (ioat_chan->device->version == IOAT_VER_2_0 - && ioat_chan->used_desc.prev - && ioat_chan->last_completion - && ioat_chan->last_completion == ioat_chan->watchdog_completion) { + } else if (chan->device->version == IOAT_VER_2_0 + && ioat->used_desc.prev + && chan->last_completion + && chan->last_completion == chan->watchdog_completion) { - if (ioat_chan->pending < ioat_pending_level) - ioat2_dma_memcpy_issue_pending(&ioat_chan->common); + if (ioat->pending < ioat_pending_level) + ioat2_dma_memcpy_issue_pending(&chan->common); else { - ioat_dma_reset_channel(ioat_chan); - ioat_chan->watchdog_completion = 0; + ioat_dma_reset_channel(ioat); + chan->watchdog_completion = 0; } } else { - ioat_chan->last_compl_desc_addr_hw = 0; - ioat_chan->watchdog_completion - = ioat_chan->last_completion; + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_completion = chan->last_completion; } - - ioat_chan->watchdog_last_tcp_cookie = - ioat_chan->watchdog_tcp_cookie; + chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; } schedule_delayed_work(&device->work, WATCHDOG_DELAY); @@ -419,40 +423,42 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct dma_chan *c = tx->chan; + struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); struct ioat_desc_sw *first; struct ioat_desc_sw *chain_tail; dma_cookie_t cookie; - spin_lock_bh(&ioat_chan->desc_lock); + spin_lock_bh(&ioat->desc_lock); /* cookie incr and addition to used_list must be 
atomic */ - cookie = ioat_chan->common.cookie; + cookie = c->cookie; cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = tx->cookie = cookie; + c->cookie = cookie; + tx->cookie = cookie; /* write address into NextDescriptor field of last desc in chain */ first = to_ioat_desc(tx->tx_list.next); - chain_tail = to_ioat_desc(ioat_chan->used_desc.prev); + chain_tail = to_ioat_desc(ioat->used_desc.prev); /* make descriptor updates globally visible before chaining */ wmb(); chain_tail->hw->next = first->txd.phys; - list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc); + list_splice_tail_init(&tx->tx_list, &ioat->used_desc); - ioat_chan->dmacount += desc->tx_cnt; - ioat_chan->pending += desc->tx_cnt; - if (ioat_chan->pending >= ioat_pending_level) - __ioat1_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + ioat->dmacount += desc->tx_cnt; + ioat->pending += desc->tx_cnt; + if (ioat->pending >= ioat_pending_level) + __ioat1_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); return cookie; } static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct ioat_dma_chan *ioat = to_ioat_chan(tx->chan); struct ioat_desc_sw *first = tx_to_ioat_desc(tx); struct ioat_desc_sw *new; struct ioat_dma_descriptor *hw; @@ -471,11 +477,11 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) new = first; /* - * ioat_chan->desc_lock is still in force in version 2 path + * ioat->desc_lock is still in force in version 2 path * it gets unlocked at end of this function */ do { - copy = min_t(size_t, len, ioat_chan->xfercap); + copy = min_t(size_t, len, ioat->xfercap); async_tx_ack(&new->txd); @@ -489,11 +495,11 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) dst += copy; src += copy; desc_count++; - } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); + } while (len && (new = ioat2_dma_get_next_descriptor(ioat))); if (!new) { - dev_err(to_dev(ioat_chan), "tx submit failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); + dev_err(to_dev(&ioat->base), "tx submit failed\n"); + spin_unlock_bh(&ioat->desc_lock); return -ENOMEM; } @@ -521,35 +527,35 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) } /* cookie incr and addition to used_list must be atomic */ - cookie = ioat_chan->common.cookie; + cookie = ioat->base.common.cookie; cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->txd.cookie = cookie; + ioat->base.common.cookie = new->txd.cookie = cookie; - ioat_chan->dmacount += desc_count; - ioat_chan->pending += desc_count; - if (ioat_chan->pending >= ioat_pending_level) - __ioat2_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + ioat->dmacount += desc_count; + ioat->pending += desc_count; + if (ioat->pending >= ioat_pending_level) + __ioat2_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); return cookie; } /** * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair - * @ioat_chan: the channel supplying the memory pool for the descriptors + * @ioat: the channel supplying the memory pool for the descriptors * @flags: allocation flags */ static struct ioat_desc_sw * -ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags) +ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) { struct ioat_dma_descriptor *desc; struct ioat_desc_sw *desc_sw; struct ioatdma_device *ioatdma_device; 
dma_addr_t phys; - ioatdma_device = to_ioatdma_device(ioat_chan->common.device); + ioatdma_device = ioat->base.device; desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); if (unlikely(!desc)) return NULL; @@ -561,8 +567,8 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags) } memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common); - switch (ioat_chan->device->version) { + dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); + switch (ioatdma_device->version) { case IOAT_VER_1_2: desc_sw->txd.tx_submit = ioat1_tx_submit; break; @@ -585,26 +591,26 @@ MODULE_PARM_DESC(ioat_initial_desc_count, /** * ioat2_dma_massage_chan_desc - link the descriptors into a circle - * @ioat_chan: the channel to be massaged + * @ioat: the channel to be massaged */ -static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) +static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat) { struct ioat_desc_sw *desc, *_desc; /* setup used_desc */ - ioat_chan->used_desc.next = ioat_chan->free_desc.next; - ioat_chan->used_desc.prev = NULL; + ioat->used_desc.next = ioat->free_desc.next; + ioat->used_desc.prev = NULL; /* pull free_desc out of the circle so that every node is a hw * descriptor, but leave it pointing to the list */ - ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; - ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; + ioat->free_desc.prev->next = ioat->free_desc.next; + ioat->free_desc.next->prev = ioat->free_desc.prev; /* circle link the hw descriptors */ - desc = to_ioat_desc(ioat_chan->free_desc.next); + desc = to_ioat_desc(ioat->free_desc.next); desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; - list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { + list_for_each_entry_safe(desc, _desc, ioat->free_desc.next, node) { desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; } } @@ -613,9 +619,10 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors * @chan: the channel to be filled out */ -static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) +static int ioat_dma_alloc_chan_resources(struct dma_chan *c) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; u16 chanctrl; u32 chanerr; @@ -623,89 +630,87 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) LIST_HEAD(tmp_list); /* have we already been set up? 
*/ - if (!list_empty(&ioat_chan->free_desc)) - return ioat_chan->desccount; + if (!list_empty(&ioat->free_desc)) + return ioat->desccount; /* Setup register to interrupt and write completion status on error */ chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | IOAT_CHANCTRL_ERR_COMPLETION_EN; - writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); + writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { - dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr); - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); } /* Allocate descriptors */ for (i = 0; i < ioat_initial_desc_count; i++) { - desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); + desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL); if (!desc) { - dev_err(to_dev(ioat_chan), - "Only %d initial descriptors\n", i); + dev_err(to_dev(chan), "Only %d initial descriptors\n", i); break; } list_add_tail(&desc->node, &tmp_list); } - spin_lock_bh(&ioat_chan->desc_lock); - ioat_chan->desccount = i; - list_splice(&tmp_list, &ioat_chan->free_desc); - if (ioat_chan->device->version != IOAT_VER_1_2) - ioat2_dma_massage_chan_desc(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + spin_lock_bh(&ioat->desc_lock); + ioat->desccount = i; + list_splice(&tmp_list, &ioat->free_desc); + if (chan->device->version != IOAT_VER_1_2) + ioat2_dma_massage_chan_desc(ioat); + spin_unlock_bh(&ioat->desc_lock); /* allocate a completion writeback area */ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - ioat_chan->completion_virt = - pci_pool_alloc(ioat_chan->device->completion_pool, - GFP_KERNEL, - &ioat_chan->completion_addr); - memset(ioat_chan->completion_virt, 0, - sizeof(*ioat_chan->completion_virt)); - writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) ioat_chan->completion_addr) >> 32, - ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - - tasklet_enable(&ioat_chan->cleanup_task); - ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ - return ioat_chan->desccount; + chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, + &chan->completion_addr); + memset(chan->completion_virt, 0, + sizeof(*chan->completion_virt)); + writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64) chan->completion_addr) >> 32, + chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); + + tasklet_enable(&chan->cleanup_task); + ioat_dma_start_null_desc(ioat); /* give chain to dma device */ + return ioat->desccount; } /** * ioat_dma_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned */ -static void ioat_dma_free_chan_resources(struct dma_chan *chan) +static void ioat_dma_free_chan_resources(struct dma_chan *c) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); + struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *ioatdma_device = chan->device; struct ioat_desc_sw *desc, *_desc; int in_use_descs = 0; /* Before freeing channel resources first check * if they have been previously allocated for this channel. 
*/ - if (ioat_chan->desccount == 0) + if (ioat->desccount == 0) return; - tasklet_disable(&ioat_chan->cleanup_task); - ioat_dma_memcpy_cleanup(ioat_chan); + tasklet_disable(&chan->cleanup_task); + ioat_dma_memcpy_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. */ writeb(IOAT_CHANCMD_RESET, - ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); mdelay(100); - spin_lock_bh(&ioat_chan->desc_lock); - switch (ioat_chan->device->version) { + spin_lock_bh(&ioat->desc_lock); + switch (chan->device->version) { case IOAT_VER_1_2: list_for_each_entry_safe(desc, _desc, - &ioat_chan->used_desc, node) { + &ioat->used_desc, node) { in_use_descs++; list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, @@ -713,7 +718,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) kfree(desc); } list_for_each_entry_safe(desc, _desc, - &ioat_chan->free_desc, node) { + &ioat->free_desc, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); @@ -723,62 +728,61 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) case IOAT_VER_2_0: case IOAT_VER_3_0: list_for_each_entry_safe(desc, _desc, - ioat_chan->free_desc.next, node) { + ioat->free_desc.next, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); kfree(desc); } - desc = to_ioat_desc(ioat_chan->free_desc.next); + desc = to_ioat_desc(ioat->free_desc.next); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); kfree(desc); - INIT_LIST_HEAD(&ioat_chan->free_desc); - INIT_LIST_HEAD(&ioat_chan->used_desc); + INIT_LIST_HEAD(&ioat->free_desc); + INIT_LIST_HEAD(&ioat->used_desc); break; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); pci_pool_free(ioatdma_device->completion_pool, - ioat_chan->completion_virt, - ioat_chan->completion_addr); + chan->completion_virt, + chan->completion_addr); /* one is ok since we left it on there on purpose */ if (in_use_descs > 1) - dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", + dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", in_use_descs - 1); - ioat_chan->last_completion = ioat_chan->completion_addr = 0; - ioat_chan->pending = 0; - ioat_chan->dmacount = 0; - ioat_chan->desccount = 0; - ioat_chan->watchdog_completion = 0; - ioat_chan->last_compl_desc_addr_hw = 0; - ioat_chan->watchdog_tcp_cookie = - ioat_chan->watchdog_last_tcp_cookie = 0; + chan->last_completion = chan->completion_addr = 0; + chan->watchdog_completion = 0; + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; + ioat->pending = 0; + ioat->dmacount = 0; + ioat->desccount = 0; } /** - * ioat_dma_get_next_descriptor - return the next available descriptor - * @ioat_chan: IOAT DMA channel handle + * ioat1_dma_get_next_descriptor - return the next available descriptor + * @ioat: IOAT DMA channel handle * * Gets the next descriptor from the chain, and must be called with the * channel's desc_lock held. Allocates more descriptors if the channel * has run out. 
*/ static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) { struct ioat_desc_sw *new; - if (!list_empty(&ioat_chan->free_desc)) { - new = to_ioat_desc(ioat_chan->free_desc.next); + if (!list_empty(&ioat->free_desc)) { + new = to_ioat_desc(ioat->free_desc.next); list_del(&new->node); } else { /* try to get another desc */ - new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); + new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); if (!new) { - dev_err(to_dev(ioat_chan), "alloc failed\n"); + dev_err(to_dev(&ioat->base), "alloc failed\n"); return NULL; } } @@ -788,7 +792,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) } static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat) { struct ioat_desc_sw *new; @@ -801,15 +805,15 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) * linking in a new set of descriptors, since the device * has probably already read the pointer to it */ - if (ioat_chan->used_desc.prev && - ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { + if (ioat->used_desc.prev && + ioat->used_desc.next == ioat->used_desc.prev->prev) { struct ioat_desc_sw *desc; struct ioat_desc_sw *noop_desc; int i; /* set up the noop descriptor */ - noop_desc = to_ioat_desc(ioat_chan->used_desc.next); + noop_desc = to_ioat_desc(ioat->used_desc.next); /* set size to non-zero value (channel returns error when size is 0) */ noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; noop_desc->hw->ctl = 0; @@ -817,60 +821,61 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) noop_desc->hw->src_addr = 0; noop_desc->hw->dst_addr = 0; - ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; - ioat_chan->pending++; - ioat_chan->dmacount++; + ioat->used_desc.next = ioat->used_desc.next->next; + ioat->pending++; + ioat->dmacount++; /* try to get a few more descriptors */ for (i = 16; i; i--) { - desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); + desc = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); if (!desc) { - dev_err(to_dev(ioat_chan), "alloc failed\n"); + dev_err(to_dev(&ioat->base), + "alloc failed\n"); break; } - list_add_tail(&desc->node, ioat_chan->used_desc.next); + list_add_tail(&desc->node, ioat->used_desc.next); desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; to_ioat_desc(desc->node.prev)->hw->next = desc->txd.phys; - ioat_chan->desccount++; + ioat->desccount++; } - ioat_chan->used_desc.next = noop_desc->node.next; + ioat->used_desc.next = noop_desc->node.next; } - new = to_ioat_desc(ioat_chan->used_desc.next); + new = to_ioat_desc(ioat->used_desc.next); prefetch(new); - ioat_chan->used_desc.next = new->node.next; + ioat->used_desc.next = new->node.next; - if (ioat_chan->used_desc.prev == NULL) - ioat_chan->used_desc.prev = &new->node; + if (ioat->used_desc.prev == NULL) + ioat->used_desc.prev = &new->node; prefetch(new->hw); return new; } static struct ioat_desc_sw * -ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat) { - if (!ioat_chan) + if (!ioat) return NULL; - switch (ioat_chan->device->version) { + switch (ioat->base.device->version) { case IOAT_VER_1_2: - return ioat1_dma_get_next_descriptor(ioat_chan); + return ioat1_dma_get_next_descriptor(ioat); case IOAT_VER_2_0: case IOAT_VER_3_0: - return ioat2_dma_get_next_descriptor(ioat_chan); + return 
ioat2_dma_get_next_descriptor(ioat); } return NULL; } static struct dma_async_tx_descriptor * -ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, +ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *desc; size_t copy; LIST_HEAD(chain); @@ -880,14 +885,14 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, struct ioat_dma_descriptor *hw = NULL; int tx_cnt = 0; - spin_lock_bh(&ioat_chan->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat_chan); + spin_lock_bh(&ioat->desc_lock); + desc = ioat_dma_get_next_descriptor(ioat); do { if (!desc) break; tx_cnt++; - copy = min_t(size_t, len, ioat_chan->xfercap); + copy = min_t(size_t, len, ioat->xfercap); hw = desc->hw; hw->size = copy; @@ -904,7 +909,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, struct ioat_desc_sw *next; async_tx_ack(&desc->txd); - next = ioat_dma_get_next_descriptor(ioat_chan); + next = ioat_dma_get_next_descriptor(ioat); hw->next = next ? next->txd.phys : 0; desc = next; } else @@ -912,14 +917,16 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, } while (len); if (!desc) { - dev_err(to_dev(ioat_chan), + struct ioat_chan_common *chan = &ioat->base; + + dev_err(to_dev(chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); - list_splice(&chain, &ioat_chan->free_desc); - spin_unlock_bh(&ioat_chan->desc_lock); + chan_num(chan), ioat->dmacount, ioat->desccount); + list_splice(&chain, &ioat->free_desc); + spin_unlock_bh(&ioat->desc_lock); return NULL; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); desc->txd.flags = flags; desc->tx_cnt = tx_cnt; @@ -934,17 +941,17 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, } static struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, +ioat2_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *new; - spin_lock_bh(&ioat_chan->desc_lock); - new = ioat2_dma_get_next_descriptor(ioat_chan); + spin_lock_bh(&ioat->desc_lock); + new = ioat2_dma_get_next_descriptor(ioat); /* - * leave ioat_chan->desc_lock set in ioat 2 path + * leave ioat->desc_lock set in ioat 2 path * it will get unlocked at end of tx_submit */ @@ -955,10 +962,12 @@ ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, new->txd.flags = flags; return &new->txd; } else { - spin_unlock_bh(&ioat_chan->desc_lock); - dev_err(to_dev(ioat_chan), + struct ioat_chan_common *chan = &ioat->base; + + spin_unlock_bh(&ioat->desc_lock); + dev_err(to_dev(chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + chan_num(chan), ioat->dmacount, ioat->desccount); return NULL; } } @@ -968,20 +977,20 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) struct ioat_dma_chan *chan = (void *)data; ioat_dma_memcpy_cleanup(chan); writew(IOAT_CHANCTRL_INT_DISABLE, - chan->reg_base + IOAT_CHANCTRL_OFFSET); + chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void -ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw 
*desc) +ioat_dma_unmap(struct ioat_chan_common *chan, struct ioat_desc_sw *desc) { if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_single(chan->device->pdev, pci_unmap_addr(desc, dst), pci_unmap_len(desc, len), PCI_DMA_FROMDEVICE); else - pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_page(chan->device->pdev, pci_unmap_addr(desc, dst), pci_unmap_len(desc, len), PCI_DMA_FROMDEVICE); @@ -989,12 +998,12 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_single(chan->device->pdev, pci_unmap_addr(desc, src), pci_unmap_len(desc, len), PCI_DMA_TODEVICE); else - pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_page(chan->device->pdev, pci_unmap_addr(desc, src), pci_unmap_len(desc, len), PCI_DMA_TODEVICE); @@ -1005,8 +1014,9 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) * ioat_dma_memcpy_cleanup - cleanup up finished descriptors * @chan: ioat channel to be cleaned up */ -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) +static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) { + struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; struct ioat_desc_sw *desc, *_desc; dma_cookie_t cookie = 0; @@ -1014,9 +1024,9 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) struct ioat_desc_sw *latest_desc; struct dma_async_tx_descriptor *tx; - prefetch(ioat_chan->completion_virt); + prefetch(chan->completion_virt); - if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) + if (!spin_trylock_bh(&chan->cleanup_lock)) return; /* The completion writeback can happen at any time, @@ -1026,49 +1036,47 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) #if (BITS_PER_LONG == 64) phys_complete = - ioat_chan->completion_virt->full + chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; #else - phys_complete = - ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; + phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; #endif - if ((ioat_chan->completion_virt->full + if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { - dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n", - readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); + dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", + readl(chan->reg_base + IOAT_CHANERR_OFFSET)); /* TODO do something to salvage the situation */ } - if (phys_complete == ioat_chan->last_completion) { - spin_unlock_bh(&ioat_chan->cleanup_lock); + if (phys_complete == chan->last_completion) { + spin_unlock_bh(&chan->cleanup_lock); /* * perhaps we're stuck so hard that the watchdog can't go off? 
* try to catch it after 2 seconds */ - if (ioat_chan->device->version != IOAT_VER_3_0) { + if (chan->device->version != IOAT_VER_3_0) { if (time_after(jiffies, - ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { - ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); - ioat_chan->last_completion_time = jiffies; + chan->last_completion_time + HZ*WATCHDOG_DELAY)) { + ioat_dma_chan_watchdog(&(chan->device->work.work)); + chan->last_completion_time = jiffies; } } return; } - ioat_chan->last_completion_time = jiffies; + chan->last_completion_time = jiffies; cookie = 0; - if (!spin_trylock_bh(&ioat_chan->desc_lock)) { - spin_unlock_bh(&ioat_chan->cleanup_lock); + if (!spin_trylock_bh(&ioat->desc_lock)) { + spin_unlock_bh(&chan->cleanup_lock); return; } - switch (ioat_chan->device->version) { + switch (chan->device->version) { case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, - &ioat_chan->used_desc, node) { + list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { tx = &desc->txd; /* * Incoming DMA requests may use multiple descriptors, @@ -1077,7 +1085,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) */ if (tx->cookie) { cookie = tx->cookie; - ioat_dma_unmap(ioat_chan, desc); + ioat_dma_unmap(chan, desc); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; @@ -1091,7 +1099,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) */ if (async_tx_test_ack(tx)) { list_move_tail(&desc->node, - &ioat_chan->free_desc); + &ioat->free_desc); } else tx->cookie = 0; } else { @@ -1110,11 +1118,11 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) case IOAT_VER_2_0: case IOAT_VER_3_0: /* has some other thread has already cleaned up? */ - if (ioat_chan->used_desc.prev == NULL) + if (ioat->used_desc.prev == NULL) break; /* work backwards to find latest finished desc */ - desc = to_ioat_desc(ioat_chan->used_desc.next); + desc = to_ioat_desc(ioat->used_desc.next); tx = &desc->txd; latest_desc = NULL; do { @@ -1125,18 +1133,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) latest_desc = desc; break; } - } while (&desc->node != ioat_chan->used_desc.prev); + } while (&desc->node != ioat->used_desc.prev); if (latest_desc != NULL) { /* work forwards to clear finished descriptors */ - for (desc = to_ioat_desc(ioat_chan->used_desc.prev); + for (desc = to_ioat_desc(ioat->used_desc.prev); &desc->node != latest_desc->node.next && - &desc->node != ioat_chan->used_desc.next; + &desc->node != ioat->used_desc.next; desc = to_ioat_desc(desc->node.next)) { if (tx->cookie) { cookie = tx->cookie; tx->cookie = 0; - ioat_dma_unmap(ioat_chan, desc); + ioat_dma_unmap(chan, desc); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; @@ -1145,21 +1153,21 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) } /* move used.prev up beyond those that are finished */ - if (&desc->node == ioat_chan->used_desc.next) - ioat_chan->used_desc.prev = NULL; + if (&desc->node == ioat->used_desc.next) + ioat->used_desc.prev = NULL; else - ioat_chan->used_desc.prev = &desc->node; + ioat->used_desc.prev = &desc->node; } break; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); - ioat_chan->last_completion = phys_complete; + chan->last_completion = phys_complete; if (cookie != 0) - ioat_chan->completed_cookie = cookie; + chan->completed_cookie = cookie; - spin_unlock_bh(&ioat_chan->cleanup_lock); + spin_unlock_bh(&chan->cleanup_lock); } /** @@ 
-1170,17 +1178,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) * @used: if not %NULL, updated with last used transaction */ static enum dma_status -ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, +ioat_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = &ioat->base; dma_cookie_t last_used; dma_cookie_t last_complete; enum dma_status ret; - last_used = chan->cookie; - last_complete = ioat_chan->completed_cookie; - ioat_chan->watchdog_tcp_cookie = cookie; + last_used = c->cookie; + last_complete = chan->completed_cookie; + chan->watchdog_tcp_cookie = cookie; if (done) *done = last_complete; @@ -1191,10 +1200,10 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, if (ret == DMA_SUCCESS) return ret; - ioat_dma_memcpy_cleanup(ioat_chan); + ioat_dma_memcpy_cleanup(ioat); - last_used = chan->cookie; - last_complete = ioat_chan->completed_cookie; + last_used = c->cookie; + last_complete = chan->completed_cookie; if (done) *done = last_complete; @@ -1204,19 +1213,20 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, return dma_async_is_complete(cookie, last_complete, last_used); } -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) +static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) { + struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; struct ioat_dma_descriptor *hw; - spin_lock_bh(&ioat_chan->desc_lock); + spin_lock_bh(&ioat->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat_chan); + desc = ioat_dma_get_next_descriptor(ioat); if (!desc) { - dev_err(to_dev(ioat_chan), + dev_err(to_dev(chan), "Unable to start null desc - get next desc failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); return; } @@ -1230,31 +1240,31 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) hw->src_addr = 0; hw->dst_addr = 0; async_tx_ack(&desc->txd); - switch (ioat_chan->device->version) { + switch (chan->device->version) { case IOAT_VER_1_2: hw->next = 0; - list_add_tail(&desc->node, &ioat_chan->used_desc); + list_add_tail(&desc->node, &ioat->used_desc); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - writeb(IOAT_CHANCMD_START, ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + writeb(IOAT_CHANCMD_START, chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); break; case IOAT_VER_2_0: case IOAT_VER_3_0: writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - ioat_chan->dmacount++; - __ioat2_dma_memcpy_issue_pending(ioat_chan); + ioat->dmacount++; + __ioat2_dma_memcpy_issue_pending(ioat); break; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); } /* @@ -1371,7 +1381,7 @@ MODULE_PARM_DESC(ioat_interrupt_style, */ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) { - struct ioat_dma_chan 
*ioat_chan; + struct ioat_chan_common *chan; struct pci_dev *pdev = device->pdev; struct device *dev = &pdev->dev; struct msix_entry *msix; @@ -1404,15 +1414,15 @@ msix: for (i = 0; i < msixcnt; i++) { msix = &device->msix_entries[i]; - ioat_chan = ioat_chan_by_index(device, i); + chan = ioat_chan_by_index(device, i); err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt_msix, 0, - "ioat-msix", ioat_chan); + "ioat-msix", chan); if (err) { for (j = 0; j < i; j++) { msix = &device->msix_entries[j]; - ioat_chan = ioat_chan_by_index(device, j); - devm_free_irq(dev, msix->vector, ioat_chan); + chan = ioat_chan_by_index(device, j); + devm_free_irq(dev, msix->vector, chan); } goto msix_single_vector; } @@ -1594,8 +1604,8 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; - struct dma_chan *chan; - struct ioat_dma_chan *ioat_chan; + struct dma_chan *c; + struct ioat_chan_common *chan; int err; dma = &device->common; @@ -1607,10 +1617,10 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca) return err; ioat_set_tcp_copy_break(2048); - list_for_each_entry(chan, &dma->channels, device_node) { - ioat_chan = to_ioat_chan(chan); + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + chan->reg_base + IOAT_DCACTRL_OFFSET); } err = ioat_register(device); @@ -1629,8 +1639,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; - struct dma_chan *chan; - struct ioat_dma_chan *ioat_chan; + struct dma_chan *c; + struct ioat_chan_common *chan; int err; u16 dev_id; @@ -1656,10 +1666,10 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) return err; ioat_set_tcp_copy_break(262144); - list_for_each_entry(chan, &dma->channels, device_node) { - ioat_chan = to_ioat_chan(chan); + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); writel(IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + chan->reg_base + IOAT_DCACTRL_OFFSET); } err = ioat_register(device); @@ -1673,8 +1683,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) void ioat_dma_remove(struct ioatdma_device *device) { - struct dma_chan *chan, *_chan; - struct ioat_dma_chan *ioat_chan; struct dma_device *dma = &device->common; if (device->version != IOAT_VER_3_0) @@ -1687,9 +1695,6 @@ void ioat_dma_remove(struct ioatdma_device *device) pci_pool_destroy(device->dma_pool); pci_pool_destroy(device->completion_pool); - list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) { - ioat_chan = to_ioat_chan(chan); - list_del(&chan->device_node); - } + INIT_LIST_HEAD(&dma->channels); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 9f0c853b6a77..5b31db73ad8e 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -35,7 +35,6 @@ #define IOAT_DMA_DCA_ANY_CPU ~0 #define IOAT_WATCHDOG_PERIOD (2 * HZ) -#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) @@ -74,37 +73,24 @@ struct ioatdma_device { u8 version; struct delayed_work work; struct msix_entry msix_entries[4]; - struct ioat_dma_chan *idx[4]; + struct 
ioat_chan_common *idx[4]; struct dca_provider *dca; void (*intr_quirk)(struct ioatdma_device *device); }; -/** - * struct ioat_dma_chan - internal representation of a DMA channel - */ -struct ioat_dma_chan { - +struct ioat_chan_common { void __iomem *reg_base; - dma_cookie_t completed_cookie; unsigned long last_completion; unsigned long last_completion_time; - size_t xfercap; /* XFERCAP register value expanded out */ - spinlock_t cleanup_lock; - spinlock_t desc_lock; - struct list_head free_desc; - struct list_head used_desc; + dma_cookie_t completed_cookie; unsigned long watchdog_completion; int watchdog_tcp_cookie; u32 watchdog_last_tcp_cookie; struct delayed_work work; - int pending; - u16 dmacount; - u16 desccount; - struct ioatdma_device *device; struct dma_chan common; @@ -120,6 +106,35 @@ struct ioat_dma_chan { struct tasklet_struct cleanup_task; }; +/** + * struct ioat_dma_chan - internal representation of a DMA channel + */ +struct ioat_dma_chan { + struct ioat_chan_common base; + + size_t xfercap; /* XFERCAP register value expanded out */ + + spinlock_t desc_lock; + struct list_head free_desc; + struct list_head used_desc; + + int pending; + u16 dmacount; + u16 desccount; +}; + +static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) +{ + return container_of(c, struct ioat_chan_common, common); +} + +static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) +{ + struct ioat_chan_common *chan = to_chan_common(c); + + return container_of(chan, struct ioat_dma_chan, base); +} + /* wrapper around hardware descriptor format + additional software fields */ /** -- cgit v1.2.3 From 5cbafa65b92ee4f5b8ba915cddf94b91f186b989 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 26 Aug 2009 13:01:44 -0700 Subject: ioat2,3: convert to a true ring buffer Replace the current linked list munged into a ring with a native ring buffer implementation. The benefit of this approach is reduced overhead as many parameters can be derived from ring position with simple pointer comparisons and descriptor allocation/freeing becomes just a manipulation of head/tail pointers. It requires a contiguous allocation for the software descriptor information. Since this arrangement is significantly different from the ioat1 chain, move ioat2,3 support into its own file and header. Common routines are exported from driver/dma/ioat/dma.[ch]. 
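The sketch below is an illustrative, standalone C model of the bookkeeping this changelog describes, assuming only a power-of-2 ring: "pending", "active" and "space" fall out of the head/issued/tail indices with a single mask, so descriptor allocation and freeing reduce to advancing an index. The demo_ring structure and helper names are invented for the example; they are not the helpers this patch adds in dma_v2.h.

#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint16_t head;    /* next slot handed out to a submitter */
	uint16_t issued;  /* last slot pushed to the hardware */
	uint16_t tail;    /* next slot to be reclaimed on completion */
	uint16_t order;   /* ring holds 1 << order descriptors */
};

static uint16_t demo_ring_size(const struct demo_ring *r)
{
	return 1 << r->order;
}

/* descriptors allocated but not yet passed to the hardware */
static uint16_t demo_ring_pending(const struct demo_ring *r)
{
	return (r->head - r->issued) & (demo_ring_size(r) - 1);
}

/* descriptors the hardware may still be processing */
static uint16_t demo_ring_active(const struct demo_ring *r)
{
	return (r->head - r->tail) & (demo_ring_size(r) - 1);
}

/* free slots; one slot stays unused so "full" and "empty" differ */
static uint16_t demo_ring_space(const struct demo_ring *r)
{
	return demo_ring_size(r) - 1 - demo_ring_active(r);
}

int main(void)
{
	struct demo_ring r = { .head = 0, .issued = 0, .tail = 0, .order = 4 };

	r.head += 3;		/* "allocate" three descriptors */
	r.issued = r.head;	/* "issue" them to the hardware */
	r.tail += 2;		/* cleanup reclaims two completed slots */

	printf("pending=%u active=%u space=%u\n",
	       (unsigned)demo_ring_pending(&r),
	       (unsigned)demo_ring_active(&r),
	       (unsigned)demo_ring_space(&r));
	return 0;
}

Keeping the ring size a power of two turns wraparound into a mask rather than a modulo, which is why the counts above can be derived with plain index arithmetic in the fast path instead of walking a descriptor list.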
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/Makefile | 2 +- drivers/dma/ioat/dma.c | 874 ++++++++++------------------------------------ drivers/dma/ioat/dma.h | 50 ++- drivers/dma/ioat/dma_v2.c | 750 +++++++++++++++++++++++++++++++++++++++ drivers/dma/ioat/dma_v2.h | 131 +++++++ drivers/dma/ioat/pci.c | 1 + 6 files changed, 1122 insertions(+), 686 deletions(-) create mode 100644 drivers/dma/ioat/dma_v2.c create mode 100644 drivers/dma/ioat/dma_v2.h (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 2ce3d3a4270b..205a639e84df 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-objs := pci.o dma.o dca.o +ioatdma-objs := pci.o dma.o dma_v2.o dca.o diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 2e81e0c76e61..64b4d75059aa 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -38,28 +38,14 @@ #include "registers.h" #include "hw.h" -static int ioat_pending_level = 4; +int ioat_pending_level = 4; module_param(ioat_pending_level, int, 0644); MODULE_PARM_DESC(ioat_pending_level, "high-water mark for pushing ioat descriptors (default: 4)"); -static void ioat_dma_chan_reset_part2(struct work_struct *work); -static void ioat_dma_chan_watchdog(struct work_struct *work); - /* internal functions */ -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat); -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat); - -static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat); -static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat); - -static inline struct ioat_chan_common * -ioat_chan_by_index(struct ioatdma_device *device, int index) -{ - return device->idx[index]; -} +static void ioat1_cleanup(struct ioat_dma_chan *ioat); +static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat); /** * ioat_dma_do_interrupt - handler used for single vector interrupt mode @@ -108,18 +94,38 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) return IRQ_HANDLED; } -static void ioat_dma_cleanup_tasklet(unsigned long data); +static void ioat1_cleanup_tasklet(unsigned long data); + +/* common channel initialization */ +void ioat_init_channel(struct ioatdma_device *device, + struct ioat_chan_common *chan, int idx, + work_func_t work_fn, void (*tasklet)(unsigned long), + unsigned long tasklet_data) +{ + struct dma_device *dma = &device->common; + + chan->device = device; + chan->reg_base = device->reg_base + (0x80 * (idx + 1)); + INIT_DELAYED_WORK(&chan->work, work_fn); + spin_lock_init(&chan->cleanup_lock); + chan->common.device = dma; + list_add_tail(&chan->common.device_node, &dma->channels); + device->idx[idx] = chan; + tasklet_init(&chan->cleanup_task, tasklet, tasklet_data); + tasklet_disable(&chan->cleanup_task); +} + +static void ioat1_reset_part2(struct work_struct *work); /** - * ioat_dma_enumerate_channels - find and initialize the device's channels + * ioat1_dma_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated */ -static int ioat_dma_enumerate_channels(struct ioatdma_device *device) +static int ioat1_enumerate_channels(struct ioatdma_device *device) { u8 xfercap_scale; u32 xfercap; int i; - struct ioat_chan_common *chan; struct ioat_dma_chan *ioat; struct device *dev = &device->pdev->dev; struct dma_device *dma = &device->common; @@ -135,31 +141,20 @@ static 
int ioat_dma_enumerate_channels(struct ioatdma_device *device) #endif for (i = 0; i < dma->chancnt; i++) { ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); - if (!ioat) { - dma->chancnt = i; + if (!ioat) break; - } - chan = &ioat->base; - chan->device = device; - chan->reg_base = device->reg_base + (0x80 * (i + 1)); + ioat_init_channel(device, &ioat->base, i, + ioat1_reset_part2, + ioat1_cleanup_tasklet, + (unsigned long) ioat); ioat->xfercap = xfercap; - ioat->desccount = 0; - INIT_DELAYED_WORK(&chan->work, ioat_dma_chan_reset_part2); - spin_lock_init(&chan->cleanup_lock); spin_lock_init(&ioat->desc_lock); INIT_LIST_HEAD(&ioat->free_desc); INIT_LIST_HEAD(&ioat->used_desc); - /* This should be made common somewhere in dmaengine.c */ - chan->common.device = &device->common; - list_add_tail(&chan->common.device_node, &dma->channels); - device->idx[i] = chan; - tasklet_init(&chan->cleanup_task, - ioat_dma_cleanup_tasklet, - (unsigned long) ioat); - tasklet_disable(&chan->cleanup_task); } - return dma->chancnt; + dma->chancnt = i; + return i; } /** @@ -187,35 +182,16 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) } } -static inline void -__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) -{ - void __iomem *reg_base = ioat->base.reg_base; - - ioat->pending = 0; - writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); -} - -static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(chan); - - if (ioat->pending > 0) { - spin_lock_bh(&ioat->desc_lock); - __ioat2_dma_memcpy_issue_pending(ioat); - spin_unlock_bh(&ioat->desc_lock); - } -} - - /** - * ioat_dma_chan_reset_part2 - reinit the channel after a reset + * ioat1_reset_part2 - reinit the channel after a reset */ -static void ioat_dma_chan_reset_part2(struct work_struct *work) +static void ioat1_reset_part2(struct work_struct *work) { struct ioat_chan_common *chan; struct ioat_dma_chan *ioat; struct ioat_desc_sw *desc; + int dmacount; + bool start_null = false; chan = container_of(work, struct ioat_chan_common, work.work); ioat = container_of(chan, struct ioat_dma_chan, base); @@ -226,26 +202,22 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) chan->completion_virt->high = 0; ioat->pending = 0; - /* - * count the descriptors waiting, and be sure to do it - * right for both the CB1 line and the CB2 ring - */ - ioat->dmacount = 0; + /* count the descriptors waiting */ + dmacount = 0; if (ioat->used_desc.prev) { desc = to_ioat_desc(ioat->used_desc.prev); do { - ioat->dmacount++; + dmacount++; desc = to_ioat_desc(desc->node.next); } while (&desc->node != ioat->used_desc.next); } - /* - * write the new starting descriptor address - * this puts channel engine into ARMED state - */ - desc = to_ioat_desc(ioat->used_desc.prev); - switch (chan->device->version) { - case IOAT_VER_1_2: + if (dmacount) { + /* + * write the new starting descriptor address + * this puts channel engine into ARMED state + */ + desc = to_ioat_desc(ioat->used_desc.prev); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, @@ -253,32 +225,24 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - break; - case IOAT_VER_2_0: - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + 
IOAT2_CHAINADDR_OFFSET_HIGH); - - /* tell the engine to go with what's left to be done */ - writew(ioat->dmacount, - chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + } else + start_null = true; + spin_unlock_bh(&ioat->desc_lock); + spin_unlock_bh(&chan->cleanup_lock); - break; - } dev_err(to_dev(chan), "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, ioat->desccount); + chan_num(chan), dmacount, ioat->desccount); - spin_unlock_bh(&ioat->desc_lock); - spin_unlock_bh(&chan->cleanup_lock); + if (start_null) + ioat1_dma_start_null_desc(ioat); } /** - * ioat_dma_reset_channel - restart a channel + * ioat1_reset_channel - restart a channel * @ioat: IOAT DMA channel handle */ -static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat) +static void ioat1_reset_channel(struct ioat_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; void __iomem *reg_base = chan->reg_base; @@ -316,9 +280,9 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat) } /** - * ioat_dma_chan_watchdog - watch for stuck channels + * ioat1_chan_watchdog - watch for stuck channels */ -static void ioat_dma_chan_watchdog(struct work_struct *work) +static void ioat1_chan_watchdog(struct work_struct *work) { struct ioatdma_device *device = container_of(work, struct ioatdma_device, work.work); @@ -339,16 +303,15 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) chan = ioat_chan_by_index(device, i); ioat = container_of(chan, struct ioat_dma_chan, base); - if (chan->device->version == IOAT_VER_1_2 - /* have we started processing anything yet */ - && chan->last_completion - /* have we completed any since last watchdog cycle? */ + if (/* have we started processing anything yet */ + chan->last_completion + /* have we completed any since last watchdog cycle? */ && (chan->last_completion == chan->watchdog_completion) - /* has TCP stuck on one cookie since last watchdog? */ + /* has TCP stuck on one cookie since last watchdog? */ && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) && (chan->watchdog_tcp_cookie != chan->completed_cookie) - /* is there something in the chain to be processed? */ - /* CB1 chain always has at least the last one processed */ + /* is there something in the chain to be processed? 
*/ + /* CB1 chain always has at least the last one processed */ && (ioat->used_desc.prev != ioat->used_desc.next) && ioat->pending == 0) { @@ -387,34 +350,15 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) chan->completion_virt->low = completion_hw.low; chan->completion_virt->high = completion_hw.high; } else { - ioat_dma_reset_channel(ioat); + ioat1_reset_channel(ioat); chan->watchdog_completion = 0; chan->last_compl_desc_addr_hw = 0; } - - /* - * for version 2.0 if there are descriptors yet to be processed - * and the last completed hasn't changed since the last watchdog - * if they haven't hit the pending level - * issue the pending to push them through - * else - * try resetting the channel - */ - } else if (chan->device->version == IOAT_VER_2_0 - && ioat->used_desc.prev - && chan->last_completion - && chan->last_completion == chan->watchdog_completion) { - - if (ioat->pending < ioat_pending_level) - ioat2_dma_memcpy_issue_pending(&chan->common); - else { - ioat_dma_reset_channel(ioat); - chan->watchdog_completion = 0; - } } else { chan->last_compl_desc_addr_hw = 0; chan->watchdog_completion = chan->last_completion; } + chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; } @@ -447,7 +391,6 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) chain_tail->hw->next = first->txd.phys; list_splice_tail_init(&tx->tx_list, &ioat->used_desc); - ioat->dmacount += desc->tx_cnt; ioat->pending += desc->tx_cnt; if (ioat->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat); @@ -456,92 +399,6 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) return cookie; } -static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(tx->chan); - struct ioat_desc_sw *first = tx_to_ioat_desc(tx); - struct ioat_desc_sw *new; - struct ioat_dma_descriptor *hw; - dma_cookie_t cookie; - u32 copy; - size_t len; - dma_addr_t src, dst; - unsigned long orig_flags; - unsigned int desc_count = 0; - - /* src and dest and len are stored in the initial descriptor */ - len = first->len; - src = first->src; - dst = first->dst; - orig_flags = first->txd.flags; - new = first; - - /* - * ioat->desc_lock is still in force in version 2 path - * it gets unlocked at end of this function - */ - do { - copy = min_t(size_t, len, ioat->xfercap); - - async_tx_ack(&new->txd); - - hw = new->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - - len -= copy; - dst += copy; - src += copy; - desc_count++; - } while (len && (new = ioat2_dma_get_next_descriptor(ioat))); - - if (!new) { - dev_err(to_dev(&ioat->base), "tx submit failed\n"); - spin_unlock_bh(&ioat->desc_lock); - return -ENOMEM; - } - - hw->ctl_f.compl_write = 1; - if (first->txd.callback) { - hw->ctl_f.int_en = 1; - if (first != new) { - /* move callback into to last desc */ - new->txd.callback = first->txd.callback; - new->txd.callback_param - = first->txd.callback_param; - first->txd.callback = NULL; - first->txd.callback_param = NULL; - } - } - - new->tx_cnt = desc_count; - new->txd.flags = orig_flags; /* client is in control of this ack */ - - /* store the original values for use in later cleanup */ - if (new != first) { - new->src = first->src; - new->dst = first->dst; - new->len = first->len; - } - - /* cookie incr and addition to used_list must be atomic */ - cookie = ioat->base.common.cookie; - cookie++; - if (cookie < 0) - cookie = 1; - ioat->base.common.cookie = new->txd.cookie = cookie; - - 
ioat->dmacount += desc_count; - ioat->pending += desc_count; - if (ioat->pending >= ioat_pending_level) - __ioat2_dma_memcpy_issue_pending(ioat); - spin_unlock_bh(&ioat->desc_lock); - - return cookie; -} - /** * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair * @ioat: the channel supplying the memory pool for the descriptors @@ -567,17 +424,9 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) } memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); - switch (ioatdma_device->version) { - case IOAT_VER_1_2: - desc_sw->txd.tx_submit = ioat1_tx_submit; - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - desc_sw->txd.tx_submit = ioat2_tx_submit; - break; - } + dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); + desc_sw->txd.tx_submit = ioat1_tx_submit; desc_sw->hw = desc; desc_sw->txd.phys = phys; @@ -587,39 +436,12 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) static int ioat_initial_desc_count = 256; module_param(ioat_initial_desc_count, int, 0644); MODULE_PARM_DESC(ioat_initial_desc_count, - "initial descriptors per channel (default: 256)"); - -/** - * ioat2_dma_massage_chan_desc - link the descriptors into a circle - * @ioat: the channel to be massaged - */ -static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat) -{ - struct ioat_desc_sw *desc, *_desc; - - /* setup used_desc */ - ioat->used_desc.next = ioat->free_desc.next; - ioat->used_desc.prev = NULL; - - /* pull free_desc out of the circle so that every node is a hw - * descriptor, but leave it pointing to the list - */ - ioat->free_desc.prev->next = ioat->free_desc.next; - ioat->free_desc.next->prev = ioat->free_desc.prev; - - /* circle link the hw descriptors */ - desc = to_ioat_desc(ioat->free_desc.next); - desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; - list_for_each_entry_safe(desc, _desc, ioat->free_desc.next, node) { - desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; - } -} - + "ioat1: initial descriptors per channel (default: 256)"); /** - * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors + * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors * @chan: the channel to be filled out */ -static int ioat_dma_alloc_chan_resources(struct dma_chan *c) +static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) { struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_chan_common *chan = &ioat->base; @@ -657,8 +479,6 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *c) spin_lock_bh(&ioat->desc_lock); ioat->desccount = i; list_splice(&tmp_list, &ioat->free_desc); - if (chan->device->version != IOAT_VER_1_2) - ioat2_dma_massage_chan_desc(ioat); spin_unlock_bh(&ioat->desc_lock); /* allocate a completion writeback area */ @@ -674,15 +494,15 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *c) chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); tasklet_enable(&chan->cleanup_task); - ioat_dma_start_null_desc(ioat); /* give chain to dma device */ + ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ return ioat->desccount; } /** - * ioat_dma_free_chan_resources - release all the descriptors + * ioat1_dma_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned */ -static void ioat_dma_free_chan_resources(struct dma_chan *c) +static void ioat1_dma_free_chan_resources(struct dma_chan *c) { struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_chan_common *chan = 
&ioat->base; @@ -697,7 +517,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *c) return; tasklet_disable(&chan->cleanup_task); - ioat_dma_memcpy_cleanup(ioat); + ioat1_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. @@ -707,40 +527,20 @@ static void ioat_dma_free_chan_resources(struct dma_chan *c) mdelay(100); spin_lock_bh(&ioat->desc_lock); - switch (chan->device->version) { - case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, - &ioat->used_desc, node) { - in_use_descs++; - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - list_for_each_entry_safe(desc, _desc, - &ioat->free_desc, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - list_for_each_entry_safe(desc, _desc, - ioat->free_desc.next, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - desc = to_ioat_desc(ioat->free_desc.next); + list_for_each_entry_safe(desc, _desc, + &ioat->used_desc, node) { + in_use_descs++; + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->txd.phys); + kfree(desc); + } + list_for_each_entry_safe(desc, _desc, + &ioat->free_desc, node) { + list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); kfree(desc); - INIT_LIST_HEAD(&ioat->free_desc); - INIT_LIST_HEAD(&ioat->used_desc); - break; } spin_unlock_bh(&ioat->desc_lock); @@ -758,7 +558,6 @@ static void ioat_dma_free_chan_resources(struct dma_chan *c) chan->last_compl_desc_addr_hw = 0; chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; ioat->pending = 0; - ioat->dmacount = 0; ioat->desccount = 0; } @@ -791,86 +590,6 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) return new; } -static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat) -{ - struct ioat_desc_sw *new; - - /* - * used.prev points to where to start processing - * used.next points to next free descriptor - * if used.prev == NULL, there are none waiting to be processed - * if used.next == used.prev.prev, there is only one free descriptor, - * and we need to use it to as a noop descriptor before - * linking in a new set of descriptors, since the device - * has probably already read the pointer to it - */ - if (ioat->used_desc.prev && - ioat->used_desc.next == ioat->used_desc.prev->prev) { - - struct ioat_desc_sw *desc; - struct ioat_desc_sw *noop_desc; - int i; - - /* set up the noop descriptor */ - noop_desc = to_ioat_desc(ioat->used_desc.next); - /* set size to non-zero value (channel returns error when size is 0) */ - noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; - noop_desc->hw->ctl = 0; - noop_desc->hw->ctl_f.null = 1; - noop_desc->hw->src_addr = 0; - noop_desc->hw->dst_addr = 0; - - ioat->used_desc.next = ioat->used_desc.next->next; - ioat->pending++; - ioat->dmacount++; - - /* try to get a few more descriptors */ - for (i = 16; i; i--) { - desc = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); - if (!desc) { - dev_err(to_dev(&ioat->base), - "alloc failed\n"); - break; - } - list_add_tail(&desc->node, ioat->used_desc.next); - - desc->hw->next - = to_ioat_desc(desc->node.next)->txd.phys; - to_ioat_desc(desc->node.prev)->hw->next - = desc->txd.phys; - ioat->desccount++; - } - - ioat->used_desc.next = noop_desc->node.next; - } - 
new = to_ioat_desc(ioat->used_desc.next); - prefetch(new); - ioat->used_desc.next = new->node.next; - - if (ioat->used_desc.prev == NULL) - ioat->used_desc.prev = &new->node; - - prefetch(new->hw); - return new; -} - -static struct ioat_desc_sw * -ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat) -{ - if (!ioat) - return NULL; - - switch (ioat->base.device->version) { - case IOAT_VER_1_2: - return ioat1_dma_get_next_descriptor(ioat); - case IOAT_VER_2_0: - case IOAT_VER_3_0: - return ioat2_dma_get_next_descriptor(ioat); - } - return NULL; -} - static struct dma_async_tx_descriptor * ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) @@ -886,7 +605,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, int tx_cnt = 0; spin_lock_bh(&ioat->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat); + desc = ioat1_dma_get_next_descriptor(ioat); do { if (!desc) break; @@ -909,7 +628,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, struct ioat_desc_sw *next; async_tx_ack(&desc->txd); - next = ioat_dma_get_next_descriptor(ioat); + next = ioat1_dma_get_next_descriptor(ioat); hw->next = next ? next->txd.phys : 0; desc = next; } else @@ -920,8 +639,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, struct ioat_chan_common *chan = &ioat->base; dev_err(to_dev(chan), - "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, ioat->desccount); + "chan%d - get_next_desc failed\n", chan_num(chan)); list_splice(&chain, &ioat->free_desc); spin_unlock_bh(&ioat->desc_lock); return NULL; @@ -940,94 +658,43 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, return &desc->txd; } -static struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_desc_sw *new; - - spin_lock_bh(&ioat->desc_lock); - new = ioat2_dma_get_next_descriptor(ioat); - - /* - * leave ioat->desc_lock set in ioat 2 path - * it will get unlocked at end of tx_submit - */ - - if (new) { - new->len = len; - new->dst = dma_dest; - new->src = dma_src; - new->txd.flags = flags; - return &new->txd; - } else { - struct ioat_chan_common *chan = &ioat->base; - - spin_unlock_bh(&ioat->desc_lock); - dev_err(to_dev(chan), - "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, ioat->desccount); - return NULL; - } -} - -static void ioat_dma_cleanup_tasklet(unsigned long data) +static void ioat1_cleanup_tasklet(unsigned long data) { struct ioat_dma_chan *chan = (void *)data; - ioat_dma_memcpy_cleanup(chan); + ioat1_cleanup(chan); writew(IOAT_CHANCTRL_INT_DISABLE, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } -static void -ioat_dma_unmap(struct ioat_chan_common *chan, struct ioat_desc_sw *desc) +static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, + int direction, enum dma_ctrl_flags flags, bool dst) { - if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - pci_unmap_single(chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - else - pci_unmap_page(chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - } - - if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - 
pci_unmap_single(chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - else - pci_unmap_page(chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - } + if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || + (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) + pci_unmap_single(pdev, addr, len, direction); + else + pci_unmap_page(pdev, addr, len, direction); } -/** - * ioat_dma_memcpy_cleanup - cleanup up finished descriptors - * @chan: ioat channel to be cleaned up - */ -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) + +void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, + size_t len, struct ioat_dma_descriptor *hw) { - struct ioat_chan_common *chan = &ioat->base; - unsigned long phys_complete; - struct ioat_desc_sw *desc, *_desc; - dma_cookie_t cookie = 0; - unsigned long desc_phys; - struct ioat_desc_sw *latest_desc; - struct dma_async_tx_descriptor *tx; + struct pci_dev *pdev = chan->device->pdev; + size_t offset = len - hw->size; - prefetch(chan->completion_virt); + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) + ioat_unmap(pdev, hw->dst_addr - offset, len, + PCI_DMA_FROMDEVICE, flags, 1); - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) + ioat_unmap(pdev, hw->src_addr - offset, len, + PCI_DMA_TODEVICE, flags, 0); +} + +unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) +{ + unsigned long phys_complete; /* The completion writeback can happen at any time, so reads by the driver need to be atomic operations @@ -1051,18 +718,37 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) /* TODO do something to salvage the situation */ } + return phys_complete; +} + +/** + * ioat1_cleanup - cleanup up finished descriptors + * @chan: ioat channel to be cleaned up + */ +static void ioat1_cleanup(struct ioat_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + struct ioat_desc_sw *desc, *_desc; + dma_cookie_t cookie = 0; + struct dma_async_tx_descriptor *tx; + + prefetch(chan->completion_virt); + + if (!spin_trylock_bh(&chan->cleanup_lock)) + return; + + phys_complete = ioat_get_current_completion(chan); if (phys_complete == chan->last_completion) { spin_unlock_bh(&chan->cleanup_lock); /* * perhaps we're stuck so hard that the watchdog can't go off? * try to catch it after 2 seconds */ - if (chan->device->version != IOAT_VER_3_0) { - if (time_after(jiffies, - chan->last_completion_time + HZ*WATCHDOG_DELAY)) { - ioat_dma_chan_watchdog(&(chan->device->work.work)); - chan->last_completion_time = jiffies; - } + if (time_after(jiffies, + chan->last_completion_time + HZ*WATCHDOG_DELAY)) { + ioat1_chan_watchdog(&(chan->device->work.work)); + chan->last_completion_time = jiffies; } return; } @@ -1074,91 +760,42 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) return; } - switch (chan->device->version) { - case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { - tx = &desc->txd; - /* - * Incoming DMA requests may use multiple descriptors, - * due to exceeding xfercap, perhaps. If so, only the - * last one will have a cookie, and require unmapping. 
- */ - if (tx->cookie) { - cookie = tx->cookie; - ioat_dma_unmap(chan, desc); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } + list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { + tx = &desc->txd; + /* + * Incoming DMA requests may use multiple descriptors, + * due to exceeding xfercap, perhaps. If so, only the + * last one will have a cookie, and require unmapping. + */ + if (tx->cookie) { + cookie = tx->cookie; + ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } + } - if (tx->phys != phys_complete) { - /* - * a completed entry, but not the last, so clean - * up if the client is done with the descriptor - */ - if (async_tx_test_ack(tx)) { - list_move_tail(&desc->node, - &ioat->free_desc); - } else - tx->cookie = 0; - } else { - /* - * last used desc. Do not remove, so we can - * append from it, but don't look at it next - * time, either - */ + if (tx->phys != phys_complete) { + /* + * a completed entry, but not the last, so clean + * up if the client is done with the descriptor + */ + if (async_tx_test_ack(tx)) + list_move_tail(&desc->node, &ioat->free_desc); + else tx->cookie = 0; + } else { + /* + * last used desc. Do not remove, so we can + * append from it, but don't look at it next + * time, either + */ + tx->cookie = 0; - /* TODO check status bits? */ - break; - } - } - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - /* has some other thread has already cleaned up? */ - if (ioat->used_desc.prev == NULL) + /* TODO check status bits? */ break; - - /* work backwards to find latest finished desc */ - desc = to_ioat_desc(ioat->used_desc.next); - tx = &desc->txd; - latest_desc = NULL; - do { - desc = to_ioat_desc(desc->node.prev); - desc_phys = (unsigned long)tx->phys - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; - if (desc_phys == phys_complete) { - latest_desc = desc; - break; - } - } while (&desc->node != ioat->used_desc.prev); - - if (latest_desc != NULL) { - /* work forwards to clear finished descriptors */ - for (desc = to_ioat_desc(ioat->used_desc.prev); - &desc->node != latest_desc->node.next && - &desc->node != ioat->used_desc.next; - desc = to_ioat_desc(desc->node.next)) { - if (tx->cookie) { - cookie = tx->cookie; - tx->cookie = 0; - ioat_dma_unmap(chan, desc); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - } - - /* move used.prev up beyond those that are finished */ - if (&desc->node == ioat->used_desc.next) - ioat->used_desc.prev = NULL; - else - ioat->used_desc.prev = &desc->node; } - break; } spin_unlock_bh(&ioat->desc_lock); @@ -1170,50 +807,21 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -/** - * ioat_dma_is_complete - poll the status of a IOAT DMA transaction - * @chan: IOAT DMA channel handle - * @cookie: DMA transaction identifier - * @done: if not %NULL, updated with last completed transaction - * @used: if not %NULL, updated with last used transaction - */ static enum dma_status -ioat_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) { struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_chan_common *chan = &ioat->base; - dma_cookie_t last_used; - dma_cookie_t last_complete; - enum dma_status ret; - - last_used = c->cookie; - last_complete = chan->completed_cookie; - 
chan->watchdog_tcp_cookie = cookie; - - if (done) - *done = last_complete; - if (used) - *used = last_used; - - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) - return ret; - ioat_dma_memcpy_cleanup(ioat); + if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + return DMA_SUCCESS; - last_used = c->cookie; - last_complete = chan->completed_cookie; + ioat1_cleanup(ioat); - if (done) - *done = last_complete; - if (used) - *used = last_used; - - return dma_async_is_complete(cookie, last_complete, last_used); + return ioat_is_complete(c, cookie, done, used); } -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) +static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; @@ -1221,7 +829,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) spin_lock_bh(&ioat->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat); + desc = ioat1_dma_get_next_descriptor(ioat); if (!desc) { dev_err(to_dev(chan), @@ -1240,30 +848,16 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) hw->src_addr = 0; hw->dst_addr = 0; async_tx_ack(&desc->txd); - switch (chan->device->version) { - case IOAT_VER_1_2: - hw->next = 0; - list_add_tail(&desc->node, &ioat->used_desc); + hw->next = 0; + list_add_tail(&desc->node, &ioat->used_desc); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->txd.phys) >> 32, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - ioat->dmacount++; - __ioat2_dma_memcpy_issue_pending(ioat); - break; - } + writeb(IOAT_CHANCMD_START, chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); spin_unlock_bh(&ioat->desc_lock); } @@ -1484,7 +1078,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -static int ioat_probe(struct ioatdma_device *device) +int ioat_probe(struct ioatdma_device *device) { int err = -ENODEV; struct dma_device *dma = &device->common; @@ -1503,17 +1097,15 @@ static int ioat_probe(struct ioatdma_device *device) device->completion_pool = pci_pool_create("completion_pool", pdev, sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES); + if (!device->completion_pool) { err = -ENOMEM; goto err_completion_pool; } - ioat_dma_enumerate_channels(device); + device->enumerate_channels(device); dma_cap_set(DMA_MEMCPY, dma->cap_mask); - dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources; - dma->device_free_chan_resources = ioat_dma_free_chan_resources; - dma->device_is_tx_complete = ioat_dma_is_complete; dma->dev = &pdev->dev; dev_err(dev, "Intel(R) I/OAT DMA Engine found," @@ -1546,7 +1138,7 @@ err_dma_pool: return err; } -static int ioat_register(struct ioatdma_device *device) +int ioat_register(struct ioatdma_device *device) { int err = dma_async_device_register(&device->common); @@ -1580,9 +1172,13 @@ int ioat1_dma_probe(struct ioatdma_device *device, 
int dca) int err; device->intr_quirk = ioat1_intr_quirk; + device->enumerate_channels = ioat1_enumerate_channels; dma = &device->common; dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; + dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; + dma->device_free_chan_resources = ioat1_dma_free_chan_resources; + dma->device_is_tx_complete = ioat1_dma_is_complete; err = ioat_probe(device); if (err) @@ -1594,93 +1190,12 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca) if (dca) device->dca = ioat_dca_init(pdev, device->reg_base); - INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); - schedule_delayed_work(&device->work, WATCHDOG_DELAY); - - return err; -} - -int ioat2_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - int err; - - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; - - err = ioat_probe(device); - if (err) - return err; - ioat_set_tcp_copy_break(2048); - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - if (dca) - device->dca = ioat2_dca_init(pdev, device->reg_base); - - INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); + INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog); schedule_delayed_work(&device->work, WATCHDOG_DELAY); return err; } -int ioat3_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - int err; - u16 dev_id; - - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; - - /* -= IOAT ver.3 workarounds =- */ - /* Write CHANERRMSK_INT with 3E07h to mask out the errors - * that can cause stability issues for IOAT ver.3 - */ - pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); - - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); - - err = ioat_probe(device); - if (err) - return err; - ioat_set_tcp_copy_break(262144); - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - if (dca) - device->dca = ioat3_dca_init(pdev, device->reg_base); - - return err; -} - void ioat_dma_remove(struct ioatdma_device *device) { struct dma_device *dma = &device->common; @@ -1697,4 +1212,3 @@ void ioat_dma_remove(struct ioatdma_device *device) INIT_LIST_HEAD(&dma->channels); } - diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5b31db73ad8e..84065dfa4d40 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -62,6 +62,7 @@ * @idx: per channel data * @dca: direct cache access context * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) + * @enumerate_channels: hw version specific channel enumeration */ 
struct ioatdma_device { @@ -76,6 +77,7 @@ struct ioatdma_device { struct ioat_chan_common *idx[4]; struct dca_provider *dca; void (*intr_quirk)(struct ioatdma_device *device); + int (*enumerate_channels)(struct ioatdma_device *device); }; struct ioat_chan_common { @@ -106,6 +108,7 @@ struct ioat_chan_common { struct tasklet_struct cleanup_task; }; + /** * struct ioat_dma_chan - internal representation of a DMA channel */ @@ -119,7 +122,6 @@ struct ioat_dma_chan { struct list_head used_desc; int pending; - u16 dmacount; u16 desccount; }; @@ -135,6 +137,33 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) return container_of(chan, struct ioat_dma_chan, base); } +/** + * ioat_is_complete - poll the status of an ioat transaction + * @c: channel handle + * @cookie: transaction identifier + * @done: if set, updated with last completed transaction + * @used: if set, updated with last used transaction + */ +static inline enum dma_status +ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct ioat_chan_common *chan = to_chan_common(c); + dma_cookie_t last_used; + dma_cookie_t last_complete; + + last_used = c->cookie; + last_complete = chan->completed_cookie; + chan->watchdog_tcp_cookie = cookie; + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + /* wrapper around hardware descriptor format + additional software fields */ /** @@ -162,11 +191,22 @@ static inline void ioat_set_tcp_copy_break(unsigned long copybreak) #endif } +static inline struct ioat_chan_common * +ioat_chan_by_index(struct ioatdma_device *device, int index) +{ + return device->idx[index]; +} + +int ioat_probe(struct ioatdma_device *device); +int ioat_register(struct ioatdma_device *device); int ioat1_dma_probe(struct ioatdma_device *dev, int dca); -int ioat2_dma_probe(struct ioatdma_device *dev, int dca); -int ioat3_dma_probe(struct ioatdma_device *dev, int dca); void ioat_dma_remove(struct ioatdma_device *device); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); +void ioat_init_channel(struct ioatdma_device *device, + struct ioat_chan_common *chan, int idx, + work_func_t work_fn, void (*tasklet)(unsigned long), + unsigned long tasklet_data); +void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, + size_t len, struct ioat_dma_descriptor *hw); #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c new file mode 100644 index 000000000000..49ba1c73d95e --- /dev/null +++ b/drivers/dma/ioat/dma_v2.c @@ -0,0 +1,750 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2009 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +/* + * This driver supports an Intel I/OAT DMA engine (versions >= 2), which + * does asynchronous data movement and checksumming operations. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dma.h" +#include "dma_v2.h" +#include "registers.h" +#include "hw.h" + +static int ioat_ring_alloc_order = 8; +module_param(ioat_ring_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_alloc_order, + "ioat2+: allocate 2^n descriptors per channel (default: n=8)"); + +static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) +{ + void * __iomem reg_base = ioat->base.reg_base; + + ioat->pending = 0; + ioat->dmacount += ioat2_ring_pending(ioat); + ioat->issued = ioat->head; + /* make descriptor updates globally visible before notifying channel */ + wmb(); + writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + +} + +static void ioat2_issue_pending(struct dma_chan *chan) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); + + spin_lock_bh(&ioat->ring_lock); + if (ioat->pending == 1) + __ioat2_issue_pending(ioat); + spin_unlock_bh(&ioat->ring_lock); +} + +/** + * ioat2_update_pending - log pending descriptors + * @ioat: ioat2+ channel + * + * set pending to '1' unless pending is already set to '2', pending == 2 + * indicates that submission is temporarily blocked due to an in-flight + * reset. If we are already above the ioat_pending_level threshold then + * just issue pending. 
+ * + * called with ring_lock held + */ +static void ioat2_update_pending(struct ioat2_dma_chan *ioat) +{ + if (unlikely(ioat->pending == 2)) + return; + else if (ioat2_ring_pending(ioat) > ioat_pending_level) + __ioat2_issue_pending(ioat); + else + ioat->pending = 1; +} + +static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) +{ + void __iomem *reg_base = ioat->base.reg_base; + struct ioat_ring_ent *desc; + struct ioat_dma_descriptor *hw; + int idx; + + if (ioat2_ring_space(ioat) < 1) { + dev_err(to_dev(&ioat->base), + "Unable to start null desc - ring full\n"); + return; + } + + idx = ioat2_desc_alloc(ioat, 1); + desc = ioat2_get_ring_ent(ioat, idx); + + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.compl_write = 1; + /* set size to non-zero value (channel returns error when size is 0) */ + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; + async_tx_ack(&desc->txd); + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, + reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->txd.phys) >> 32, + reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + __ioat2_issue_pending(ioat); +} + +static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) +{ + spin_lock_bh(&ioat->ring_lock); + __ioat2_start_null_desc(ioat); + spin_unlock_bh(&ioat->ring_lock); +} + +static void ioat2_cleanup(struct ioat2_dma_chan *ioat); + +/** + * ioat2_reset_part2 - reinit the channel after a reset + */ +static void ioat2_reset_part2(struct work_struct *work) +{ + struct ioat_chan_common *chan; + struct ioat2_dma_chan *ioat; + + chan = container_of(work, struct ioat_chan_common, work.work); + ioat = container_of(chan, struct ioat2_dma_chan, base); + + /* ensure that ->tail points to the stalled descriptor + * (ioat->pending is set to 2 at this point so no new + * descriptors will be issued while we perform this cleanup) + */ + ioat2_cleanup(ioat); + + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->ring_lock); + + /* set the tail to be re-issued */ + ioat->issued = ioat->tail; + ioat->dmacount = 0; + + if (ioat2_ring_pending(ioat)) { + struct ioat_ring_ent *desc; + + desc = ioat2_get_ring_ent(ioat, ioat->tail); + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->txd.phys) >> 32, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + __ioat2_issue_pending(ioat); + } else + __ioat2_start_null_desc(ioat); + + spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&chan->cleanup_lock); + + dev_info(to_dev(chan), + "chan%d reset - %d descs waiting, %d total desc\n", + chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order); +} + +/** + * ioat2_reset_channel - restart a channel + * @ioat: IOAT DMA channel handle + */ +static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) +{ + u32 chansts, chanerr; + struct ioat_chan_common *chan = &ioat->base; + u16 active; + + spin_lock_bh(&ioat->ring_lock); + active = ioat2_ring_active(ioat); + spin_unlock_bh(&ioat->ring_lock); + if (!active) + return; + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + chansts = (chan->completion_virt->low + & IOAT_CHANSTS_DMA_TRANSFER_STATUS); + if (chanerr) { + dev_err(to_dev(chan), + "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", + chan_num(chan), chansts, chanerr); + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + } + + spin_lock_bh(&ioat->ring_lock); + ioat->pending = 2; + writeb(IOAT_CHANCMD_RESET, + chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); + 
spin_unlock_bh(&ioat->ring_lock); + schedule_delayed_work(&chan->work, RESET_DELAY); +} + +/** + * ioat2_chan_watchdog - watch for stuck channels + */ +static void ioat2_chan_watchdog(struct work_struct *work) +{ + struct ioatdma_device *device = + container_of(work, struct ioatdma_device, work.work); + struct ioat2_dma_chan *ioat; + struct ioat_chan_common *chan; + u16 active; + int i; + + for (i = 0; i < device->common.chancnt; i++) { + chan = ioat_chan_by_index(device, i); + ioat = container_of(chan, struct ioat2_dma_chan, base); + + /* + * for version 2.0 if there are descriptors yet to be processed + * and the last completed hasn't changed since the last watchdog + * if they haven't hit the pending level + * issue the pending to push them through + * else + * try resetting the channel + */ + spin_lock_bh(&ioat->ring_lock); + active = ioat2_ring_active(ioat); + spin_unlock_bh(&ioat->ring_lock); + + if (active && + chan->last_completion && + chan->last_completion == chan->watchdog_completion) { + + if (ioat->pending == 1) + ioat2_issue_pending(&chan->common); + else { + ioat2_reset_channel(ioat); + chan->watchdog_completion = 0; + } + } else { + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_completion = chan->last_completion; + } + chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; + } + schedule_delayed_work(&device->work, WATCHDOG_DELAY); +} + +/** + * ioat2_cleanup - clean finished descriptors (advance tail pointer) + * @chan: ioat channel to be cleaned up + */ +static void ioat2_cleanup(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + struct ioat_ring_ent *desc; + bool seen_current = false; + u16 active; + int i; + struct dma_async_tx_descriptor *tx; + + prefetch(chan->completion_virt); + + spin_lock_bh(&chan->cleanup_lock); + phys_complete = ioat_get_current_completion(chan); + if (phys_complete == chan->last_completion) { + spin_unlock_bh(&chan->cleanup_lock); + /* + * perhaps we're stuck so hard that the watchdog can't go off? + * try to catch it after WATCHDOG_DELAY seconds + */ + if (chan->device->version < IOAT_VER_3_0) { + unsigned long tmo; + + tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY; + if (time_after(jiffies, tmo)) { + ioat2_chan_watchdog(&(chan->device->work.work)); + chan->last_completion_time = jiffies; + } + } + return; + } + chan->last_completion_time = jiffies; + + spin_lock_bh(&ioat->ring_lock); + + active = ioat2_ring_active(ioat); + for (i = 0; i < active && !seen_current; i++) { + prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); + desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + tx = &desc->txd; + if (tx->cookie) { + ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + chan->completed_cookie = tx->cookie; + tx->cookie = 0; + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } + + if (tx->phys == phys_complete) + seen_current = true; + } + ioat->tail += i; + BUG_ON(!seen_current); /* no active descs have written a completion? 
*/ + spin_unlock_bh(&ioat->ring_lock); + + chan->last_completion = phys_complete; + + spin_unlock_bh(&chan->cleanup_lock); +} + +static void ioat2_cleanup_tasklet(unsigned long data) +{ + struct ioat2_dma_chan *ioat = (void *) data; + + ioat2_cleanup(ioat); + writew(IOAT_CHANCTRL_INT_DISABLE, + ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); +} + +/** + * ioat2_enumerate_channels - find and initialize the device's channels + * @device: the device to be enumerated + */ +static int ioat2_enumerate_channels(struct ioatdma_device *device) +{ + struct ioat2_dma_chan *ioat; + struct device *dev = &device->pdev->dev; + struct dma_device *dma = &device->common; + u8 xfercap_log; + int i; + + INIT_LIST_HEAD(&dma->channels); + dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); + if (xfercap_log == 0) + return 0; + + /* FIXME which i/oat version is i7300? */ +#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL + if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) + dma->chancnt--; +#endif + for (i = 0; i < dma->chancnt; i++) { + ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); + if (!ioat) + break; + + ioat_init_channel(device, &ioat->base, i, + ioat2_reset_part2, + ioat2_cleanup_tasklet, + (unsigned long) ioat); + ioat->xfercap_log = xfercap_log; + spin_lock_init(&ioat->ring_lock); + } + dma->chancnt = i; + return i; +} + +static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *c = tx->chan; + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + dma_cookie_t cookie = c->cookie; + + cookie++; + if (cookie < 0) + cookie = 1; + tx->cookie = cookie; + c->cookie = cookie; + ioat2_update_pending(ioat); + spin_unlock_bh(&ioat->ring_lock); + + return cookie; +} + +static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan) +{ + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + struct ioatdma_device *dma; + dma_addr_t phys; + + dma = to_ioatdma_device(chan->device); + hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys); + if (!hw) + return NULL; + memset(hw, 0, sizeof(*hw)); + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) { + pci_pool_free(dma->dma_pool, hw, phys); + return NULL; + } + + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = ioat2_tx_submit_unlock; + desc->hw = hw; + desc->txd.phys = phys; + return desc; +} + +static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) +{ + struct ioatdma_device *dma; + + dma = to_ioatdma_device(chan->device); + pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); + kfree(desc); +} + +/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring + * @chan: channel to be initialized + */ +static int ioat2_alloc_chan_resources(struct dma_chan *c) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioat_ring_ent **ring; + u16 chanctrl; + u32 chanerr; + int descs; + int i; + + /* have we already been set up? 
*/ + if (ioat->ring) + return 1 << ioat->alloc_order; + + /* Setup register to interrupt and write completion status on error */ + chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | + IOAT_CHANCTRL_ERR_COMPLETION_EN; + writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + if (chanerr) { + dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + } + + /* allocate a completion writeback area */ + /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ + chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, + &chan->completion_addr); + if (!chan->completion_virt) + return -ENOMEM; + + memset(chan->completion_virt, 0, + sizeof(*chan->completion_virt)); + writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64) chan->completion_addr) >> 32, + chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); + + ioat->alloc_order = ioat_get_alloc_order(); + descs = 1 << ioat->alloc_order; + + /* allocate the array to hold the software ring */ + ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + for (i = 0; i < descs; i++) { + ring[i] = ioat2_alloc_ring_ent(c); + if (!ring[i]) { + while (i--) + ioat2_free_ring_ent(ring[i], c); + kfree(ring); + return -ENOMEM; + } + } + + /* link descs */ + for (i = 0; i < descs-1; i++) { + struct ioat_ring_ent *next = ring[i+1]; + struct ioat_dma_descriptor *hw = ring[i]->hw; + + hw->next = next->txd.phys; + } + ring[i]->hw->next = ring[0]->txd.phys; + + spin_lock_bh(&ioat->ring_lock); + ioat->ring = ring; + ioat->head = 0; + ioat->issued = 0; + ioat->tail = 0; + ioat->pending = 0; + spin_unlock_bh(&ioat->ring_lock); + + tasklet_enable(&chan->cleanup_task); + ioat2_start_null_desc(ioat); + + return descs; +} + +/** + * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops + * @idx: gets starting descriptor index on successful allocation + * @ioat: ioat2,3 channel (ring) to operate on + * @num_descs: allocation length + */ +static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) +{ + struct ioat_chan_common *chan = &ioat->base; + + spin_lock_bh(&ioat->ring_lock); + if (unlikely(ioat2_ring_space(ioat) < num_descs)) { + if (printk_ratelimit()) + dev_dbg(to_dev(chan), + "%s: ring full! 
num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat->head, ioat->tail, + ioat->issued); + spin_unlock_bh(&ioat->ring_lock); + + /* do direct reclaim in the allocation failure case */ + ioat2_cleanup(ioat); + + return -ENOMEM; + } + + dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat->head, ioat->tail, ioat->issued); + + *idx = ioat2_desc_alloc(ioat, num_descs); + return 0; /* with ioat->ring_lock held */ +} + +static struct dma_async_tx_descriptor * +ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + dma_addr_t dst = dma_dest; + dma_addr_t src = dma_src; + size_t total_len = len; + int num_descs; + u16 idx; + int i; + + num_descs = ioat2_xferlen_to_descs(ioat, len); + if (likely(num_descs) && + ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) + /* pass */; + else + return NULL; + for (i = 0; i < num_descs; i++) { + size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log); + + desc = ioat2_get_ring_ent(ioat, idx + i); + hw = desc->hw; + + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dst; + + len -= copy; + dst += copy; + src += copy; + } + + desc->txd.flags = flags; + desc->len = total_len; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + /* we leave the channel locked to ensure in order submission */ + + return &desc->txd; +} + +/** + * ioat2_free_chan_resources - release all the descriptors + * @chan: the channel to be cleaned + */ +static void ioat2_free_chan_resources(struct dma_chan *c) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *ioatdma_device = chan->device; + struct ioat_ring_ent *desc; + const u16 total_descs = 1 << ioat->alloc_order; + int descs; + int i; + + /* Before freeing channel resources first check + * if they have been previously allocated for this channel. + */ + if (!ioat->ring) + return; + + tasklet_disable(&chan->cleanup_task); + ioat2_cleanup(ioat); + + /* Delay 100ms after reset to allow internal DMA logic to quiesce + * before removing DMA descriptor resources. 
+ */ + writeb(IOAT_CHANCMD_RESET, + chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); + mdelay(100); + + spin_lock_bh(&ioat->ring_lock); + descs = ioat2_ring_space(ioat); + for (i = 0; i < descs; i++) { + desc = ioat2_get_ring_ent(ioat, ioat->head + i); + ioat2_free_ring_ent(desc, c); + } + + if (descs < total_descs) + dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", + total_descs - descs); + + for (i = 0; i < total_descs - descs; i++) { + desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + ioat2_free_ring_ent(desc, c); + } + + kfree(ioat->ring); + ioat->ring = NULL; + ioat->alloc_order = 0; + pci_pool_free(ioatdma_device->completion_pool, + chan->completion_virt, + chan->completion_addr); + spin_unlock_bh(&ioat->ring_lock); + + chan->last_completion = 0; + chan->completion_addr = 0; + ioat->pending = 0; + ioat->dmacount = 0; + chan->watchdog_completion = 0; + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_tcp_cookie = 0; + chan->watchdog_last_tcp_cookie = 0; +} + +static enum dma_status +ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + + if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + return DMA_SUCCESS; + + ioat2_cleanup(ioat); + + return ioat_is_complete(c, cookie, done, used); +} + +int ioat2_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *c; + struct ioat_chan_common *chan; + int err; + + device->enumerate_channels = ioat2_enumerate_channels; + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; + dma->device_issue_pending = ioat2_issue_pending; + dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; + dma->device_free_chan_resources = ioat2_free_chan_resources; + dma->device_is_tx_complete = ioat2_is_complete; + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(2048); + + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); + writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, + chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat2_dca_init(pdev, device->reg_base); + + INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog); + schedule_delayed_work(&device->work, WATCHDOG_DELAY); + + return err; +} + +int ioat3_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *c; + struct ioat_chan_common *chan; + int err; + u16 dev_id; + + device->enumerate_channels = ioat2_enumerate_channels; + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; + dma->device_issue_pending = ioat2_issue_pending; + dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; + dma->device_free_chan_resources = ioat2_free_chan_resources; + dma->device_is_tx_complete = ioat2_is_complete; + + /* -= IOAT ver.3 workarounds =- */ + /* Write CHANERRMSK_INT with 3E07h to mask out the errors + * that can cause stability issues for IOAT ver.3 + */ + pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) + pci_write_config_dword(pdev, 
IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(262144); + + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); + writel(IOAT_DMA_DCA_ANY_CPU, + chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat3_dca_init(pdev, device->reg_base); + + return err; +} diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h new file mode 100644 index 000000000000..94a553eacdbd --- /dev/null +++ b/drivers/dma/ioat/dma_v2.h @@ -0,0 +1,131 @@ +/* + * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef IOATDMA_V2_H +#define IOATDMA_V2_H + +#include +#include "dma.h" +#include "hw.h" + + +extern int ioat_pending_level; + +/* + * workaround for IOAT ver.3.0 null descriptor issue + * (channel returns error when size is 0) + */ +#define NULL_DESC_BUFFER_SIZE 1 + +#define IOAT_MAX_ORDER 16 +#define ioat_get_alloc_order() \ + (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) + +/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes + * @base: common ioat channel parameters + * @xfercap_log; log2 of channel max transfer length (for fast division) + * @head: allocated index + * @issued: hardware notification point + * @tail: cleanup index + * @pending: lock free indicator for issued != head + * @dmacount: identical to 'head' except for occasionally resetting to zero + * @alloc_order: log2 of the number of allocated descriptors + * @ring: software ring buffer implementation of hardware ring + * @ring_lock: protects ring attributes + */ +struct ioat2_dma_chan { + struct ioat_chan_common base; + size_t xfercap_log; + u16 head; + u16 issued; + u16 tail; + u16 dmacount; + u16 alloc_order; + int pending; + struct ioat_ring_ent **ring; + spinlock_t ring_lock; +}; + +static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) +{ + struct ioat_chan_common *chan = to_chan_common(c); + + return container_of(chan, struct ioat2_dma_chan, base); +} + +static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) +{ + return (1 << ioat->alloc_order) - 1; +} + +/* count of descriptors in flight with the engine */ +static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) +{ + return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); +} + +/* count of descriptors pending submission to hardware */ +static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) +{ + return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); +} + +static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) +{ + u16 num_descs = ioat2_ring_mask(ioat) + 1; + u16 active = ioat2_ring_active(ioat); 
+ + BUG_ON(active > num_descs); + + return num_descs - active; +} + +/* assumes caller already checked space */ +static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len) +{ + ioat->head += len; + return ioat->head - len; +} + +static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) +{ + u16 num_descs = len >> ioat->xfercap_log; + + num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); + return num_descs; +} + +struct ioat_ring_ent { + struct ioat_dma_descriptor *hw; + struct dma_async_tx_descriptor txd; + size_t len; +}; + +static inline struct ioat_ring_ent * +ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) +{ + return ioat->ring[idx & ioat2_ring_mask(ioat)]; +} + +int ioat2_dma_probe(struct ioatdma_device *dev, int dca); +int ioat3_dma_probe(struct ioatdma_device *dev, int dca); +struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); +struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +#endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 55414d88ac1b..c4e432269252 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -31,6 +31,7 @@ #include #include #include "dma.h" +#include "dma_v2.h" #include "registers.h" #include "hw.h" -- cgit v1.2.3 From 38e12f64a165e83617c21dae3c15972fd8d639f5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:00:46 -0700 Subject: ioat1: kill unused unmap parameters The unified ioat1/ioat2 ioat_dma_unmap() implementation derives the source and dest addresses from the unmap descriptor. There is no longer a need to track this information in struct ioat_desc_sw. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 2 -- drivers/dma/ioat/dma.h | 2 -- 2 files changed, 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 64b4d75059aa..696d4de3bb8f 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -648,8 +648,6 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, desc->txd.flags = flags; desc->tx_cnt = tx_cnt; - desc->src = dma_src; - desc->dst = dma_dest; desc->len = total_len; list_splice(&chain, &desc->txd.tx_list); hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 84065dfa4d40..fa15e77652a0 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -179,8 +179,6 @@ struct ioat_desc_sw { struct list_head node; int tx_cnt; size_t len; - dma_addr_t src; - dma_addr_t dst; struct dma_async_tx_descriptor txd; }; -- cgit v1.2.3 From 6df9183a153291a2585a8dfe67597fc18c201147 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:00:55 -0700 Subject: ioat: add some dev_dbg() calls Provide some output for debugging the driver. 
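For context, the DEBUG-only descriptor identifier introduced below uses a standard compile-out pattern: the id field and its accessors vanish entirely when DEBUG is not defined, so release builds pay no size or runtime cost. A minimal standalone sketch of that pattern follows (plain C, not the driver code; the struct desc and main() here are invented purely for illustration, only the set_desc_id()/desc_id() shapes mirror the patch):

#include <stdio.h>

struct desc {
	int payload;
#ifdef DEBUG
	int id;			/* identifier for debug output only */
#endif
};

#ifdef DEBUG
#define set_desc_id(desc, i)	((desc)->id = (i))
#define desc_id(desc)		((desc)->id)
#else
#define set_desc_id(desc, i)	/* compiles away */
#define desc_id(desc)		(0)
#endif

int main(void)
{
	struct desc d = { .payload = 42 };

	set_desc_id(&d, 7);
	/* with -DDEBUG this prints the real id, otherwise always 0 */
	printf("desc[%d]: payload=%d\n", desc_id(&d), d.payload);
	return 0;
}

Built without -DDEBUG the struct shrinks and desc_id() folds to a constant, which is the same trade-off the dump_desc_dbg() helper below relies on.
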
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 29 ++++++++++++++++++++++++++--- drivers/dma/ioat/dma.h | 28 ++++++++++++++++++++++++++++ drivers/dma/ioat/dma_v2.c | 25 ++++++++++++++++++++++++- drivers/dma/ioat/dma_v2.h | 3 +++ 4 files changed, 81 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 696d4de3bb8f..edf4f5e5de73 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -134,6 +134,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); + dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) @@ -167,6 +168,8 @@ __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) { void __iomem *reg_base = ioat->base.reg_base; + dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n", + __func__, ioat->pending); ioat->pending = 0; writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); } @@ -251,6 +254,7 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) if (!ioat->used_desc.prev) return; + dev_dbg(to_dev(chan), "%s\n", __func__); chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); chansts = (chan->completion_virt->low & IOAT_CHANSTS_DMA_TRANSFER_STATUS); @@ -382,6 +386,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) cookie = 1; c->cookie = cookie; tx->cookie = cookie; + dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); /* write address into NextDescriptor field of last desc in chain */ first = to_ioat_desc(tx->tx_list.next); @@ -390,6 +395,8 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) wmb(); chain_tail->hw->next = first->txd.phys; list_splice_tail_init(&tx->tx_list, &ioat->used_desc); + dump_desc_dbg(ioat, chain_tail); + dump_desc_dbg(ioat, first); ioat->pending += desc->tx_cnt; if (ioat->pending >= ioat_pending_level) @@ -429,6 +436,7 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) desc_sw->txd.tx_submit = ioat1_tx_submit; desc_sw->hw = desc; desc_sw->txd.phys = phys; + set_desc_id(desc_sw, -1); return desc_sw; } @@ -474,6 +482,7 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) dev_err(to_dev(chan), "Only %d initial descriptors\n", i); break; } + set_desc_id(desc, i); list_add_tail(&desc->node, &tmp_list); } spin_lock_bh(&ioat->desc_lock); @@ -495,6 +504,8 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) tasklet_enable(&chan->cleanup_task); ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ + dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", + __func__, ioat->desccount); return ioat->desccount; } @@ -527,8 +538,10 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) mdelay(100); spin_lock_bh(&ioat->desc_lock); - list_for_each_entry_safe(desc, _desc, - &ioat->used_desc, node) { + list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { + dev_dbg(to_dev(chan), "%s: freeing %d from used list\n", + __func__, desc_id(desc)); + dump_desc_dbg(ioat, desc); in_use_descs++; list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, @@ -585,7 +598,8 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) return NULL; } } - + dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n", + __func__, 
desc_id(new)); prefetch(new->hw); return new; } @@ -630,6 +644,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, async_tx_ack(&desc->txd); next = ioat1_dma_get_next_descriptor(ioat); hw->next = next ? next->txd.phys : 0; + dump_desc_dbg(ioat, desc); desc = next; } else hw->next = 0; @@ -652,6 +667,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, list_splice(&chain, &desc->txd.tx_list); hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; + dump_desc_dbg(ioat, desc); return &desc->txd; } @@ -707,6 +723,9 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; #endif + dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, + (unsigned long long) phys_complete); + if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { @@ -758,6 +777,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) return; } + dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", + __func__, phys_complete); list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { tx = &desc->txd; /* @@ -765,6 +786,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) * due to exceeding xfercap, perhaps. If so, only the * last one will have a cookie, and require unmapping. */ + dump_desc_dbg(ioat, desc); if (tx->cookie) { cookie = tx->cookie; ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); @@ -848,6 +870,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) async_tx_ack(&desc->txd); hw->next = 0; list_add_tail(&desc->node, &ioat->used_desc); + dump_desc_dbg(ioat, desc); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index fa15e77652a0..9f9edc2cd079 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -173,6 +173,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, * or attached to a transaction list (async_tx.tx_list) * @tx_cnt: number of descriptors required to complete the transaction * @txd: the generic software descriptor for all engines + * @id: identifier for debug */ struct ioat_desc_sw { struct ioat_dma_descriptor *hw; @@ -180,8 +181,35 @@ struct ioat_desc_sw { int tx_cnt; size_t len; struct dma_async_tx_descriptor txd; + #ifdef DEBUG + int id; + #endif }; +#ifdef DEBUG +#define set_desc_id(desc, i) ((desc)->id = (i)) +#define desc_id(desc) ((desc)->id) +#else +#define set_desc_id(desc, i) +#define desc_id(desc) (0) +#endif + +static inline void +__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, + struct dma_async_tx_descriptor *tx, int id) +{ + struct device *dev = to_dev(chan); + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" + " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, + (unsigned long long) tx->phys, + (unsigned long long) hw->next, tx->cookie, tx->flags, + hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); +} + +#define dump_desc_dbg(c, d) \ + ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) + static inline void ioat_set_tcp_copy_break(unsigned long copybreak) { #ifdef CONFIG_NET_DMA diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 49ba1c73d95e..58881860f400 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -54,7 +54,9 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) /* make descriptor 
updates globally visible before notifying channel */ wmb(); writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - + dev_dbg(to_dev(&ioat->base), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); } static void ioat2_issue_pending(struct dma_chan *chan) @@ -101,6 +103,8 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) return; } + dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued); idx = ioat2_desc_alloc(ioat, 1); desc = ioat2_get_ring_ent(ioat, idx); @@ -118,6 +122,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) reg_base + IOAT2_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + dump_desc_dbg(ioat, desc); __ioat2_issue_pending(ioat); } @@ -154,6 +159,10 @@ static void ioat2_reset_part2(struct work_struct *work) ioat->issued = ioat->tail; ioat->dmacount = 0; + dev_dbg(to_dev(&ioat->base), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); + if (ioat2_ring_pending(ioat)) { struct ioat_ring_ent *desc; @@ -221,6 +230,8 @@ static void ioat2_chan_watchdog(struct work_struct *work) u16 active; int i; + dev_dbg(&device->pdev->dev, "%s\n", __func__); + for (i = 0; i < device->common.chancnt; i++) { chan = ioat_chan_by_index(device, i); ioat = container_of(chan, struct ioat2_dma_chan, base); @@ -295,11 +306,15 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) spin_lock_bh(&ioat->ring_lock); + dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued); + active = ioat2_ring_active(ioat); for (i = 0; i < active && !seen_current; i++) { prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); desc = ioat2_get_ring_ent(ioat, ioat->tail + i); tx = &desc->txd; + dump_desc_dbg(ioat, desc); if (tx->cookie) { ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); chan->completed_cookie = tx->cookie; @@ -348,6 +363,7 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device) xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); if (xfercap_log == 0) return 0; + dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); /* FIXME which i/oat version is i7300? 
*/ #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL @@ -381,6 +397,8 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) cookie = 1; tx->cookie = cookie; c->cookie = cookie; + dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); + ioat2_update_pending(ioat); spin_unlock_bh(&ioat->ring_lock); @@ -480,6 +498,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) kfree(ring); return -ENOMEM; } + set_desc_id(ring[i], i); } /* link descs */ @@ -571,12 +590,14 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, len -= copy; dst += copy; src += copy; + dump_desc_dbg(ioat, desc); } desc->txd.flags = flags; desc->len = total_len; hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; + dump_desc_dbg(ioat, desc); /* we leave the channel locked to ensure in order submission */ return &desc->txd; @@ -614,6 +635,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) spin_lock_bh(&ioat->ring_lock); descs = ioat2_ring_space(ioat); + dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); for (i = 0; i < descs; i++) { desc = ioat2_get_ring_ent(ioat, ioat->head + i); ioat2_free_ring_ent(desc, c); @@ -625,6 +647,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) for (i = 0; i < total_descs - descs; i++) { desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + dump_desc_dbg(ioat, desc); ioat2_free_ring_ent(desc, c); } diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 94a553eacdbd..c72ccb5dfd5b 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -116,6 +116,9 @@ struct ioat_ring_ent { struct ioat_dma_descriptor *hw; struct dma_async_tx_descriptor txd; size_t len; + #ifdef DEBUG + int id; + #endif }; static inline struct ioat_ring_ent * -- cgit v1.2.3 From 4fb9b9e8d55880523db550043dfb204696dd0422 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:04 -0700 Subject: ioat: cleanup completion status reads The cleanup path makes an effort to only perform an atomic read of the 64-bit completion address. However in the 32-bit case it does not matter if we read the upper-32 and lower-32 non-atomically because the upper-32 will always be zero. 
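To illustrate the reasoning (this is a standalone sketch, not the driver code): when every ring descriptor sits below 4GB, the upper half of the hardware's 64-bit completion writeback is always zero, so two separate 32-bit loads recover the same completed-descriptor address as a single atomic 64-bit load. The mask value mirrors IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL); the little-endian half ordering is assumed, as on the x86 platforms that carry I/OAT.

#include <stdint.h>
#include <stdio.h>

#define COMPLETED_DESC_ADDR_MASK (~0x3fULL)

/* 64-bit CPU: one naturally atomic load */
static uint64_t read_completion_atomic(const volatile uint64_t *completion)
{
	return *completion & COMPLETED_DESC_ADDR_MASK;
}

/* 32-bit CPU: two loads; safe because bits 63:32 stay zero while the
 * descriptor ring lives below 4GB (little-endian half order assumed) */
static uint64_t read_completion_split(const volatile uint64_t *completion)
{
	const volatile uint32_t *half = (const volatile uint32_t *)completion;
	uint32_t low = half[0];
	uint32_t high = half[1];	/* expected to be 0 */

	return (((uint64_t)high << 32) | low) & COMPLETED_DESC_ADDR_MASK;
}

int main(void)
{
	/* hardware writes the descriptor address plus status bits [5:0];
	 * 0x7ffff000 is a made-up sub-4GB, 64-byte-aligned address */
	volatile uint64_t completion = 0x7ffff000ULL | 0x1;

	printf("atomic: %#llx\n",
	       (unsigned long long)read_completion_atomic(&completion));
	printf("split : %#llx\n",
	       (unsigned long long)read_completion_split(&completion));
	return 0;
}

Both helpers print 0x7ffff000, which is why the patch can drop the BITS_PER_LONG special case and always dereference *chan->completion directly.
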
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 78 ++++++++++++++++---------------------------- drivers/dma/ioat/dma.h | 10 ++---- drivers/dma/ioat/dma_v2.c | 25 +++++++------- drivers/dma/ioat/registers.h | 8 ++--- 4 files changed, 46 insertions(+), 75 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index edf4f5e5de73..08417ad4edca 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -201,8 +201,7 @@ static void ioat1_reset_part2(struct work_struct *work) spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->desc_lock); - chan->completion_virt->low = 0; - chan->completion_virt->high = 0; + *chan->completion = 0; ioat->pending = 0; /* count the descriptors waiting */ @@ -256,8 +255,7 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) dev_dbg(to_dev(chan), "%s\n", __func__); chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); - chansts = (chan->completion_virt->low - & IOAT_CHANSTS_DMA_TRANSFER_STATUS); + chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; if (chanerr) { dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", @@ -293,14 +291,8 @@ static void ioat1_chan_watchdog(struct work_struct *work) struct ioat_dma_chan *ioat; struct ioat_chan_common *chan; int i; - - union { - u64 full; - struct { - u32 low; - u32 high; - }; - } completion_hw; + u64 completion; + u32 completion_low; unsigned long compl_desc_addr_hw; for (i = 0; i < device->common.chancnt; i++) { @@ -334,25 +326,24 @@ static void ioat1_chan_watchdog(struct work_struct *work) * try resetting the channel */ - completion_hw.low = readl(chan->reg_base + + /* we need to read the low address first as this + * causes the chipset to latch the upper bits + * for the subsequent read + */ + completion_low = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); - completion_hw.high = readl(chan->reg_base + + completion = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); -#if (BITS_PER_LONG == 64) - compl_desc_addr_hw = - completion_hw.full - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; -#else - compl_desc_addr_hw = - completion_hw.low & IOAT_LOW_COMPLETION_MASK; -#endif + completion <<= 32; + completion |= completion_low; + compl_desc_addr_hw = completion & + IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; if ((compl_desc_addr_hw != 0) && (compl_desc_addr_hw != chan->watchdog_completion) && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { chan->last_compl_desc_addr_hw = compl_desc_addr_hw; - chan->completion_virt->low = completion_hw.low; - chan->completion_virt->high = completion_hw.high; + *chan->completion = completion; } else { ioat1_reset_channel(ioat); chan->watchdog_completion = 0; @@ -492,14 +483,12 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) /* allocate a completion writeback area */ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, - &chan->completion_addr); - memset(chan->completion_virt, 0, - sizeof(*chan->completion_virt)); - writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + chan->completion = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, &chan->completion_dma); + memset(chan->completion, 0, sizeof(*chan->completion)); + writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) chan->completion_addr) >> 32, + writel(((u64) 
chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); tasklet_enable(&chan->cleanup_task); @@ -558,15 +547,16 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) spin_unlock_bh(&ioat->desc_lock); pci_pool_free(ioatdma_device->completion_pool, - chan->completion_virt, - chan->completion_addr); + chan->completion, + chan->completion_dma); /* one is ok since we left it on there on purpose */ if (in_use_descs > 1) dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", in_use_descs - 1); - chan->last_completion = chan->completion_addr = 0; + chan->last_completion = 0; + chan->completion_dma = 0; chan->watchdog_completion = 0; chan->last_compl_desc_addr_hw = 0; chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; @@ -709,25 +699,15 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) { unsigned long phys_complete; + u64 completion; - /* The completion writeback can happen at any time, - so reads by the driver need to be atomic operations - The descriptor physical addresses are limited to 32-bits - when the CPU can only do a 32-bit mov */ - -#if (BITS_PER_LONG == 64) - phys_complete = - chan->completion_virt->full - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; -#else - phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; -#endif + completion = *chan->completion; + phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, (unsigned long long) phys_complete); - if ((chan->completion_virt->full - & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == + if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", readl(chan->reg_base + IOAT_CHANERR_OFFSET)); @@ -750,7 +730,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) dma_cookie_t cookie = 0; struct dma_async_tx_descriptor *tx; - prefetch(chan->completion_virt); + prefetch(chan->completion); if (!spin_trylock_bh(&chan->cleanup_lock)) return; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 9f9edc2cd079..5fd6e2de84db 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -96,14 +96,8 @@ struct ioat_chan_common { struct ioatdma_device *device; struct dma_chan common; - dma_addr_t completion_addr; - union { - u64 full; /* HW completion writeback */ - struct { - u32 low; - u32 high; - }; - } *completion_virt; + dma_addr_t completion_dma; + u64 *completion; unsigned long last_compl_desc_addr_hw; struct tasklet_struct cleanup_task; }; diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 58881860f400..ca1134249341 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -200,8 +200,7 @@ static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) return; chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - chansts = (chan->completion_virt->low - & IOAT_CHANSTS_DMA_TRANSFER_STATUS); + chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; if (chanerr) { dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", @@ -281,7 +280,7 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) int i; struct dma_async_tx_descriptor *tx; - prefetch(chan->completion_virt); + prefetch(chan->completion); spin_lock_bh(&chan->cleanup_lock); phys_complete = ioat_get_current_completion(chan); @@ -470,17 +469,15 @@ static int ioat2_alloc_chan_resources(struct 
dma_chan *c) /* allocate a completion writeback area */ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, - &chan->completion_addr); - if (!chan->completion_virt) + chan->completion = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, &chan->completion_dma); + if (!chan->completion) return -ENOMEM; - memset(chan->completion_virt, 0, - sizeof(*chan->completion_virt)); - writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + memset(chan->completion, 0, sizeof(*chan->completion)); + writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) chan->completion_addr) >> 32, + writel(((u64) chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); ioat->alloc_order = ioat_get_alloc_order(); @@ -655,12 +652,12 @@ static void ioat2_free_chan_resources(struct dma_chan *c) ioat->ring = NULL; ioat->alloc_order = 0; pci_pool_free(ioatdma_device->completion_pool, - chan->completion_virt, - chan->completion_addr); + chan->completion, + chan->completion_dma); spin_unlock_bh(&ioat->ring_lock); chan->last_completion = 0; - chan->completion_addr = 0; + chan->completion_dma = 0; ioat->pending = 0; ioat->dmacount = 0; chan->watchdog_completion = 0; diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 49bc277424f8..a83c7332125c 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -94,10 +94,10 @@ #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) -#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F -#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 -#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 +#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) +#define IOAT_CHANSTS_SOFT_ERR 0x10ULL +#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL +#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x7ULL #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 -- cgit v1.2.3 From bb3207863014c7310593146f11fbc6573eab43c8 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:14 -0700 Subject: ioat: ignore reserved bits for chancnt and xfercap Don't trust that the reserved bits are always zero, also sanity check the returned value. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 7 +++++++ drivers/dma/ioat/dma_v2.c | 7 +++++++ 2 files changed, 14 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 08417ad4edca..5173ba97ba31 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -132,7 +132,14 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) INIT_LIST_HEAD(&dma->channels); dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + dma->chancnt &= 0x1f; /* bits [4:0] valid */ + if (dma->chancnt > ARRAY_SIZE(device->idx)) { + dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", + dma->chancnt, ARRAY_SIZE(device->idx)); + dma->chancnt = ARRAY_SIZE(device->idx); + } xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); + xfercap_scale &= 0x1f; /* bits [4:0] valid */ xfercap = (xfercap_scale == 0 ? 
-1 : (1UL << xfercap_scale)); dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index ca1134249341..137cf879265f 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -359,7 +359,14 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device) INIT_LIST_HEAD(&dma->channels); dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + dma->chancnt &= 0x1f; /* bits [4:0] valid */ + if (dma->chancnt > ARRAY_SIZE(device->idx)) { + dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", + dma->chancnt, ARRAY_SIZE(device->idx)); + dma->chancnt = ARRAY_SIZE(device->idx); + } xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); + xfercap_log &= 0x1f; /* bits [4:0] valid */ if (xfercap_log == 0) return 0; dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); -- cgit v1.2.3 From f6ab95b55735fa03cad8d0f966647e5df206e207 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:21 -0700 Subject: ioat: preserve chanctrl bits when re-arming interrupts The register write in ioat_dma_cleanup_tasklet is unfortunate in two ways: 1/ It clears the extra 'enable' bits that we set at alloc_chan_resources time 2/ It gives the impression that it disables interrupts when it is in fact re-arming interrupts [ Impact: fix, persist the value of the chanctrl register when re-arming ] Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 10 +++------- drivers/dma/ioat/dma_v2.c | 8 ++------ drivers/dma/ioat/registers.h | 6 +++++- 3 files changed, 10 insertions(+), 14 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5173ba97ba31..6dd0af194b8a 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -452,7 +452,6 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; - u16 chanctrl; u32 chanerr; int i; LIST_HEAD(tmp_list); @@ -462,10 +461,7 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) return ioat->desccount; /* Setup register to interrupt and write completion status on error */ - chanctrl = IOAT_CHANCTRL_ERR_INT_EN | - IOAT_CHANCTRL_ANY_ERR_ABORT_EN | - IOAT_CHANCTRL_ERR_COMPLETION_EN; - writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { @@ -672,9 +668,9 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, static void ioat1_cleanup_tasklet(unsigned long data) { struct ioat_dma_chan *chan = (void *)data; + ioat1_cleanup(chan); - writew(IOAT_CHANCTRL_INT_DISABLE, - chan->base.reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 137cf879265f..2f34f290041e 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -341,8 +341,7 @@ static void ioat2_cleanup_tasklet(unsigned long data) struct ioat2_dma_chan *ioat = (void *) data; ioat2_cleanup(ioat); - writew(IOAT_CHANCTRL_INT_DISABLE, - ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } /** @@ -454,7 +453,6 @@ static int ioat2_alloc_chan_resources(struct 
dma_chan *c) struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent **ring; - u16 chanctrl; u32 chanerr; int descs; int i; @@ -464,9 +462,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) return 1 << ioat->alloc_order; /* Setup register to interrupt and write completion status on error */ - chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | - IOAT_CHANCTRL_ERR_COMPLETION_EN; - writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index a83c7332125c..4380f6fbf056 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -75,7 +75,11 @@ #define IOAT_CHANCTRL_ERR_INT_EN 0x0010 #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 -#define IOAT_CHANCTRL_INT_DISABLE 0x0001 +#define IOAT_CHANCTRL_INT_REARM 0x0001 +#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ + IOAT_CHANCTRL_ERR_COMPLETION_EN |\ + IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\ + IOAT_CHANCTRL_ERR_INT_EN) #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ -- cgit v1.2.3 From 345d852391cf3fdc73f23a9ca522c6e7b5eb5a52 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:30 -0700 Subject: ioat: ___devinit annotate the initialization paths Mark all single use initialization routines with __devinit. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dca.c | 9 ++++++--- drivers/dma/ioat/dma.c | 12 ++++++------ drivers/dma/ioat/dma.h | 11 ++++++----- drivers/dma/ioat/dma_v2.c | 4 ++-- drivers/dma/ioat/dma_v2.h | 8 ++++---- 5 files changed, 24 insertions(+), 20 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index af1c762dd9d0..69d02615c4d6 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = { }; -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider * __devinit +ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; @@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) return slots; } -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider * __devinit +ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; @@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) return slots; } -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider * __devinit +ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 6dd0af194b8a..abc96c4c0796 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -870,7 +870,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) */ #define IOAT_TEST_SIZE 2000 -static void ioat_dma_test_callback(void *dma_async_param) +static void __devinit ioat_dma_test_callback(void *dma_async_param) { struct completion *cmp = 
dma_async_param; @@ -881,7 +881,7 @@ static void ioat_dma_test_callback(void *dma_async_param) * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. * @device: device to be tested */ -static int ioat_dma_self_test(struct ioatdma_device *device) +static int __devinit ioat_dma_self_test(struct ioatdma_device *device) { int i; u8 *src; @@ -1082,7 +1082,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -int ioat_probe(struct ioatdma_device *device) +int __devinit ioat_probe(struct ioatdma_device *device) { int err = -ENODEV; struct dma_device *dma = &device->common; @@ -1142,7 +1142,7 @@ err_dma_pool: return err; } -int ioat_register(struct ioatdma_device *device) +int __devinit ioat_register(struct ioatdma_device *device) { int err = dma_async_device_register(&device->common); @@ -1169,7 +1169,7 @@ static void ioat1_intr_quirk(struct ioatdma_device *device) pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); } -int ioat1_dma_probe(struct ioatdma_device *device, int dca) +int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; @@ -1200,7 +1200,7 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca) return err; } -void ioat_dma_remove(struct ioatdma_device *device) +void __devexit ioat_dma_remove(struct ioatdma_device *device) { struct dma_device *dma = &device->common; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5fd6e2de84db..e47083b52ee7 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -217,11 +217,12 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) return device->idx[index]; } -int ioat_probe(struct ioatdma_device *device); -int ioat_register(struct ioatdma_device *device); -int ioat1_dma_probe(struct ioatdma_device *dev, int dca); -void ioat_dma_remove(struct ioatdma_device *device); -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); +int __devinit ioat_probe(struct ioatdma_device *device); +int __devinit ioat_register(struct ioatdma_device *device); +int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); +void __devexit ioat_dma_remove(struct ioatdma_device *device); +struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, + void __iomem *iobase); unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 2f34f290041e..1aa2974e7a93 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -683,7 +683,7 @@ ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, return ioat_is_complete(c, cookie, done, used); } -int ioat2_dma_probe(struct ioatdma_device *device, int dca) +int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; @@ -722,7 +722,7 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca) return err; } -int ioat3_dma_probe(struct ioatdma_device *device, int dca) +int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index c72ccb5dfd5b..bdde5373cf66 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -127,8 +127,8 @@ 
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) return ioat->ring[idx & ioat2_ring_mask(ioat)]; } -int ioat2_dma_probe(struct ioatdma_device *dev, int dca); -int ioat3_dma_probe(struct ioatdma_device *dev, int dca); -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); +int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); +struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); +struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); #endif /* IOATDMA_V2_H */ -- cgit v1.2.3 From ad643f54c8514998333bc6c7b201fda2267496be Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:38 -0700 Subject: ioat1: trim ioat_dma_desc_sw Save 4 bytes per software descriptor by transmitting tx_cnt in an unused portion of the hardware descriptor. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 4 ++-- drivers/dma/ioat/dma.h | 2 -- drivers/dma/ioat/hw.h | 6 +++++- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index abc96c4c0796..f59b6f42f866 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -396,7 +396,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) dump_desc_dbg(ioat, chain_tail); dump_desc_dbg(ioat, first); - ioat->pending += desc->tx_cnt; + ioat->pending += desc->hw->tx_cnt; if (ioat->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat); spin_unlock_bh(&ioat->desc_lock); @@ -655,11 +655,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, spin_unlock_bh(&ioat->desc_lock); desc->txd.flags = flags; - desc->tx_cnt = tx_cnt; desc->len = total_len; list_splice(&chain, &desc->txd.tx_list); hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; + hw->tx_cnt = tx_cnt; dump_desc_dbg(ioat, desc); return &desc->txd; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index e47083b52ee7..ec851cf5345c 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -165,14 +165,12 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, * @hw: hardware DMA descriptor * @node: this descriptor will either be on the free list, * or attached to a transaction list (async_tx.tx_list) - * @tx_cnt: number of descriptors required to complete the transaction * @txd: the generic software descriptor for all engines * @id: identifier for debug */ struct ioat_desc_sw { struct ioat_dma_descriptor *hw; struct list_head node; - int tx_cnt; size_t len; struct dma_async_tx_descriptor txd; #ifdef DEBUG diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index e13f3ed47763..7481fb13ce00 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -63,7 +63,11 @@ struct ioat_dma_descriptor { uint64_t next; uint64_t rsv1; uint64_t rsv2; - uint64_t user1; + /* store some driver data in an unused portion of the descriptor */ + union { + uint64_t user1; + uint64_t tx_cnt; + }; uint64_t user2; }; #endif -- cgit v1.2.3 From 09c8a5b85e5f1e74a19bdd7c85547429d51df1cd Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:49 -0700 Subject: ioat: switch watchdog and reset handler from workqueue to timer In order to support dynamic resizing of the descriptor ring or polling for a descriptor in 
the presence of a hung channel the reset handler needs to make progress while in a non-preemptible context. The current workqueue implementation precludes polling channel reset completion under spin_lock(). This conversion also allows us to return to opportunistic cleanup in the ioat2 case as the timer implementation guarantees at least one cleanup after every descriptor is submitted. This means the worst case completion latency becomes the timer frequency (for exceptional circumstances), but with the benefit of avoiding busy waiting when the lock is contended. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 351 +++++++++++++++++-------------------------- drivers/dma/ioat/dma.h | 112 +++++++++++--- drivers/dma/ioat/dma_v2.c | 321 +++++++++++++++++---------------------- drivers/dma/ioat/dma_v2.h | 10 ++ drivers/dma/ioat/registers.h | 22 +-- 5 files changed, 388 insertions(+), 428 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index f59b6f42f866..17a518d0386f 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -99,23 +99,26 @@ static void ioat1_cleanup_tasklet(unsigned long data); /* common channel initialization */ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx, - work_func_t work_fn, void (*tasklet)(unsigned long), - unsigned long tasklet_data) + void (*timer_fn)(unsigned long), + void (*tasklet)(unsigned long), + unsigned long ioat) { struct dma_device *dma = &device->common; chan->device = device; chan->reg_base = device->reg_base + (0x80 * (idx + 1)); - INIT_DELAYED_WORK(&chan->work, work_fn); spin_lock_init(&chan->cleanup_lock); chan->common.device = dma; list_add_tail(&chan->common.device_node, &dma->channels); device->idx[idx] = chan; - tasklet_init(&chan->cleanup_task, tasklet, tasklet_data); + init_timer(&chan->timer); + chan->timer.function = timer_fn; + chan->timer.data = ioat; + tasklet_init(&chan->cleanup_task, tasklet, ioat); tasklet_disable(&chan->cleanup_task); } -static void ioat1_reset_part2(struct work_struct *work); +static void ioat1_timer_event(unsigned long data); /** * ioat1_dma_enumerate_channels - find and initialize the device's channels @@ -153,7 +156,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) break; ioat_init_channel(device, &ioat->base, i, - ioat1_reset_part2, + ioat1_timer_event, ioat1_cleanup_tasklet, (unsigned long) ioat); ioat->xfercap = xfercap; @@ -192,61 +195,6 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) } } -/** - * ioat1_reset_part2 - reinit the channel after a reset - */ -static void ioat1_reset_part2(struct work_struct *work) -{ - struct ioat_chan_common *chan; - struct ioat_dma_chan *ioat; - struct ioat_desc_sw *desc; - int dmacount; - bool start_null = false; - - chan = container_of(work, struct ioat_chan_common, work.work); - ioat = container_of(chan, struct ioat_dma_chan, base); - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->desc_lock); - - *chan->completion = 0; - ioat->pending = 0; - - /* count the descriptors waiting */ - dmacount = 0; - if (ioat->used_desc.prev) { - desc = to_ioat_desc(ioat->used_desc.prev); - do { - dmacount++; - desc = to_ioat_desc(desc->node.next); - } while (&desc->node != ioat->used_desc.next); - } - - if (dmacount) { - /* - * write the new starting descriptor address - * this puts channel engine into ARMED state - */ - desc = to_ioat_desc(ioat->used_desc.prev); - writel(((u64) desc->txd.phys) & 
0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); - } else - start_null = true; - spin_unlock_bh(&ioat->desc_lock); - spin_unlock_bh(&chan->cleanup_lock); - - dev_err(to_dev(chan), - "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(chan), dmacount, ioat->desccount); - - if (start_null) - ioat1_dma_start_null_desc(ioat); -} - /** * ioat1_reset_channel - restart a channel * @ioat: IOAT DMA channel handle @@ -257,12 +205,9 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) void __iomem *reg_base = chan->reg_base; u32 chansts, chanerr; - if (!ioat->used_desc.prev) - return; - - dev_dbg(to_dev(chan), "%s\n", __func__); + dev_warn(to_dev(chan), "reset\n"); chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); - chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; + chansts = *chan->completion & IOAT_CHANSTS_STATUS; if (chanerr) { dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", @@ -278,93 +223,11 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) * while we're waiting. */ - spin_lock_bh(&ioat->desc_lock); ioat->pending = INT_MIN; writeb(IOAT_CHANCMD_RESET, reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - spin_unlock_bh(&ioat->desc_lock); - - /* schedule the 2nd half instead of sleeping a long time */ - schedule_delayed_work(&chan->work, RESET_DELAY); -} - -/** - * ioat1_chan_watchdog - watch for stuck channels - */ -static void ioat1_chan_watchdog(struct work_struct *work) -{ - struct ioatdma_device *device = - container_of(work, struct ioatdma_device, work.work); - struct ioat_dma_chan *ioat; - struct ioat_chan_common *chan; - int i; - u64 completion; - u32 completion_low; - unsigned long compl_desc_addr_hw; - - for (i = 0; i < device->common.chancnt; i++) { - chan = ioat_chan_by_index(device, i); - ioat = container_of(chan, struct ioat_dma_chan, base); - - if (/* have we started processing anything yet */ - chan->last_completion - /* have we completed any since last watchdog cycle? */ - && (chan->last_completion == chan->watchdog_completion) - /* has TCP stuck on one cookie since last watchdog? */ - && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) - && (chan->watchdog_tcp_cookie != chan->completed_cookie) - /* is there something in the chain to be processed? */ - /* CB1 chain always has at least the last one processed */ - && (ioat->used_desc.prev != ioat->used_desc.next) - && ioat->pending == 0) { - - /* - * check CHANSTS register for completed - * descriptor address. - * if it is different than completion writeback, - * it is not zero - * and it has changed since the last watchdog - * we can assume that channel - * is still working correctly - * and the problem is in completion writeback. 
- * update completion writeback - * with actual CHANSTS value - * else - * try resetting the channel - */ - - /* we need to read the low address first as this - * causes the chipset to latch the upper bits - * for the subsequent read - */ - completion_low = readl(chan->reg_base + - IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); - completion = readl(chan->reg_base + - IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); - completion <<= 32; - completion |= completion_low; - compl_desc_addr_hw = completion & - IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; - - if ((compl_desc_addr_hw != 0) - && (compl_desc_addr_hw != chan->watchdog_completion) - && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { - chan->last_compl_desc_addr_hw = compl_desc_addr_hw; - *chan->completion = completion; - } else { - ioat1_reset_channel(ioat); - chan->watchdog_completion = 0; - chan->last_compl_desc_addr_hw = 0; - } - } else { - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_completion = chan->last_completion; - } - - chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; - } - - schedule_delayed_work(&device->work, WATCHDOG_DELAY); + set_bit(IOAT_RESET_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + RESET_DELAY); } static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) @@ -372,6 +235,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) struct dma_chan *c = tx->chan; struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); + struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *first; struct ioat_desc_sw *chain_tail; dma_cookie_t cookie; @@ -396,6 +260,9 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) dump_desc_dbg(ioat, chain_tail); dump_desc_dbg(ioat, first); + if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + ioat->pending += desc->hw->tx_cnt; if (ioat->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat); @@ -520,6 +387,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) return; tasklet_disable(&chan->cleanup_task); + del_timer_sync(&chan->timer); ioat1_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce @@ -560,9 +428,6 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) chan->last_completion = 0; chan->completion_dma = 0; - chan->watchdog_completion = 0; - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; ioat->pending = 0; ioat->desccount = 0; } @@ -705,15 +570,15 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) u64 completion; completion = *chan->completion; - phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; + phys_complete = ioat_chansts_to_addr(completion); dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, (unsigned long long) phys_complete); - if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == - IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { + if (is_ioat_halted(completion)) { + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", - readl(chan->reg_base + IOAT_CHANERR_OFFSET)); + chanerr); /* TODO do something to salvage the situation */ } @@ -721,48 +586,31 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) return phys_complete; } -/** - * ioat1_cleanup - cleanup up finished descriptors - * @chan: ioat channel to be cleaned up - 
*/ -static void ioat1_cleanup(struct ioat_dma_chan *ioat) +bool ioat_cleanup_preamble(struct ioat_chan_common *chan, + unsigned long *phys_complete) { - struct ioat_chan_common *chan = &ioat->base; - unsigned long phys_complete; - struct ioat_desc_sw *desc, *_desc; - dma_cookie_t cookie = 0; - struct dma_async_tx_descriptor *tx; - - prefetch(chan->completion); - - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; + *phys_complete = ioat_get_current_completion(chan); + if (*phys_complete == chan->last_completion) + return false; + clear_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - phys_complete = ioat_get_current_completion(chan); - if (phys_complete == chan->last_completion) { - spin_unlock_bh(&chan->cleanup_lock); - /* - * perhaps we're stuck so hard that the watchdog can't go off? - * try to catch it after 2 seconds - */ - if (time_after(jiffies, - chan->last_completion_time + HZ*WATCHDOG_DELAY)) { - ioat1_chan_watchdog(&(chan->device->work.work)); - chan->last_completion_time = jiffies; - } - return; - } - chan->last_completion_time = jiffies; + return true; +} - cookie = 0; - if (!spin_trylock_bh(&ioat->desc_lock)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } +static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) +{ + struct ioat_chan_common *chan = &ioat->base; + struct list_head *_desc, *n; + struct dma_async_tx_descriptor *tx; dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", __func__, phys_complete); - list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { + list_for_each_safe(_desc, n, &ioat->used_desc) { + struct ioat_desc_sw *desc; + + prefetch(n); + desc = list_entry(_desc, typeof(*desc), node); tx = &desc->txd; /* * Incoming DMA requests may use multiple descriptors, @@ -771,7 +619,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) */ dump_desc_dbg(ioat, desc); if (tx->cookie) { - cookie = tx->cookie; + chan->completed_cookie = tx->cookie; + tx->cookie = 0; ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); if (tx->callback) { tx->callback(tx->callback_param); @@ -786,27 +635,110 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) */ if (async_tx_test_ack(tx)) list_move_tail(&desc->node, &ioat->free_desc); - else - tx->cookie = 0; } else { /* * last used desc. Do not remove, so we can - * append from it, but don't look at it next - * time, either + * append from it. */ - tx->cookie = 0; + + /* if nothing else is pending, cancel the + * completion timeout + */ + if (n == &ioat->used_desc) { + dev_dbg(to_dev(chan), + "%s cancel completion timeout\n", + __func__); + clear_bit(IOAT_COMPLETION_PENDING, &chan->state); + } /* TODO check status bits? */ break; } } + chan->last_completion = phys_complete; +} + +/** + * ioat1_cleanup - cleanup up finished descriptors + * @chan: ioat channel to be cleaned up + * + * To prevent lock contention we defer cleanup when the locks are + * contended with a terminal timeout that forces cleanup and catches + * completion notification errors. 
+ */ +static void ioat1_cleanup(struct ioat_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + + prefetch(chan->completion); + + if (!spin_trylock_bh(&chan->cleanup_lock)) + return; + + if (!ioat_cleanup_preamble(chan, &phys_complete)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + if (!spin_trylock_bh(&ioat->desc_lock)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + __cleanup(ioat, phys_complete); + spin_unlock_bh(&ioat->desc_lock); + spin_unlock_bh(&chan->cleanup_lock); +} - chan->last_completion = phys_complete; - if (cookie != 0) - chan->completed_cookie = cookie; +static void ioat1_timer_event(unsigned long data) +{ + struct ioat_dma_chan *ioat = (void *) data; + struct ioat_chan_common *chan = &ioat->base; + dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); + + spin_lock_bh(&chan->cleanup_lock); + if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { + struct ioat_desc_sw *desc; + + spin_lock_bh(&ioat->desc_lock); + + /* restart active descriptors */ + desc = to_ioat_desc(ioat->used_desc.prev); + ioat_set_chainaddr(ioat, desc->txd.phys); + ioat_start(chan); + + ioat->pending = 0; + set_bit(IOAT_COMPLETION_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + spin_unlock_bh(&ioat->desc_lock); + } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { + unsigned long phys_complete; + + spin_lock_bh(&ioat->desc_lock); + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) + ioat1_reset_channel(ioat); + else { + u64 status = ioat_chansts(chan); + + /* manually update the last completion address */ + if (ioat_chansts_to_addr(status) != 0) + *chan->completion = status; + + set_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + } + spin_unlock_bh(&ioat->desc_lock); + } spin_unlock_bh(&chan->cleanup_lock); } @@ -855,13 +787,8 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) list_add_tail(&desc->node, &ioat->used_desc); dump_desc_dbg(ioat, desc); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); + ioat_set_chainaddr(ioat, desc->txd.phys); + ioat_start(chan); spin_unlock_bh(&ioat->desc_lock); } @@ -1194,9 +1121,6 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) if (dca) device->dca = ioat_dca_init(pdev, device->reg_base); - INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog); - schedule_delayed_work(&device->work, WATCHDOG_DELAY); - return err; } @@ -1204,9 +1128,6 @@ void __devexit ioat_dma_remove(struct ioatdma_device *device) { struct dma_device *dma = &device->common; - if (device->version != IOAT_VER_3_0) - cancel_delayed_work(&device->work); - ioat_disable_interrupts(device); dma_async_device_unregister(dma); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index ec851cf5345c..dbfccac3e80c 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -23,6 +23,7 @@ #include #include "hw.h" +#include "registers.h" #include #include #include @@ -33,7 +34,6 @@ #define IOAT_LOW_COMPLETION_MASK 
0xffffffc0 #define IOAT_DMA_DCA_ANY_CPU ~0 -#define IOAT_WATCHDOG_PERIOD (2 * HZ) #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) @@ -42,9 +42,6 @@ #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) -#define RESET_DELAY msecs_to_jiffies(100) -#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) - /* * workaround for IOAT ver.3.0 null descriptor issue * (channel returns error when size is 0) @@ -72,7 +69,6 @@ struct ioatdma_device { struct pci_pool *completion_pool; struct dma_device common; u8 version; - struct delayed_work work; struct msix_entry msix_entries[4]; struct ioat_chan_common *idx[4]; struct dca_provider *dca; @@ -81,24 +77,21 @@ struct ioatdma_device { }; struct ioat_chan_common { + struct dma_chan common; void __iomem *reg_base; - unsigned long last_completion; - unsigned long last_completion_time; - spinlock_t cleanup_lock; dma_cookie_t completed_cookie; - unsigned long watchdog_completion; - int watchdog_tcp_cookie; - u32 watchdog_last_tcp_cookie; - struct delayed_work work; - + unsigned long state; + #define IOAT_COMPLETION_PENDING 0 + #define IOAT_COMPLETION_ACK 1 + #define IOAT_RESET_PENDING 2 + struct timer_list timer; + #define COMPLETION_TIMEOUT msecs_to_jiffies(100) + #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *device; - struct dma_chan common; - dma_addr_t completion_dma; u64 *completion; - unsigned long last_compl_desc_addr_hw; struct tasklet_struct cleanup_task; }; @@ -148,7 +141,6 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, last_used = c->cookie; last_complete = chan->completed_cookie; - chan->watchdog_tcp_cookie = cookie; if (done) *done = last_complete; @@ -215,6 +207,85 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) return device->idx[index]; } +static inline u64 ioat_chansts(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + u64 status; + u32 status_lo; + + /* We need to read the low address first as this causes the + * chipset to latch the upper bits for the subsequent read + */ + status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); + status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); + status <<= 32; + status |= status_lo; + + return status; +} + +static inline void ioat_start(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + + writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); +} + +static inline u64 ioat_chansts_to_addr(u64 status) +{ + return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; +} + +static inline u32 ioat_chanerr(struct ioat_chan_common *chan) +{ + return readl(chan->reg_base + IOAT_CHANERR_OFFSET); +} + +static inline void ioat_suspend(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + + writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); +} + +static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) +{ + struct ioat_chan_common *chan = &ioat->base; + + writel(addr & 0x00000000FFFFFFFF, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + writel(addr >> 32, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); +} + +static inline bool is_ioat_active(unsigned long status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); +} + +static inline bool is_ioat_idle(unsigned long status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); +} + +static inline bool is_ioat_halted(unsigned long 
status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); +} + +static inline bool is_ioat_suspended(unsigned long status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); +} + +/* channel was fatally programmed */ +static inline bool is_ioat_bug(unsigned long err) +{ + return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR| + IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR| + IOAT_CHANERR_LENGTH_ERR)); +} + int __devinit ioat_probe(struct ioatdma_device *device); int __devinit ioat_register(struct ioatdma_device *device); int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); @@ -224,8 +295,11 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx, - work_func_t work_fn, void (*tasklet)(unsigned long), - unsigned long tasklet_data); + void (*timer_fn)(unsigned long), + void (*tasklet)(unsigned long), + unsigned long ioat); void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); +bool ioat_cleanup_preamble(struct ioat_chan_common *chan, + unsigned long *phys_complete); #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 1aa2974e7a93..72e59a0d0f2e 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -49,7 +49,7 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) void * __iomem reg_base = ioat->base.reg_base; ioat->pending = 0; - ioat->dmacount += ioat2_ring_pending(ioat); + ioat->dmacount += ioat2_ring_pending(ioat);; ioat->issued = ioat->head; /* make descriptor updates globally visible before notifying channel */ wmb(); @@ -92,7 +92,6 @@ static void ioat2_update_pending(struct ioat2_dma_chan *ioat) static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) { - void __iomem *reg_base = ioat->base.reg_base; struct ioat_ring_ent *desc; struct ioat_dma_descriptor *hw; int idx; @@ -118,10 +117,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) hw->src_addr = 0; hw->dst_addr = 0; async_tx_ack(&desc->txd); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + ioat2_set_chainaddr(ioat, desc->txd.phys); dump_desc_dbg(ioat, desc); __ioat2_issue_pending(ioat); } @@ -133,177 +129,14 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) spin_unlock_bh(&ioat->ring_lock); } -static void ioat2_cleanup(struct ioat2_dma_chan *ioat); - -/** - * ioat2_reset_part2 - reinit the channel after a reset - */ -static void ioat2_reset_part2(struct work_struct *work) -{ - struct ioat_chan_common *chan; - struct ioat2_dma_chan *ioat; - - chan = container_of(work, struct ioat_chan_common, work.work); - ioat = container_of(chan, struct ioat2_dma_chan, base); - - /* ensure that ->tail points to the stalled descriptor - * (ioat->pending is set to 2 at this point so no new - * descriptors will be issued while we perform this cleanup) - */ - ioat2_cleanup(ioat); - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->ring_lock); - - /* set the tail to be re-issued */ - ioat->issued = ioat->tail; - ioat->dmacount = 0; - - dev_dbg(to_dev(&ioat->base), - "%s: head: %#x tail: %#x issued: %#x count: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued, 
ioat->dmacount); - - if (ioat2_ring_pending(ioat)) { - struct ioat_ring_ent *desc; - - desc = ioat2_get_ring_ent(ioat, ioat->tail); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - __ioat2_issue_pending(ioat); - } else - __ioat2_start_null_desc(ioat); - - spin_unlock_bh(&ioat->ring_lock); - spin_unlock_bh(&chan->cleanup_lock); - - dev_info(to_dev(chan), - "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order); -} - -/** - * ioat2_reset_channel - restart a channel - * @ioat: IOAT DMA channel handle - */ -static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) +static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) { - u32 chansts, chanerr; struct ioat_chan_common *chan = &ioat->base; - u16 active; - - spin_lock_bh(&ioat->ring_lock); - active = ioat2_ring_active(ioat); - spin_unlock_bh(&ioat->ring_lock); - if (!active) - return; - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; - if (chanerr) { - dev_err(to_dev(chan), - "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", - chan_num(chan), chansts, chanerr); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - } - - spin_lock_bh(&ioat->ring_lock); - ioat->pending = 2; - writeb(IOAT_CHANCMD_RESET, - chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); - spin_unlock_bh(&ioat->ring_lock); - schedule_delayed_work(&chan->work, RESET_DELAY); -} - -/** - * ioat2_chan_watchdog - watch for stuck channels - */ -static void ioat2_chan_watchdog(struct work_struct *work) -{ - struct ioatdma_device *device = - container_of(work, struct ioatdma_device, work.work); - struct ioat2_dma_chan *ioat; - struct ioat_chan_common *chan; - u16 active; - int i; - - dev_dbg(&device->pdev->dev, "%s\n", __func__); - - for (i = 0; i < device->common.chancnt; i++) { - chan = ioat_chan_by_index(device, i); - ioat = container_of(chan, struct ioat2_dma_chan, base); - - /* - * for version 2.0 if there are descriptors yet to be processed - * and the last completed hasn't changed since the last watchdog - * if they haven't hit the pending level - * issue the pending to push them through - * else - * try resetting the channel - */ - spin_lock_bh(&ioat->ring_lock); - active = ioat2_ring_active(ioat); - spin_unlock_bh(&ioat->ring_lock); - - if (active && - chan->last_completion && - chan->last_completion == chan->watchdog_completion) { - - if (ioat->pending == 1) - ioat2_issue_pending(&chan->common); - else { - ioat2_reset_channel(ioat); - chan->watchdog_completion = 0; - } - } else { - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_completion = chan->last_completion; - } - chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; - } - schedule_delayed_work(&device->work, WATCHDOG_DELAY); -} - -/** - * ioat2_cleanup - clean finished descriptors (advance tail pointer) - * @chan: ioat channel to be cleaned up - */ -static void ioat2_cleanup(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - unsigned long phys_complete; + struct dma_async_tx_descriptor *tx; struct ioat_ring_ent *desc; bool seen_current = false; u16 active; int i; - struct dma_async_tx_descriptor *tx; - - prefetch(chan->completion); - - spin_lock_bh(&chan->cleanup_lock); - phys_complete = ioat_get_current_completion(chan); - if (phys_complete == 
chan->last_completion) { - spin_unlock_bh(&chan->cleanup_lock); - /* - * perhaps we're stuck so hard that the watchdog can't go off? - * try to catch it after WATCHDOG_DELAY seconds - */ - if (chan->device->version < IOAT_VER_3_0) { - unsigned long tmo; - - tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY; - if (time_after(jiffies, tmo)) { - ioat2_chan_watchdog(&(chan->device->work.work)); - chan->last_completion_time = jiffies; - } - } - return; - } - chan->last_completion_time = jiffies; - - spin_lock_bh(&ioat->ring_lock); dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); @@ -329,10 +162,42 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) } ioat->tail += i; BUG_ON(!seen_current); /* no active descs have written a completion? */ - spin_unlock_bh(&ioat->ring_lock); chan->last_completion = phys_complete; + if (ioat->head == ioat->tail) { + dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", + __func__); + clear_bit(IOAT_COMPLETION_PENDING, &chan->state); + } +} + +/** + * ioat2_cleanup - clean finished descriptors (advance tail pointer) + * @chan: ioat channel to be cleaned up + */ +static void ioat2_cleanup(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + prefetch(chan->completion); + + if (!spin_trylock_bh(&chan->cleanup_lock)) + return; + + if (!ioat_cleanup_preamble(chan, &phys_complete)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + if (!spin_trylock_bh(&ioat->ring_lock)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + __cleanup(ioat, phys_complete); + + spin_unlock_bh(&ioat->ring_lock); spin_unlock_bh(&chan->cleanup_lock); } @@ -344,6 +209,90 @@ static void ioat2_cleanup_tasklet(unsigned long data) writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } +static void __restart_chan(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + + /* set the tail to be re-issued */ + ioat->issued = ioat->tail; + ioat->dmacount = 0; + set_bit(IOAT_COMPLETION_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + + dev_dbg(to_dev(chan), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); + + if (ioat2_ring_pending(ioat)) { + struct ioat_ring_ent *desc; + + desc = ioat2_get_ring_ent(ioat, ioat->tail); + ioat2_set_chainaddr(ioat, desc->txd.phys); + __ioat2_issue_pending(ioat); + } else + __ioat2_start_null_desc(ioat); +} + +static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + u32 status; + + status = ioat_chansts(chan); + if (is_ioat_active(status) || is_ioat_idle(status)) + ioat_suspend(chan); + while (is_ioat_active(status) || is_ioat_idle(status)) { + status = ioat_chansts(chan); + cpu_relax(); + } + + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + + __restart_chan(ioat); +} + +static void ioat2_timer_event(unsigned long data) +{ + struct ioat2_dma_chan *ioat = (void *) data; + struct ioat_chan_common *chan = &ioat->base; + + spin_lock_bh(&chan->cleanup_lock); + if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { + unsigned long phys_complete; + u64 status; + + spin_lock_bh(&ioat->ring_lock); + status = ioat_chansts(chan); + + /* when halted due to errors check for channel + * programming errors before advancing the completion state + */ + if 
(is_ioat_halted(status)) { + u32 chanerr; + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + BUG_ON(is_ioat_bug(chanerr)); + } + + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) + ioat2_restart_channel(ioat); + else { + set_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + } + spin_unlock_bh(&ioat->ring_lock); + } + spin_unlock_bh(&chan->cleanup_lock); +} + /** * ioat2_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated @@ -381,7 +330,7 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device) break; ioat_init_channel(device, &ioat->base, i, - ioat2_reset_part2, + ioat2_timer_event, ioat2_cleanup_tasklet, (unsigned long) ioat); ioat->xfercap_log = xfercap_log; @@ -395,6 +344,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) { struct dma_chan *c = tx->chan; struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; dma_cookie_t cookie = c->cookie; cookie++; @@ -404,6 +354,8 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) c->cookie = cookie; dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); + if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); ioat2_update_pending(ioat); spin_unlock_bh(&ioat->ring_lock); @@ -543,9 +495,18 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d ioat->issued); spin_unlock_bh(&ioat->ring_lock); - /* do direct reclaim in the allocation failure case */ - ioat2_cleanup(ioat); - + /* progress reclaim in the allocation failure case we + * may be called under bh_disabled so we need to trigger + * the timer event directly + */ + spin_lock_bh(&chan->cleanup_lock); + if (jiffies > chan->timer.expires && + timer_pending(&chan->timer)) { + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + spin_unlock_bh(&chan->cleanup_lock); + ioat2_timer_event((unsigned long) ioat); + } else + spin_unlock_bh(&chan->cleanup_lock); return -ENOMEM; } @@ -624,6 +585,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) return; tasklet_disable(&chan->cleanup_task); + del_timer_sync(&chan->timer); ioat2_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce @@ -663,10 +625,6 @@ static void ioat2_free_chan_resources(struct dma_chan *c) chan->completion_dma = 0; ioat->pending = 0; ioat->dmacount = 0; - chan->watchdog_completion = 0; - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_tcp_cookie = 0; - chan->watchdog_last_tcp_cookie = 0; } static enum dma_status @@ -716,9 +674,6 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) if (dca) device->dca = ioat2_dca_init(pdev, device->reg_base); - INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog); - schedule_delayed_work(&device->work, WATCHDOG_DELAY); - return err; } diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index bdde5373cf66..73b04a2eb4b0 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -127,6 +127,16 @@ ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) return ioat->ring[idx & ioat2_ring_mask(ioat)]; } +static inline void ioat2_set_chainaddr(struct 
ioat2_dma_chan *ioat, u64 addr) +{ + struct ioat_chan_common *chan = &ioat->base; + + writel(addr & 0x00000000FFFFFFFF, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(addr >> 32, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); +} + int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 4380f6fbf056..e4334a195380 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -101,11 +101,11 @@ #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) #define IOAT_CHANSTS_SOFT_ERR 0x10ULL #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x7ULL -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 +#define IOAT_CHANSTS_STATUS 0x7ULL +#define IOAT_CHANSTS_ACTIVE 0x0 +#define IOAT_CHANSTS_DONE 0x1 +#define IOAT_CHANSTS_SUSPENDED 0x2 +#define IOAT_CHANSTS_HALTED 0x3 @@ -208,18 +208,18 @@ #define IOAT_CDAR_OFFSET_HIGH 0x24 #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ -#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 -#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 -#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 -#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 +#define IOAT_CHANERR_SRC_ADDR_ERR 0x0001 +#define IOAT_CHANERR_DEST_ADDR_ERR 0x0002 +#define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004 +#define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008 #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 #define IOAT_CHANERR_CHANCMD_ERR 0x0020 #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 #define IOAT_CHANERR_READ_DATA_ERR 0x0100 #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 -#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 -#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 +#define IOAT_CHANERR_CONTROL_ERR 0x0400 +#define IOAT_CHANERR_LENGTH_ERR 0x0800 #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 #define IOAT_CHANERR_SOFT_ERR 0x4000 -- cgit v1.2.3 From a309218acee8606f7e235da20cc826eb06d9b0f6 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:02:01 -0700 Subject: ioat2,3: dynamically resize descriptor ring Increment the allocation order of the descriptor ring every time we run out of descriptors up to a maximum of allocation order specified by the module parameter 'ioat_max_alloc_order'. After each idle period decrement the allocation order to a minimum order of 'ioat_ring_alloc_order' (i.e. the default ring size, tunable as a module parameter). 
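As a rough stand-alone illustration of the grow-on-exhaustion / shrink-when-idle policy described above (an editor's sketch, not the driver's code: ring_model, ring_reserve() and ring_shrink_if_idle() are simplified stand-ins for struct ioat2_dma_chan, the allocation path and the idle branch of the timer handler, while DEFAULT_ORDER and MAX_ORDER mirror the ioat_ring_alloc_order default and the IOAT_MAX_ORDER cap):

/*
 * Editor's sketch (not part of the commit): a stand-alone model of the
 * grow-on-exhaustion / shrink-when-idle policy described above.  The
 * names mirror the driver for readability, but this is a simplified
 * illustration, not the in-tree ioat2 implementation.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEFAULT_ORDER	8	/* ioat_ring_alloc_order default */
#define MAX_ORDER	16	/* IOAT_MAX_ORDER */

struct ring_model {
	uint16_t head;		/* free-running submit counter */
	uint16_t tail;		/* free-running cleanup counter */
	int alloc_order;	/* ring holds 1 << alloc_order descriptors */
};

static unsigned int ring_size(const struct ring_model *r)
{
	return 1u << r->alloc_order;
}

static unsigned int ring_active(const struct ring_model *r)
{
	return (uint16_t)(r->head - r->tail);
}

static unsigned int ring_space(const struct ring_model *r)
{
	return ring_size(r) - ring_active(r);
}

/* submission path: reserve num_descs slots, growing the ring if needed */
static bool ring_reserve(struct ring_model *r, unsigned int num_descs)
{
	/* keep at least one slot free so the ring can be reshaped in place */
	while (ring_space(r) <= num_descs) {
		if (r->alloc_order >= MAX_ORDER)
			return false;	/* full at max size: caller must retry */
		r->alloc_order++;	/* models reshape_ring(ioat, order + 1) */
	}
	r->head += num_descs;
	return true;
}

/* idle timer path: step the ring back down toward its default size */
static void ring_shrink_if_idle(struct ring_model *r)
{
	if (ring_active(r) == 0 && r->alloc_order > DEFAULT_ORDER)
		r->alloc_order--;	/* models reshape_ring(ioat, order - 1) */
}

Submission grows the ring one order at a time until the request fits or the cap is reached, while the idle timer walks the order back down, so steady-state descriptor memory tracks the recent workload.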
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.h | 1 + drivers/dma/ioat/dma_v2.c | 215 +++++++++++++++++++++++++++++++++++++++------- drivers/dma/ioat/dma_v2.h | 2 + 3 files changed, 187 insertions(+), 31 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index dbfccac3e80c..d9d6a7e3cd76 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -88,6 +88,7 @@ struct ioat_chan_common { #define IOAT_RESET_PENDING 2 struct timer_list timer; #define COMPLETION_TIMEOUT msecs_to_jiffies(100) + #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *device; dma_addr_t completion_dma; diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 72e59a0d0f2e..460b77301332 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -43,6 +43,10 @@ static int ioat_ring_alloc_order = 8; module_param(ioat_ring_alloc_order, int, 0644); MODULE_PARM_DESC(ioat_ring_alloc_order, "ioat2+: allocate 2^n descriptors per channel (default: n=8)"); +static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; +module_param(ioat_ring_max_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_max_alloc_order, + "ioat2+: upper limit for dynamic ring resizing (default: n=16)"); static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) { @@ -168,6 +172,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } } @@ -253,6 +258,8 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) __restart_chan(ioat); } +static bool reshape_ring(struct ioat2_dma_chan *ioat, int order); + static void ioat2_timer_event(unsigned long data) { struct ioat2_dma_chan *ioat = (void *) data; @@ -289,6 +296,23 @@ static void ioat2_timer_event(unsigned long data) mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); } spin_unlock_bh(&ioat->ring_lock); + } else { + u16 active; + + /* if the ring is idle, empty, and oversized try to step + * down the size + */ + spin_lock_bh(&ioat->ring_lock); + active = ioat2_ring_active(ioat); + if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) + reshape_ring(ioat, ioat->alloc_order-1); + spin_unlock_bh(&ioat->ring_lock); + + /* keep shrinking until we get back to our minimum + * default size + */ + if (ioat->alloc_order > ioat_get_alloc_order()) + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } spin_unlock_bh(&chan->cleanup_lock); } @@ -362,7 +386,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) return cookie; } -static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan) +static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) { struct ioat_dma_descriptor *hw; struct ioat_ring_ent *desc; @@ -370,12 +394,12 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan) dma_addr_t phys; dma = to_ioatdma_device(chan->device); - hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys); + hw = pci_pool_alloc(dma->dma_pool, flags, &phys); if (!hw) return NULL; memset(hw, 0, sizeof(*hw)); - desc = kzalloc(sizeof(*desc), GFP_KERNEL); + desc = kzalloc(sizeof(*desc), flags); if (!desc) { pci_pool_free(dma->dma_pool, hw, phys); return NULL; @@ -397,6 +421,42 @@ static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *cha 
kfree(desc); } +static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) +{ + struct ioat_ring_ent **ring; + int descs = 1 << order; + int i; + + if (order > ioat_get_max_alloc_order()) + return NULL; + + /* allocate the array to hold the software ring */ + ring = kcalloc(descs, sizeof(*ring), flags); + if (!ring) + return NULL; + for (i = 0; i < descs; i++) { + ring[i] = ioat2_alloc_ring_ent(c, flags); + if (!ring[i]) { + while (i--) + ioat2_free_ring_ent(ring[i], c); + kfree(ring); + return NULL; + } + set_desc_id(ring[i], i); + } + + /* link descs */ + for (i = 0; i < descs-1; i++) { + struct ioat_ring_ent *next = ring[i+1]; + struct ioat_dma_descriptor *hw = ring[i]->hw; + + hw->next = next->txd.phys; + } + ring[i]->hw->next = ring[0]->txd.phys; + + return ring; +} + /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring * @chan: channel to be initialized */ @@ -406,8 +466,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent **ring; u32 chanerr; - int descs; - int i; + int order; /* have we already been set up? */ if (ioat->ring) @@ -435,32 +494,10 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) writel(((u64) chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - ioat->alloc_order = ioat_get_alloc_order(); - descs = 1 << ioat->alloc_order; - - /* allocate the array to hold the software ring */ - ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL); + order = ioat_get_alloc_order(); + ring = ioat2_alloc_ring(c, order, GFP_KERNEL); if (!ring) return -ENOMEM; - for (i = 0; i < descs; i++) { - ring[i] = ioat2_alloc_ring_ent(c); - if (!ring[i]) { - while (i--) - ioat2_free_ring_ent(ring[i], c); - kfree(ring); - return -ENOMEM; - } - set_desc_id(ring[i], i); - } - - /* link descs */ - for (i = 0; i < descs-1; i++) { - struct ioat_ring_ent *next = ring[i+1]; - struct ioat_dma_descriptor *hw = ring[i]->hw; - - hw->next = next->txd.phys; - } - ring[i]->hw->next = ring[0]->txd.phys; spin_lock_bh(&ioat->ring_lock); ioat->ring = ring; @@ -468,12 +505,120 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) ioat->issued = 0; ioat->tail = 0; ioat->pending = 0; + ioat->alloc_order = order; spin_unlock_bh(&ioat->ring_lock); tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); - return descs; + return 1 << ioat->alloc_order; +} + +static bool reshape_ring(struct ioat2_dma_chan *ioat, int order) +{ + /* reshape differs from normal ring allocation in that we want + * to allocate a new software ring while only + * extending/truncating the hardware ring + */ + struct ioat_chan_common *chan = &ioat->base; + struct dma_chan *c = &chan->common; + const u16 curr_size = ioat2_ring_mask(ioat) + 1; + const u16 active = ioat2_ring_active(ioat); + const u16 new_size = 1 << order; + struct ioat_ring_ent **ring; + u16 i; + + if (order > ioat_get_max_alloc_order()) + return false; + + /* double check that we have at least 1 free descriptor */ + if (active == curr_size) + return false; + + /* when shrinking, verify that we can hold the current active + * set in the new ring + */ + if (active >= new_size) + return false; + + /* allocate the array to hold the software ring */ + ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); + if (!ring) + return false; + + /* allocate/trim descriptors as needed */ + if (new_size > curr_size) { + /* copy current descriptors to the new ring */ + for (i = 0; i < curr_size; i++) { + u16 curr_idx = 
(ioat->tail+i) & (curr_size-1); + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ring[new_idx] = ioat->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } + + /* add new descriptors to the ring */ + for (i = curr_size; i < new_size; i++) { + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT); + if (!ring[new_idx]) { + while (i--) { + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ioat2_free_ring_ent(ring[new_idx], c); + } + kfree(ring); + return false; + } + set_desc_id(ring[new_idx], new_idx); + } + + /* hw link new descriptors */ + for (i = curr_size-1; i < new_size; i++) { + u16 new_idx = (ioat->tail+i) & (new_size-1); + struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)]; + struct ioat_dma_descriptor *hw = ring[new_idx]->hw; + + hw->next = next->txd.phys; + } + } else { + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *next; + + /* copy current descriptors to the new ring, dropping the + * removed descriptors + */ + for (i = 0; i < new_size; i++) { + u16 curr_idx = (ioat->tail+i) & (curr_size-1); + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ring[new_idx] = ioat->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } + + /* free deleted descriptors */ + for (i = new_size; i < curr_size; i++) { + struct ioat_ring_ent *ent; + + ent = ioat2_get_ring_ent(ioat, ioat->tail+i); + ioat2_free_ring_ent(ent, c); + } + + /* fix up hardware ring */ + hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw; + next = ring[(ioat->tail+new_size) & (new_size-1)]; + hw->next = next->txd.phys; + } + + dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", + __func__, new_size); + + kfree(ioat->ring); + ioat->ring = ring; + ioat->alloc_order = order; + + return true; } /** @@ -487,7 +632,15 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&ioat->ring_lock); - if (unlikely(ioat2_ring_space(ioat) < num_descs)) { + /* never allow the last descriptor to be consumed, we need at + * least one free at all times to allow for on-the-fly ring + * resizing. + */ + while (unlikely(ioat2_ring_space(ioat) <= num_descs)) { + if (reshape_ring(ioat, ioat->alloc_order + 1) && + ioat2_ring_space(ioat) > num_descs) + break; + if (printk_ratelimit()) dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n", diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 73b04a2eb4b0..9baa3d6065ff 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -37,6 +37,8 @@ extern int ioat_pending_level; #define IOAT_MAX_ORDER 16 #define ioat_get_alloc_order() \ (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) +#define ioat_get_max_alloc_order() \ + (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) /* struct ioat2_dma_chan - ioat v2 / v3 channel attributes * @base: common ioat channel parameters -- cgit v1.2.3 From 4b652f0db3be891c7b76b109c3b55003b920fc96 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:02:15 -0700 Subject: net_dma: poll for a descriptor after allocation failure Handle descriptor allocation failures by polling for a descriptor. The driver will force forward progress when polled. In the best case this polling interval will be the time it takes for one dma memcpy transaction to complete. In the worst case, channel hang, we will need to wait 100ms for the cleanup watchdog to fire (ioatdma driver). 
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/iovlock.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c index 9f6fe46a9b87..c0a272c73682 100644 --- a/drivers/dma/iovlock.c +++ b/drivers/dma/iovlock.c @@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, iov_byte_offset, kdata, copy); + /* poll for a descriptor slot */ + if (unlikely(dma_cookie < 0)) { + dma_async_issue_pending(chan); + continue; + } len -= copy; iov[iovec_idx].iov_len -= copy; @@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, page, offset, copy); + /* poll for a descriptor slot */ + if (unlikely(dma_cookie < 0)) { + dma_async_issue_pending(chan); + continue; + } len -= copy; iov[iovec_idx].iov_len -= copy; -- cgit v1.2.3
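For reference, a minimal sketch of the retry pattern these hunks give dmaengine clients: an editor's illustration, not kernel code. The helper name is hypothetical, and dma_async_memcpy_buf_to_buf() / dma_async_issue_pending() are assumed to be the 2.6.31-era dmaengine calls that net_dma uses.

/*
 * Editor's sketch (not part of the patch): the retry pattern the iovlock.c
 * hunks above implement, pulled out as a stand-alone helper.
 */
#include <linux/dmaengine.h>

static dma_cookie_t memcpy_poll_for_descriptor(struct dma_chan *chan,
					       void *dest, void *src,
					       size_t len)
{
	dma_cookie_t cookie;

	for (;;) {
		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);

		/* poll for a descriptor slot, as the hunks above do */
		if (unlikely(cookie < 0)) {
			/*
			 * Allocation failed: kick the channel so in-flight
			 * work is cleaned up (and, via the timer conversion
			 * earlier in this series, so a hung channel is
			 * eventually reset), then try again.
			 */
			dma_async_issue_pending(chan);
			continue;
		}

		return cookie;
	}
}

In the common case the retry succeeds as soon as one in-flight copy completes; in the hung-channel case the loop spins until the 100ms cleanup/reset timer described earlier forces progress.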