author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/sn/pci
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/sn/pci')
-rw-r--r--  arch/ia64/sn/pci/Makefile                  10
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                363
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile            11
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c        188
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c        379
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c   170
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c        282
7 files changed, 1403 insertions, 0 deletions
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
new file mode 100644
index 000000000000..b5dca0097a8e
--- /dev/null
+++ b/arch/ia64/sn/pci/Makefile
@@ -0,0 +1,10 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn pci general routines.
+
+obj-y := pci_dma.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
new file mode 100644
index 000000000000..f680824f819d
--- /dev/null
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -0,0 +1,363 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
+ * a description of how these routines should be used.
+ */
+
+#include <linux/module.h>
+#include <asm/dma.h>
+#include <asm/sn/sn_sal.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+
+#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+
+/**
+ * sn_dma_supported - test a DMA mask
+ * @dev: device to test
+ * @mask: DMA mask to test
+ *
+ * Return whether the given PCI device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.  Of course, SN only supports devices that have 32 or more
+ * address bits when using the PMU.
+ */
+int sn_dma_supported(struct device *dev, u64 mask)
+{
+	BUG_ON(dev->bus != &pci_bus_type);
+
+	if (mask < 0x7fffffff)
+		return 0;
+	return 1;
+}
+EXPORT_SYMBOL(sn_dma_supported);
+
+/**
+ * sn_dma_set_mask - set the DMA mask
+ * @dev: device to set
+ * @dma_mask: new mask
+ *
+ * Set @dev's DMA mask if the hw supports it.
+ */
+int sn_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	BUG_ON(dev->bus != &pci_bus_type);
+
+	if (!sn_dma_supported(dev, dma_mask))
+		return 0;
+
+	*dev->dma_mask = dma_mask;
+	return 1;
+}
+EXPORT_SYMBOL(sn_dma_set_mask);
+
+/**
+ * sn_dma_alloc_coherent - allocate memory for coherent DMA
+ * @dev: device to allocate for
+ * @size: size of the region
+ * @dma_handle: DMA (bus) address
+ * @flags: memory allocation flags
+ *
+ * dma_alloc_coherent() returns a pointer to a memory region suitable for
+ * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
+ * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
+ *
+ * This interface is usually used for "command" streams (e.g. the command
+ * queue for a SCSI controller).  See Documentation/DMA-API.txt for
+ * more information.
+ */
+void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+			    dma_addr_t * dma_handle, int flags)
+{
+	void *cpuaddr;
+	unsigned long phys_addr;
+	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+	BUG_ON(dev->bus != &pci_bus_type);
+
+	/*
+	 * Allocate the memory.
+	 * FIXME: We should be doing alloc_pages_node for the node closest
+	 * to the PCI device.
+	 */
+	if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
+		return NULL;
+
+	memset(cpuaddr, 0x0, size);
+
+	/* physical addr. of the memory we just got */
+	phys_addr = __pa(cpuaddr);
+
+	/*
+	 * 64 bit address translations should never fail.
+	 * 32 bit translations can fail if there are insufficient mapping
+	 * resources.
+	 */
+	*dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
+				    SN_PCIDMA_CONSISTENT);
+	if (!*dma_handle) {
+		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+		free_pages((unsigned long)cpuaddr, get_order(size));
+		return NULL;
+	}
+
+	return cpuaddr;
+}
+EXPORT_SYMBOL(sn_dma_alloc_coherent);
+
+/**
+ * sn_dma_free_coherent - free memory associated with coherent DMAable region
+ * @dev: device to free for
+ * @size: size to free
+ * @cpu_addr: kernel virtual address to free
+ * @dma_handle: DMA address associated with this region
+ *
+ * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
+ * any associated IOMMU mappings.
+ */
+void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+			  dma_addr_t dma_handle)
+{
+	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+	BUG_ON(dev->bus != &pci_bus_type);
+
+	pcibr_dma_unmap(pcidev_info, dma_handle, 0);
+	free_pages((unsigned long)cpu_addr, get_order(size));
+}
+EXPORT_SYMBOL(sn_dma_free_coherent);
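For context, the consistent-DMA pair above is what pci_alloc_consistent()/pci_free_consistent() resolve to on SN. A minimal driver-side sketch of that usage; the device, names, and sizes here are illustrative, not part of this commit:

/* Hypothetical driver-side usage sketch (not from this commit). */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#define CMDQ_BYTES 4096         /* illustrative command-queue size */

static void *cmdq;              /* kernel virtual address of the ring */
static dma_addr_t cmdq_dma;     /* bus address handed to the device */

static int example_setup(struct pci_dev *pdev)
{
	/* sn_dma_set_mask() rejects masks below 32 bits, so ask for 32. */
	if (pci_set_dma_mask(pdev, 0xffffffffUL))
		return -EIO;

	/* Backed by sn_dma_alloc_coherent() on SN platforms. */
	cmdq = pci_alloc_consistent(pdev, CMDQ_BYTES, &cmdq_dma);
	if (!cmdq)
		return -ENOMEM;
	return 0;
}

static void example_teardown(struct pci_dev *pdev)
{
	/* Backed by sn_dma_free_coherent(), which also frees any ATEs. */
	pci_free_consistent(pdev, CMDQ_BYTES, cmdq, cmdq_dma);
}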
+/**
+ * sn_dma_map_single - map a single page for DMA
+ * @dev: device to map for
+ * @cpu_addr: kernel virtual address of the region to map
+ * @size: size of the region
+ * @direction: DMA direction
+ *
+ * Map the region pointed to by @cpu_addr for DMA and return the
+ * DMA address.
+ *
+ * We map this to the one step pcibr_dmamap_trans interface rather than
+ * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
+ * no way of saving the dmamap handle from the alloc to later free
+ * (which is pretty much unacceptable).
+ *
+ * TODO: simplify our interface;
+ *       figure out how to save dmamap handle so can use two step.
+ */
+dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+			     int direction)
+{
+	dma_addr_t dma_addr;
+	unsigned long phys_addr;
+	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+	BUG_ON(dev->bus != &pci_bus_type);
+
+	phys_addr = __pa(cpu_addr);
+	dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
+	if (!dma_addr) {
+		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+		return 0;
+	}
+	return dma_addr;
+}
+EXPORT_SYMBOL(sn_dma_map_single);
+
+/**
+ * sn_dma_unmap_single - unmap a DMA mapped page
+ * @dev: device to sync
+ * @dma_addr: DMA address to sync
+ * @size: size of region
+ * @direction: DMA direction
+ *
+ * This routine is supposed to sync the DMA region specified
+ * by @dma_handle into the coherence domain.  On SN, we're always cache
+ * coherent, so we just need to free any ATEs associated with this mapping.
+ */
+void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+			 int direction)
+{
+	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+	BUG_ON(dev->bus != &pci_bus_type);
+	pcibr_dma_unmap(pcidev_info, dma_addr, direction);
+}
+EXPORT_SYMBOL(sn_dma_unmap_single);
+
+/**
+ * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * @dev: device to unmap
+ * @sg: scatterlist to unmap
+ * @nhwentries: number of scatterlist entries
+ * @direction: DMA direction
+ *
+ * Unmap a set of streaming mode DMA translations.
+ */
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		     int nhwentries, int direction)
+{
+	int i;
+	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+	BUG_ON(dev->bus != &pci_bus_type);
+
+	for (i = 0; i < nhwentries; i++, sg++) {
+		pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
+		sg->dma_address = (dma_addr_t) NULL;
+		sg->dma_length = 0;
+	}
+}
+EXPORT_SYMBOL(sn_dma_unmap_sg);
+
+/**
+ * sn_dma_map_sg - map a scatterlist for DMA
+ * @dev: device to map for
+ * @sg: scatterlist to map
+ * @nhwentries: number of entries
+ * @direction: direction of the DMA transaction
+ *
+ * Maps each entry of @sg for DMA.
+ */
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+		  int direction)
+{
+	unsigned long phys_addr;
+	struct scatterlist *saved_sg = sg;
+	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+	int i;
+
+	BUG_ON(dev->bus != &pci_bus_type);
+
+	/*
+	 * Setup a DMA address for each entry in the scatterlist.
+	 */
+	for (i = 0; i < nhwentries; i++, sg++) {
+		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
+		sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
+						sg->length, 0);
+
+		if (!sg->dma_address) {
+			printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+
+			/*
+			 * Free any successfully allocated entries.
+			 */
+			if (i > 0)
+				sn_dma_unmap_sg(dev, saved_sg, i, direction);
+			return 0;
+		}
+
+		sg->dma_length = sg->length;
+	}
+
+	return nhwentries;
+}
+EXPORT_SYMBOL(sn_dma_map_sg);
+
+void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+				size_t size, int direction)
+{
+	BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
+
+void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+				   size_t size, int direction)
+{
+	BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_single_for_device);
+
+void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			    int nelems, int direction)
+{
+	BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
+
+void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			       int nelems, int direction)
+{
+	BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
+
+int sn_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+EXPORT_SYMBOL(sn_dma_mapping_error);
+
+char *sn_pci_get_legacy_mem(struct pci_bus *bus)
+{
+	if (!SN_PCIBUS_BUSSOFT(bus))
+		return ERR_PTR(-ENODEV);
+
+	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
+			__IA64_UNCACHED_OFFSET);
+}
+
+int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
+{
+	unsigned long addr;
+	int ret;
+
+	if (!SN_PCIBUS_BUSSOFT(bus))
+		return -ENODEV;
+
+	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
+	addr += port;
+
+	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
+
+	if (ret == 2)
+		return -EINVAL;
+
+	if (ret == 1)
+		*val = -1;
+
+	return size;
+}
+
+int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
+{
+	int ret = size;
+	unsigned long paddr;
+	unsigned long *addr;
+
+	if (!SN_PCIBUS_BUSSOFT(bus)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* Put the phys addr in uncached space */
+	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
+	paddr += port;
+	addr = (unsigned long *)paddr;
+
+	switch (size) {
+	case 1:
+		*(volatile u8 *)(addr) = (u8)(val);
+		break;
+	case 2:
+		*(volatile u16 *)(addr) = (u16)(val);
+		break;
+	case 4:
+		*(volatile u32 *)(addr) = (u32)(val);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+ out:
+	return ret;
+}
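The streaming entry points above back pci_map_single()/pci_unmap_single() on SN. A hedged sketch of that driver-side pattern (buffer and device are hypothetical; note that this backend signals ATE exhaustion with a zero handle):

/* Hypothetical streaming-DMA usage sketch (not from this commit). */
#include <linux/pci.h>

static int example_tx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Backed by sn_dma_map_single(); 0 means the PMU ran out of ATEs. */
	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!handle)
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for completion ... */

	/* Backed by sn_dma_unmap_single(), which frees any ATEs used. */
	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}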
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
new file mode 100644
index 000000000000..1850c4a94c41
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -0,0 +1,11 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+obj-y += pcibr_dma.o pcibr_reg.o \
+	 pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
new file mode 100644
index 000000000000..9d6854666f9b
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -0,0 +1,188 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/sn_sal.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+
+int pcibr_invalidate_ate = 0;	/* by default don't invalidate ATE on free */
+
+/*
+ * mark_ate: Mark the ate as either free or inuse.
+ */
+static void mark_ate(struct ate_resource *ate_resource, int start, int number,
+		     uint64_t value)
+{
+	uint64_t *ate = ate_resource->ate;
+	int index;
+	int length = 0;
+
+	for (index = start; length < number; index++, length++)
+		ate[index] = value;
+}
+
+/*
+ * find_free_ate: Find the first free ate index starting from the given
+ * index for the desired consecutive count.
+ */
+static int find_free_ate(struct ate_resource *ate_resource, int start,
+			 int count)
+{
+	uint64_t *ate = ate_resource->ate;
+	int index;
+	int start_free;
+
+	for (index = start; index < ate_resource->num_ate;) {
+		if (!ate[index]) {
+			int i;
+			int free;
+
+			free = 0;
+			start_free = index;	/* Found start free ate */
+			for (i = start_free; i < ate_resource->num_ate; i++) {
+				if (!ate[i]) {	/* This is free */
+					if (++free == count)
+						return start_free;
+				} else {
+					index = i + 1;
+					break;
+				}
+			}
+		} else
+			index++;	/* Try next ate */
+	}
+
+	return -1;
+}
+
+/*
+ * free_ate_resource: Free the requested number of ATEs.
+ */
+static inline void free_ate_resource(struct ate_resource *ate_resource,
+				     int start)
+{
+	mark_ate(ate_resource, start, ate_resource->ate[start], 0);
+	if ((ate_resource->lowest_free_index > start) ||
+	    (ate_resource->lowest_free_index < 0))
+		ate_resource->lowest_free_index = start;
+}
+
+/*
+ * alloc_ate_resource: Allocate the requested number of ATEs.
+ */
+static inline int alloc_ate_resource(struct ate_resource *ate_resource,
+				     int ate_needed)
+{
+	int start_index;
+
+	/*
+	 * Check for ate exhaustion.
+	 */
+	if (ate_resource->lowest_free_index < 0)
+		return -1;
+
+	/*
+	 * Find the required number of free consecutive ates.
+	 */
+	start_index =
+	    find_free_ate(ate_resource, ate_resource->lowest_free_index,
+			  ate_needed);
+	if (start_index >= 0)
+		mark_ate(ate_resource, start_index, ate_needed, ate_needed);
+
+	ate_resource->lowest_free_index =
+	    find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);
+
+	return start_index;
+}
+
+/*
+ * Allocate "count" contiguous Bridge Address Translation Entries
+ * on the specified bridge to be used for PCI to XTALK mappings.
+ * Indices in rm map range from 1..num_entries.  Indices returned
+ * to caller range from 0..num_entries-1.
+ *
+ * Return the start index on success, -1 on failure.
+ */
+int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
+{
+	int status = 0;
+	uint64_t flag;
+
+	flag = pcibr_lock(pcibus_info);
+	status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
+
+	if (status < 0) {
+		/* Failed to allocate */
+		pcibr_unlock(pcibus_info, flag);
+		return -1;
+	}
+
+	pcibr_unlock(pcibus_info, flag);
+
+	return status;
+}
+
+/*
+ * Setup an Address Translation Entry as specified.  Use either the Bridge
+ * internal maps or the external map RAM, as appropriate.
+ */
+static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
+				       int ate_index)
+{
+	if (ate_index < pcibus_info->pbi_int_ate_size) {
+		return pcireg_int_ate_addr(pcibus_info, ate_index);
+	}
+	panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
+}
+
+/*
+ * Update the ate.
+ */
+void inline
+ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
+	  volatile uint64_t ate)
+{
+	while (count-- > 0) {
+		if (ate_index < pcibus_info->pbi_int_ate_size) {
+			pcireg_int_ate_set(pcibus_info, ate_index, ate);
+		} else {
+			panic("ate_write: invalid ate_index 0x%x", ate_index);
+		}
+		ate_index++;
+		ate += IOPGSIZE;
+	}
+
+	pcireg_tflush_get(pcibus_info);	/* wait until Bridge PIO complete */
+}
+
+void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
+{
+	volatile uint64_t ate;
+	int count;
+	uint64_t flags;
+
+	if (pcibr_invalidate_ate) {
+		/* For debugging purposes, clear the valid bit in the ATE */
+		ate = *pcibr_ate_addr(pcibus_info, index);
+		count = pcibus_info->pbi_int_ate_resource.ate[index];
+		ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
+	}
+
+	flags = pcibr_lock(pcibus_info);
+	free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
+	pcibr_unlock(pcibus_info, flags);
+}
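The allocator above packs its bookkeeping into the ATE array itself: every entry of an allocated run stores the run length, so the free path can recover the count from the start index alone. A simplified, standalone model of that first-fit scheme (illustrative only; no locking and no lowest_free_index caching):

/* Simplified model of the ATE allocator above (illustrative only). */
#include <stdint.h>

#define NUM_ATE 64

static uint64_t ate[NUM_ATE];	/* 0 = free; else length of the run */

static int model_alloc(int count)
{
	int i, run, start;

	for (start = 0; start + count <= NUM_ATE; start++) {
		/* Count how many consecutive free entries begin here. */
		for (run = 0; run < count && !ate[start + run]; run++)
			;
		if (run == count) {
			/* Mark every entry with the run length, as
			 * mark_ate() does, so free knows the count. */
			for (i = 0; i < count; i++)
				ate[start + i] = count;
			return start;
		}
		start += run;	/* skip past the occupied entry */
	}
	return -1;		/* exhausted, like alloc_ate_resource() */
}

static void model_free(int start)
{
	int i, count = (int)ate[start];

	for (i = 0; i < count; i++)
		ate[start + i] = 0;
}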
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
new file mode 100644
index 000000000000..b1d66ac065c8
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -0,0 +1,379 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/tiocp.h"
+#include "pci/pic.h"
+#include "pci/pcibr_provider.h"
+#include "tio.h"
+#include <asm/sn/addrs.h>
+
+extern int sn_ioif_inited;
+
+/* =====================================================================
+ *    DMA MANAGEMENT
+ *
+ *    The Bridge ASIC provides three methods of doing DMA: via a "direct map"
+ *    register available in 32-bit PCI space (which selects a contiguous 2G
+ *    address space on some other widget), via "direct" addressing via 64-bit
+ *    PCI space (all destination information comes from the PCI address,
+ *    including transfer attributes), and via a "mapped" region that allows
+ *    a bunch of different small mappings to be established with the PMU.
+ *
+ *    For efficiency, we prefer the 32-bit direct mapping facility, since it
+ *    requires no resource allocations.  The advantage of using the PMU
+ *    over 64-bit direct is that single-cycle PCI addressing can be
+ *    used; the advantage of using 64-bit direct over PMU addressing is that
+ *    we do not have to allocate entries in the PMU.
+ */
+
+static uint64_t
+pcibr_dmamap_ate32(struct pcidev_info *info,
+		   uint64_t paddr, size_t req_size, uint64_t flags)
+{
+	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
+	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
+	    pdi_pcibus_info;
+	uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
+					    pdi_linux_pcidev->devfn)) - 1;
+	int ate_count;
+	int ate_index;
+	uint64_t ate_flags = flags | PCI32_ATE_V;
+	uint64_t ate;
+	uint64_t pci_addr;
+	uint64_t xio_addr;
+	uint64_t offset;
+
+	/* PIC in PCI-X mode does not support 32-bit PageMap mode */
+	if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
+		return 0;
+	}
+
+	/* Calculate the number of ATEs needed. */
+	if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
+		ate_count = IOPG((IOPGSIZE - 1)	/* worst case start offset */
+				 + req_size	/* max mapping bytes */
+				 - 1) + 1;	/* round UP */
+	} else {		/* assume requested target is page aligned */
+		ate_count = IOPG(req_size	/* max mapping bytes */
+				 - 1) + 1;	/* round UP */
+	}
+
+	/* Allocate the ATEs. */
+	ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
+	if (ate_index < 0)
+		return 0;
+
+	/* In PCI-X mode, Prefetch not supported */
+	if (IS_PCIX(pcibus_info))
+		ate_flags &= ~(PCI32_ATE_PREF);
+
+	xio_addr =
+	    IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+	    PHYS_TO_TIODMA(paddr);
+	offset = IOPGOFF(xio_addr);
+	ate = ate_flags | (xio_addr - offset);
+
+	/* If PIC, put the targetid in the ATE */
+	if (IS_PIC_SOFT(pcibus_info)) {
+		ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
+	}
+	ate_write(pcibus_info, ate_index, ate_count, ate);
+
+	/*
+	 * Set up the DMA mapped Address.
+	 */
+	pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
+
+	/*
+	 * If swap was set in device in pcibr_endian_set()
+	 * we need to turn swapping on.
+	 */
+	if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
+		ATE_SWAP_ON(pci_addr);
+
+	return pci_addr;
+}
+
+static uint64_t
+pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
+			uint64_t dma_attributes)
+{
+	struct pcibus_info *pcibus_info = (struct pcibus_info *)
+	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
+	uint64_t pci_addr;
+
+	/* Translate to Crosstalk View of Physical Address */
+	pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+		    PHYS_TO_TIODMA(paddr)) | dma_attributes;
+
+	/* Handle Bus mode */
+	if (IS_PCIX(pcibus_info))
+		pci_addr &= ~PCI64_ATTR_PREF;
+
+	/* Handle Bridge Chipset differences */
+	if (IS_PIC_SOFT(pcibus_info)) {
+		pci_addr |=
+		    ((uint64_t) pcibus_info->
+		     pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
+	} else
+		pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+
+	/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
+	if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
+		pci_addr |= PCI64_ATTR_VIRTUAL;
+
+	return pci_addr;
+}
+
+static uint64_t
+pcibr_dmatrans_direct32(struct pcidev_info * info,
+			uint64_t paddr, size_t req_size, uint64_t flags)
+{
+	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
+	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
+	    pdi_pcibus_info;
+	uint64_t xio_addr;
+	uint64_t xio_base;
+	uint64_t offset;
+	uint64_t endoff;
+
+	if (IS_PCIX(pcibus_info)) {
+		return 0;
+	}
+
+	xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+	    PHYS_TO_TIODMA(paddr);
+
+	xio_base = pcibus_info->pbi_dir_xbase;
+	offset = xio_addr - xio_base;
+	endoff = req_size + offset;
+	if ((req_size > (1ULL << 31)) ||	/* Too Big */
+	    (xio_addr < xio_base) ||	/* Out of range for mappings */
+	    (endoff > (1ULL << 31))) {	/* Too Big */
+		return 0;
+	}
+
+	return PCI32_DIRECT_BASE | offset;
+}
+
+/*
+ * Wrapper routine for freeing DMA maps.
+ * DMA mappings for Direct 64 and 32 do not have any DMA maps.
+ */
+void
+pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle,
+		int direction)
+{
+	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
+	    pdi_pcibus_info;
+
+	if (IS_PCI32_MAPPED(dma_handle)) {
+		int ate_index;
+
+		ate_index =
+		    IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
+		pcibr_ate_free(pcibus_info, ate_index);
+	}
+}
+
+/*
+ * On SN systems there is a race condition between a PIO read response and
+ * DMAs.  In rare cases, the read response may beat the DMA, causing the
+ * driver to think that data in memory is complete and meaningful.  This code
+ * eliminates that race.  This routine is called by the PIO read routines
+ * after doing the read.  For PIC this routine then forces a fake interrupt
+ * on another line, which is logically associated with the slot that the PIO
+ * is addressed to.  It then spins while watching the memory location that
+ * the interrupt is targeted to.  When the interrupt response arrives, we
+ * are sure that the DMA has landed in memory and it is safe for the driver
+ * to proceed.  For TIOCP use the Device(x) Write Request Buffer Flush
+ * Bridge register since it ensures the data has entered the coherence domain,
+ * unlike the PIC Device(x) Write Request Buffer Flush register.
+ */
+void sn_dma_flush(uint64_t addr)
+{
+	nasid_t nasid;
+	int is_tio;
+	int wid_num;
+	int i, j;
+	int bwin;
+	uint64_t flags;
+	struct hubdev_info *hubinfo;
+	volatile struct sn_flush_device_list *p;
+	struct sn_flush_nasid_entry *flush_nasid_list;
+
+	if (!sn_ioif_inited)
+		return;
+
+	nasid = NASID_GET(addr);
+	if (-1 == nasid_to_cnodeid(nasid))
+		return;
+
+	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
+
+	if (!hubinfo) {
+		BUG();
+	}
+	is_tio = (nasid & 1);
+	if (is_tio) {
+		wid_num = TIO_SWIN_WIDGETNUM(addr);
+		bwin = TIO_BWIN_WINDOWNUM(addr);
+	} else {
+		wid_num = SWIN_WIDGETNUM(addr);
+		bwin = BWIN_WINDOWNUM(addr);
+	}
+
+	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
+	if (flush_nasid_list->widget_p == NULL)
+		return;
+	if (bwin > 0) {
+		uint64_t itte = flush_nasid_list->iio_itte[bwin];
+
+		if (is_tio) {
+			wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) &
+			    TIO_ITTE_WIDGET_MASK;
+		} else {
+			wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
+			    IIO_ITTE_WIDGET_MASK;
+		}
+	}
+	if (flush_nasid_list->widget_p[wid_num] == NULL)
+		return;
+	p = &flush_nasid_list->widget_p[wid_num][0];
+
+	/* find a matching BAR */
+	for (i = 0; i < DEV_PER_WIDGET; i++) {
+		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
+			if (p->sfdl_bar_list[j].start == 0)
+				break;
+			if (addr >= p->sfdl_bar_list[j].start
+			    && addr <= p->sfdl_bar_list[j].end)
+				break;
+		}
+		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
+			break;
+		p++;
+	}
+
+	/* if no matching BAR, return without doing anything. */
+	if (i == DEV_PER_WIDGET)
+		return;
+
+	/*
+	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
+	 * register since it ensures the data has entered the coherence
+	 * domain, unlike PIC.
+	 */
+	if (is_tio) {
+		uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID);
+		uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);
+
+		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
+		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
+			return;
+		} else {
+			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
+					     (p->sfdl_slot - 1));
+		}
+	} else {
+		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
+				  sfdl_flush_lock, flags);
+
+		p->sfdl_flush_value = 0;
+
+		/* force an interrupt. */
+		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
+
+		/* wait for the interrupt to come back. */
+		while (*(p->sfdl_flush_addr) != 0x10f) ;
+
+		/* okay, everything is synched up. */
+		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
+				       flags);
+	}
+	return;
+}
+
+/*
+ * Wrapper DMA interface.  Called from pci_dma.c routines.
+ */
+uint64_t
+pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr,
+	      size_t size, unsigned int flags)
+{
+	dma_addr_t dma_handle;
+	struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev;
+
+	if (flags & SN_PCIDMA_CONSISTENT) {
+		/* sn_pci_alloc_consistent interfaces */
+		if (pcidev->dev.coherent_dma_mask == ~0UL) {
+			dma_handle =
+			    pcibr_dmatrans_direct64(pcidev_info, phys_addr,
+						    PCI64_ATTR_BAR);
+		} else {
+			dma_handle =
+			    (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
+							    phys_addr, size,
+							    PCI32_ATE_BAR);
+		}
+	} else {
+		/* map_sg/map_single interfaces */
+
+		/* SN cannot support DMA addresses smaller than 32 bits. */
+		if (pcidev->dma_mask < 0x7fffffff) {
+			return 0;
+		}
+
+		if (pcidev->dma_mask == ~0UL) {
+			/*
+			 * Handle the most common case: 64 bit cards.  This
+			 * call should always succeed.
+			 */
+			dma_handle =
+			    pcibr_dmatrans_direct64(pcidev_info, phys_addr,
+						    PCI64_ATTR_PREF);
+		} else {
+			/* Handle 32-63 bit cards via direct mapping */
+			dma_handle =
+			    pcibr_dmatrans_direct32(pcidev_info, phys_addr,
+						    size, 0);
+			if (!dma_handle) {
+				/*
+				 * It is a 32 bit card and we cannot do direct
+				 * mapping, so we use an ATE.
+				 */
+				dma_handle =
+				    pcibr_dmamap_ate32(pcidev_info, phys_addr,
						       size, PCI32_ATE_PREF);
+			}
+		}
+	}
+
+	return dma_handle;
+}
+
+EXPORT_SYMBOL(sn_dma_flush);
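The ate_count rounding in pcibr_dmamap_ate32() deserves a worked example: an unaligned target may cost one extra page of ATEs for the worst-case start offset. A sketch assuming a 16 KB I/O page (the real IOPGSIZE comes from the SN headers and may differ):

/* Worked example of the ATE-count rounding (IOPGSIZE assumed 16 KB). */
#include <stdio.h>

#define IOPGSIZE 16384
#define IOPG(x)  ((x) / IOPGSIZE)

int main(void)
{
	size_t req_size = 40000;	/* illustrative request */

	/* Unaligned case: pad with a worst-case start offset, round up. */
	int unaligned = IOPG((IOPGSIZE - 1) + req_size - 1) + 1;

	/* Page-aligned case: just round the size up to whole pages. */
	int aligned = IOPG(req_size - 1) + 1;

	printf("%d ATEs unaligned, %d aligned\n", unaligned, aligned);
	/* 40000 bytes -> 4 ATEs unaligned (may straddle pages), 3 aligned. */
	return 0;
}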
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
new file mode 100644
index 000000000000..92bd278cf7ff
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -0,0 +1,170 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/sn/sn_sal.h>
+#include "xtalk/xwidgetdev.h"
+#include <asm/sn/geo.h>
+#include "xtalk/hubdev.h"
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+#include <asm/sn/addrs.h>
+
+static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
+{
+	struct ia64_sal_retval ret_stuff;
+	uint64_t busnum;
+	int segment;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	segment = 0;
+	busnum = soft->pbi_buscommon.bs_persist_busnum;
+	SAL_CALL_NOLOCK(ret_stuff,
+			(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
+			(u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
+
+	return (int)ret_stuff.v0;
+}
+
+/*
+ * PCI Bridge Error interrupt handler.  Gets invoked whenever a PCI
+ * bridge sends an error interrupt.
+ */
+static irqreturn_t
+pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *regs)
+{
+	struct pcibus_info *soft = (struct pcibus_info *)arg;
+
+	if (sal_pcibr_error_interrupt(soft) < 0) {
+		panic("pcibr_error_intr_handler(): Fatal Bridge Error");
+	}
+	return IRQ_HANDLED;
+}
+
+void *
+pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft)
+{
+	int nasid, cnode, j;
+	struct hubdev_info *hubdev_info;
+	struct pcibus_info *soft;
+	struct sn_flush_device_list *sn_flush_device_list;
+
+	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
+		return NULL;
+	}
+
+	/*
+	 * Allocate kernel bus soft and copy from prom.
+	 */
+	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
+	if (!soft) {
+		return NULL;
+	}
+
+	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
+	soft->pbi_buscommon.bs_base =
+	    (((u64) soft->pbi_buscommon.
+	      bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+
+	spin_lock_init(&soft->pbi_lock);
+
+	/*
+	 * Register the bridge's error interrupt handler.
+	 */
+	if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
+			SA_SHIRQ, "PCIBR error", (void *)(soft))) {
+		printk(KERN_WARNING
+		       "pcibr cannot allocate interrupt for error handler\n");
+	}
+
+	/*
+	 * Update the Bridge with the "kernel" pagesize.
+	 */
+	if (PAGE_SIZE < 16384) {
+		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
+	} else {
+		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
+	}
+
+	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
+	cnode = nasid_to_cnodeid(nasid);
+	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+
+	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
+		sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
+		    widget_p[(int)soft->pbi_buscommon.bs_xid];
+		if (sn_flush_device_list) {
+			for (j = 0; j < DEV_PER_WIDGET;
+			     j++, sn_flush_device_list++) {
+				if (sn_flush_device_list->sfdl_slot == -1)
+					continue;
+				if (sn_flush_device_list->
+				    sfdl_persistent_busnum ==
+				    soft->pbi_buscommon.bs_persist_busnum)
+					sn_flush_device_list->sfdl_pcibus_info =
+					    soft;
+			}
+		}
+	}
+
+	/* Setup the PMU ATE map */
+	soft->pbi_int_ate_resource.lowest_free_index = 0;
+	soft->pbi_int_ate_resource.ate =
+	    kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
+	memset(soft->pbi_int_ate_resource.ate, 0,
+	       (soft->pbi_int_ate_size * sizeof(uint64_t)));
+
+	return soft;
+}
+
+void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
+{
+	struct pcidev_info *pcidev_info;
+	struct pcibus_info *pcibus_info;
+	int bit = sn_irq_info->irq_int_bit;
+
+	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+	if (pcidev_info) {
+		pcibus_info =
+		    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+		    pdi_pcibus_info;
+		pcireg_force_intr_set(pcibus_info, bit);
+	}
+}
+
+void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
+{
+	struct pcidev_info *pcidev_info;
+	struct pcibus_info *pcibus_info;
+	int bit = sn_irq_info->irq_int_bit;
+	uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr;
+
+	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+	if (pcidev_info) {
+		pcibus_info =
+		    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+		    pdi_pcibus_info;
+
+		/* Disable the device's IRQ */
+		pcireg_intr_enable_bit_clr(pcibus_info, bit);
+
+		/* Change the device's IRQ */
+		pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
+
+		/* Re-enable the device's IRQ */
+		pcireg_intr_enable_bit_set(pcibus_info, bit);
+
+		pcibr_force_interrupt(sn_irq_info);
+	}
+}
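The bs_base rewrite in pcibr_bus_fixup() is dense: shifting a 64-bit value left then right by 4 clears bits 63..60 (the PROM's attribute nibble), and the OR retargets the physical base into uncached space. A model of that arithmetic with a stand-in constant (the real value is __IA64_UNCACHED_OFFSET from the ia64 headers):

/* Model of the bs_base rewrite (constant illustrative, not SN's). */
#include <stdint.h>

#define UNCACHED_OFFSET 0xc000000000000000ULL /* stand-in constant */

static uint64_t fixup_base(uint64_t prom_base)
{
	/* (x << 4) >> 4 zeroes bits 63..60, dropping PROM attributes. */
	uint64_t phys = (prom_base << 4) >> 4;

	/* Re-tag the physical base as an uncached kernel address. */
	return phys | UNCACHED_OFFSET;
}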
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
new file mode 100644
index 000000000000..74a74a7d2a13
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -0,0 +1,282 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/tiocp.h"
+#include "pci/pic.h"
+#include "pci/pcibr_provider.h"
+
+union br_ptr {
+	struct tiocp tio;
+	struct pic pic;
+};
+
+/*
+ * Control Register Access -- Read/Write			0000_0020
+ */
+void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_control &= ~bits;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_wid_control &= ~bits;
+			break;
+		default:
+			panic
+			    ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_control |= bits;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_wid_control |= bits;
+			break;
+		default:
+			panic
+			    ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+/*
+ * PCI/PCIX Target Flush Register Access -- Read Only		0000_0050
+ */
+uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+	uint64_t ret = 0;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ret = ptr->tio.cp_tflush;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ret = ptr->pic.p_wid_tflush;
+			break;
+		default:
+			panic
+			    ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+
+	/* Read of the Target Flush should always return zero */
+	if (ret != 0)
+		panic("pcireg_tflush_get: Target Flush failed\n");
+
+	return ret;
+}
+
+/*
+ * Interrupt Status Register Access -- Read Only		0000_0100
+ */
+uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+	uint64_t ret = 0;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ret = ptr->tio.cp_int_status;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ret = ptr->pic.p_int_status;
+			break;
+		default:
+			panic
+			    ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+	return ret;
+}
+
+/*
+ * Interrupt Enable Register Access -- Read/Write		0000_0108
+ */
+void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_int_enable &= ~bits;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_int_enable &= ~bits;
+			break;
+		default:
+			panic
+			    ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_int_enable |= bits;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_int_enable |= bits;
+			break;
+		default:
+			panic
+			    ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+/*
+ * Intr Host Address Register (int_addr) -- Read/Write	0000_0130 - 0000_0168
+ */
+void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
+			       uint64_t addr)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
+			ptr->tio.cp_int_addr[int_n] |=
+			    (addr & TIOCP_HOST_INTR_ADDR);
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
+			ptr->pic.p_int_addr[int_n] |=
+			    (addr & PIC_HOST_INTR_ADDR);
+			break;
+		default:
+			panic
+			    ("pcireg_intr_addr_addr_set: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+/*
+ * Force Interrupt Register Access -- Write Only	0000_01C0 - 0000_01F8
+ */
+void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_force_pin[int_n] = 1;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_force_pin[int_n] = 1;
+			break;
+		default:
+			panic
+			    ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+/*
+ * Device(x) Write Buffer Flush Reg Access -- Read Only	0000_0240 - 0000_0258
+ */
+uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+	uint64_t ret = 0;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ret = ptr->tio.cp_wr_req_buf[device];
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ret = ptr->pic.p_wr_req_buf[device];
+			break;
+		default:
+			panic
+			    ("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+	/* Read of the Write Buffer Flush should always return zero */
+	return ret;
+}
+
+void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
+			uint64_t val)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
+			break;
+		default:
+			panic
+			    ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
+{
+	union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+	uint64_t *ret = (uint64_t *) 0;
+
+	if (pcibus_info) {
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ret = (uint64_t *) &(ptr->tio.cp_int_ate_ram[ate_index]);
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ret = (uint64_t *) &(ptr->pic.p_int_ate_ram[ate_index]);
+			break;
+		default:
+			panic
+			    ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+	return ret;
+}
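Every accessor in pcibr_reg.c repeats one shape: overlay the bridge's MMIO base with a union of both chip register layouts, then switch on the bridge type. A condensed model of that dispatch, detached from the SN types:

/* Condensed model of the union-plus-switch register dispatch above. */
#include <stdint.h>

enum bridge_type { BRIDGE_TIOCP, BRIDGE_PIC };

struct tiocp_regs { volatile uint64_t cp_control; };
struct pic_regs   { volatile uint64_t p_wid_control; };

union bridge_regs {
	struct tiocp_regs tio;
	struct pic_regs   pic;
};

static void control_bit_set(void *mmio_base, enum bridge_type type,
			    uint64_t bits)
{
	union bridge_regs *regs = mmio_base;

	/* One entry point, two chip layouts: pick the field by type. */
	switch (type) {
	case BRIDGE_TIOCP:
		regs->tio.cp_control |= bits;
		break;
	case BRIDGE_PIC:
		regs->pic.p_wid_control |= bits;
		break;
	}
}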