Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/pci-dma.c | 21
-rw-r--r--  arch/tile/kernel/pci_gx.c  | 15
2 files changed, 25 insertions, 11 deletions
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index d94f4872e94f..09b58703ac26 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -588,15 +588,18 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-       /* Handle hybrid PCI devices with limited memory addressability. */
-       if ((dma_ops == gx_pci_dma_map_ops ||
-            dma_ops == gx_hybrid_pci_dma_map_ops ||
-            dma_ops == gx_legacy_pci_dma_map_ops) &&
-           (mask <= DMA_BIT_MASK(32))) {
-               if (dma_ops == gx_pci_dma_map_ops)
-                       set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
-
-               if (mask > dev->archdata.max_direct_dma_addr)
+       /*
+        * For PCI devices with 64-bit DMA addressing capability, promote
+        * the dma_ops to full capability for both streams and consistent
+        * memory access. For 32-bit capable devices, limit the consistent
+        * memory DMA range to max_direct_dma_addr.
+        */
+       if (dma_ops == gx_pci_dma_map_ops ||
+           dma_ops == gx_hybrid_pci_dma_map_ops ||
+           dma_ops == gx_legacy_pci_dma_map_ops) {
+               if (mask == DMA_BIT_MASK(64))
+                       set_dma_ops(dev, gx_pci_dma_map_ops);
+               else if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }
 
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 66ef9db5c2fb..29acac6af4ba 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1081,13 +1081,24 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
        return pci_enable_resources(dev, mask);
 }
 
-/* Called for each device after PCI setup is done. */
+/*
+ * Called for each device after PCI setup is done.
+ * We initialize the PCI device capabilities conservatively, assuming that
+ * all devices can only address the 32-bit DMA space. The exception here is
+ * that the device dma_offset is set to the value that matches the 64-bit
+ * capable devices. This is OK because dma_offset is not used by legacy
+ * dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
+ * This implementation matches the kernel design of setting PCI devices'
+ * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
+ * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
+ */
 static void pcibios_fixup_final(struct pci_dev *pdev)
 {
-       set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
+       set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
        set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
        pdev->dev.archdata.max_direct_dma_addr =
                TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
+       pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
 }
 
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
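
For reference, a minimal sketch of the driver-side mask negotiation this change relies on; it is not part of the commit, and the probe function name my_gx_probe() is hypothetical. A driver that asks for 64-bit streaming and coherent DMA via the standard pci_set_dma_mask()/pci_set_consistent_dma_mask() calls is, on tilegx, promoted from gx_legacy_pci_dma_map_ops to the full gx_pci_dma_map_ops; a driver that stays at 32 bits keeps the conservative defaults set by pcibios_fixup_final().

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe routine, illustrative only. */
static int my_gx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err;

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
            pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
                return 0;       /* full 64-bit streaming and coherent DMA */

        /* Fall back to 32-bit DMA; the conservative defaults stay in place. */
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                return err;
        return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}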