From cecfed31fda849767799e5521064796a21c5164c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 10 Oct 2018 20:16:41 +0200
Subject: scsi: snic: switch to generic DMA API

Switch from the legacy PCI DMA API to the generic DMA API.

Signed-off-by: Christoph Hellwig
Reviewed-by: Johannes Thumshirn
Signed-off-by: Martin K. Petersen
---
 drivers/scsi/snic/snic_disc.c |  7 ++++---
 drivers/scsi/snic/snic_io.c   | 25 +++++++++++++------------
 drivers/scsi/snic/snic_main.c | 24 ++----------------------
 drivers/scsi/snic/snic_scsi.c | 11 +++++------
 drivers/scsi/snic/vnic_dev.c  | 29 ++++++++++++++---------------
 5 files changed, 38 insertions(+), 58 deletions(-)

diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b106596cc0cf..e9ccfb97773f 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
 
 	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
 
-	pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+	pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost,
 			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
 			      buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
 
 	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
 	if (ret) {
-		pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&snic->pdev->dev, pa, buf_len,
+				 DMA_FROM_DEVICE);
 		kfree(buf);
 		rqi->sge_va = 0;
 		snic_release_untagged_req(snic, rqi);
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 8e69548395b9..159ee94d2a55 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 	struct snic_req_info *rqi = NULL;
 	unsigned long flags;
 
-	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_TO_DEVICE);
 
 	rqi = req_to_rqi(req);
 	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 	snic_print_desc(__func__, os_buf, len);
 
 	/* Map request buffer */
-	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+	pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
 
 		return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
 	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
 	if (desc_avail <= 0) {
-		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
 		req->req_pa = 0;
 		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
 		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
 
 	if (rqi->abort_req) {
 		if (rqi->abort_req->req_pa)
-			pci_unmap_single(snic->pdev,
+			dma_unmap_single(&snic->pdev->dev,
 					 rqi->abort_req->req_pa,
 					 sizeof(struct snic_host_req),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 
 		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
 	}
 
 	if (rqi->dr_req) {
 		if (rqi->dr_req->req_pa)
-			pci_unmap_single(snic->pdev,
+			dma_unmap_single(&snic->pdev->dev,
 					 rqi->dr_req->req_pa,
 					 sizeof(struct snic_host_req),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 
 		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
 	}
 
 	if (rqi->req->req_pa)
-		pci_unmap_single(snic->pdev,
+		dma_unmap_single(&snic->pdev->dev,
 				 rqi->req->req_pa,
 				 rqi->req_len,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 
 	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
 }
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
 
 	sgd = req_to_sgl(rqi_to_req(rqi));
 	SNIC_BUG_ON(sgd[0].addr == 0);
-	pci_unmap_single(snic->pdev,
+	dma_unmap_single(&snic->pdev->dev,
 			 le64_to_cpu(sgd[0].addr),
 			 le32_to_cpu(sgd[0].len),
-			 PCI_DMA_FROMDEVICE);
+			 DMA_FROM_DEVICE);
 }
 
 /*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 7cf70aaec0ba..5295277d6325 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * limitation for the device. Try 43-bit first, and
 	 * fail to 32-bit.
 	 */
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
 	if (ret) {
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (ret) {
 			SNIC_HOST_ERR(shost,
 				      "No Usable DMA Configuration, aborting %d\n",
 				      ret);
-
-			goto err_rel_regions;
-		}
-
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (ret) {
-			SNIC_HOST_ERR(shost,
-				      "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
-				      ret);
-
-			goto err_rel_regions;
-		}
-	} else {
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
-		if (ret) {
-			SNIC_HOST_ERR(shost,
-				      "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
-				      ret);
-
 			goto err_rel_regions;
 		}
 	}
 
-
 	/* Map vNIC resources from BAR0 */
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 42e485139fc9..b3650c989ed4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
 		      CMD_FLAGS(sc));
 
 	if (req->u.icmnd.sense_addr)
-		pci_unmap_single(snic->pdev,
+		dma_unmap_single(&snic->pdev->dev,
 				 le64_to_cpu(req->u.icmnd.sense_addr),
 				 SCSI_SENSE_BUFFERSIZE,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 
 	scsi_dma_unmap(sc);
 
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
 		}
 	}
 
-	pa = pci_map_single(snic->pdev,
+	pa = dma_map_single(&snic->pdev->dev,
 			    sc->sense_buffer,
 			    SCSI_SENSE_BUFFERSIZE,
-			    PCI_DMA_FROMDEVICE);
-
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+			    DMA_FROM_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost,
 			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
 			      sc->sense_buffer, snic_cmd_tag(sc));
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index dad5fc66effb..05e374f80946 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 {
 	svnic_dev_desc_ring_size(ring, desc_count, desc_size);
 
-	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
-		ring->size_unaligned,
-		&ring->base_addr_unaligned);
-
+	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+		ring->size_unaligned, &ring->base_addr_unaligned,
+		GFP_KERNEL);
 	if (!ring->descs_unaligned) {
 		pr_err("Failed to allocate ring (size=%d), aborting\n",
 			(int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
 	if (ring->descs) {
-		pci_free_consistent(vdev->pdev,
+		dma_free_coherent(&vdev->pdev->dev,
 			ring->size_unaligned,
 			ring->descs_unaligned,
 			ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
 	int err = 0;
 
 	if (!vdev->fw_info) {
-		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_fw_info),
-			&vdev->fw_info_pa);
+			&vdev->fw_info_pa, GFP_KERNEL);
 		if (!vdev->fw_info)
 			return -ENOMEM;
 
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
 	int wait = VNIC_DVCMD_TMO;
 
 	if (!vdev->stats) {
-		vdev->stats = pci_alloc_consistent(vdev->pdev,
-			sizeof(struct vnic_stats), &vdev->stats_pa);
+		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
 		if (!vdev->stats)
 			return -ENOMEM;
 	}
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
 	int wait = VNIC_DVCMD_TMO;
 
 	if (!vdev->notify) {
-		vdev->notify = pci_alloc_consistent(vdev->pdev,
+		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_notify),
-			&vdev->notify_pa);
+			&vdev->notify_pa, GFP_KERNEL);
 		if (!vdev->notify)
 			return -ENOMEM;
 	}
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
 {
 	if (vdev) {
 		if (vdev->notify)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_notify),
 				vdev->notify,
 				vdev->notify_pa);
 		if (vdev->linkstatus)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(u32),
 				vdev->linkstatus,
 				vdev->linkstatus_pa);
 		if (vdev->stats)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
 		if (vdev->devcmd2)
-- 
cgit v1.2.3
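
For reference, below is a minimal sketch of the conversion pattern the patch applies throughout the driver. It is illustrative only and not part of the patch; the example_* helpers and the pdev/buf/len/pa parameter names are placeholders, not snic code. The legacy pci_* wrappers took a struct pci_dev and PCI_DMA_* direction constants; the generic API takes the struct device embedded in it (&pdev->dev) and enum dma_data_direction values.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Probe-time setup: one dma_set_mask_and_coherent() call covers both the
 * streaming and the coherent mask, replacing the former pci_set_dma_mask()
 * + pci_set_consistent_dma_mask() pair.
 */
static int example_set_masks(struct pci_dev *pdev)
{
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA configuration */
	return 0;
}

/*
 * Streaming mapping for a device-to-memory transfer.  The old form was
 * pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE) checked with
 * pci_dma_mapping_error(); the generic API uses &pdev->dev and
 * DMA_FROM_DEVICE instead.
 */
static int example_map_buf(struct pci_dev *pdev, void *buf, size_t len,
			   dma_addr_t *pa)
{
	*pa = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *pa))
		return -ENOMEM;
	return 0;
}

static void example_unmap_buf(struct pci_dev *pdev, dma_addr_t pa, size_t len)
{
	/* pci_unmap_single(pdev, pa, len, PCI_DMA_FROMDEVICE) becomes: */
	dma_unmap_single(&pdev->dev, pa, len, DMA_FROM_DEVICE);
}

The coherent allocations in vnic_dev.c follow the same pattern: pci_alloc_consistent()/pci_free_consistent() map onto dma_alloc_coherent()/dma_free_coherent(), with the allocation flag (GFP_KERNEL here) now passed explicitly.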