From 0605bed9ba62e2e284be791da7ad6ee37d2f29cf Mon Sep 17 00:00:00 2001 From: Aliasgar Surti Date: Wed, 18 Sep 2019 18:46:36 +0530 Subject: staging: qlge: Removed unnecessary variable coccicheck reported warning for unnecessary variable used. This patch fixes the same by removing the variable and returning value directly. Signed-off-by: Aliasgar Surti Link: https://lore.kernel.org/r/1568812596-25926-1-git-send-email-aliasgar.surti500@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_dbg.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 31389ab8bdf7..5599525a19d5 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -389,7 +389,6 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf, static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf) { - int status = 0; int i; for (i = 0; i < 8; i++, buf++) { @@ -402,7 +401,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf) *buf = ql_read32(qdev, CNA_ETS); } - return status; + return 0; } static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf) -- cgit v1.2.3 From d7618e38461e6a3f190d88fb941befd51b7c29b0 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:11:55 +0900 Subject: staging: qlge: Fix irq masking in INTx mode Tracing the driver operation reveals that the INTR_EN_EN bit (per-queue interrupt control) does not immediately prevent rx completion interrupts when the device is operating in INTx mode. This leads to interrupts being raised while napi is scheduled/running. Those interrupts are ignored by qlge_isr() and falsely reported as IRQ_NONE thanks to the irq_cnt scheme. This in turn can cause frames to loiter in the receive queue until a later frame leads to another rx interrupt that will schedule napi. Use the INTR_EN_EI bit (master interrupt control) instead. Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-2-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 6cae33072496..d7b64d360ea8 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -3366,6 +3366,7 @@ msi: } } qlge_irq_type = LEG_IRQ; + set_bit(QL_LEGACY_ENABLED, &qdev->flags); netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Running with legacy interrupts.\n"); } @@ -3509,6 +3510,16 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) intr_context->intr_dis_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_DISABLE; + if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) { + /* Experience shows that when using INTx interrupts, + * the device does not always auto-mask INTR_EN_EN. + * Moreover, masking INTR_EN_EN manually does not + * immediately prevent interrupt generation. 
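+		 * Use the INTR_EN_EI bit (master interrupt control)
+		 * instead: adding it to the enable/disable masks below
+		 * makes masking take effect immediately in INTx mode.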
+ */ + intr_context->intr_en_mask |= INTR_EN_EI << 16 | + INTR_EN_EI; + intr_context->intr_dis_mask |= INTR_EN_EI << 16; + } intr_context->intr_read_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; /* -- cgit v1.2.3 From e759b5cf70894a1a4a6a5e60a3bd63b7dc788b01 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:11:56 +0900 Subject: staging: qlge: Remove irq_cnt qlge uses an irq enable/disable refcounting scheme that is: * poorly implemented Uses a spin_lock to protect accesses to the irq_cnt atomic variable. * buggy Breaks when there is not a 1:1 sequence of irq - napi_poll, such as when using SO_BUSY_POLL. * unnecessary The purpose or irq_cnt is to reduce irq control writes when multiple work items result from one irq: the irq is re-enabled after all work is done. Analysis of the irq handler shows that there is only one case where there might be two workers scheduled at once, and those have separate irq masking bits. Therefore, remove irq_cnt. Additionally, we get a performance improvement: perf stat -e cycles -a -r5 super_netperf 100 -H 192.168.33.1 -t TCP_RR Before: 628560 628056 622103 622744 627202 [...] 268,803,947,669 cycles ( +- 0.09% ) After: 636300 634106 634984 638555 634188 [...] 259,237,291,449 cycles ( +- 0.19% ) Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-3-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 7 --- drivers/staging/qlge/qlge_main.c | 98 +++++++++++----------------------------- drivers/staging/qlge/qlge_mpi.c | 1 - 3 files changed, 27 insertions(+), 79 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index ad7c5eb8a3b6..5d9a36deda08 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1982,11 +1982,6 @@ struct intr_context { u32 intr_dis_mask; /* value/mask used to disable this intr */ u32 intr_read_mask; /* value/mask used to read this intr */ char name[IFNAMSIZ * 2]; - atomic_t irq_cnt; /* irq_cnt is used in single vector - * environment. It's incremented for each - * irq handler that is scheduled. When each - * handler finishes it decrements irq_cnt and - * enables interrupts if it's zero. */ irq_handler_t handler; }; @@ -2074,7 +2069,6 @@ struct ql_adapter { u32 port; /* Port number this adapter */ spinlock_t adapter_lock; - spinlock_t hw_lock; spinlock_t stats_lock; /* PCI Bus Relative Register Addresses */ @@ -2235,7 +2229,6 @@ void ql_mpi_reset_work(struct work_struct *work); void ql_mpi_core_to_log(struct work_struct *work); int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); void ql_queue_asic_error(struct ql_adapter *qdev); -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); void ql_set_ethtool_ops(struct net_device *ndev); int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); void ql_mpi_idc_work(struct work_struct *work); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index d7b64d360ea8..7a8d6390d5de 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -625,75 +625,26 @@ static void ql_disable_interrupts(struct ql_adapter *qdev) ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); } -/* If we're running with multiple MSI-X vectors then we enable on the fly. - * Otherwise, we may have multiple outstanding workers and don't want to - * enable until the last one finishes. 
In this case, the irq_cnt gets - * incremented every time we queue a worker and decremented every time - * a worker finishes. Once it hits zero we enable the interrupt. - */ -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) { - u32 var = 0; - unsigned long hw_flags = 0; - struct intr_context *ctx = qdev->intr_context + intr; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { - /* Always enable if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. - */ - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - return var; - } + struct intr_context *ctx = &qdev->intr_context[intr]; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (atomic_dec_and_test(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return var; + ql_write32(qdev, INTR_EN, ctx->intr_en_mask); } -static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) { - u32 var = 0; - struct intr_context *ctx; + struct intr_context *ctx = &qdev->intr_context[intr]; - /* HW disables for us if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. - */ - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) - return 0; - - ctx = qdev->intr_context + intr; - spin_lock(&qdev->hw_lock); - if (!atomic_read(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_dis_mask); - var = ql_read32(qdev, STS); - } - atomic_inc(&ctx->irq_cnt); - spin_unlock(&qdev->hw_lock); - return var; + ql_write32(qdev, INTR_EN, ctx->intr_dis_mask); } static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) { int i; - for (i = 0; i < qdev->intr_count; i++) { - /* The enable call does a atomic_dec_and_test - * and enables only if the result is zero. - * So we precharge it here. - */ - if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || - i == 0)) - atomic_set(&qdev->intr_context[i].irq_cnt, 1); - ql_enable_completion_interrupt(qdev, i); - } + for (i = 0; i < qdev->intr_count; i++) + ql_enable_completion_interrupt(qdev, i); } static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) @@ -2500,21 +2451,22 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) u32 var; int work_done = 0; - spin_lock(&qdev->hw_lock); - if (atomic_read(&qdev->intr_context[0].irq_cnt)) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "Shared Interrupt, Not ours!\n"); - spin_unlock(&qdev->hw_lock); - return IRQ_NONE; - } - spin_unlock(&qdev->hw_lock); + /* Experience shows that when using INTx interrupts, interrupts must + * be masked manually. + * When using MSI mode, INTR_EN_EN must be explicitly disabled + * (even though it is auto-masked), otherwise a later command to + * enable it is not effective. + */ + if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) + ql_disable_completion_interrupt(qdev, 0); - var = ql_disable_completion_interrupt(qdev, intr_context->intr); + var = ql_read32(qdev, STS); /* * Check for fatal error. 
*/ if (var & STS_FE) { + ql_disable_completion_interrupt(qdev, 0); ql_queue_asic_error(qdev); netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); var = ql_read32(qdev, ERR_STS); @@ -2534,7 +2486,6 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) */ netif_err(qdev, intr, qdev->ndev, "Got MPI processor interrupt.\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); queue_delayed_work_on(smp_processor_id(), qdev->workqueue, &qdev->mpi_work, 0); @@ -2550,11 +2501,18 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) if (var & intr_context->irq_mask) { netif_info(qdev, intr, qdev->ndev, "Waking handler for rx_ring[0].\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); napi_schedule(&rx_ring->napi); work_done++; + } else { + /* Experience shows that the device sometimes signals an + * interrupt but no work is scheduled from this function. + * Nevertheless, the interrupt is auto-masked. Therefore, we + * systematically re-enable the interrupt if we didn't + * schedule napi. + */ + ql_enable_completion_interrupt(qdev, 0); } - ql_enable_completion_interrupt(qdev, intr_context->intr); + return work_done ? IRQ_HANDLED : IRQ_NONE; } @@ -3568,7 +3526,6 @@ static int ql_request_irq(struct ql_adapter *qdev) ql_resolve_queues_to_irqs(qdev); for (i = 0; i < qdev->intr_count; i++, intr_context++) { - atomic_set(&intr_context->irq_cnt, 0); if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { status = request_irq(qdev->msi_x_entry[i].vector, intr_context->handler, @@ -4653,7 +4610,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, goto err_out2; } qdev->msg_enable = netif_msg_init(debug, default_msg); - spin_lock_init(&qdev->hw_lock); spin_lock_init(&qdev->stats_lock); if (qlge_mpi_coredump) { diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c index 957c72985a06..9e422bbbb6ab 100644 --- a/drivers/staging/qlge/qlge_mpi.c +++ b/drivers/staging/qlge/qlge_mpi.c @@ -1257,7 +1257,6 @@ void ql_mpi_work(struct work_struct *work) /* End polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); mutex_unlock(&qdev->mpi_mutex); - ql_enable_completion_interrupt(qdev, 0); } void ql_mpi_reset_work(struct work_struct *work) -- cgit v1.2.3 From f70e8459fdea1905ca9bfbf987daf9f1a1c545e8 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:11:57 +0900 Subject: staging: qlge: Remove page_chunk.last_flag As already done in ql_get_curr_lchunk(), this member can be replaced by a simple test. 
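For illustration, the test that replaces the flag (a sketch using the names
already present in ql_free_lbq_buffers(), as in the hunk below) is a
comparison of the chunk offset against the last buffer-sized slot of the
master page:

	unsigned int last_offset = ql_lbq_block_size(qdev) -
				   rx_ring->lbq_buf_size;

	if (lbq_desc->p.pg_chunk.offset == last_offset)
		/* last chunk of the master page, so unmap it */
		pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);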
Signed-off-by: Benjamin Poirier Acked-by: Manish Chopra Link: https://lore.kernel.org/r/20190927101210.23856-4-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 1 - drivers/staging/qlge/qlge_main.c | 13 +++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 5d9a36deda08..0a156a95e981 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1363,7 +1363,6 @@ struct page_chunk { char *va; /* virt addr for this chunk */ u64 map; /* mapping for master */ unsigned int offset; /* offset for this chunk */ - unsigned int last_flag; /* flag set for last chunk in page */ }; struct bq_desc { diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 7a8d6390d5de..a82920776e6b 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1077,11 +1077,9 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { rx_ring->pg_chunk.page = NULL; - lbq_desc->p.pg_chunk.last_flag = 1; } else { rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; get_page(rx_ring->pg_chunk.page); - lbq_desc->p.pg_chunk.last_flag = 0; } return 0; } @@ -2778,6 +2776,8 @@ pci_alloc_err: static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { + unsigned int last_offset = ql_lbq_block_size(qdev) - + rx_ring->lbq_buf_size; struct bq_desc *lbq_desc; uint32_t curr_idx, clean_idx; @@ -2787,13 +2787,10 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring while (curr_idx != clean_idx) { lbq_desc = &rx_ring->lbq[curr_idx]; - if (lbq_desc->p.pg_chunk.last_flag) { - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), + if (lbq_desc->p.pg_chunk.offset == last_offset) + pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); - lbq_desc->p.pg_chunk.last_flag = 0; - } put_page(lbq_desc->p.pg_chunk.page); lbq_desc->p.pg_chunk.page = NULL; -- cgit v1.2.3 From 2b27fc39da55ee45ad30bcf2f7b4deb017cb89b8 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:11:58 +0900 Subject: staging: qlge: Deduplicate lbq_buf_size lbq_buf_size is duplicated to every rx_ring structure whereas lbq_buf_order is present once in the ql_adapter structure. All rings use the same buf size, keep only one copy of it. Also factor out the calculation of lbq_buf_size instead of having two copies. Signed-off-by: Benjamin Poirier Acked-by: Willem de Bruijn Link: https://lore.kernel.org/r/20190927101210.23856-5-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 2 +- drivers/staging/qlge/qlge_dbg.c | 2 +- drivers/staging/qlge/qlge_main.c | 61 +++++++++++++++++----------------------- 3 files changed, 28 insertions(+), 37 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 0a156a95e981..ba61b4559dd6 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1433,7 +1433,6 @@ struct rx_ring { /* Large buffer queue elements. 
*/ u32 lbq_len; /* entry count */ u32 lbq_size; /* size in bytes of queue */ - u32 lbq_buf_size; void *lbq_base; dma_addr_t lbq_base_dma; void *lbq_base_indirect; @@ -2108,6 +2107,7 @@ struct ql_adapter { struct rx_ring rx_ring[MAX_RX_RINGS]; struct tx_ring tx_ring[MAX_TX_RINGS]; unsigned int lbq_buf_order; + u32 lbq_buf_size; int rx_csum; u32 default_rx_queue; diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 5599525a19d5..718943e78406 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1629,6 +1629,7 @@ void ql_dump_qdev(struct ql_adapter *qdev) DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); + DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size); } #endif @@ -1773,7 +1774,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); - pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); pr_err("rx_ring->sbq_base_dma = %llx\n", diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index a82920776e6b..2b1cc4b29bed 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -995,15 +995,14 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr(lbq_desc, mapaddr), - rx_ring->lbq_buf_size, - PCI_DMA_FROMDEVICE); + dma_unmap_addr(lbq_desc, mapaddr), + qdev->lbq_buf_size, PCI_DMA_FROMDEVICE); /* If it's the last chunk of our master page then * we unmap it. */ - if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) - == ql_lbq_block_size(qdev)) + if (lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size == + ql_lbq_block_size(qdev)) pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map, ql_lbq_block_size(qdev), @@ -1074,11 +1073,11 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, /* Adjust the master page chunk for next * buffer get. */ - rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; + rx_ring->pg_chunk.offset += qdev->lbq_buf_size; if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { rx_ring->pg_chunk.page = NULL; } else { - rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; + rx_ring->pg_chunk.va += qdev->lbq_buf_size; get_page(rx_ring->pg_chunk.page); } return 0; @@ -1110,12 +1109,12 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) lbq_desc->p.pg_chunk.offset; dma_unmap_addr_set(lbq_desc, mapaddr, map); dma_unmap_len_set(lbq_desc, maplen, - rx_ring->lbq_buf_size); + qdev->lbq_buf_size); *lbq_desc->addr = cpu_to_le64(map); pci_dma_sync_single_for_device(qdev->pdev, map, - rx_ring->lbq_buf_size, - PCI_DMA_FROMDEVICE); + qdev->lbq_buf_size, + PCI_DMA_FROMDEVICE); clean_idx++; if (clean_idx == rx_ring->lbq_len) clean_idx = 0; @@ -1880,8 +1879,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, } do { lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - size = (length < rx_ring->lbq_buf_size) ? 
length : - rx_ring->lbq_buf_size; + size = min(length, qdev->lbq_buf_size); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Adding page %d to skb for %d bytes.\n", @@ -2776,12 +2774,12 @@ pci_alloc_err: static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { - unsigned int last_offset = ql_lbq_block_size(qdev) - - rx_ring->lbq_buf_size; + unsigned int last_offset; struct bq_desc *lbq_desc; uint32_t curr_idx, clean_idx; + last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size; curr_idx = rx_ring->lbq_curr_idx; clean_idx = rx_ring->lbq_clean_idx; while (curr_idx != clean_idx) { @@ -3149,8 +3147,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq_base_indirect_dma); - bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : - (u16) rx_ring->lbq_buf_size; + bq_len = (qdev->lbq_buf_size == 65536) ? 0 : + (u16)qdev->lbq_buf_size; cqicb->lbq_buf_size = cpu_to_le16(bq_len); bq_len = (rx_ring->lbq_len == 65536) ? 0 : (u16) rx_ring->lbq_len; @@ -4059,16 +4057,21 @@ static int qlge_close(struct net_device *ndev) return 0; } +static void qlge_set_lb_size(struct ql_adapter *qdev) +{ + if (qdev->ndev->mtu <= 1500) + qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE; + else + qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE; + qdev->lbq_buf_order = get_order(qdev->lbq_buf_size); +} + static int ql_configure_rings(struct ql_adapter *qdev) { int i; struct rx_ring *rx_ring; struct tx_ring *tx_ring; int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); - unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - - qdev->lbq_buf_order = get_order(lbq_buf_len); /* In a perfect world we have one RSS ring for each CPU * and each has it's own vector. To do that we ask for @@ -4116,7 +4119,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->lbq_len = NUM_LARGE_BUFFERS; rx_ring->lbq_size = rx_ring->lbq_len * sizeof(__le64); - rx_ring->lbq_buf_size = (u16)lbq_buf_len; rx_ring->sbq_len = NUM_SMALL_BUFFERS; rx_ring->sbq_size = rx_ring->sbq_len * sizeof(__le64); @@ -4132,7 +4134,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring->lbq_len = 0; rx_ring->lbq_size = 0; - rx_ring->lbq_buf_size = 0; rx_ring->sbq_len = 0; rx_ring->sbq_size = 0; rx_ring->sbq_buf_size = 0; @@ -4151,6 +4152,7 @@ static int qlge_open(struct net_device *ndev) if (err) return err; + qlge_set_lb_size(qdev); err = ql_configure_rings(qdev); if (err) return err; @@ -4172,9 +4174,7 @@ error_up: static int ql_change_rx_buffers(struct ql_adapter *qdev) { - struct rx_ring *rx_ring; - int i, status; - u32 lbq_buf_len; + int status; /* Wait for an outstanding reset to complete. */ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { @@ -4197,16 +4197,7 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev) if (status) goto error; - /* Get the new rx buffer size. */ - lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - qdev->lbq_buf_order = get_order(lbq_buf_len); - - for (i = 0; i < qdev->rss_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - /* Set the new size. 
*/ - rx_ring->lbq_buf_size = lbq_buf_len; - } + qlge_set_lb_size(qdev); status = ql_adapter_up(qdev); if (status) -- cgit v1.2.3 From a68a5b2fd3a2490b74cbdda53fb2de5302e973bb Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:11:59 +0900 Subject: staging: qlge: Remove bq_desc.maplen The size of the mapping is known statically in all cases, there's no need to save it at runtime. Remove this member. Signed-off-by: Benjamin Poirier Acked-by: Manish Chopra Link: https://lore.kernel.org/r/20190927101210.23856-6-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 1 - drivers/staging/qlge/qlge_main.c | 43 ++++++++++++++-------------------------- 2 files changed, 15 insertions(+), 29 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index ba61b4559dd6..f32da8c7679f 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1373,7 +1373,6 @@ struct bq_desc { __le64 *addr; u32 index; DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); }; #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 2b1cc4b29bed..34bc1d9560ce 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1108,8 +1108,6 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset; dma_unmap_addr_set(lbq_desc, mapaddr, map); - dma_unmap_len_set(lbq_desc, maplen, - qdev->lbq_buf_size); *lbq_desc->addr = cpu_to_le64(map); pci_dma_sync_single_for_device(qdev->pdev, map, @@ -1177,8 +1175,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) return; } dma_unmap_addr_set(sbq_desc, mapaddr, map); - dma_unmap_len_set(sbq_desc, maplen, - rx_ring->sbq_buf_size); *sbq_desc->addr = cpu_to_le64(map); } @@ -1598,14 +1594,14 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, pci_dma_sync_single_for_cpu(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); skb_put_data(new_skb, skb->data, length); pci_dma_sync_single_for_device(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); skb = new_skb; @@ -1727,8 +1723,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); skb = sbq_desc->p.skb; ql_realign_skb(skb, hdr_len); skb_put(skb, hdr_len); @@ -1758,19 +1753,15 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, */ sbq_desc = ql_get_curr_sbuf(rx_ring); pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr - (sbq_desc, mapaddr), - dma_unmap_len - (sbq_desc, maplen), + dma_unmap_addr(sbq_desc, + mapaddr), + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); skb_put_data(skb, sbq_desc->p.skb->data, length); pci_dma_sync_single_for_device(qdev->pdev, - dma_unmap_addr - (sbq_desc, - mapaddr), - dma_unmap_len - (sbq_desc, - maplen), + dma_unmap_addr(sbq_desc, + mapaddr), + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); } else { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, @@ -1781,10 +1772,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, 
ql_realign_skb(skb, length); skb_put(skb, length); pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, - mapaddr), - dma_unmap_len(sbq_desc, - maplen), + dma_unmap_addr(sbq_desc, mapaddr), + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); sbq_desc->p.skb = NULL; } @@ -1822,9 +1811,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, return NULL; } pci_unmap_page(qdev->pdev, - dma_unmap_addr(lbq_desc, - mapaddr), - dma_unmap_len(lbq_desc, maplen), + dma_unmap_addr(lbq_desc, mapaddr), + qdev->lbq_buf_size, PCI_DMA_FROMDEVICE); skb_reserve(skb, NET_IP_ALIGN); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, @@ -1858,8 +1846,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { /* * This is an non TCP/UDP IP frame, so @@ -2820,7 +2807,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring if (sbq_desc->p.skb) { pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), + rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); dev_kfree_skb(sbq_desc->p.skb); sbq_desc->p.skb = NULL; -- cgit v1.2.3 From 16714d98bf631909864f7eacd591ab5f7cf1588c Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:00 +0900 Subject: staging: qlge: Remove rx_ring.sbq_buf_size Tx completion rings have sbq_buf_size = 0 but there's no case where the code actually tests on that value. We can remove sbq_buf_size and use a constant instead. Signed-off-by: Benjamin Poirier Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/r/20190927101210.23856-7-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 1 - drivers/staging/qlge/qlge_dbg.c | 1 - drivers/staging/qlge/qlge_main.c | 24 ++++++++++-------------- 3 files changed, 10 insertions(+), 16 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index f32da8c7679f..a3a52bbc2821 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1447,7 +1447,6 @@ struct rx_ring { /* Small buffer queue elements. 
*/ u32 sbq_len; /* entry count */ u32 sbq_size; /* size in bytes of queue */ - u32 sbq_buf_size; void *sbq_base; dma_addr_t sbq_base_dma; void *sbq_base_indirect; diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 718943e78406..9f3f1b014023 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1791,7 +1791,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); - pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); pr_err("rx_ring->irq = %d\n", rx_ring->irq); pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 34bc1d9560ce..0a3809c50c10 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1164,7 +1164,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); map = pci_map_single(qdev->pdev, sbq_desc->p.skb->data, - rx_ring->sbq_buf_size, + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(qdev->pdev, map)) { netif_err(qdev, ifup, qdev->ndev, @@ -1594,14 +1594,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, pci_dma_sync_single_for_cpu(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, - PCI_DMA_FROMDEVICE); + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb_put_data(new_skb, skb->data, length); pci_dma_sync_single_for_device(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb = new_skb; @@ -1723,7 +1722,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb = sbq_desc->p.skb; ql_realign_skb(skb, hdr_len); skb_put(skb, hdr_len); @@ -1755,13 +1754,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, pci_dma_sync_single_for_cpu(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb_put_data(skb, sbq_desc->p.skb->data, length); pci_dma_sync_single_for_device(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); } else { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, @@ -1773,7 +1772,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb_put(skb, length); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); sbq_desc->p.skb = NULL; } @@ -1846,7 +1845,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { /* * This is an non TCP/UDP IP frame, so @@ -2807,7 +2806,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring if (sbq_desc->p.skb) { pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), - rx_ring->sbq_buf_size, + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); 
dev_kfree_skb(sbq_desc->p.skb); sbq_desc->p.skb = NULL; @@ -3158,8 +3157,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); cqicb->sbq_addr = cpu_to_le64(rx_ring->sbq_base_indirect_dma); - cqicb->sbq_buf_size = - cpu_to_le16((u16)(rx_ring->sbq_buf_size)); + cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUF_MAP_SIZE); bq_len = (rx_ring->sbq_len == 65536) ? 0 : (u16) rx_ring->sbq_len; cqicb->sbq_len = cpu_to_le16(bq_len); @@ -4109,7 +4107,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->sbq_len = NUM_SMALL_BUFFERS; rx_ring->sbq_size = rx_ring->sbq_len * sizeof(__le64); - rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; rx_ring->type = RX_Q; } else { /* @@ -4123,7 +4120,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->lbq_size = 0; rx_ring->sbq_len = 0; rx_ring->sbq_size = 0; - rx_ring->sbq_buf_size = 0; rx_ring->type = TX_Q; } } -- cgit v1.2.3 From cf1c2987bfd890a9c4ef3f174ed6148ec9b2b622 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:01 +0900 Subject: staging: qlge: Remove useless dma synchronization calls This is unneeded for two reasons: 1) the cpu does not write data for the device in the mapping 2) calls like ..._sync_..._for_device(..., ..._FROMDEVICE) are nonsensical, see commit 3f0fb4e85b38 ("Documentation/DMA-API-HOWTO.txt: fix misleading example") Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-8-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_main.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 0a3809c50c10..03403718a273 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1110,9 +1110,6 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) dma_unmap_addr_set(lbq_desc, mapaddr, map); *lbq_desc->addr = cpu_to_le64(map); - pci_dma_sync_single_for_device(qdev->pdev, map, - qdev->lbq_buf_size, - PCI_DMA_FROMDEVICE); clean_idx++; if (clean_idx == rx_ring->lbq_len) clean_idx = 0; @@ -1598,10 +1595,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, skb_put_data(new_skb, skb->data, length); - pci_dma_sync_single_for_device(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - SMALL_BUF_MAP_SIZE, - PCI_DMA_FROMDEVICE); skb = new_skb; /* Frame error, so drop the packet. */ @@ -1757,11 +1750,6 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb_put_data(skb, sbq_desc->p.skb->data, length); - pci_dma_sync_single_for_device(qdev->pdev, - dma_unmap_addr(sbq_desc, - mapaddr), - SMALL_BUF_MAP_SIZE, - PCI_DMA_FROMDEVICE); } else { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes in a single small buffer.\n", -- cgit v1.2.3 From 03a0e14bd8bc8df2ea478b336ee65f429375acbb Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:02 +0900 Subject: staging: qlge: Deduplicate rx buffer queue management The qlge driver (and device) uses two kinds of buffers for reception, so-called "small buffers" and "large buffers". The two are arranged in rings, the sbq and lbq. These two share similar data structures and code. Factor out data structures into a common struct qlge_bq, make required adjustments to code and dedup the most obvious cases of copy/paste. 
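For illustration, both rings are now refilled through a single helper (a
sketch taken from the hunks below); qlge_refill_bq() dispatches on the new
qlge_bq type field to either qlge_refill_sb() or qlge_refill_lb():

	static void ql_update_buffer_queues(struct rx_ring *rx_ring)
	{
		qlge_refill_bq(&rx_ring->sbq);
		qlge_refill_bq(&rx_ring->lbq);
	}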
This patch should not introduce any functional change other than to some of the printk format strings. Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-9-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 96 ++++--- drivers/staging/qlge/qlge_dbg.c | 60 ++-- drivers/staging/qlge/qlge_main.c | 573 +++++++++++++++++---------------------- 3 files changed, 335 insertions(+), 394 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index a3a52bbc2821..a84aa264dfa8 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1358,23 +1358,6 @@ struct tx_ring_desc { struct tx_ring_desc *next; }; -struct page_chunk { - struct page *page; /* master page */ - char *va; /* virt addr for this chunk */ - u64 map; /* mapping for master */ - unsigned int offset; /* offset for this chunk */ -}; - -struct bq_desc { - union { - struct page_chunk pg_chunk; - struct sk_buff *skb; - } p; - __le64 *addr; - u32 index; - DEFINE_DMA_UNMAP_ADDR(mapaddr); -}; - #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) struct tx_ring { @@ -1413,6 +1396,56 @@ enum { RX_Q = 4, /* Handles inbound completions. */ }; +struct qlge_page_chunk { + struct page *page; + void *va; /* virt addr including offset */ + unsigned int offset; +}; + +struct qlge_bq_desc { + union { + /* for large buffers */ + struct qlge_page_chunk pg_chunk; + /* for small buffers */ + struct sk_buff *skb; + } p; + dma_addr_t dma_addr; + /* address in ring where the buffer address (dma_addr) is written for + * the device + */ + __le64 *buf_ptr; + u32 index; + DEFINE_DMA_UNMAP_ADDR(mapaddr); +}; + +/* buffer queue */ +struct qlge_bq { + __le64 *base; + dma_addr_t base_dma; + __le64 *base_indirect; + dma_addr_t base_indirect_dma; + struct qlge_bq_desc *queue; + void __iomem *prod_idx_db_reg; + u32 len; /* entry count */ + u32 size; /* size in bytes of hw ring */ + u32 prod_idx; /* current sw prod idx */ + u32 curr_idx; /* next entry we expect */ + u32 clean_idx; /* beginning of new descs */ + u32 free_cnt; /* free buffer desc cnt */ + enum { + QLGE_SB, /* small buffer */ + QLGE_LB, /* large buffer */ + } type; +}; + +#define QLGE_BQ_CONTAINER(bq) \ +({ \ + typeof(bq) _bq = bq; \ + (struct rx_ring *)((char *)_bq - (_bq->type == QLGE_SB ? \ + offsetof(struct rx_ring, sbq) : \ + offsetof(struct rx_ring, lbq))); \ +}) + struct rx_ring { struct cqicb cqicb; /* The chip's completion queue init control block. */ @@ -1430,33 +1463,12 @@ struct rx_ring { void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ /* Large buffer queue elements. */ - u32 lbq_len; /* entry count */ - u32 lbq_size; /* size in bytes of queue */ - void *lbq_base; - dma_addr_t lbq_base_dma; - void *lbq_base_indirect; - dma_addr_t lbq_base_indirect_dma; - struct page_chunk pg_chunk; /* current page for chunks */ - struct bq_desc *lbq; /* array of control blocks */ - void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ - u32 lbq_prod_idx; /* current sw prod idx */ - u32 lbq_curr_idx; /* next entry we expect */ - u32 lbq_clean_idx; /* beginning of new descs */ - u32 lbq_free_cnt; /* free buffer desc cnt */ + struct qlge_bq lbq; + struct qlge_page_chunk master_chunk; + dma_addr_t chunk_dma_addr; /* Small buffer queue elements. 
*/ - u32 sbq_len; /* entry count */ - u32 sbq_size; /* size in bytes of queue */ - void *sbq_base; - dma_addr_t sbq_base_dma; - void *sbq_base_indirect; - dma_addr_t sbq_base_indirect_dma; - struct bq_desc *sbq; /* array of control blocks */ - void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ - u32 sbq_prod_idx; /* current sw prod idx */ - u32 sbq_curr_idx; /* next entry we expect */ - u32 sbq_clean_idx; /* beginning of new descs */ - u32 sbq_free_cnt; /* free buffer desc cnt */ + struct qlge_bq sbq; /* Misc. handler elements. */ u32 type; /* Type of queue, tx, rx. */ diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 9f3f1b014023..e8ad8209d487 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1758,39 +1758,39 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); - pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); - pr_err("rx_ring->lbq_base_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_dma); - pr_err("rx_ring->lbq_base_indirect = %p\n", - rx_ring->lbq_base_indirect); - pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_indirect_dma); - pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); - pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); - pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); - pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", - rx_ring->lbq_prod_idx_db_reg); - pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); - pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); + pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base); + pr_err("rx_ring->lbq.base_dma = %llx\n", + (unsigned long long)rx_ring->lbq.base_dma); + pr_err("rx_ring->lbq.base_indirect = %p\n", + rx_ring->lbq.base_indirect); + pr_err("rx_ring->lbq.base_indirect_dma = %llx\n", + (unsigned long long)rx_ring->lbq.base_indirect_dma); + pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue); + pr_err("rx_ring->lbq.len = %d\n", rx_ring->lbq.len); + pr_err("rx_ring->lbq.size = %d\n", rx_ring->lbq.size); + pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n", + rx_ring->lbq.prod_idx_db_reg); + pr_err("rx_ring->lbq.prod_idx = %d\n", rx_ring->lbq.prod_idx); + pr_err("rx_ring->lbq.curr_idx = %d\n", rx_ring->lbq.curr_idx); pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); - pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); - pr_err("rx_ring->sbq_base_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_dma); - pr_err("rx_ring->sbq_base_indirect = %p\n", - rx_ring->sbq_base_indirect); - pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_indirect_dma); - pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); - pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); - pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); - pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", - rx_ring->sbq_prod_idx_db_reg); - pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); - pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); - pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); - pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); + pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base); + pr_err("rx_ring->sbq.base_dma = %llx\n", + (unsigned long long)rx_ring->sbq.base_dma); + pr_err("rx_ring->sbq.base_indirect = %p\n", + 
rx_ring->sbq.base_indirect); + pr_err("rx_ring->sbq.base_indirect_dma = %llx\n", + (unsigned long long)rx_ring->sbq.base_indirect_dma); + pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue); + pr_err("rx_ring->sbq.len = %d\n", rx_ring->sbq.len); + pr_err("rx_ring->sbq.size = %d\n", rx_ring->sbq.size); + pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n", + rx_ring->sbq.prod_idx_db_reg); + pr_err("rx_ring->sbq.prod_idx = %d\n", rx_ring->sbq.prod_idx); + pr_err("rx_ring->sbq.curr_idx = %d\n", rx_ring->sbq.curr_idx); + pr_err("rx_ring->sbq.clean_idx = %d\n", rx_ring->sbq.clean_idx); + pr_err("rx_ring->sbq.free_cnt = %d\n", rx_ring->sbq.free_cnt); pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); pr_err("rx_ring->irq = %d\n", rx_ring->irq); pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 03403718a273..ba133d1f2b74 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -978,47 +978,36 @@ static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) return PAGE_SIZE << qdev->lbq_buf_order; } -/* Get the next large buffer. */ -static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; - rx_ring->lbq_curr_idx++; - if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) - rx_ring->lbq_curr_idx = 0; - rx_ring->lbq_free_cnt++; - return lbq_desc; +static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq) +{ + struct qlge_bq_desc *bq_desc; + + bq_desc = &bq->queue[bq->curr_idx++]; + if (bq->curr_idx == bq->len) + bq->curr_idx = 0; + bq->free_cnt++; + + return bq_desc; } -static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, - struct rx_ring *rx_ring) +static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, + struct rx_ring *rx_ring) { - struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); + struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq); pci_dma_sync_single_for_cpu(qdev->pdev, dma_unmap_addr(lbq_desc, mapaddr), qdev->lbq_buf_size, PCI_DMA_FROMDEVICE); - /* If it's the last chunk of our master page then - * we unmap it. - */ - if (lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size == - ql_lbq_block_size(qdev)) - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - return lbq_desc; -} + if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) == + ql_lbq_block_size(qdev)) { + /* last chunk of the master page */ + pci_unmap_page(qdev->pdev, lbq_desc->dma_addr - + lbq_desc->p.pg_chunk.offset, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); + } -/* Get the next small buffer. */ -static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) -{ - struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; - rx_ring->sbq_curr_idx++; - if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_free_cnt++; - return sbq_desc; + return lbq_desc; } /* Update an rx ring index. 
*/ @@ -1037,169 +1026,159 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring) ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); } -static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, - struct bq_desc *lbq_desc) +static const char * const bq_type_name[] = { + [QLGE_SB] = "sbq", + [QLGE_LB] = "lbq", +}; + +/* return size of allocated buffer (may be 0) or negative error */ +static int qlge_refill_sb(struct rx_ring *rx_ring, + struct qlge_bq_desc *sbq_desc) { - if (!rx_ring->pg_chunk.page) { - u64 map; - rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC, - qdev->lbq_buf_order); - if (unlikely(!rx_ring->pg_chunk.page)) { - netif_err(qdev, drv, qdev->ndev, - "page allocation failed.\n"); + struct ql_adapter *qdev = rx_ring->qdev; + struct sk_buff *skb; + + if (sbq_desc->p.skb) + return 0; + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "ring %u sbq: getting new skb for index %d.\n", + rx_ring->cq_id, sbq_desc->index); + + skb = netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE); + if (!skb) + return -ENOMEM; + skb_reserve(skb, QLGE_SB_PAD); + + sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data, + SMALL_BUF_MAP_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) { + netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + sbq_desc->p.skb = skb; + return SMALL_BUFFER_SIZE; +} + +/* return size of allocated buffer or negative error */ +static int qlge_refill_lb(struct rx_ring *rx_ring, + struct qlge_bq_desc *lbq_desc) +{ + struct ql_adapter *qdev = rx_ring->qdev; + struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk; + + if (!master_chunk->page) { + struct page *page; + dma_addr_t dma_addr; + + page = alloc_pages(__GFP_COMP | GFP_ATOMIC, + qdev->lbq_buf_order); + if (unlikely(!page)) return -ENOMEM; - } - rx_ring->pg_chunk.offset = 0; - map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, - 0, ql_lbq_block_size(qdev), + dma_addr = pci_map_page(qdev->pdev, page, 0, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - __free_pages(rx_ring->pg_chunk.page, - qdev->lbq_buf_order); - rx_ring->pg_chunk.page = NULL; + if (pci_dma_mapping_error(qdev->pdev, dma_addr)) { + __free_pages(page, qdev->lbq_buf_order); netif_err(qdev, drv, qdev->ndev, "PCI mapping failed.\n"); - return -ENOMEM; + return -EIO; } - rx_ring->pg_chunk.map = map; - rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); + master_chunk->page = page; + master_chunk->va = page_address(page); + master_chunk->offset = 0; + rx_ring->chunk_dma_addr = dma_addr; } - /* Copy the current master pg_chunk info - * to the current descriptor. - */ - lbq_desc->p.pg_chunk = rx_ring->pg_chunk; + lbq_desc->p.pg_chunk = *master_chunk; + lbq_desc->dma_addr = rx_ring->chunk_dma_addr + master_chunk->offset; /* Adjust the master page chunk for next * buffer get. */ - rx_ring->pg_chunk.offset += qdev->lbq_buf_size; - if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { - rx_ring->pg_chunk.page = NULL; + master_chunk->offset += qdev->lbq_buf_size; + if (master_chunk->offset == ql_lbq_block_size(qdev)) { + master_chunk->page = NULL; } else { - rx_ring->pg_chunk.va += qdev->lbq_buf_size; - get_page(rx_ring->pg_chunk.page); + master_chunk->va += qdev->lbq_buf_size; + get_page(master_chunk->page); } - return 0; + + return qdev->lbq_buf_size; } -/* Process (refill) a large buffer queue. 
*/ -static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) + +static void qlge_refill_bq(struct qlge_bq *bq) { - u32 clean_idx = rx_ring->lbq_clean_idx; + struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); + struct ql_adapter *qdev = rx_ring->qdev; + u32 clean_idx = bq->clean_idx; + unsigned int reserved_count; u32 start_idx = clean_idx; - struct bq_desc *lbq_desc; - u64 map; int i; - while (rx_ring->lbq_free_cnt > 32) { - for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) { + if (bq->type == QLGE_SB) + reserved_count = 16; + else + reserved_count = 32; + + while (bq->free_cnt > reserved_count) { + for (i = (bq->clean_idx % 16); i < 16; i++) { + struct qlge_bq_desc *bq_desc = &bq->queue[clean_idx]; + int retval; + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: try cleaning clean_idx = %d.\n", + "ring %u %s: try cleaning clean_idx = %d.\n", + rx_ring->cq_id, bq_type_name[bq->type], clean_idx); - lbq_desc = &rx_ring->lbq[clean_idx]; - if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { - rx_ring->lbq_clean_idx = clean_idx; + + if (bq->type == QLGE_SB) + retval = qlge_refill_sb(rx_ring, bq_desc); + else + retval = qlge_refill_lb(rx_ring, bq_desc); + + if (retval > 0) { + dma_unmap_addr_set(bq_desc, mapaddr, + bq_desc->dma_addr); + *bq_desc->buf_ptr = + cpu_to_le64(bq_desc->dma_addr); + } else if (retval < 0) { + bq->clean_idx = clean_idx; netif_err(qdev, ifup, qdev->ndev, - "Could not get a page chunk, i=%d, clean_idx =%d .\n", - i, clean_idx); + "ring %u %s: Could not get a page chunk, i=%d, clean_idx =%d .\n", + rx_ring->cq_id, + bq_type_name[bq->type], i, + clean_idx); return; } - map = lbq_desc->p.pg_chunk.map + - lbq_desc->p.pg_chunk.offset; - dma_unmap_addr_set(lbq_desc, mapaddr, map); - *lbq_desc->addr = cpu_to_le64(map); - clean_idx++; - if (clean_idx == rx_ring->lbq_len) + if (clean_idx == bq->len) clean_idx = 0; } - rx_ring->lbq_clean_idx = clean_idx; - rx_ring->lbq_prod_idx += 16; - if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_free_cnt -= 16; - } - - if (start_idx != clean_idx) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: updating prod idx = %d.\n", - rx_ring->lbq_prod_idx); - ql_write_db_reg(rx_ring->lbq_prod_idx, - rx_ring->lbq_prod_idx_db_reg); - } -} - -/* Process (refill) a small buffer queue. 
*/ -static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - u32 clean_idx = rx_ring->sbq_clean_idx; - u32 start_idx = clean_idx; - struct bq_desc *sbq_desc; - u64 map; - int i; - - while (rx_ring->sbq_free_cnt > 16) { - for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) { - sbq_desc = &rx_ring->sbq[clean_idx]; - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: try cleaning clean_idx = %d.\n", - clean_idx); - if (sbq_desc->p.skb == NULL) { - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "sbq: getting new skb for index %d.\n", - sbq_desc->index); - sbq_desc->p.skb = - netdev_alloc_skb(qdev->ndev, - SMALL_BUFFER_SIZE); - if (sbq_desc->p.skb == NULL) { - rx_ring->sbq_clean_idx = clean_idx; - return; - } - skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); - map = pci_map_single(qdev->pdev, - sbq_desc->p.skb->data, - SMALL_BUF_MAP_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - netif_err(qdev, ifup, qdev->ndev, - "PCI mapping failed.\n"); - rx_ring->sbq_clean_idx = clean_idx; - dev_kfree_skb_any(sbq_desc->p.skb); - sbq_desc->p.skb = NULL; - return; - } - dma_unmap_addr_set(sbq_desc, mapaddr, map); - *sbq_desc->addr = cpu_to_le64(map); - } - - clean_idx++; - if (clean_idx == rx_ring->sbq_len) - clean_idx = 0; - } - rx_ring->sbq_clean_idx = clean_idx; - rx_ring->sbq_prod_idx += 16; - if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_free_cnt -= 16; + bq->clean_idx = clean_idx; + bq->prod_idx += 16; + if (bq->prod_idx == bq->len) + bq->prod_idx = 0; + bq->free_cnt -= 16; } if (start_idx != clean_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: updating prod idx = %d.\n", - rx_ring->sbq_prod_idx); - ql_write_db_reg(rx_ring->sbq_prod_idx, - rx_ring->sbq_prod_idx_db_reg); + "ring %u %s: updating prod idx = %d.\n", + rx_ring->cq_id, bq_type_name[bq->type], + bq->prod_idx); + ql_write_db_reg(bq->prod_idx, bq->prod_idx_db_reg); } } -static void ql_update_buffer_queues(struct ql_adapter *qdev, - struct rx_ring *rx_ring) +static void ql_update_buffer_queues(struct rx_ring *rx_ring) { - ql_update_sbq(qdev, rx_ring); - ql_update_lbq(qdev, rx_ring); + qlge_refill_bq(&rx_ring->sbq); + qlge_refill_bq(&rx_ring->lbq); } /* Unmaps tx buffers. Can be called from send() if a pci mapping @@ -1436,7 +1415,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, u16 vlan_id) { struct sk_buff *skb; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; /* Frame error, so drop the packet. 
*/ @@ -1485,7 +1464,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, struct net_device *ndev = qdev->ndev; struct sk_buff *skb = NULL; void *addr; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; size_t hlen = ETH_HLEN; @@ -1575,10 +1554,9 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, u32 length, u16 vlan_id) { + struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - struct sk_buff *new_skb = NULL; - struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); + struct sk_buff *skb, *new_skb; skb = sbq_desc->p.skb; /* Allocate new_skb and copy */ @@ -1695,11 +1673,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp) { - struct bq_desc *lbq_desc; - struct bq_desc *sbq_desc; - struct sk_buff *skb = NULL; u32 length = le32_to_cpu(ib_mac_rsp->data_len); u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); + struct qlge_bq_desc *lbq_desc, *sbq_desc; + struct sk_buff *skb = NULL; size_t hlen = ETH_HLEN; /* @@ -1712,7 +1689,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, /* * Headers fit nicely into a small buffer. */ - sbq_desc = ql_get_curr_sbuf(rx_ring); + sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); @@ -1743,7 +1720,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, * from the "data" small buffer to the "header" small * buffer. */ - sbq_desc = ql_get_curr_sbuf(rx_ring); + sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); pci_dma_sync_single_for_cpu(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), @@ -1754,7 +1731,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes in a single small buffer.\n", length); - sbq_desc = ql_get_curr_sbuf(rx_ring); + sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); skb = sbq_desc->p.skb; ql_realign_skb(skb, length); skb_put(skb, length); @@ -1830,7 +1807,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, * eventually be in trouble. 
*/ int size, i = 0; - sbq_desc = ql_get_curr_sbuf(rx_ring); + sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); @@ -2207,7 +2184,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) if (count == budget) break; } - ql_update_buffer_queues(qdev, rx_ring); + ql_update_buffer_queues(rx_ring); ql_write_cq_idx(rx_ring); return count; } @@ -2749,43 +2726,42 @@ pci_alloc_err: static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { unsigned int last_offset; - struct bq_desc *lbq_desc; uint32_t curr_idx, clean_idx; last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size; - curr_idx = rx_ring->lbq_curr_idx; - clean_idx = rx_ring->lbq_clean_idx; + curr_idx = rx_ring->lbq.curr_idx; + clean_idx = rx_ring->lbq.clean_idx; while (curr_idx != clean_idx) { - lbq_desc = &rx_ring->lbq[curr_idx]; + struct qlge_bq_desc *lbq_desc = &rx_ring->lbq.queue[curr_idx]; if (lbq_desc->p.pg_chunk.offset == last_offset) - pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), + pci_unmap_page(qdev->pdev, lbq_desc->dma_addr - + last_offset, ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); put_page(lbq_desc->p.pg_chunk.page); lbq_desc->p.pg_chunk.page = NULL; - if (++curr_idx == rx_ring->lbq_len) + if (++curr_idx == rx_ring->lbq.len) curr_idx = 0; - } - if (rx_ring->pg_chunk.page) { - pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, - ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); - put_page(rx_ring->pg_chunk.page); - rx_ring->pg_chunk.page = NULL; + + if (rx_ring->master_chunk.page) { + pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); + put_page(rx_ring->master_chunk.page); + rx_ring->master_chunk.page = NULL; } } static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { int i; - struct bq_desc *sbq_desc; - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; + for (i = 0; i < rx_ring->sbq.len; i++) { + struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i]; + if (sbq_desc == NULL) { netif_err(qdev, ifup, qdev->ndev, "sbq_desc %d is NULL.\n", i); @@ -2808,13 +2784,13 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring static void ql_free_rx_buffers(struct ql_adapter *qdev) { int i; - struct rx_ring *rx_ring; for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->lbq) + struct rx_ring *rx_ring = &qdev->rx_ring[i]; + + if (rx_ring->lbq.queue) ql_free_lbq_buffers(qdev, rx_ring); - if (rx_ring->sbq) + if (rx_ring->sbq.queue) ql_free_sbq_buffers(qdev, rx_ring); } } @@ -2827,70 +2803,70 @@ static void ql_alloc_rx_buffers(struct ql_adapter *qdev) for (i = 0; i < qdev->rx_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; if (rx_ring->type != TX_Q) - ql_update_buffer_queues(qdev, rx_ring); + ql_update_buffer_queues(rx_ring); } } -static void ql_init_lbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) +static int qlge_init_bq(struct qlge_bq *bq) { + struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); + struct ql_adapter *qdev = rx_ring->qdev; + struct qlge_bq_desc *bq_desc; + __le64 *buf_ptr; int i; - struct bq_desc *lbq_desc; - __le64 *bq = rx_ring->lbq_base; - memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->lbq_len; i++) { - lbq_desc = &rx_ring->lbq[i]; - memset(lbq_desc, 0, sizeof(*lbq_desc)); - lbq_desc->index = i; - lbq_desc->addr = bq; - bq++; 
+ bq->base = pci_alloc_consistent(qdev->pdev, bq->size, &bq->base_dma); + if (!bq->base) { + netif_err(qdev, ifup, qdev->ndev, + "ring %u %s allocation failed.\n", rx_ring->cq_id, + bq_type_name[bq->type]); + return -ENOMEM; } -} -static void ql_init_sbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *sbq_desc; - __le64 *bq = rx_ring->sbq_base; + bq->queue = kmalloc_array(bq->len, sizeof(struct qlge_bq_desc), + GFP_KERNEL); + if (!bq->queue) + return -ENOMEM; - memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; - memset(sbq_desc, 0, sizeof(*sbq_desc)); - sbq_desc->index = i; - sbq_desc->addr = bq; - bq++; + memset(bq->queue, 0, bq->len * sizeof(struct qlge_bq_desc)); + + buf_ptr = bq->base; + bq_desc = &bq->queue[0]; + for (i = 0; i < bq->len; i++, buf_ptr++, bq_desc++) { + memset(bq_desc, 0, sizeof(*bq_desc)); + bq_desc->index = i; + bq_desc->buf_ptr = buf_ptr; } + + return 0; } static void ql_free_rx_resources(struct ql_adapter *qdev, struct rx_ring *rx_ring) { /* Free the small buffer queue. */ - if (rx_ring->sbq_base) { + if (rx_ring->sbq.base) { pci_free_consistent(qdev->pdev, - rx_ring->sbq_size, - rx_ring->sbq_base, rx_ring->sbq_base_dma); - rx_ring->sbq_base = NULL; + rx_ring->sbq.size, + rx_ring->sbq.base, rx_ring->sbq.base_dma); + rx_ring->sbq.base = NULL; } /* Free the small buffer queue control blocks. */ - kfree(rx_ring->sbq); - rx_ring->sbq = NULL; + kfree(rx_ring->sbq.queue); + rx_ring->sbq.queue = NULL; /* Free the large buffer queue. */ - if (rx_ring->lbq_base) { + if (rx_ring->lbq.base) { pci_free_consistent(qdev->pdev, - rx_ring->lbq_size, - rx_ring->lbq_base, rx_ring->lbq_base_dma); - rx_ring->lbq_base = NULL; + rx_ring->lbq.size, + rx_ring->lbq.base, rx_ring->lbq.base_dma); + rx_ring->lbq.base = NULL; } /* Free the large buffer queue control blocks. */ - kfree(rx_ring->lbq); - rx_ring->lbq = NULL; + kfree(rx_ring->lbq.queue); + rx_ring->lbq.queue = NULL; /* Free the rx queue. */ if (rx_ring->cq_base) { @@ -2919,56 +2895,10 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, return -ENOMEM; } - if (rx_ring->sbq_len) { - /* - * Allocate small buffer queue. - */ - rx_ring->sbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, - &rx_ring->sbq_base_dma); - - if (rx_ring->sbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Small buffer queue allocation failed.\n"); - goto err_mem; - } - - /* - * Allocate small buffer queue control blocks. - */ - rx_ring->sbq = kmalloc_array(rx_ring->sbq_len, - sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->sbq == NULL) - goto err_mem; - - ql_init_sbq_ring(qdev, rx_ring); - } - - if (rx_ring->lbq_len) { - /* - * Allocate large buffer queue. - */ - rx_ring->lbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, - &rx_ring->lbq_base_dma); - - if (rx_ring->lbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Large buffer queue allocation failed.\n"); - goto err_mem; - } - /* - * Allocate large buffer queue control blocks. 
- */ - rx_ring->lbq = kmalloc_array(rx_ring->lbq_len, - sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->lbq == NULL) - goto err_mem; - - ql_init_lbq_ring(qdev, rx_ring); - } + if (rx_ring->sbq.len && qlge_init_bq(&rx_ring->sbq)) + goto err_mem; + if (rx_ring->lbq.len && qlge_init_bq(&rx_ring->lbq)) + goto err_mem; return 0; @@ -3071,12 +3001,12 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) *rx_ring->prod_idx_sh_reg = 0; shadow_reg += sizeof(u64); shadow_reg_dma += sizeof(u64); - rx_ring->lbq_base_indirect = shadow_reg; - rx_ring->lbq_base_indirect_dma = shadow_reg_dma; - shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - rx_ring->sbq_base_indirect = shadow_reg; - rx_ring->sbq_base_indirect_dma = shadow_reg_dma; + rx_ring->lbq.base_indirect = shadow_reg; + rx_ring->lbq.base_indirect_dma = shadow_reg_dma; + shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len)); + shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len)); + rx_ring->sbq.base_indirect = shadow_reg; + rx_ring->sbq.base_indirect_dma = shadow_reg_dma; /* PCI doorbell mem area + 0x00 for consumer index register */ rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; @@ -3087,10 +3017,10 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) rx_ring->valid_db_reg = doorbell_area + 0x04; /* PCI doorbell mem area + 0x18 for large buffer consumer */ - rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); + rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18); /* PCI doorbell mem area + 0x1c */ - rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); + rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c); memset((void *)cqicb, 0, sizeof(struct cqicb)); cqicb->msix_vect = rx_ring->irq; @@ -3108,51 +3038,50 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) cqicb->flags = FLAGS_LC | /* Load queue base address */ FLAGS_LV | /* Load MSI-X vector */ FLAGS_LI; /* Load irq delay values */ - if (rx_ring->lbq_len) { + if (rx_ring->lbq.len) { cqicb->flags |= FLAGS_LL; /* Load lbq values */ - tmp = (u64)rx_ring->lbq_base_dma; - base_indirect_ptr = rx_ring->lbq_base_indirect; + tmp = (u64)rx_ring->lbq.base_dma; + base_indirect_ptr = rx_ring->lbq.base_indirect; page_entries = 0; do { *base_indirect_ptr = cpu_to_le64(tmp); tmp += DB_PAGE_SIZE; base_indirect_ptr++; page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - cqicb->lbq_addr = - cpu_to_le64(rx_ring->lbq_base_indirect_dma); - bq_len = (qdev->lbq_buf_size == 65536) ? 0 : + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len)); + cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma); + bq_len = qdev->lbq_buf_size == 65536 ? 0 : (u16)qdev->lbq_buf_size; cqicb->lbq_buf_size = cpu_to_le16(bq_len); - bq_len = (rx_ring->lbq_len == 65536) ? 0 : - (u16) rx_ring->lbq_len; + bq_len = (rx_ring->lbq.len == 65536) ? 
0 : + (u16)rx_ring->lbq.len; cqicb->lbq_len = cpu_to_le16(bq_len); - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_curr_idx = 0; - rx_ring->lbq_clean_idx = 0; - rx_ring->lbq_free_cnt = rx_ring->lbq_len; + rx_ring->lbq.prod_idx = 0; + rx_ring->lbq.curr_idx = 0; + rx_ring->lbq.clean_idx = 0; + rx_ring->lbq.free_cnt = rx_ring->lbq.len; } - if (rx_ring->sbq_len) { + if (rx_ring->sbq.len) { cqicb->flags |= FLAGS_LS; /* Load sbq values */ - tmp = (u64)rx_ring->sbq_base_dma; - base_indirect_ptr = rx_ring->sbq_base_indirect; + tmp = (u64)rx_ring->sbq.base_dma; + base_indirect_ptr = rx_ring->sbq.base_indirect; page_entries = 0; do { *base_indirect_ptr = cpu_to_le64(tmp); tmp += DB_PAGE_SIZE; base_indirect_ptr++; page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq.len)); cqicb->sbq_addr = - cpu_to_le64(rx_ring->sbq_base_indirect_dma); - cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUF_MAP_SIZE); - bq_len = (rx_ring->sbq_len == 65536) ? 0 : - (u16) rx_ring->sbq_len; + cpu_to_le64(rx_ring->sbq.base_indirect_dma); + cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE); + bq_len = (rx_ring->sbq.len == 65536) ? 0 : + (u16)rx_ring->sbq.len; cqicb->sbq_len = cpu_to_le16(bq_len); - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_clean_idx = 0; - rx_ring->sbq_free_cnt = rx_ring->sbq_len; + rx_ring->sbq.prod_idx = 0; + rx_ring->sbq.curr_idx = 0; + rx_ring->sbq.clean_idx = 0; + rx_ring->sbq.free_cnt = rx_ring->sbq.len; } switch (rx_ring->type) { case TX_Q: @@ -4089,12 +4018,12 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->cq_len = qdev->rx_ring_size; rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = NUM_LARGE_BUFFERS; - rx_ring->lbq_size = - rx_ring->lbq_len * sizeof(__le64); - rx_ring->sbq_len = NUM_SMALL_BUFFERS; - rx_ring->sbq_size = - rx_ring->sbq_len * sizeof(__le64); + rx_ring->lbq.type = QLGE_LB; + rx_ring->lbq.len = NUM_LARGE_BUFFERS; + rx_ring->lbq.size = rx_ring->lbq.len * sizeof(__le64); + rx_ring->sbq.type = QLGE_SB; + rx_ring->sbq.len = NUM_SMALL_BUFFERS; + rx_ring->sbq.size = rx_ring->sbq.len * sizeof(__le64); rx_ring->type = RX_Q; } else { /* @@ -4104,10 +4033,10 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->cq_len = qdev->tx_ring_size; rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = 0; - rx_ring->lbq_size = 0; - rx_ring->sbq_len = 0; - rx_ring->sbq_size = 0; + rx_ring->lbq.len = 0; + rx_ring->lbq.size = 0; + rx_ring->sbq.len = 0; + rx_ring->sbq.size = 0; rx_ring->type = TX_Q; } } -- cgit v1.2.3 From 6f5740b1d35ea9d25b895c0eb836aaa7b1ee427b Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:03 +0900 Subject: staging: qlge: Fix dma_sync_single calls Using the unmap addr elsewhere than unmap calls is a misuse of the dma api. 
In prevision of this fix, qlge kept two copies of the dma address around ;) Fixes: c4e84bde1d59 ("qlge: New Qlogic 10Gb Ethernet Driver.") Fixes: 7c734359d350 ("qlge: Size RX buffers based on MTU.") Fixes: 2c9a266afefe ("qlge: Fix receive packets drop.") Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-10-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 5 +--- drivers/staging/qlge/qlge_main.c | 54 ++++++++++++++++------------------------ 2 files changed, 22 insertions(+), 37 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index a84aa264dfa8..519fa39dd194 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1410,12 +1410,9 @@ struct qlge_bq_desc { struct sk_buff *skb; } p; dma_addr_t dma_addr; - /* address in ring where the buffer address (dma_addr) is written for - * the device - */ + /* address in ring where the buffer address is written for the device */ __le64 *buf_ptr; u32 index; - DEFINE_DMA_UNMAP_ADDR(mapaddr); }; /* buffer queue */ diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index ba133d1f2b74..609a87804a94 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -995,15 +995,13 @@ static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, { struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq); - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr(lbq_desc, mapaddr), + pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr, qdev->lbq_buf_size, PCI_DMA_FROMDEVICE); if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) == ql_lbq_block_size(qdev)) { /* last chunk of the master page */ - pci_unmap_page(qdev->pdev, lbq_desc->dma_addr - - lbq_desc->p.pg_chunk.offset, + pci_unmap_page(qdev->pdev, lbq_desc->dma_addr, ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); } @@ -1031,7 +1029,7 @@ static const char * const bq_type_name[] = { [QLGE_LB] = "lbq", }; -/* return size of allocated buffer (may be 0) or negative error */ +/* return 0 or negative error */ static int qlge_refill_sb(struct rx_ring *rx_ring, struct qlge_bq_desc *sbq_desc) { @@ -1058,12 +1056,13 @@ static int qlge_refill_sb(struct rx_ring *rx_ring, dev_kfree_skb_any(skb); return -EIO; } + *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr); sbq_desc->p.skb = skb; - return SMALL_BUFFER_SIZE; + return 0; } -/* return size of allocated buffer or negative error */ +/* return 0 or negative error */ static int qlge_refill_lb(struct rx_ring *rx_ring, struct qlge_bq_desc *lbq_desc) { @@ -1094,7 +1093,9 @@ static int qlge_refill_lb(struct rx_ring *rx_ring, } lbq_desc->p.pg_chunk = *master_chunk; - lbq_desc->dma_addr = rx_ring->chunk_dma_addr + master_chunk->offset; + lbq_desc->dma_addr = rx_ring->chunk_dma_addr; + *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr + + lbq_desc->p.pg_chunk.offset); /* Adjust the master page chunk for next * buffer get. 
@@ -1107,7 +1108,7 @@ static int qlge_refill_lb(struct rx_ring *rx_ring, get_page(master_chunk->page); } - return qdev->lbq_buf_size; + return 0; } static void qlge_refill_bq(struct qlge_bq *bq) @@ -1138,13 +1139,7 @@ static void qlge_refill_bq(struct qlge_bq *bq) retval = qlge_refill_sb(rx_ring, bq_desc); else retval = qlge_refill_lb(rx_ring, bq_desc); - - if (retval > 0) { - dma_unmap_addr_set(bq_desc, mapaddr, - bq_desc->dma_addr); - *bq_desc->buf_ptr = - cpu_to_le64(bq_desc->dma_addr); - } else if (retval < 0) { + if (retval < 0) { bq->clean_idx = clean_idx; netif_err(qdev, ifup, qdev->ndev, "ring %u %s: Could not get a page chunk, i=%d, clean_idx =%d .\n", @@ -1567,8 +1562,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, } skb_reserve(new_skb, NET_IP_ALIGN); - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), + pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr, SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb_put_data(new_skb, skb->data, length); @@ -1690,9 +1684,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, * Headers fit nicely into a small buffer. */ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); + pci_unmap_single(qdev->pdev, sbq_desc->dma_addr, + SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb = sbq_desc->p.skb; ql_realign_skb(skb, hdr_len); skb_put(skb, hdr_len); @@ -1722,8 +1715,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, */ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr(sbq_desc, - mapaddr), + sbq_desc->dma_addr, SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); skb_put_data(skb, sbq_desc->p.skb->data, length); @@ -1735,8 +1727,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb = sbq_desc->p.skb; ql_realign_skb(skb, length); skb_put(skb, length); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), + pci_unmap_single(qdev->pdev, sbq_desc->dma_addr, SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); sbq_desc->p.skb = NULL; @@ -1774,8 +1765,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, "No skb available, drop the packet.\n"); return NULL; } - pci_unmap_page(qdev->pdev, - dma_unmap_addr(lbq_desc, mapaddr), + pci_unmap_page(qdev->pdev, lbq_desc->dma_addr, qdev->lbq_buf_size, PCI_DMA_FROMDEVICE); skb_reserve(skb, NET_IP_ALIGN); @@ -1808,8 +1798,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, */ int size, i = 0; sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), + pci_unmap_single(qdev->pdev, sbq_desc->dma_addr, SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE); if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { /* @@ -2736,8 +2725,8 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring struct qlge_bq_desc *lbq_desc = &rx_ring->lbq.queue[curr_idx]; if (lbq_desc->p.pg_chunk.offset == last_offset) - pci_unmap_page(qdev->pdev, lbq_desc->dma_addr - - last_offset, ql_lbq_block_size(qdev), + pci_unmap_page(qdev->pdev, lbq_desc->dma_addr, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); put_page(lbq_desc->p.pg_chunk.page); @@ -2768,8 +2757,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring return; } if (sbq_desc->p.skb) { - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), + pci_unmap_single(qdev->pdev, sbq_desc->dma_addr, SMALL_BUF_MAP_SIZE, 
PCI_DMA_FROMDEVICE); dev_kfree_skb(sbq_desc->p.skb); -- cgit v1.2.3 From e4c911a73c8948a679981c2001c0aed9d0054ab1 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:04 +0900 Subject: staging: qlge: Remove rx_ring.type This field is redundant, the type can be determined from the index, cq_id. Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-11-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 10 ---------- drivers/staging/qlge/qlge_dbg.c | 16 ++++++++++++---- drivers/staging/qlge/qlge_main.c | 31 +++++++------------------------ 3 files changed, 19 insertions(+), 38 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 519fa39dd194..5a4b2520cd2a 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1387,15 +1387,6 @@ struct tx_ring { u64 tx_errors; }; -/* - * Type of inbound queue. - */ -enum { - DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ - TX_Q = 3, /* Handles outbound completions. */ - RX_Q = 4, /* Handles inbound completions. */ -}; - struct qlge_page_chunk { struct page *page; void *va; /* virt addr including offset */ @@ -1468,7 +1459,6 @@ struct rx_ring { struct qlge_bq sbq; /* Misc. handler elements. */ - u32 type; /* Type of queue, tx, rx. */ u32 irq; /* Which vector this ring is assigned. */ u32 cpu; /* Which CPU this should run on. */ char name[IFNAMSIZ + 5]; diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index e8ad8209d487..81fbee7f2af6 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1730,16 +1730,24 @@ void ql_dump_cqicb(struct cqicb *cqicb) le16_to_cpu(cqicb->sbq_len)); } +static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring) +{ + struct ql_adapter *qdev = rx_ring->qdev; + + if (rx_ring->cq_id < qdev->rss_ring_count) + return "RX COMPLETION"; + else + return "TX COMPLETION"; +}; + void ql_dump_rx_ring(struct rx_ring *rx_ring) { if (rx_ring == NULL) return; pr_err("===================== Dumping rx_ring %d ===============\n", rx_ring->cq_id); - pr_err("Dumping rx_ring %d, type = %s%s%s\n", - rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", - rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", - rx_ring->type == RX_Q ? 
"INBOUND_COMPLETIONS" : ""); + pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id, + qlge_rx_ring_type_name(rx_ring)); pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); pr_err("rx_ring->cq_base_dma = %llx\n", diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 609a87804a94..0e304a7ac22f 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -2785,14 +2785,10 @@ static void ql_free_rx_buffers(struct ql_adapter *qdev) static void ql_alloc_rx_buffers(struct ql_adapter *qdev) { - struct rx_ring *rx_ring; int i; - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->type != TX_Q) - ql_update_buffer_queues(rx_ring); - } + for (i = 0; i < qdev->rss_ring_count; i++) + ql_update_buffer_queues(&qdev->rx_ring[i]); } static int qlge_init_bq(struct qlge_bq *bq) @@ -3071,12 +3067,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) rx_ring->sbq.clean_idx = 0; rx_ring->sbq.free_cnt = rx_ring->sbq.len; } - switch (rx_ring->type) { - case TX_Q: - cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); - cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); - break; - case RX_Q: + if (rx_ring->cq_id < qdev->rss_ring_count) { /* Inbound completion handling rx_rings run in * separate NAPI contexts. */ @@ -3084,10 +3075,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) 64); cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); - break; - default: - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Invalid rx_ring->type = %d.\n", rx_ring->type); + } else { + cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); + cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); } err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), CFG_LCQ, rx_ring->cq_id); @@ -3444,12 +3434,7 @@ static int ql_request_irq(struct ql_adapter *qdev) goto err_irq; netif_err(qdev, ifup, qdev->ndev, - "Hooked intr %d, queue type %s, with name %s.\n", - i, - qdev->rx_ring[0].type == DEFAULT_Q ? - "DEFAULT_Q" : - qdev->rx_ring[0].type == TX_Q ? "TX_Q" : - qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", + "Hooked intr 0, queue type RX_Q, with name %s.\n", intr_context->name); } intr_context->hooked = 1; @@ -4012,7 +3997,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->sbq.type = QLGE_SB; rx_ring->sbq.len = NUM_SMALL_BUFFERS; rx_ring->sbq.size = rx_ring->sbq.len * sizeof(__le64); - rx_ring->type = RX_Q; } else { /* * Outbound queue handles outbound completions only. @@ -4025,7 +4009,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->lbq.size = 0; rx_ring->sbq.len = 0; rx_ring->sbq.size = 0; - rx_ring->type = TX_Q; } } return 0; -- cgit v1.2.3 From dc4eec33bf15eb936d1888872f4039d53565d26c Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:05 +0900 Subject: staging: qlge: Factor out duplicated expression Given that (u16) 65536 == 0, that expression can be replaced by a simple cast. 
Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-12-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 5 +++++ drivers/staging/qlge/qlge_main.c | 18 ++++++------------ 2 files changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 5a4b2520cd2a..24af938da7a4 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -77,6 +77,11 @@ #define LSD(x) ((u32)((u64)(x))) #define MSD(x) ((u32)((((u64)(x)) >> 32))) +/* In some cases, the device interprets a value of 0x0000 as 65536. These + * cases are marked using the following macro. + */ +#define QLGE_FIT16(value) ((u16)(value)) + /* MPI test register definitions. This register * is used for determining alternate NIC function's * PCI->func number. diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 0e304a7ac22f..e1099bd29672 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -2974,7 +2974,6 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) void __iomem *doorbell_area = qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); int err = 0; - u16 bq_len; u64 tmp; __le64 *base_indirect_ptr; int page_entries; @@ -3009,8 +3008,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) memset((void *)cqicb, 0, sizeof(struct cqicb)); cqicb->msix_vect = rx_ring->irq; - bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; - cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); + cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V | + LEN_CPP_CONT); cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); @@ -3034,12 +3033,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) page_entries++; } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len)); cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma); - bq_len = qdev->lbq_buf_size == 65536 ? 0 : - (u16)qdev->lbq_buf_size; - cqicb->lbq_buf_size = cpu_to_le16(bq_len); - bq_len = (rx_ring->lbq.len == 65536) ? 0 : - (u16)rx_ring->lbq.len; - cqicb->lbq_len = cpu_to_le16(bq_len); + cqicb->lbq_buf_size = + cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size)); + cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(rx_ring->lbq.len)); rx_ring->lbq.prod_idx = 0; rx_ring->lbq.curr_idx = 0; rx_ring->lbq.clean_idx = 0; @@ -3059,9 +3055,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) cqicb->sbq_addr = cpu_to_le64(rx_ring->sbq.base_indirect_dma); cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE); - bq_len = (rx_ring->sbq.len == 65536) ? 0 : - (u16)rx_ring->sbq.len; - cqicb->sbq_len = cpu_to_le16(bq_len); + cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(rx_ring->sbq.len)); rx_ring->sbq.prod_idx = 0; rx_ring->sbq.curr_idx = 0; rx_ring->sbq.clean_idx = 0; -- cgit v1.2.3 From ec705b983b46b8e2d3cafd40c188458bf4241f11 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:06 +0900 Subject: staging: qlge: Remove qlge_bq.len & size Given the way the driver currently works, these values are always known at compile time. 
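What the fixed, power-of-two length buys in the index handling, as a standalone
sketch (QLGE_BQ_SHIFT/QLGE_BQ_LEN/QLGE_BQ_WRAP as added in the qlge.h hunk below;
BIT() is open-coded here so the snippet compiles outside the kernel):

	#include <assert.h>

	#define QLGE_BQ_SHIFT		9
	#define QLGE_BQ_LEN		(1u << QLGE_BQ_SHIFT)	/* 512, same for sbq and lbq */
	#define QLGE_BQ_WRAP(index)	((index) & (QLGE_BQ_LEN - 1))

	int main(void)
	{
		/* The old "if (++idx == bq->len) idx = 0;" collapses to a mask. */
		assert(QLGE_BQ_WRAP(QLGE_BQ_LEN - 1 + 1) == 0);
		assert(QLGE_BQ_WRAP(5 + 1) == 6);
		return 0;
	}
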
Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-13-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 17 +++++---- drivers/staging/qlge/qlge_dbg.c | 4 --- drivers/staging/qlge/qlge_main.c | 75 +++++++++++++++------------------------- 3 files changed, 39 insertions(+), 57 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 24af938da7a4..5e773af50397 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -34,8 +34,13 @@ #define NUM_TX_RING_ENTRIES 256 #define NUM_RX_RING_ENTRIES 256 -#define NUM_SMALL_BUFFERS 512 -#define NUM_LARGE_BUFFERS 512 +/* Use the same len for sbq and lbq. Note that it seems like the device might + * support different sizes. + */ +#define QLGE_BQ_SHIFT 9 +#define QLGE_BQ_LEN BIT(QLGE_BQ_SHIFT) +#define QLGE_BQ_SIZE (QLGE_BQ_LEN * sizeof(__le64)) + #define DB_PAGE_SIZE 4096 /* Calculate the number of (4k) pages required to @@ -46,8 +51,8 @@ (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0)) #define RX_RING_SHADOW_SPACE (sizeof(u64) + \ - MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ - MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) + MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64) + \ + MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64)) #define LARGE_BUFFER_MAX_SIZE 8192 #define LARGE_BUFFER_MIN_SIZE 2048 @@ -1419,8 +1424,6 @@ struct qlge_bq { dma_addr_t base_indirect_dma; struct qlge_bq_desc *queue; void __iomem *prod_idx_db_reg; - u32 len; /* entry count */ - u32 size; /* size in bytes of hw ring */ u32 prod_idx; /* current sw prod idx */ u32 curr_idx; /* next entry we expect */ u32 clean_idx; /* beginning of new descs */ @@ -1439,6 +1442,8 @@ struct qlge_bq { offsetof(struct rx_ring, lbq))); \ }) +#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1)) + struct rx_ring { struct cqicb cqicb; /* The chip's completion queue init control block. 
*/ diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 81fbee7f2af6..c6bf78517410 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1774,8 +1774,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->lbq.base_indirect_dma = %llx\n", (unsigned long long)rx_ring->lbq.base_indirect_dma); pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue); - pr_err("rx_ring->lbq.len = %d\n", rx_ring->lbq.len); - pr_err("rx_ring->lbq.size = %d\n", rx_ring->lbq.size); pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n", rx_ring->lbq.prod_idx_db_reg); pr_err("rx_ring->lbq.prod_idx = %d\n", rx_ring->lbq.prod_idx); @@ -1791,8 +1789,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->sbq.base_indirect_dma = %llx\n", (unsigned long long)rx_ring->sbq.base_indirect_dma); pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue); - pr_err("rx_ring->sbq.len = %d\n", rx_ring->sbq.len); - pr_err("rx_ring->sbq.size = %d\n", rx_ring->sbq.size); pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n", rx_ring->sbq.prod_idx_db_reg); pr_err("rx_ring->sbq.prod_idx = %d\n", rx_ring->sbq.prod_idx); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index e1099bd29672..ef33db118aa1 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -982,9 +982,8 @@ static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq) { struct qlge_bq_desc *bq_desc; - bq_desc = &bq->queue[bq->curr_idx++]; - if (bq->curr_idx == bq->len) - bq->curr_idx = 0; + bq_desc = &bq->queue[bq->curr_idx]; + bq->curr_idx = QLGE_BQ_WRAP(bq->curr_idx + 1); bq->free_cnt++; return bq_desc; @@ -1149,15 +1148,11 @@ static void qlge_refill_bq(struct qlge_bq *bq) return; } - clean_idx++; - if (clean_idx == bq->len) - clean_idx = 0; + clean_idx = QLGE_BQ_WRAP(clean_idx + 1); } bq->clean_idx = clean_idx; - bq->prod_idx += 16; - if (bq->prod_idx == bq->len) - bq->prod_idx = 0; + bq->prod_idx = QLGE_BQ_WRAP(bq->prod_idx + 16); bq->free_cnt -= 16; } @@ -2732,8 +2727,7 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring put_page(lbq_desc->p.pg_chunk.page); lbq_desc->p.pg_chunk.page = NULL; - if (++curr_idx == rx_ring->lbq.len) - curr_idx = 0; + curr_idx = QLGE_BQ_WRAP(curr_idx + 1); } if (rx_ring->master_chunk.page) { @@ -2748,7 +2742,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring { int i; - for (i = 0; i < rx_ring->sbq.len; i++) { + for (i = 0; i < QLGE_BQ_LEN; i++) { struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i]; if (sbq_desc == NULL) { @@ -2799,7 +2793,8 @@ static int qlge_init_bq(struct qlge_bq *bq) __le64 *buf_ptr; int i; - bq->base = pci_alloc_consistent(qdev->pdev, bq->size, &bq->base_dma); + bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE, + &bq->base_dma); if (!bq->base) { netif_err(qdev, ifup, qdev->ndev, "ring %u %s allocation failed.\n", rx_ring->cq_id, @@ -2807,16 +2802,16 @@ static int qlge_init_bq(struct qlge_bq *bq) return -ENOMEM; } - bq->queue = kmalloc_array(bq->len, sizeof(struct qlge_bq_desc), + bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc), GFP_KERNEL); if (!bq->queue) return -ENOMEM; - memset(bq->queue, 0, bq->len * sizeof(struct qlge_bq_desc)); + memset(bq->queue, 0, QLGE_BQ_LEN * sizeof(struct qlge_bq_desc)); buf_ptr = bq->base; bq_desc = &bq->queue[0]; - for (i = 0; i < bq->len; i++, buf_ptr++, bq_desc++) { + for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) { memset(bq_desc, 0, sizeof(*bq_desc)); 
bq_desc->index = i; bq_desc->buf_ptr = buf_ptr; @@ -2830,8 +2825,7 @@ static void ql_free_rx_resources(struct ql_adapter *qdev, { /* Free the small buffer queue. */ if (rx_ring->sbq.base) { - pci_free_consistent(qdev->pdev, - rx_ring->sbq.size, + pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE, rx_ring->sbq.base, rx_ring->sbq.base_dma); rx_ring->sbq.base = NULL; } @@ -2842,8 +2836,7 @@ static void ql_free_rx_resources(struct ql_adapter *qdev, /* Free the large buffer queue. */ if (rx_ring->lbq.base) { - pci_free_consistent(qdev->pdev, - rx_ring->lbq.size, + pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE, rx_ring->lbq.base, rx_ring->lbq.base_dma); rx_ring->lbq.base = NULL; } @@ -2879,16 +2872,13 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, return -ENOMEM; } - if (rx_ring->sbq.len && qlge_init_bq(&rx_ring->sbq)) - goto err_mem; - if (rx_ring->lbq.len && qlge_init_bq(&rx_ring->lbq)) - goto err_mem; + if (rx_ring->cq_id < qdev->rss_ring_count && + (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) { + ql_free_rx_resources(qdev, rx_ring); + return -ENOMEM; + } return 0; - -err_mem: - ql_free_rx_resources(qdev, rx_ring); - return -ENOMEM; } static void ql_tx_ring_clean(struct ql_adapter *qdev) @@ -2986,8 +2976,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) shadow_reg_dma += sizeof(u64); rx_ring->lbq.base_indirect = shadow_reg; rx_ring->lbq.base_indirect_dma = shadow_reg_dma; - shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len)); - shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len)); + shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN)); + shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN)); rx_ring->sbq.base_indirect = shadow_reg; rx_ring->sbq.base_indirect_dma = shadow_reg_dma; @@ -3021,7 +3011,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) cqicb->flags = FLAGS_LC | /* Load queue base address */ FLAGS_LV | /* Load MSI-X vector */ FLAGS_LI; /* Load irq delay values */ - if (rx_ring->lbq.len) { + if (rx_ring->cq_id < qdev->rss_ring_count) { cqicb->flags |= FLAGS_LL; /* Load lbq values */ tmp = (u64)rx_ring->lbq.base_dma; base_indirect_ptr = rx_ring->lbq.base_indirect; @@ -3031,17 +3021,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) tmp += DB_PAGE_SIZE; base_indirect_ptr++; page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len)); + } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN)); cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma); cqicb->lbq_buf_size = cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size)); - cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(rx_ring->lbq.len)); + cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN)); rx_ring->lbq.prod_idx = 0; rx_ring->lbq.curr_idx = 0; rx_ring->lbq.clean_idx = 0; - rx_ring->lbq.free_cnt = rx_ring->lbq.len; - } - if (rx_ring->sbq.len) { + rx_ring->lbq.free_cnt = QLGE_BQ_LEN; + cqicb->flags |= FLAGS_LS; /* Load sbq values */ tmp = (u64)rx_ring->sbq.base_dma; base_indirect_ptr = rx_ring->sbq.base_indirect; @@ -3051,15 +3040,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) tmp += DB_PAGE_SIZE; base_indirect_ptr++; page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq.len)); + } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN)); cqicb->sbq_addr = cpu_to_le64(rx_ring->sbq.base_indirect_dma); cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE); - cqicb->sbq_len = 
cpu_to_le16(QLGE_FIT16(rx_ring->sbq.len)); + cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN)); rx_ring->sbq.prod_idx = 0; rx_ring->sbq.curr_idx = 0; rx_ring->sbq.clean_idx = 0; - rx_ring->sbq.free_cnt = rx_ring->sbq.len; + rx_ring->sbq.free_cnt = QLGE_BQ_LEN; } if (rx_ring->cq_id < qdev->rss_ring_count) { /* Inbound completion handling rx_rings run in @@ -3986,11 +3975,7 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring->lbq.type = QLGE_LB; - rx_ring->lbq.len = NUM_LARGE_BUFFERS; - rx_ring->lbq.size = rx_ring->lbq.len * sizeof(__le64); rx_ring->sbq.type = QLGE_SB; - rx_ring->sbq.len = NUM_SMALL_BUFFERS; - rx_ring->sbq.size = rx_ring->sbq.len * sizeof(__le64); } else { /* * Outbound queue handles outbound completions only. @@ -3999,10 +3984,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->cq_len = qdev->tx_ring_size; rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq.len = 0; - rx_ring->lbq.size = 0; - rx_ring->sbq.len = 0; - rx_ring->sbq.size = 0; } } return 0; -- cgit v1.2.3 From 7be4c3f92077e4c5e1423edc5056c890f8bddfc7 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:07 +0900 Subject: staging: qlge: Remove useless memset This just repeats what the other memset a few lines above did. Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-14-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_main.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index ef33db118aa1..8da596922582 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -2812,7 +2812,6 @@ static int qlge_init_bq(struct qlge_bq *bq) buf_ptr = bq->base; bq_desc = &bq->queue[0]; for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) { - memset(bq_desc, 0, sizeof(*bq_desc)); bq_desc->index = i; bq_desc->buf_ptr = buf_ptr; } -- cgit v1.2.3 From c8c1ff5c19021a241afa78d1272c23ef7e7297aa Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:08 +0900 Subject: staging: qlge: Replace memset with assignment Instead of clearing the structure wholesale, it is sufficient to initialize the skb member which is used to manage sbq instances. lbq instances are managed according to curr_idx and clean_idx. 
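The pattern in miniature, as a standalone sketch (mock types and names are
hypothetical; it mirrors the qlge_init_bq() loop in the hunk below): zeroing the
whole array buys nothing when every member that is later read gets an explicit
value anyway.

	#include <stddef.h>

	struct mock_bq_desc {
		void *skb;			/* stands in for p.skb */
		unsigned int index;
		unsigned long long *buf_ptr;
	};

	static void init_queue(struct mock_bq_desc *queue,
			       unsigned long long *base, unsigned int len)
	{
		unsigned int i;

		/* No memset(queue, 0, ...): assign the consulted members only. */
		for (i = 0; i < len; i++) {
			queue[i].skb = NULL;
			queue[i].index = i;
			queue[i].buf_ptr = &base[i];
		}
	}

	int main(void)
	{
		unsigned long long base[8] = { 0 };
		struct mock_bq_desc queue[8];

		init_queue(queue, base, 8);
		return queue[3].index == 3 ? 0 : 1;
	}
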
Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-15-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 8da596922582..009934bcb515 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -2807,11 +2807,10 @@ static int qlge_init_bq(struct qlge_bq *bq) if (!bq->queue) return -ENOMEM; - memset(bq->queue, 0, QLGE_BQ_LEN * sizeof(struct qlge_bq_desc)); - buf_ptr = bq->base; bq_desc = &bq->queue[0]; for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) { + bq_desc->p.skb = NULL; bq_desc->index = i; bq_desc->buf_ptr = buf_ptr; } -- cgit v1.2.3 From aec626d2092f2b203f4acec5b57ed7c1e3095802 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:09 +0900 Subject: staging: qlge: Update buffer queue prod index despite oom Currently, if we repeatedly fail to allocate all of the buffers from the desired batching budget, we will never update the prod_idx register. Restructure code to always update prod_idx if new buffers could be allocated. This eliminates the current two stage process (clean_idx -> prod_idx) and some associated bookkeeping variables. Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-16-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 8 +-- drivers/staging/qlge/qlge_dbg.c | 10 ++-- drivers/staging/qlge/qlge_main.c | 105 +++++++++++++++++++-------------------- 3 files changed, 60 insertions(+), 63 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 5e773af50397..7c48e333d29b 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1424,10 +1424,10 @@ struct qlge_bq { dma_addr_t base_indirect_dma; struct qlge_bq_desc *queue; void __iomem *prod_idx_db_reg; - u32 prod_idx; /* current sw prod idx */ - u32 curr_idx; /* next entry we expect */ - u32 clean_idx; /* beginning of new descs */ - u32 free_cnt; /* free buffer desc cnt */ + /* next index where sw should refill a buffer for hw */ + u16 next_to_use; + /* next index where sw expects to find a buffer filled by hw */ + u16 next_to_clean; enum { QLGE_SB, /* small buffer */ QLGE_LB, /* large buffer */ diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index c6bf78517410..7e16066a3527 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1776,8 +1776,8 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue); pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n", rx_ring->lbq.prod_idx_db_reg); - pr_err("rx_ring->lbq.prod_idx = %d\n", rx_ring->lbq.prod_idx); - pr_err("rx_ring->lbq.curr_idx = %d\n", rx_ring->lbq.curr_idx); + pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use); + pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean); pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); @@ -1791,10 +1791,8 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue); pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n", rx_ring->sbq.prod_idx_db_reg); - pr_err("rx_ring->sbq.prod_idx = %d\n", rx_ring->sbq.prod_idx); - pr_err("rx_ring->sbq.curr_idx = 
%d\n", rx_ring->sbq.curr_idx); - pr_err("rx_ring->sbq.clean_idx = %d\n", rx_ring->sbq.clean_idx); - pr_err("rx_ring->sbq.free_cnt = %d\n", rx_ring->sbq.free_cnt); + pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use); + pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean); pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); pr_err("rx_ring->irq = %d\n", rx_ring->irq); pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 009934bcb515..83e75005688a 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -982,9 +982,8 @@ static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq) { struct qlge_bq_desc *bq_desc; - bq_desc = &bq->queue[bq->curr_idx]; - bq->curr_idx = QLGE_BQ_WRAP(bq->curr_idx + 1); - bq->free_cnt++; + bq_desc = &bq->queue[bq->next_to_clean]; + bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1); return bq_desc; } @@ -1114,9 +1113,9 @@ static void qlge_refill_bq(struct qlge_bq *bq) { struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); struct ql_adapter *qdev = rx_ring->qdev; - u32 clean_idx = bq->clean_idx; + struct qlge_bq_desc *bq_desc; + int free_count, refill_count; unsigned int reserved_count; - u32 start_idx = clean_idx; int i; if (bq->type == QLGE_SB) @@ -1124,44 +1123,52 @@ static void qlge_refill_bq(struct qlge_bq *bq) else reserved_count = 32; - while (bq->free_cnt > reserved_count) { - for (i = (bq->clean_idx % 16); i < 16; i++) { - struct qlge_bq_desc *bq_desc = &bq->queue[clean_idx]; - int retval; + free_count = bq->next_to_clean - bq->next_to_use; + if (free_count <= 0) + free_count += QLGE_BQ_LEN; - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "ring %u %s: try cleaning clean_idx = %d.\n", - rx_ring->cq_id, bq_type_name[bq->type], - clean_idx); - - if (bq->type == QLGE_SB) - retval = qlge_refill_sb(rx_ring, bq_desc); - else - retval = qlge_refill_lb(rx_ring, bq_desc); - if (retval < 0) { - bq->clean_idx = clean_idx; - netif_err(qdev, ifup, qdev->ndev, - "ring %u %s: Could not get a page chunk, i=%d, clean_idx =%d .\n", - rx_ring->cq_id, - bq_type_name[bq->type], i, - clean_idx); - return; - } + refill_count = free_count - reserved_count; + /* refill batch size */ + if (refill_count < 16) + return; + + i = bq->next_to_use; + bq_desc = &bq->queue[i]; + i -= QLGE_BQ_LEN; + do { + int retval; + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "ring %u %s: try cleaning idx %d\n", + rx_ring->cq_id, bq_type_name[bq->type], i); - clean_idx = QLGE_BQ_WRAP(clean_idx + 1); + if (bq->type == QLGE_SB) + retval = qlge_refill_sb(rx_ring, bq_desc); + else + retval = qlge_refill_lb(rx_ring, bq_desc); + if (retval < 0) { + netif_err(qdev, ifup, qdev->ndev, + "ring %u %s: Could not get a page chunk, idx %d\n", + rx_ring->cq_id, bq_type_name[bq->type], i); + break; } - bq->clean_idx = clean_idx; - bq->prod_idx = QLGE_BQ_WRAP(bq->prod_idx + 16); - bq->free_cnt -= 16; - } + bq_desc++; + i++; + if (unlikely(!i)) { + bq_desc = &bq->queue[0]; + i -= QLGE_BQ_LEN; + } + refill_count--; + } while (refill_count); + i += QLGE_BQ_LEN; - if (start_idx != clean_idx) { + if (bq->next_to_use != i) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "ring %u %s: updating prod idx = %d.\n", - rx_ring->cq_id, bq_type_name[bq->type], - bq->prod_idx); - ql_write_db_reg(bq->prod_idx, bq->prod_idx_db_reg); + rx_ring->cq_id, bq_type_name[bq->type], i); + bq->next_to_use = i; + ql_write_db_reg(bq->next_to_use, bq->prod_idx_db_reg); } } 
@@ -2709,25 +2716,21 @@ pci_alloc_err: static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { + struct qlge_bq *lbq = &rx_ring->lbq; unsigned int last_offset; - uint32_t curr_idx, clean_idx; - last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size; - curr_idx = rx_ring->lbq.curr_idx; - clean_idx = rx_ring->lbq.clean_idx; - while (curr_idx != clean_idx) { - struct qlge_bq_desc *lbq_desc = &rx_ring->lbq.queue[curr_idx]; + while (lbq->next_to_clean != lbq->next_to_use) { + struct qlge_bq_desc *lbq_desc = + &lbq->queue[lbq->next_to_clean]; if (lbq_desc->p.pg_chunk.offset == last_offset) pci_unmap_page(qdev->pdev, lbq_desc->dma_addr, ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); - put_page(lbq_desc->p.pg_chunk.page); - lbq_desc->p.pg_chunk.page = NULL; - curr_idx = QLGE_BQ_WRAP(curr_idx + 1); + lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1); } if (rx_ring->master_chunk.page) { @@ -3024,10 +3027,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) cqicb->lbq_buf_size = cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size)); cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN)); - rx_ring->lbq.prod_idx = 0; - rx_ring->lbq.curr_idx = 0; - rx_ring->lbq.clean_idx = 0; - rx_ring->lbq.free_cnt = QLGE_BQ_LEN; + rx_ring->lbq.next_to_use = 0; + rx_ring->lbq.next_to_clean = 0; cqicb->flags |= FLAGS_LS; /* Load sbq values */ tmp = (u64)rx_ring->sbq.base_dma; @@ -3043,10 +3044,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) cpu_to_le64(rx_ring->sbq.base_indirect_dma); cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE); cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN)); - rx_ring->sbq.prod_idx = 0; - rx_ring->sbq.curr_idx = 0; - rx_ring->sbq.clean_idx = 0; - rx_ring->sbq.free_cnt = QLGE_BQ_LEN; + rx_ring->sbq.next_to_use = 0; + rx_ring->sbq.next_to_clean = 0; } if (rx_ring->cq_id < qdev->rss_ring_count) { /* Inbound completion handling rx_rings run in -- cgit v1.2.3 From 6e9c52b920974b90c4ca994c5d8c5bc56742017f Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:10 +0900 Subject: staging: qlge: Refill rx buffers up to multiple of 16 Reading the {s,l}bq_prod_idx registers on a running device, it appears that the adapter will only use buffers up to prod_idx & 0xfff0. The driver currently uses fixed-size guard zones (16 for sbq, 32 for lbq - don't know why this difference). After the previous patch, this approach no longer guarantees prod_idx values aligned on multiples of 16. While it appears that we can write unaligned values to prod_idx without ill effects on device operation, it makes more sense to change qlge_refill_bq() to refill up to a limit that corresponds with the device's behavior. Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-17-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge.h | 8 ++++++++ drivers/staging/qlge/qlge_main.c | 29 +++++++++++------------------ 2 files changed, 19 insertions(+), 18 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index 7c48e333d29b..e5a352df8228 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1423,6 +1423,9 @@ struct qlge_bq { __le64 *base_indirect; dma_addr_t base_indirect_dma; struct qlge_bq_desc *queue; + /* prod_idx is the index of the first buffer that may NOT be used by + * hw, ie. one after the last. Advanced by sw. 
+ */ void __iomem *prod_idx_db_reg; /* next index where sw should refill a buffer for hw */ u16 next_to_use; @@ -1442,6 +1445,11 @@ struct qlge_bq { offsetof(struct rx_ring, lbq))); \ }) +/* Experience shows that the device ignores the low 4 bits of the tail index. + * Refill up to a x16 multiple. + */ +#define QLGE_BQ_ALIGN(index) ALIGN_DOWN(index, 16) + #define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1)) struct rx_ring { diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 83e75005688a..02ad0cdf4856 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1114,22 +1114,12 @@ static void qlge_refill_bq(struct qlge_bq *bq) struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); struct ql_adapter *qdev = rx_ring->qdev; struct qlge_bq_desc *bq_desc; - int free_count, refill_count; - unsigned int reserved_count; + int refill_count; int i; - if (bq->type == QLGE_SB) - reserved_count = 16; - else - reserved_count = 32; - - free_count = bq->next_to_clean - bq->next_to_use; - if (free_count <= 0) - free_count += QLGE_BQ_LEN; - - refill_count = free_count - reserved_count; - /* refill batch size */ - if (refill_count < 16) + refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) - + bq->next_to_use); + if (!refill_count) return; i = bq->next_to_use; @@ -1164,11 +1154,14 @@ static void qlge_refill_bq(struct qlge_bq *bq) i += QLGE_BQ_LEN; if (bq->next_to_use != i) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "ring %u %s: updating prod idx = %d.\n", - rx_ring->cq_id, bq_type_name[bq->type], i); + if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "ring %u %s: updating prod idx = %d.\n", + rx_ring->cq_id, bq_type_name[bq->type], + i); + ql_write_db_reg(i, bq->prod_idx_db_reg); + } bq->next_to_use = i; - ql_write_db_reg(bq->next_to_use, bq->prod_idx_db_reg); } } -- cgit v1.2.3 From b91fec1ecf6f0a93d909f8cb98ad8aea7109e81b Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 27 Sep 2019 19:12:11 +0900 Subject: staging: qlge: Refill empty buffer queues from wq When operating at mtu 9000, qlge does order-1 allocations for rx buffers in atomic context. This is especially unreliable when free memory is low or fragmented. Add an approach similar to commit 3161e453e496 ("virtio: net refill on out-of-memory") to qlge so that the device doesn't lock up if there are allocation failures. Signed-off-by: Benjamin Poirier Link: https://lore.kernel.org/r/20190927101210.23856-18-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/TODO | 3 -- drivers/staging/qlge/qlge.h | 8 ++++ drivers/staging/qlge/qlge_main.c | 80 ++++++++++++++++++++++++++++++++-------- 3 files changed, 72 insertions(+), 19 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO index 51c509084e80..f93f7428f5d5 100644 --- a/drivers/staging/qlge/TODO +++ b/drivers/staging/qlge/TODO @@ -1,6 +1,3 @@ -* reception stalls permanently (until admin intervention) if the rx buffer - queues become empty because of allocation failures (ex. 
under memory - pressure) * commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1) introduced dead code in the receive routines, which should be rewritten anyways by the admission of the author himself, see the comment above diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h index e5a352df8228..6ec7e3ce3863 100644 --- a/drivers/staging/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h @@ -1452,6 +1452,13 @@ struct qlge_bq { #define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1)) +#define QLGE_BQ_HW_OWNED(bq) \ +({ \ + typeof(bq) _bq = bq; \ + QLGE_BQ_WRAP(QLGE_BQ_ALIGN((_bq)->next_to_use) - \ + (_bq)->next_to_clean); \ +}) + struct rx_ring { struct cqicb cqicb; /* The chip's completion queue init control block. */ @@ -1479,6 +1486,7 @@ struct rx_ring { /* Misc. handler elements. */ u32 irq; /* Which vector this ring is assigned. */ u32 cpu; /* Which CPU this should run on. */ + struct delayed_work refill_work; char name[IFNAMSIZ + 5]; struct napi_struct napi; u8 reserved; diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 02ad0cdf4856..0c381d91faa6 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1029,7 +1029,7 @@ static const char * const bq_type_name[] = { /* return 0 or negative error */ static int qlge_refill_sb(struct rx_ring *rx_ring, - struct qlge_bq_desc *sbq_desc) + struct qlge_bq_desc *sbq_desc, gfp_t gfp) { struct ql_adapter *qdev = rx_ring->qdev; struct sk_buff *skb; @@ -1041,7 +1041,7 @@ static int qlge_refill_sb(struct rx_ring *rx_ring, "ring %u sbq: getting new skb for index %d.\n", rx_ring->cq_id, sbq_desc->index); - skb = netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE); + skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp); if (!skb) return -ENOMEM; skb_reserve(skb, QLGE_SB_PAD); @@ -1062,7 +1062,7 @@ static int qlge_refill_sb(struct rx_ring *rx_ring, /* return 0 or negative error */ static int qlge_refill_lb(struct rx_ring *rx_ring, - struct qlge_bq_desc *lbq_desc) + struct qlge_bq_desc *lbq_desc, gfp_t gfp) { struct ql_adapter *qdev = rx_ring->qdev; struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk; @@ -1071,8 +1071,7 @@ static int qlge_refill_lb(struct rx_ring *rx_ring, struct page *page; dma_addr_t dma_addr; - page = alloc_pages(__GFP_COMP | GFP_ATOMIC, - qdev->lbq_buf_order); + page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order); if (unlikely(!page)) return -ENOMEM; dma_addr = pci_map_page(qdev->pdev, page, 0, @@ -1109,33 +1108,33 @@ static int qlge_refill_lb(struct rx_ring *rx_ring, return 0; } -static void qlge_refill_bq(struct qlge_bq *bq) +/* return 0 or negative error */ +static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp) { struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); struct ql_adapter *qdev = rx_ring->qdev; struct qlge_bq_desc *bq_desc; int refill_count; + int retval; int i; refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) - bq->next_to_use); if (!refill_count) - return; + return 0; i = bq->next_to_use; bq_desc = &bq->queue[i]; i -= QLGE_BQ_LEN; do { - int retval; - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "ring %u %s: try cleaning idx %d\n", rx_ring->cq_id, bq_type_name[bq->type], i); if (bq->type == QLGE_SB) - retval = qlge_refill_sb(rx_ring, bq_desc); + retval = qlge_refill_sb(rx_ring, bq_desc, gfp); else - retval = qlge_refill_lb(rx_ring, bq_desc); + retval = qlge_refill_lb(rx_ring, bq_desc, gfp); if (retval < 0) { netif_err(qdev, ifup, qdev->ndev, "ring %u %s: Could not get a page 
chunk, idx %d\n", @@ -1163,12 +1162,52 @@ static void qlge_refill_bq(struct qlge_bq *bq) } bq->next_to_use = i; } + + return retval; +} + +static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp, + unsigned long delay) +{ + bool sbq_fail, lbq_fail; + + sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp); + lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp); + + /* Minimum number of buffers needed to be able to receive at least one + * frame of any format: + * sbq: 1 for header + 1 for data + * lbq: mtu 9000 / lb size + * Below this, the queue might stall. + */ + if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) || + (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) < + DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE))) + /* Allocations can take a long time in certain cases (ex. + * reclaim). Therefore, use a workqueue for long-running + * work items. + */ + queue_delayed_work_on(smp_processor_id(), system_long_wq, + &rx_ring->refill_work, delay); } -static void ql_update_buffer_queues(struct rx_ring *rx_ring) +static void qlge_slow_refill(struct work_struct *work) { - qlge_refill_bq(&rx_ring->sbq); - qlge_refill_bq(&rx_ring->lbq); + struct rx_ring *rx_ring = container_of(work, struct rx_ring, + refill_work.work); + struct napi_struct *napi = &rx_ring->napi; + + napi_disable(napi); + ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2); + napi_enable(napi); + + local_bh_disable(); + /* napi_disable() might have prevented incomplete napi work from being + * rescheduled. + */ + napi_schedule(napi); + /* trigger softirq processing */ + local_bh_enable(); } /* Unmaps tx buffers. Can be called from send() if a pci mapping @@ -2168,7 +2207,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) if (count == budget) break; } - ql_update_buffer_queues(rx_ring); + ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0); ql_write_cq_idx(rx_ring); return count; } @@ -2778,7 +2817,8 @@ static void ql_alloc_rx_buffers(struct ql_adapter *qdev) int i; for (i = 0; i < qdev->rss_ring_count; i++) - ql_update_buffer_queues(&qdev->rx_ring[i]); + ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL, + HZ / 2); } static int qlge_init_bq(struct qlge_bq *bq) @@ -3883,6 +3923,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev) static int qlge_close(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); + int i; /* If we hit pci_channel_io_perm_failure * failure condition, then we already @@ -3900,6 +3941,11 @@ static int qlge_close(struct net_device *ndev) */ while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) msleep(1); + + /* Make sure refill_work doesn't re-enable napi */ + for (i = 0; i < qdev->rss_ring_count; i++) + cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work); + ql_adapter_down(qdev); ql_release_adapter_resources(qdev); return 0; @@ -3966,6 +4012,8 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring->lbq.type = QLGE_LB; rx_ring->sbq.type = QLGE_SB; + INIT_DELAYED_WORK(&rx_ring->refill_work, + &qlge_slow_refill); } else { /* * Outbound queue handles outbound completions only. 
-- cgit v1.2.3 From 52c4326a56ec69920062593086451f082ce66f33 Mon Sep 17 00:00:00 2001 From: Jules Irenge Date: Thu, 10 Oct 2019 18:21:14 +0100 Subject: staging: qlge: correct a misspelled word Fix a misspelling of "several" detected by checkpatch Signed-off-by: Jules Irenge Link: https://lore.kernel.org/r/20191010172114.12345-1-jbi.octave@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_dbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 7e16066a3527..a743971e5782 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -354,7 +354,7 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf, for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) { /* We're reading 400 xgmac registers, but we filter out - * serveral locations that are non-responsive to reads. + * several locations that are non-responsive to reads. */ if ((i == 0x00000114) || (i == 0x00000118) || -- cgit v1.2.3 From a8bfbd7cf3fd507e83e296d61b8b59755864413b Mon Sep 17 00:00:00 2001 From: Jules Irenge Date: Thu, 10 Oct 2019 22:40:03 +0100 Subject: staging: qlge: fix "alignment should match open parenthesis" checks Fix "alignment should mactch open parenthesis" checks issued by checkpatch.pl tool: "CHECK: Alignment should match open parenthesis". Signed-off-by: Jules Irenge Link: https://lore.kernel.org/r/20191010214006.23677-1-jbi.octave@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_dbg.c | 182 ++++++++++++++++++++-------------------- 1 file changed, 91 insertions(+), 91 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index a743971e5782..a6ac9f796d81 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -7,7 +7,7 @@ /* Read a NIC register from the alternate function. */ static u32 ql_read_other_func_reg(struct ql_adapter *qdev, - u32 reg) + u32 reg) { u32 register_to_read; u32 reg_val; @@ -26,7 +26,7 @@ static u32 ql_read_other_func_reg(struct ql_adapter *qdev, /* Write a NIC register from the alternate function. 
*/ static int ql_write_other_func_reg(struct ql_adapter *qdev, - u32 reg, u32 reg_val) + u32 reg, u32 reg_val) { u32 register_to_read; int status = 0; @@ -41,7 +41,7 @@ static int ql_write_other_func_reg(struct ql_adapter *qdev, } static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, - u32 bit, u32 err_bit) + u32 bit, u32 err_bit) { u32 temp; int count = 10; @@ -61,13 +61,13 @@ static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, } static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, - u32 *data) + u32 *data) { int status; /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, - XG_SERDES_ADDR_RDY, 0); + XG_SERDES_ADDR_RDY, 0); if (status) goto exit; @@ -76,7 +76,7 @@ static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, - XG_SERDES_ADDR_RDY, 0); + XG_SERDES_ADDR_RDY, 0); if (status) goto exit; @@ -111,7 +111,7 @@ exit: } static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, - u32 *direct_ptr, u32 *indirect_ptr, + u32 *direct_ptr, u32 *indirect_ptr, unsigned int direct_valid, unsigned int indirect_valid) { unsigned int status; @@ -133,7 +133,7 @@ static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, } static int ql_get_serdes_regs(struct ql_adapter *qdev, - struct ql_mpi_coredump *mpi_coredump) + struct ql_mpi_coredump *mpi_coredump) { int status; unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; @@ -203,7 +203,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xaui_direct_valid, xaui_indirect_valid); + xaui_direct_valid, xaui_indirect_valid); /* Get XAUI_HSS_PCS register block. */ if (qdev->func & 1) { @@ -220,7 +220,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xaui_direct_valid, xaui_indirect_valid); + xaui_direct_valid, xaui_indirect_valid); /* Get XAUI_XFI_AN register block. */ if (qdev->func & 1) { @@ -233,7 +233,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); + xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_TRAIN register block. */ if (qdev->func & 1) { @@ -248,7 +248,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); + xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_PCS register block. */ if (qdev->func & 1) { @@ -265,7 +265,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); + xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_TX register block. 
*/ if (qdev->func & 1) { @@ -280,7 +280,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, } for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); + xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_RX register block. */ if (qdev->func & 1) { @@ -296,7 +296,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); + xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_PLL register block. */ @@ -313,18 +313,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, } for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); + xfi_direct_valid, xfi_indirect_valid); return 0; } static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, - u32 *data) + u32 *data) { int status = 0; /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); if (status) goto exit; @@ -333,7 +333,7 @@ static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); if (status) goto exit; @@ -347,7 +347,7 @@ exit: * skipping unused locations. */ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf, - unsigned int other_function) + unsigned int other_function) { int status = 0; int i; @@ -357,7 +357,7 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf, * several locations that are non-responsive to reads. 
*/ if ((i == 0x00000114) || - (i == 0x00000118) || + (i == 0x00000118) || (i == 0x0000013c) || (i == 0x00000140) || (i > 0x00000150 && i < 0x000001fc) || @@ -410,7 +410,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf) for (i = 0; i < qdev->rx_ring_count; i++, buf++) { ql_write32(qdev, INTR_EN, - qdev->intr_context[i].intr_read_mask); + qdev->intr_context[i].intr_read_mask); *buf = ql_read32(qdev, INTR_EN); } } @@ -426,7 +426,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf) for (i = 0; i < 16; i++) { status = ql_get_mac_addr_reg(qdev, - MAC_ADDR_TYPE_CAM_MAC, i, value); + MAC_ADDR_TYPE_CAM_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of mac index register\n"); @@ -438,7 +438,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf) } for (i = 0; i < 32; i++) { status = ql_get_mac_addr_reg(qdev, - MAC_ADDR_TYPE_MULTI_MAC, i, value); + MAC_ADDR_TYPE_MULTI_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of mac index register\n"); @@ -497,7 +497,7 @@ end: /* Read the MPI Processor core registers */ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf, - u32 offset, u32 count) + u32 offset, u32 count) { int i, status = 0; for (i = 0; i < count; i++, buf++) { @@ -510,7 +510,7 @@ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf, /* Read the ASIC probe dump */ static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, - u32 valid, u32 *buf) + u32 valid, u32 *buf) { u32 module, mux_sel, probe, lo_val, hi_val; @@ -545,13 +545,13 @@ static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) /* First we have to enable the probe mux */ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, - PRB_MX_ADDR_VALID_SYS_MOD, buf); + PRB_MX_ADDR_VALID_SYS_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, - PRB_MX_ADDR_VALID_PCI_MOD, buf); + PRB_MX_ADDR_VALID_PCI_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, - PRB_MX_ADDR_VALID_XGM_MOD, buf); + PRB_MX_ADDR_VALID_XGM_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, - PRB_MX_ADDR_VALID_FC_MOD, buf); + PRB_MX_ADDR_VALID_FC_MOD, buf); return 0; } @@ -666,7 +666,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) result_index = 0; while ((result_index & MAC_ADDR_MR) == 0) { result_index = ql_read32(qdev, - MAC_ADDR_IDX); + MAC_ADDR_IDX); } result_data = ql_read32(qdev, MAC_ADDR_DATA); *buf = result_index; @@ -740,7 +740,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Insert the global header */ memset(&(mpi_coredump->mpi_global_header), 0, - sizeof(struct mpi_coredump_global_header)); + sizeof(struct mpi_coredump_global_header)); mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; mpi_coredump->mpi_global_header.headerSize = sizeof(struct mpi_coredump_global_header); @@ -751,23 +751,23 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get generic NIC reg dump */ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, - NIC1_CONTROL_SEG_NUM, + NIC1_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, - NIC2_CONTROL_SEG_NUM, + NIC2_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); /* Get XGMac registers. 
(Segment 18, Rev C. step 21) */ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, - NIC1_XGMAC_SEG_NUM, + NIC1_XGMAC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, - NIC2_XGMAC_SEG_NUM, + NIC2_XGMAC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); @@ -798,97 +798,97 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Rev C. Step 20a */ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, - XAUI_AN_SEG_NUM, + XAUI_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xaui_an), "XAUI AN Registers"); /* Rev C. Step 20b */ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, - XAUI_HSS_PCS_SEG_NUM, + XAUI_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xaui_hss_pcs), "XAUI HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + + sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_an), "XFI AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, - XFI_TRAIN_SEG_NUM, + XFI_TRAIN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_train), "XFI TRAIN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, - XFI_HSS_PCS_SEG_NUM, + XFI_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_pcs), "XFI HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, - XFI_HSS_TX_SEG_NUM, + XFI_HSS_TX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_tx), "XFI HSS TX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, - XFI_HSS_RX_SEG_NUM, + XFI_HSS_RX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_rx), "XFI HSS RX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, - XFI_HSS_PLL_SEG_NUM, + XFI_HSS_PLL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_pll), "XFI HSS PLL Registers"); ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, - XAUI2_AN_SEG_NUM, + XAUI2_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xaui_an), "XAUI2 AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, - XAUI2_HSS_PCS_SEG_NUM, + XAUI2_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xaui_hss_pcs), "XAUI2 HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, - XFI2_AN_SEG_NUM, + XFI2_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_an), "XFI2 AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, - XFI2_TRAIN_SEG_NUM, + XFI2_TRAIN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_train), "XFI2 TRAIN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, - XFI2_HSS_PCS_SEG_NUM, + XFI2_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_pcs), "XFI2 HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, - XFI2_HSS_TX_SEG_NUM, + XFI2_HSS_TX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + 
sizeof(mpi_coredump->serdes2_xfi_hss_tx), "XFI2 HSS TX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, - XFI2_HSS_RX_SEG_NUM, + XFI2_HSS_RX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_rx), "XFI2 HSS RX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, - XFI2_HSS_PLL_SEG_NUM, + XFI2_HSS_PLL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_pll), "XFI2 HSS PLL Registers"); @@ -902,7 +902,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) } ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, - CORE_SEG_NUM, + CORE_SEG_NUM, sizeof(mpi_coredump->core_regs_seg_hdr) + sizeof(mpi_coredump->mpi_core_regs) + sizeof(mpi_coredump->mpi_core_sh_regs), @@ -921,7 +921,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the Test Logic Registers */ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, - TEST_LOGIC_SEG_NUM, + TEST_LOGIC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->test_logic_regs), "Test Logic Regs"); @@ -932,7 +932,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the RMII Registers */ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, - RMII_SEG_NUM, + RMII_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->rmii_regs), "RMII Registers"); @@ -943,7 +943,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the FCMAC1 Registers */ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, - FCMAC1_SEG_NUM, + FCMAC1_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fcmac1_regs), "FCMAC1 Registers"); @@ -955,7 +955,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the FCMAC2 Registers */ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, - FCMAC2_SEG_NUM, + FCMAC2_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fcmac2_regs), "FCMAC2 Registers"); @@ -967,7 +967,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the FC1 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, - FC1_MBOX_SEG_NUM, + FC1_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fc1_mbx_regs), "FC1 MBox Regs"); @@ -978,7 +978,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the IDE Registers */ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, - IDE_SEG_NUM, + IDE_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ide_regs), "IDE Registers"); @@ -989,7 +989,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the NIC1 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, - NIC1_MBOX_SEG_NUM, + NIC1_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic1_mbx_regs), "NIC1 MBox Regs"); @@ -1000,7 +1000,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the SMBus Registers */ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, - SMBUS_SEG_NUM, + SMBUS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->smbus_regs), "SMBus Registers"); @@ -1011,7 +1011,7 @@ int ql_core_dump(struct ql_adapter 
*qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the FC2 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, - FC2_MBOX_SEG_NUM, + FC2_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fc2_mbx_regs), "FC2 MBox Regs"); @@ -1022,7 +1022,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the NIC2 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, - NIC2_MBOX_SEG_NUM, + NIC2_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic2_mbx_regs), "NIC2 MBox Regs"); @@ -1033,7 +1033,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the I2C Registers */ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, - I2C_SEG_NUM, + I2C_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->i2c_regs), "I2C Registers"); @@ -1044,7 +1044,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the MEMC Registers */ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, - MEMC_SEG_NUM, + MEMC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->memc_regs), "MEMC Registers"); @@ -1055,7 +1055,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the PBus Registers */ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, - PBUS_SEG_NUM, + PBUS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->pbus_regs), "PBUS Registers"); @@ -1066,7 +1066,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the MDE Registers */ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, - MDE_SEG_NUM, + MDE_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->mde_regs), "MDE Registers"); @@ -1076,7 +1076,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) goto err; ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, - MISC_NIC_INFO_SEG_NUM, + MISC_NIC_INFO_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->misc_nic_info), "MISC NIC INFO"); @@ -1088,14 +1088,14 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Segment 31 */ /* Get indexed register values. */ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, - INTR_STATES_SEG_NUM, + INTR_STATES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->intr_states), "INTR States"); ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, - CAM_ENTRIES_SEG_NUM, + CAM_ENTRIES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->cam_entries), "CAM Entries"); @@ -1104,18 +1104,18 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) goto err; ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, - ROUTING_WORDS_SEG_NUM, + ROUTING_WORDS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_routing_words), "Routing Words"); status = ql_get_routing_entries(qdev, - &mpi_coredump->nic_routing_words[0]); + &mpi_coredump->nic_routing_words[0]); if (status) goto err; /* Segment 34 (Rev C. 
step 23) */ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, - ETS_SEG_NUM, + ETS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ets), "ETS Registers"); @@ -1124,24 +1124,24 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) goto err; ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, - PROBE_DUMP_SEG_NUM, + PROBE_DUMP_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->probe_dump), "Probe Dump"); ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, - ROUTING_INDEX_SEG_NUM, + ROUTING_INDEX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->routing_regs), "Routing Regs"); status = ql_get_routing_index_registers(qdev, - &mpi_coredump->routing_regs[0]); + &mpi_coredump->routing_regs[0]); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, - MAC_PROTOCOL_SEG_NUM, + MAC_PROTOCOL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->mac_prot_regs), "MAC Prot Regs"); @@ -1149,7 +1149,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Get the semaphore registers for all 5 functions */ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, - SEM_REGS_SEG_NUM, + SEM_REGS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->sem_regs), "Sem Registers"); @@ -1175,12 +1175,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) } ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, - WCS_RAM_SEG_NUM, + WCS_RAM_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->code_ram), "WCS RAM"); status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], - CODE_RAM_ADDR, CODE_RAM_CNT); + CODE_RAM_ADDR, CODE_RAM_CNT); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of CODE RAM. Status = 0x%.08x\n", @@ -1190,12 +1190,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) /* Insert the segment header */ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, - MEMC_RAM_SEG_NUM, + MEMC_RAM_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->memc_ram), "MEMC RAM"); status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], - MEMC_RAM_ADDR, MEMC_RAM_CNT); + MEMC_RAM_ADDR, MEMC_RAM_CNT); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of MEMC RAM. Status = 0x%.08x\n", @@ -1230,7 +1230,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, memset(&(mpi_coredump->mpi_global_header), 0, - sizeof(struct mpi_coredump_global_header)); + sizeof(struct mpi_coredump_global_header)); mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; mpi_coredump->mpi_global_header.headerSize = sizeof(struct mpi_coredump_global_header); @@ -1242,7 +1242,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, /* segment 16 */ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, - MISC_NIC_INFO_SEG_NUM, + MISC_NIC_INFO_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->misc_nic_info), "MISC NIC INFO"); @@ -1253,7 +1253,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, /* Segment 16, Rev C. 
Step 18 */ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, - NIC1_CONTROL_SEG_NUM, + NIC1_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_regs), "NIC Registers"); @@ -1264,14 +1264,14 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, /* Segment 31 */ /* Get indexed register values. */ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, - INTR_STATES_SEG_NUM, + INTR_STATES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->intr_states), "INTR States"); ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, - CAM_ENTRIES_SEG_NUM, + CAM_ENTRIES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->cam_entries), "CAM Entries"); @@ -1280,18 +1280,18 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, return; ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, - ROUTING_WORDS_SEG_NUM, + ROUTING_WORDS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_routing_words), "Routing Words"); status = ql_get_routing_entries(qdev, - &mpi_coredump->nic_routing_words[0]); + &mpi_coredump->nic_routing_words[0]); if (status) return; /* Segment 34 (Rev C. step 23) */ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, - ETS_SEG_NUM, + ETS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ets), "ETS Registers"); @@ -1992,7 +1992,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) le16_to_cpu(ib_mac_rsp->vlan_id)); pr_err("flags4 = %s%s%s\n", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : ""); -- cgit v1.2.3 From b883582d734074bc0f0c4176998becec4a394a04 Mon Sep 17 00:00:00 2001 From: Jules Irenge Date: Thu, 10 Oct 2019 22:40:04 +0100 Subject: staging: qlge: Fix multiple assignments warning by replacing integer variables to bool Fix multiple assignments warning " check issue detected by checkpatch tool: "CHECK: multiple assignments should be avoided". 
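The change is mechanical: flag variables that used to be ints initialised through one chained assignment become bools initialised where they are declared. A hedged sketch of the before/after shape, with made-up function and variable names rather than the driver's own:

#include <linux/types.h>

/* Before: int flags set through a chained assignment. */
static void serdes_flags_old(void)
{
	unsigned int direct_valid, indirect_valid;

	direct_valid = indirect_valid = 0;	/* checkpatch: multiple assignments */
	/* ... flags updated and consumed here ... */
}

/* After: one bool per declaration, no chained assignment. */
static void serdes_flags_new(void)
{
	bool direct_valid = false;
	bool indirect_valid = false;

	/* ... flags updated and consumed here ... */
}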
Signed-off-by: Jules Irenge Link: https://lore.kernel.org/r/20191010214006.23677-2-jbi.octave@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_dbg.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index a6ac9f796d81..ac162176f6f5 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -112,7 +112,7 @@ exit: static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, u32 *direct_ptr, u32 *indirect_ptr, - unsigned int direct_valid, unsigned int indirect_valid) + bool direct_valid, bool indirect_valid) { unsigned int status; @@ -136,13 +136,12 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) { int status; - unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; - unsigned int xaui_indirect_valid, i; + bool xfi_direct_valid = false, xfi_indirect_valid = false; + bool xaui_direct_valid = true, xaui_indirect_valid = true; + unsigned int i; u32 *direct_ptr, temp; u32 *indirect_ptr; - xfi_direct_valid = xfi_indirect_valid = 0; - xaui_direct_valid = xaui_indirect_valid = 1; /* The XAUI needs to be read out per port */ status = ql_read_other_func_serdes_reg(qdev, @@ -152,7 +151,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_indirect_valid = 0; + xaui_indirect_valid = false; status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); @@ -161,7 +160,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_direct_valid = 0; + xaui_direct_valid = false; /* * XFI register is shared so only need to read one @@ -176,18 +175,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, /* now see if i'm NIC 1 or NIC 2 */ if (qdev->func & 1) /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ - xfi_indirect_valid = 1; + xfi_indirect_valid = true; else - xfi_direct_valid = 1; + xfi_direct_valid = true; } if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == XG_SERDES_ADDR_XFI2_PWR_UP) { /* now see if i'm NIC 1 or NIC 2 */ if (qdev->func & 1) /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ - xfi_direct_valid = 1; + xfi_direct_valid = true; else - xfi_indirect_valid = 1; + xfi_indirect_valid = true; } /* Get XAUI_AN register block. */ -- cgit v1.2.3 From e311f25e186560b0315e66707177ee152e718228 Mon Sep 17 00:00:00 2001 From: Jules Irenge Date: Thu, 10 Oct 2019 22:40:05 +0100 Subject: staging: qlge: add space to fix check warning Add space to fix warning of preferred space near the division operator issue detected by checkpatch. 
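The rule being satisfied is simply that binary operators such as '/' get a space on each side. A tiny illustrative sketch; EXAMPLE_REG_ADDR is a made-up constant, not one of the driver's registers:

#define EXAMPLE_REG_ADDR 0x350

static unsigned int example_index(void)
{
	/* return EXAMPLE_REG_ADDR/4;    triggers the checkpatch complaint */
	return EXAMPLE_REG_ADDR / 4;	/* preferred form */
}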
Signed-off-by: Jules Irenge Link: https://lore.kernel.org/r/20191010214006.23677-3-jbi.octave@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_dbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index ac162176f6f5..182a07ae5e61 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -72,7 +72,7 @@ static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, goto exit; /* set up for reg read */ - ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); + ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R); /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, -- cgit v1.2.3 From db4b23d5a898cda286e83fcb3650f0951a235d12 Mon Sep 17 00:00:00 2001 From: Jules Irenge Date: Thu, 10 Oct 2019 22:40:06 +0100 Subject: staging: qlge: fix comparison to NULL warning Fix comparison to NULL by replacing with !ptr instead. Issue detected by checkpatch. Signed-off-by: Jules Irenge Link: https://lore.kernel.org/r/20191010214006.23677-4-jbi.octave@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_dbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 182a07ae5e61..019b7e6a1b7a 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1805,7 +1805,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) pr_err("%s: Enter\n", __func__); ptr = kmalloc(size, GFP_ATOMIC); - if (ptr == NULL) + if (!ptr) return; if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { -- cgit v1.2.3 From 41e1bf811ace29bdc0df15523e3dfb3233704d1b Mon Sep 17 00:00:00 2001 From: Samuil Ivanov Date: Wed, 23 Oct 2019 23:58:55 +0300 Subject: Staging: qlge: Rewrite two while loops as simple for loops This is a task from the TODO list of qlge driver: - some "while" loops could be rewritten with simple "for" The change is in functions ql_wait_reg_rdy and ql_wait_cfg in qlge_main.c. The while loops are basically count based (they decrement on each iteration), and it makes more sense to be a for loop construction instead. 
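In other words, a loop whose only job is "try at most N times, delaying between attempts" reads more naturally when the counter lives in the for header. A minimal standalone sketch of the rewrite; poll_bit_ready_while(), poll_bit_ready_for() and the EXAMPLE_* constants are illustrative, not the driver's actual symbols or values:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_UDELAY_COUNT	15	/* illustrative retry budget */
#define EXAMPLE_UDELAY_DELAY	100	/* illustrative delay, in microseconds */

/* Before: the counter is declared, tested and decremented in three places. */
static int poll_bit_ready_while(void __iomem *reg, u32 bit)
{
	int count = EXAMPLE_UDELAY_COUNT;

	while (count) {
		if (readl(reg) & bit)
			return 0;
		udelay(EXAMPLE_UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* After: all of the iteration bookkeeping sits in the for header. */
static int poll_bit_ready_for(void __iomem *reg, u32 bit)
{
	int count;

	for (count = 0; count < EXAMPLE_UDELAY_COUNT; count++) {
		if (readl(reg) & bit)
			return 0;
		udelay(EXAMPLE_UDELAY_DELAY);
	}
	return -ETIMEDOUT;
}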
Signed-off-by: Samuil Ivanov Link: https://lore.kernel.org/r/20191023205855.GA1841@samuil-ThinkCentre-M92P Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_main.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 0c381d91faa6..6f6b4c06688c 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -167,9 +167,9 @@ void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) { u32 temp; - int count = UDELAY_COUNT; + int count; - while (count) { + for (count = 0; count < UDELAY_COUNT; count++) { temp = ql_read32(qdev, reg); /* check for errors */ @@ -181,7 +181,6 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) } else if (temp & bit) return 0; udelay(UDELAY_DELAY); - count--; } netif_alert(qdev, probe, qdev->ndev, "Timed out waiting for reg %x to come ready.\n", reg); @@ -193,17 +192,16 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) */ static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) { - int count = UDELAY_COUNT; + int count; u32 temp; - while (count) { + for (count = 0; count < UDELAY_COUNT; count++) { temp = ql_read32(qdev, CFG); if (temp & CFG_LE) return -EIO; if (!(temp & bit)) return 0; udelay(UDELAY_DELAY); - count--; } return -ETIMEDOUT; } -- cgit v1.2.3 From 3b525cb00c620269e07663788bfd727c07524bf9 Mon Sep 17 00:00:00 2001 From: Nachammai Karuppiah Date: Mon, 4 Nov 2019 23:54:18 -0800 Subject: staging: qlge: Avoid NULL comparison Replace NULL comparison with boolean negation. Issue found using checkpatch.pl Signed-off-by: Nachammai Karuppiah Acked-by: Julia Lawall Link: https://lore.kernel.org/r/1572940458-109252-1-git-send-email-nachukannan@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/qlge/qlge_dbg.c | 4 ++-- drivers/staging/qlge/qlge_main.c | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers/staging/qlge') diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 019b7e6a1b7a..83f34ca43aa4 100644 --- a/drivers/staging/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c @@ -1649,7 +1649,7 @@ void ql_dump_wqicb(struct wqicb *wqicb) void ql_dump_tx_ring(struct tx_ring *tx_ring) { - if (tx_ring == NULL) + if (!tx_ring) return; pr_err("===================== Dumping tx_ring %d ===============\n", tx_ring->wq_id); @@ -1741,7 +1741,7 @@ static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring) void ql_dump_rx_ring(struct rx_ring *rx_ring) { - if (rx_ring == NULL) + if (!rx_ring) return; pr_err("===================== Dumping rx_ring %d ===============\n", rx_ring->cq_id); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 6f6b4c06688c..6ad4515311f7 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -1588,7 +1588,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, skb = sbq_desc->p.skb; /* Allocate new_skb and copy */ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); - if (new_skb == NULL) { + if (!new_skb) { rx_ring->rx_dropped++; return; } @@ -1792,7 +1792,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, */ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); skb = netdev_alloc_skb(qdev->ndev, length); - if (skb == NULL) { + if (!skb) { netif_printk(qdev, probe, 
KERN_DEBUG, qdev->ndev, "No skb available, drop the packet.\n"); return NULL; @@ -2663,7 +2663,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev) qdev->rx_ring_shadow_reg_area = pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); - if (qdev->rx_ring_shadow_reg_area == NULL) { + if (!qdev->rx_ring_shadow_reg_area) { netif_err(qdev, ifup, qdev->ndev, "Allocation of RX shadow space failed.\n"); return -ENOMEM; @@ -2672,7 +2672,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev) qdev->tx_ring_shadow_reg_area = pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, &qdev->tx_ring_shadow_reg_dma); - if (qdev->tx_ring_shadow_reg_area == NULL) { + if (!qdev->tx_ring_shadow_reg_area) { netif_err(qdev, ifup, qdev->ndev, "Allocation of TX shadow space failed.\n"); goto err_wqp_sh_area; @@ -2724,14 +2724,14 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev, pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, &tx_ring->wq_base_dma); - if ((tx_ring->wq_base == NULL) || + if (!tx_ring->wq_base || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) goto pci_alloc_err; tx_ring->q = kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc), GFP_KERNEL); - if (tx_ring->q == NULL) + if (!tx_ring->q) goto err; return 0; @@ -2778,7 +2778,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring for (i = 0; i < QLGE_BQ_LEN; i++) { struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i]; - if (sbq_desc == NULL) { + if (!sbq_desc) { netif_err(qdev, ifup, qdev->ndev, "sbq_desc %d is NULL.\n", i); return; @@ -2899,7 +2899,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, &rx_ring->cq_base_dma); - if (rx_ring->cq_base == NULL) { + if (!rx_ring->cq_base) { netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); return -ENOMEM; } @@ -4485,7 +4485,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, if (qlge_mpi_coredump) { qdev->mpi_coredump = vmalloc(sizeof(struct ql_mpi_coredump)); - if (qdev->mpi_coredump == NULL) { + if (!qdev->mpi_coredump) { err = -ENOMEM; goto err_out2; } -- cgit v1.2.3
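Both NULL-comparison patches above apply the same small idiom across the driver: test pointers with '!' rather than comparing against NULL, which is what checkpatch asks for. A minimal sketch of the preferred form; example_alloc() is a made-up function, not part of qlge:

#include <linux/slab.h>
#include <linux/errno.h>

static int example_alloc(size_t size)
{
	void *ptr = kmalloc(size, GFP_ATOMIC);

	if (!ptr)	/* preferred over "if (ptr == NULL)" */
		return -ENOMEM;

	/* ... use ptr ... */
	kfree(ptr);
	return 0;
}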