author | Stephen Hemminger <shemminger@osdl.org> | 2007-01-08 11:26:30 -0800 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2007-02-05 16:58:46 -0500 |
commit | 3de00b8958b12d62712ae9500968c65d3b43bb27 (patch) | |
tree | 6e4ba6acdc8745ad1e5890c517783ae304652697 /drivers/net/chelsio | |
parent | 4d22de3e6cc4a09c369b504cd8bcde3385a974cd (diff) | |
chelsio: NAPI speed improvement
Speed up and clean up the receive processing by eliminating the
MMIO read and a lock round trip.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/chelsio')
-rw-r--r-- | drivers/net/chelsio/sge.c | 77 |
1 file changed, 35 insertions(+), 42 deletions(-)
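The heart of the patch is the new responses_pending() helper visible in the diff below: instead of reading the A_PL_CAUSE register over the bus (and taking async_lock) to discover whether receive work is queued, the interrupt and poll paths now test the generation bit of the next response-queue entry, which sits in host memory. The following sketch shows the generation-bit technique in isolation; it is a simplified illustration, and the names (resp_entry, resp_ring, ring_has_work, ring_advance) are placeholders rather than the driver's actual types.

/*
 * Simplified sketch of the generation-bit check behind the new
 * responses_pending() helper.  The NIC writes response descriptors into a
 * ring in host memory and flips a "generation" bit each time it wraps;
 * the driver tracks the value it expects next.  Comparing the two is a
 * plain memory read, so no MMIO register access or lock is needed just to
 * find out whether work is pending.  All names here are illustrative.
 */
struct resp_entry {
	unsigned int generation:1;	/* written by the NIC */
	unsigned int data_valid:1;
	/* ... payload fields elided ... */
};

struct resp_ring {
	struct resp_entry *entries;	/* DMA ring shared with the NIC */
	unsigned int cidx;		/* next entry the driver will consume */
	unsigned int genbit;		/* generation value expected at cidx */
	unsigned int size;
};

/* Non-zero when the entry at cidx has already been written by the hardware. */
static inline int ring_has_work(const struct resp_ring *r)
{
	return r->entries[r->cidx].generation == r->genbit;
}

/* After consuming an entry, advance cidx and flip genbit on wrap. */
static inline void ring_advance(struct resp_ring *r)
{
	if (++r->cidx == r->size) {
		r->cidx = 0;
		r->genbit ^= 1;
	}
}

With a check this cheap, t1_interrupt can choose between the NAPI fast path and t1_slow_intr_handler() without touching a device register first, and t1_poll can re-check for late-arriving responses before completing NAPI.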
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f94d63971642..8e287e79e4e3 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1559,6 +1559,14 @@ static int process_responses(struct adapter *adapter, int budget)
 	return budget;
 }
 
+static inline int responses_pending(const struct adapter *adapter)
+{
+	const struct respQ *Q = &adapter->sge->respQ;
+	const struct respQ_e *e = &Q->entries[Q->cidx];
+
+	return (e->GenerationBit == Q->genbit);
+}
+
 #ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
@@ -1568,13 +1576,16 @@ static int process_responses(struct adapter *adapter, int budget)
  * which the caller must ensure is a valid pure response. Returns 1 if it
  * encounters a valid data-carrying response, 0 otherwise.
  */
-static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+static int process_pure_responses(struct adapter *adapter)
 {
 	struct sge *sge = adapter->sge;
 	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
 	unsigned int flags = 0;
 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
 
+	if (e->DataValid)
+		return 1;
 	do {
 		flags |= e->Qsleeping;
 
@@ -1610,23 +1621,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
 int t1_poll(struct net_device *dev, int *budget)
 {
 	struct adapter *adapter = dev->priv;
-	int effective_budget = min(*budget, dev->quota);
-	int work_done = process_responses(adapter, effective_budget);
+	int work_done;
 
+	work_done = process_responses(adapter, min(*budget, dev->quota));
 	*budget -= work_done;
 	dev->quota -= work_done;
 
-	if (work_done >= effective_budget)
+	if (unlikely(responses_pending(adapter)))
 		return 1;
 
-	spin_lock_irq(&adapter->async_lock);
-	__netif_rx_complete(dev);
+	netif_rx_complete(dev);
 	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	spin_unlock_irq(&adapter->async_lock);
 
 	return 0;
+
 }
 
 /*
@@ -1635,44 +1643,33 @@ int t1_poll(struct net_device *dev, int *budget)
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
-	struct net_device *dev = adapter->sge->netdev;
 	struct sge *sge = adapter->sge;
-	u32 cause;
-	int handled = 0;
-
-	cause = readl(adapter->regs + A_PL_CAUSE);
-	if (cause == 0 || cause == ~0)
-		return IRQ_NONE;
+	int handled;
 
-	spin_lock(&adapter->async_lock);
-	if (cause & F_PL_INTR_SGE_DATA) {
-		struct respQ *q = &adapter->sge->respQ;
-		struct respQ_e *e = &q->entries[q->cidx];
+	if (likely(responses_pending(adapter))) {
+		struct net_device *dev = sge->netdev;
 
-		handled = 1;
 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
-		if (e->GenerationBit == q->genbit &&
-		    __netif_rx_schedule_prep(dev)) {
-			if (e->DataValid || process_pure_responses(adapter, e)) {
-				/* mask off data IRQ */
-				writel(adapter->slow_intr_mask,
-				       adapter->regs + A_PL_ENABLE);
-				__netif_rx_schedule(sge->netdev);
-				goto unlock;
+		if (__netif_rx_schedule_prep(dev)) {
+			if (process_pure_responses(adapter))
+				__netif_rx_schedule(dev);
+			else {
+				/* no data, no NAPI needed */
+				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+				netif_poll_enable(dev);	/* undo schedule_prep */
 			}
-			/* no data, no NAPI needed */
-			netif_poll_enable(dev);
-		}
-		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	} else
-		handled = t1_slow_intr_handler(adapter);
+		}
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&adapter->async_lock);
+	handled = t1_slow_intr_handler(adapter);
+	spin_unlock(&adapter->async_lock);
 
 	if (!handled)
 		sge->stats.unhandled_irqs++;
-unlock:
-	spin_unlock(&adapter->async_lock);
+
 	return IRQ_RETVAL(handled != 0);
 }
 
@@ -1695,17 +1692,13 @@ unlock:
 irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
-	struct respQ_e *e;
 	struct adapter *adapter = cookie;
-	struct respQ *Q = &adapter->sge->respQ;
 
 	spin_lock(&adapter->async_lock);
-	e = &Q->entries[Q->cidx];
-	prefetch(e);
 
 	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
-	if (likely(e->GenerationBit == Q->genbit))
+	if (likely(responses_pending(adapter)))
 		work_done = process_responses(adapter, -1);
 	else
 		work_done = t1_slow_intr_handler(adapter);