authorYi Zou <yi.zou@intel.com>2011-01-06 14:29:56 +0000
committerDavid S. Miller <davem@davemloft.net>2011-01-09 23:44:10 -0800
commit2d39d576fad0fd4bb79a0de26fca50a4be1ffdc1 (patch)
tree12d38ef31a7d6c2a2b1e73e6564091eb9c85ce98
parent5377a4160bb65ee4dd11b4b1d081d86d56d92bff (diff)
downloadlinux-stable-2d39d576fad0fd4bb79a0de26fca50a4be1ffdc1.tar.gz
linux-stable-2d39d576fad0fd4bb79a0de26fca50a4be1ffdc1.tar.bz2
linux-stable-2d39d576fad0fd4bb79a0de26fca50a4be1ffdc1.zip
ixgbe: make sure per Rx queue is disabled before unmapping the receive buffer
When disabling the Rx logic globally, we also want to disable the per-queue Rx receive logic via the per-queue Rx control register RXDCTL, so that no more DMA happens from the packet buffer to the receive buffer associated with the Rx ring before we start unmapping the Rx ring's receive buffers. The hardware may take up to 100us before the corresponding Rx queue is really disabled. Add ixgbe_disable_rx_queue() for this purpose.

Signed-off-by: Yi Zou <yi.zou@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
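For illustration only, below is a minimal, self-contained userspace sketch of the disable-then-poll pattern the patch introduces. The fake_rxdctl variable, read_reg()/write_reg() helpers, usleep() call and the bit value are stand-ins for the driver's IXGBE_READ_REG()/IXGBE_WRITE_REG(), udelay() and IXGBE_RXDCTL_ENABLE, not the real driver API; the actual implementation is the ixgbe_disable_rx_queue() added in the ixgbe_main.c hunk below.

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>

    #define RXDCTL_ENABLE  0x02000000u  /* queue-enable bit (illustrative value) */
    #define MAX_DESC_POLL  10           /* 10 polls x 10us = 100us budget */

    static volatile uint32_t fake_rxdctl = RXDCTL_ENABLE;  /* stand-in register */

    static uint32_t read_reg(void)        { return fake_rxdctl; }
    static void     write_reg(uint32_t v) { fake_rxdctl = v; }

    /* Clear the enable bit, then poll until the "hardware" reports the queue off. */
    static int disable_rx_queue(void)
    {
            int wait_loop = MAX_DESC_POLL;
            uint32_t rxdctl = read_reg();

            /* write the value back with the enable bit cleared */
            write_reg(rxdctl & ~RXDCTL_ENABLE);

            /* the queue may take up to ~100us to actually stop */
            do {
                    usleep(10);          /* udelay(10) in the driver */
                    rxdctl = read_reg();
            } while (--wait_loop && (rxdctl & RXDCTL_ENABLE));

            return wait_loop ? 0 : -1;   /* -1: not cleared within the budget */
    }

    int main(void)
    {
            printf("disable_rx_queue: %s\n",
                   disable_rx_queue() == 0 ? "queue disabled" : "timed out");
            return 0;
    }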
-rw-r--r--  drivers/net/ixgbe/ixgbe.h          |  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  |  4
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     | 40
3 files changed, 39 insertions, 7 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8cb7d6..bdeaa9e06c00 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e8b393..a8bab1564d07 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
- reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+ ixgbe_disable_rx_queue(adapter, rx_ring);
/* now Tx */
reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3f8197..e8ae311bbbe5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+ "the polling period\n", reg_idx);
+ }
+}
+
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
/* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
- rxdctl & ~IXGBE_RXDCTL_ENABLE);
- IXGBE_WRITE_FLUSH(hw);
+ ixgbe_disable_rx_queue(adapter, ring);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_FLUSH(hw);
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
msleep(10);
netif_tx_stop_all_queues(netdev);