author     Alexander Duyck <alexander.h.duyck@redhat.com>  2014-11-14 00:56:35 +0000
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>      2014-12-05 09:13:05 -0800
commit     ad435ec689c981a11005d5283cc88588a699537f (patch)
tree       0e3730d2b344cef47865013209eb9b3ba5f04bb7 /drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
parent     18cb652a41ab2c9975e9b4d7ac69230d5a258f24 (diff)
ixgbe: Remove tail write abstraction and add missing barrier
This change cleans up the tail writes for the ixgbe descriptor queues. The
current implementation had me confused, as I wasn't sure whether it was still
making use of the surprise-remove logic or not.
It also adds an mmiowb(), which is needed on ia64, mips, and a couple of
other architectures in order to synchronize the MMIO writes with the Tx
queue _xmit_lock spinlock.
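
For context, the ordering hazard that mmiowb() closes looks roughly like the sketch below. This is an illustrative stand-in, not code from the patch: struct demo_ring, its fields, and demo_xmit_tail_write are hypothetical names, with the netdev _xmit_lock reduced to a bare spinlock.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_ring {
	spinlock_t lock;	/* stands in for the Tx queue _xmit_lock */
	void __iomem *tail;	/* MMIO tail (doorbell) register */
};

static void demo_xmit_tail_write(struct demo_ring *ring, u32 next_to_use)
{
	spin_lock(&ring->lock);

	/* ... queue descriptors for this packet ... */

	/* posted MMIO write telling hardware about the new tail */
	writel(next_to_use, ring->tail);

	/* On ia64, mips, and similar platforms, spin_unlock() alone does
	 * not order posted MMIO writes between CPUs: CPU A's tail write
	 * could reach the device after CPU B's write issued under the
	 * next lock hold.  mmiowb() flushes the posted write before the
	 * unlock; it is a no-op on strongly ordered architectures.
	 */
	mmiowb();

	spin_unlock(&ring->lock);
}

On x86 the barrier compiles away, so the pattern only costs something on the platforms that actually reorder posted MMIO.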
Cc: Don Skidmore <donald.c.skidmore@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3c1d4ea47782..73cd0fefad44 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1416,22 +1416,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	ixgbe_write_tail(rx_ring, val);
-}
-
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 				    struct ixgbe_rx_buffer *bi)
 {
@@ -1517,8 +1501,20 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 
 	i += rx_ring->count;
 
-	if (rx_ring->next_to_use != i)
-		ixgbe_release_rx_desc(rx_ring, i);
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
 }
 
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@@ -6954,8 +6950,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-		/* notify HW of packet */
-		ixgbe_write_tail(tx_ring, i);
+		writel(i, tx_ring->tail);
+
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
 	}
 
 	return;
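
Taken together, the Rx hunks replace the ixgbe_release_rx_desc() helper with the same logic open-coded at its single call site. A condensed sketch of the resulting "publish new descriptors" pattern follows; the demo_* names are hypothetical stand-ins, not the real ixgbe structures.

#include <linux/io.h>
#include <linux/types.h>

struct demo_rx_ring {
	u16 next_to_use;
	u16 next_to_alloc;
	void __iomem *tail;	/* MMIO tail register */
};

static void demo_publish_rx_desc(struct demo_rx_ring *rx_ring, u16 i)
{
	rx_ring->next_to_use = i;

	/* next_to_alloc follows because the ring was just refilled */
	rx_ring->next_to_alloc = i;

	/* Make the descriptor writes visible in memory before the MMIO
	 * doorbell; on weakly ordered CPUs the device could otherwise
	 * fetch the new tail before it can see the descriptors.
	 */
	wmb();
	writel(i, rx_ring->tail);
}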