-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h         1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c        62
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h        90
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  620
4 files changed, 403 insertions, 370 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5426b2dee6a6..05e4f32d84f7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_TXD_CMD_DEXT    0x20000000 /* Descriptor extension (0 = legacy) */
 #define IXGBE_TXD_CMD_VLE     0x40000000 /* Add VLAN tag */
 #define IXGBE_TXD_STAT_DD     0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD         (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
 
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 515ba4e29760..f68b78c732a8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -77,11 +77,11 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
     {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, stats.saved_reset_vfgotc)},
     {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+    {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+    {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
     {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, stats.saved_reset_vfmprc)},
-    {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
     {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-    {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
 #ifdef BP_EXTENDED_STATS
     {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
     {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
@@ -305,18 +305,18 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
             /* clone ring and setup updated count */
             tx_ring[i] = *adapter->tx_ring[i];
             tx_ring[i].count = new_tx_count;
-            err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
-            if (!err)
-                continue;
-            while (i) {
-                i--;
-                ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
-            }
+            err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+            if (err) {
+                while (i) {
+                    i--;
+                    ixgbevf_free_tx_resources(&tx_ring[i]);
+                }
 
-            vfree(tx_ring);
-            tx_ring = NULL;
+                vfree(tx_ring);
+                tx_ring = NULL;
 
-            goto clear_reset;
+                goto clear_reset;
+            }
         }
     }
 
@@ -331,18 +331,18 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
             /* clone ring and setup updated count */
             rx_ring[i] = *adapter->rx_ring[i];
             rx_ring[i].count = new_rx_count;
-            err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
-            if (!err)
-                continue;
-            while (i) {
-                i--;
-                ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
-            }
+            err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+            if (err) {
+                while (i) {
+                    i--;
+                    ixgbevf_free_rx_resources(&rx_ring[i]);
+                }
 
-            vfree(rx_ring);
-            rx_ring = NULL;
+                vfree(rx_ring);
+                rx_ring = NULL;
 
-            goto clear_reset;
+                goto clear_reset;
+            }
         }
     }
 
@@ -352,7 +352,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
     /* Tx */
     if (tx_ring) {
         for (i = 0; i < adapter->num_tx_queues; i++) {
-            ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+            ixgbevf_free_tx_resources(adapter->tx_ring[i]);
             *adapter->tx_ring[i] = tx_ring[i];
         }
         adapter->tx_ring_count = new_tx_count;
@@ -364,7 +364,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
     /* Rx */
     if (rx_ring) {
         for (i = 0; i < adapter->num_rx_queues; i++) {
-            ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+            ixgbevf_free_rx_resources(adapter->rx_ring[i]);
             *adapter->rx_ring[i] = rx_ring[i];
         }
         adapter->rx_ring_count = new_rx_count;
@@ -380,7 +380,7 @@ clear_reset:
     /* free Tx resources if Rx error is encountered */
     if (tx_ring) {
         for (i = 0; i < adapter->num_tx_queues; i++)
-            ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+            ixgbevf_free_tx_resources(&tx_ring[i]);
         vfree(tx_ring);
     }
 
@@ -411,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
         tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
-        rx_yields += adapter->rx_ring[i]->bp_yields;
-        rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
-        rx_missed += adapter->rx_ring[i]->bp_misses;
+        rx_yields += adapter->rx_ring[i]->stats.yields;
+        rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+        rx_missed += adapter->rx_ring[i]->stats.misses;
     }
 
     for (i = 0; i < adapter->num_tx_queues; i++) {
-        tx_yields += adapter->tx_ring[i]->bp_yields;
-        tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
-        tx_missed += adapter->tx_ring[i]->bp_misses;
+        tx_yields += adapter->tx_ring[i]->stats.yields;
+        tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+        tx_missed += adapter->tx_ring[i]->stats.misses;
     }
 
     adapter->bp_rx_yields = rx_yields;
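The reworked error path in ixgbevf_set_ringparam() above folds the unwind loop into the failure branch: when ring i fails to allocate, the code walks i back toward zero and frees every ring that was already set up before releasing the clone array. A standalone userspace sketch of the same idiom, not part of the patch (setup_ring() and NUM_RINGS are hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define NUM_RINGS 4

static void *setup_ring(int idx)
{
    /* stand-in for ixgbevf_setup_tx_resources(); fail on ring 2 for demo */
    return (idx == 2) ? NULL : malloc(4096);
}

int main(void)
{
    void *ring[NUM_RINGS] = { NULL };
    int i;

    for (i = 0; i < NUM_RINGS; i++) {
        ring[i] = setup_ring(i);
        if (!ring[i]) {
            /* unwind exactly like the patch: walk i back to 0 */
            while (i) {
                i--;
                free(ring[i]);
                ring[i] = NULL;
            }
            fprintf(stderr, "setup failed, unwound cleanly\n");
            return 1;
        }
    }
    for (i = 0; i < NUM_RINGS; i++)
        free(ring[i]);
    return 0;
}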
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 0547e40980cb..54829326bb09 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -46,12 +46,15 @@
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
-    struct sk_buff *skb;
-    dma_addr_t dma;
-    unsigned long time_stamp;
     union ixgbe_adv_tx_desc *next_to_watch;
-    u16 length;
-    u16 mapped_as_page;
+    unsigned long time_stamp;
+    struct sk_buff *skb;
+    unsigned int bytecount;
+    unsigned short gso_segs;
+    __be16 protocol;
+    DEFINE_DMA_UNMAP_ADDR(dma);
+    DEFINE_DMA_UNMAP_LEN(len);
+    u32 tx_flags;
 };
 
 struct ixgbevf_rx_buffer {
@@ -59,6 +62,29 @@ struct ixgbevf_rx_buffer {
     dma_addr_t dma;
 };
 
+struct ixgbevf_stats {
+    u64 packets;
+    u64 bytes;
+#ifdef BP_EXTENDED_STATS
+    u64 yields;
+    u64 misses;
+    u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+    u64 restart_queue;
+    u64 tx_busy;
+    u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+    u64 non_eop_descs;
+    u64 alloc_rx_page_failed;
+    u64 alloc_rx_buff_failed;
+    u64 csum_err;
+};
+
 struct ixgbevf_ring {
     struct ixgbevf_ring *next;
     struct net_device *netdev;
@@ -70,22 +96,19 @@ struct ixgbevf_ring {
     unsigned int next_to_use;
     unsigned int next_to_clean;
 
-    int queue_index; /* needed for multiqueue queue management */
     union {
         struct ixgbevf_tx_buffer *tx_buffer_info;
        struct ixgbevf_rx_buffer *rx_buffer_info;
     };
 
-    u64 total_bytes;
-    u64 total_packets;
-    struct u64_stats_sync syncp;
+    struct ixgbevf_stats stats;
+    struct u64_stats_sync syncp;
+    union {
+        struct ixgbevf_tx_queue_stats tx_stats;
+        struct ixgbevf_rx_queue_stats rx_stats;
+    };
+
     u64 hw_csum_rx_error;
-    u64 hw_csum_rx_good;
-#ifdef BP_EXTENDED_STATS
-    u64 bp_yields;
-    u64 bp_misses;
-    u64 bp_cleaned;
-#endif
     u8 __iomem *tail;
 
     u16 reg_idx; /* holds the special value that gets the hardware register
@@ -93,6 +116,7 @@ struct ixgbevf_ring {
                   * for DCB and RSS modes */
 
     u16 rx_buf_len;
+    int queue_index; /* needed for multiqueue queue management */
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -123,8 +147,6 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN           (u32)(1 << 1)
 #define IXGBE_TX_FLAGS_TSO            (u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4           (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE           (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO            (u32)(1 << 5)
 #define IXGBE_TX_FLAGS_VLAN_MASK      0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT     16
@@ -186,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
         q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
         rc = false;
 #ifdef BP_EXTENDED_STATS
-        q_vector->tx.ring->bp_yields++;
+        q_vector->tx.ring->stats.yields++;
 #endif
     } else {
         /* we don't care if someone yielded */
@@ -221,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
         q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
         rc = false;
 #ifdef BP_EXTENDED_STATS
-        q_vector->rx.ring->bp_yields++;
+        q_vector->rx.ring->stats.yields++;
 #endif
     } else {
         /* preserve yield marks */
@@ -314,7 +336,6 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
 struct ixgbevf_adapter {
     struct timer_list watchdog_timer;
     unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-    u16 bd_number;
     struct work_struct reset_task;
     struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 
@@ -327,25 +348,18 @@ struct ixgbevf_adapter {
     u32 eims_other;
 
     /* TX */
-    struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
     int num_tx_queues;
+    struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
     u64 restart_queue;
-    u64 hw_csum_tx_good;
-    u64 lsc_int;
-    u64 hw_tso_ctxt;
-    u64 hw_tso6_ctxt;
     u32 tx_timeout_count;
 
     /* RX */
-    struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
     int num_rx_queues;
+    struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
     u64 hw_csum_rx_error;
     u64 hw_rx_no_dma_resources;
-    u64 hw_csum_rx_good;
     u64 non_eop_descs;
     int num_msix_vectors;
-    struct msix_entry *msix_entries;
-
     u32 alloc_rx_page_failed;
     u32 alloc_rx_buff_failed;
 
@@ -357,6 +371,8 @@ struct ixgbevf_adapter {
 #define IXGBE_FLAG_IN_NETPOLL                  (u32)(1 << 1)
 #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED     (u32)(1 << 2)
 
+    struct msix_entry *msix_entries;
+
     /* OS defined structs */
     struct net_device *netdev;
     struct pci_dev *pdev;
@@ -364,10 +380,12 @@ struct ixgbevf_adapter {
     /* structs defined in ixgbe_vf.h */
     struct ixgbe_hw hw;
     u16 msg_enable;
-    struct ixgbevf_hw_stats stats;
+    u16 bd_number;
     /* Interrupt Throttle Rate */
     u32 eitr_param;
 
+    struct ixgbevf_hw_stats stats;
+
     unsigned long state;
     u64 tx_busy;
     unsigned int tx_ring_count;
@@ -386,9 +404,9 @@ struct ixgbevf_adapter {
     u32 link_speed;
     bool link_up;
 
-    struct work_struct watchdog_task;
-
     spinlock_t mbx_lock;
+
+    struct work_struct watchdog_task;
 };
 
 enum ixbgevf_state_t {
@@ -420,10 +438,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
 void ixgbevf_reset(struct ixgbevf_adapter *adapter);
 void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
 int ethtool_ioctl(struct ifreq *ifr);
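ixgbevf.h now gives every ring one generic ixgbevf_stats block guarded by a u64_stats_sync, plus a Tx/Rx-specific union. The syncp member is what lets 64-bit counters be read consistently on 32-bit hosts: writers bracket their updates and readers retry if a write overlapped the snapshot. A kernel-style sketch of that pattern (demo_* names are hypothetical; the _bh fetch variants are the ones this driver's get_stats path uses):

#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
    struct u64_stats_sync syncp;
    u64 packets;
    u64 bytes;
};

/* writer side: runs in the ring's own (softirq) context */
static void demo_stats_add(struct demo_ring_stats *s,
                           unsigned int pkts, unsigned int len)
{
    u64_stats_update_begin(&s->syncp);
    s->packets += pkts;
    s->bytes += len;
    u64_stats_update_end(&s->syncp);
}

/* reader side: retries if a writer raced with the snapshot */
static void demo_stats_read(struct demo_ring_stats *s,
                            u64 *pkts, u64 *bytes)
{
    unsigned int start;

    do {
        start = u64_stats_fetch_begin_bh(&s->syncp);
        *pkts = s->packets;
        *bytes = s->bytes;
    } while (u64_stats_fetch_retry_bh(&s->syncp, start));
}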
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9c9291803fdd..9df28985eba7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -145,28 +145,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 }
 
 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-                                               struct ixgbevf_tx_buffer
-                                               *tx_buffer_info)
-{
-    if (tx_buffer_info->dma) {
-        if (tx_buffer_info->mapped_as_page)
-            dma_unmap_page(tx_ring->dev,
-                           tx_buffer_info->dma,
-                           tx_buffer_info->length,
-                           DMA_TO_DEVICE);
-        else
+                                               struct ixgbevf_tx_buffer *tx_buffer)
+{
+    if (tx_buffer->skb) {
+        dev_kfree_skb_any(tx_buffer->skb);
+        if (dma_unmap_len(tx_buffer, len))
             dma_unmap_single(tx_ring->dev,
-                             tx_buffer_info->dma,
-                             tx_buffer_info->length,
+                             dma_unmap_addr(tx_buffer, dma),
+                             dma_unmap_len(tx_buffer, len),
                              DMA_TO_DEVICE);
-        tx_buffer_info->dma = 0;
+    } else if (dma_unmap_len(tx_buffer, len)) {
+        dma_unmap_page(tx_ring->dev,
+                       dma_unmap_addr(tx_buffer, dma),
+                       dma_unmap_len(tx_buffer, len),
+                       DMA_TO_DEVICE);
     }
-    if (tx_buffer_info->skb) {
-        dev_kfree_skb_any(tx_buffer_info->skb);
-        tx_buffer_info->skb = NULL;
-    }
-    tx_buffer_info->time_stamp = 0;
-    /* tx_buffer_info must be completely set up in the transmit path */
+    tx_buffer->next_to_watch = NULL;
+    tx_buffer->skb = NULL;
+    dma_unmap_len_set(tx_buffer, len, 0);
+    /* tx_buffer must be completely set up in the transmit path */
 }
 
 #define IXGBE_MAX_TXD_PWR    14
@@ -187,20 +184,21 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                  struct ixgbevf_ring *tx_ring)
 {
     struct ixgbevf_adapter *adapter = q_vector->adapter;
-    union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
-    struct ixgbevf_tx_buffer *tx_buffer_info;
-    unsigned int i, count = 0;
+    struct ixgbevf_tx_buffer *tx_buffer;
+    union ixgbe_adv_tx_desc *tx_desc;
     unsigned int total_bytes = 0, total_packets = 0;
+    unsigned int budget = tx_ring->count / 2;
+    unsigned int i = tx_ring->next_to_clean;
 
     if (test_bit(__IXGBEVF_DOWN, &adapter->state))
         return true;
 
-    i = tx_ring->next_to_clean;
-    tx_buffer_info = &tx_ring->tx_buffer_info[i];
-    eop_desc = tx_buffer_info->next_to_watch;
+    tx_buffer = &tx_ring->tx_buffer_info[i];
+    tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+    i -= tx_ring->count;
 
     do {
-        bool cleaned = false;
+        union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 
         /* if next_to_watch is not set then there is no work pending */
         if (!eop_desc)
@@ -214,67 +212,90 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
             break;
 
         /* clear next_to_watch to prevent false hangs */
-        tx_buffer_info->next_to_watch = NULL;
+        tx_buffer->next_to_watch = NULL;
 
-        for ( ; !cleaned; count++) {
-            struct sk_buff *skb;
-            tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-            cleaned = (tx_desc == eop_desc);
-            skb = tx_buffer_info->skb;
-
-            if (cleaned && skb) {
-                unsigned int segs, bytecount;
-
-                /* gso_segs is currently only valid for tcp */
-                segs = skb_shinfo(skb)->gso_segs ?: 1;
-                /* multiply data chunks by size of headers */
-                bytecount = ((segs - 1) * skb_headlen(skb)) +
-                            skb->len;
-                total_packets += segs;
-                total_bytes += bytecount;
-            }
+        /* update the statistics for this packet */
+        total_bytes += tx_buffer->bytecount;
+        total_packets += tx_buffer->gso_segs;
 
-            ixgbevf_unmap_and_free_tx_resource(tx_ring,
-                                               tx_buffer_info);
+        /* free the skb */
+        dev_kfree_skb_any(tx_buffer->skb);
 
-            tx_desc->wb.status = 0;
+        /* unmap skb header data */
+        dma_unmap_single(tx_ring->dev,
+                         dma_unmap_addr(tx_buffer, dma),
+                         dma_unmap_len(tx_buffer, len),
+                         DMA_TO_DEVICE);
 
+        /* clear tx_buffer data */
+        tx_buffer->skb = NULL;
+        dma_unmap_len_set(tx_buffer, len, 0);
+
+        /* unmap remaining buffers */
+        while (tx_desc != eop_desc) {
+            tx_buffer++;
+            tx_desc++;
             i++;
-            if (i == tx_ring->count)
-                i = 0;
+            if (unlikely(!i)) {
+                i -= tx_ring->count;
+                tx_buffer = tx_ring->tx_buffer_info;
+                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+            }
 
-            tx_buffer_info = &tx_ring->tx_buffer_info[i];
+            /* unmap any remaining paged data */
+            if (dma_unmap_len(tx_buffer, len)) {
+                dma_unmap_page(tx_ring->dev,
+                               dma_unmap_addr(tx_buffer, dma),
+                               dma_unmap_len(tx_buffer, len),
+                               DMA_TO_DEVICE);
+                dma_unmap_len_set(tx_buffer, len, 0);
+            }
+        }
+
+        /* move us one more past the eop_desc for start of next pkt */
+        tx_buffer++;
+        tx_desc++;
+        i++;
+        if (unlikely(!i)) {
+            i -= tx_ring->count;
+            tx_buffer = tx_ring->tx_buffer_info;
+            tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
         }
 
-        eop_desc = tx_buffer_info->next_to_watch;
-    } while (count < tx_ring->count);
+        /* issue prefetch for next Tx descriptor */
+        prefetch(tx_desc);
 
+        /* update budget accounting */
+        budget--;
+    } while (likely(budget));
+
+    i += tx_ring->count;
     tx_ring->next_to_clean = i;
+    u64_stats_update_begin(&tx_ring->syncp);
+    tx_ring->stats.bytes += total_bytes;
+    tx_ring->stats.packets += total_packets;
+    u64_stats_update_end(&tx_ring->syncp);
+    q_vector->tx.total_bytes += total_bytes;
+    q_vector->tx.total_packets += total_packets;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-    if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+    if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                  (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
         /* Make sure that anybody stopping the queue after this
          * sees the new next_to_clean.
         */
         smp_mb();
+
         if (__netif_subqueue_stopped(tx_ring->netdev,
                                      tx_ring->queue_index) &&
             !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
             netif_wake_subqueue(tx_ring->netdev,
                                 tx_ring->queue_index);
-            ++adapter->restart_queue;
+            ++tx_ring->tx_stats.restart_queue;
         }
     }
 
-    u64_stats_update_begin(&tx_ring->syncp);
-    tx_ring->total_bytes += total_bytes;
-    tx_ring->total_packets += total_packets;
-    u64_stats_update_end(&tx_ring->syncp);
-    q_vector->tx.total_bytes += total_bytes;
-    q_vector->tx.total_packets += total_packets;
-
-    return count < tx_ring->count;
+    return !!budget;
 }
 
 /**
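The rewritten ixgbevf_clean_tx_irq() above biases the ring index negative (i -= tx_ring->count) so its hot loop can test for wraparound with a cheap compare against zero, if (unlikely(!i)), instead of comparing against the ring size, and it un-biases the index only once when storing next_to_clean back. A standalone userspace demonstration of the trick (ring size and slot numbers are made up):

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
    unsigned int count = RING_SIZE;
    unsigned int next_to_clean = 5;   /* arbitrary starting slot */
    unsigned int i = next_to_clean;
    unsigned int steps;

    i -= count;   /* bias i negative: the real slot is i + count */

    for (steps = 0; steps < 6; steps++) {
        printf("cleaning slot %u\n", i + count);
        i++;
        if (!i)   /* wrap test is a compare against zero */
            i -= count;
    }

    i += count;   /* un-bias before storing back */
    printf("next_to_clean = %u\n", i);   /* prints 3: wrapped past 7 to 0..2 */
    return 0;
}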
@@ -343,7 +364,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
     /* if IP and error */
     if ((status_err & IXGBE_RXD_STAT_IPCS) &&
         (status_err & IXGBE_RXDADV_ERR_IPE)) {
-        ring->hw_csum_rx_error++;
+        ring->rx_stats.csum_err++;
         return;
     }
 
@@ -351,51 +372,46 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
         return;
 
     if (status_err & IXGBE_RXDADV_ERR_TCPE) {
-        ring->hw_csum_rx_error++;
+        ring->rx_stats.csum_err++;
         return;
     }
 
     /* It must be a TCP or UDP packet with a valid checksum */
     skb->ip_summed = CHECKSUM_UNNECESSARY;
-    ring->hw_csum_rx_good++;
 }
 
 /**
  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
  **/
-static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
-                                     struct ixgbevf_ring *rx_ring,
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
                                      int cleaned_count)
 {
-    struct pci_dev *pdev = adapter->pdev;
     union ixgbe_adv_rx_desc *rx_desc;
     struct ixgbevf_rx_buffer *bi;
     unsigned int i = rx_ring->next_to_use;
 
-    bi = &rx_ring->rx_buffer_info[i];
-
     while (cleaned_count--) {
         rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+        bi = &rx_ring->rx_buffer_info[i];
 
         if (!bi->skb) {
             struct sk_buff *skb;
 
             skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                             rx_ring->rx_buf_len);
-            if (!skb) {
-                adapter->alloc_rx_buff_failed++;
+            if (!skb)
                 goto no_buffers;
-            }
+
             bi->skb = skb;
-            bi->dma = dma_map_single(&pdev->dev, skb->data,
+
+            bi->dma = dma_map_single(rx_ring->dev, skb->data,
                                      rx_ring->rx_buf_len,
                                      DMA_FROM_DEVICE);
-            if (dma_mapping_error(&pdev->dev, bi->dma)) {
+            if (dma_mapping_error(rx_ring->dev, bi->dma)) {
                 dev_kfree_skb(skb);
                 bi->skb = NULL;
-                dev_err(&pdev->dev, "RX DMA map failed\n");
+                dev_err(rx_ring->dev, "Rx DMA map failed\n");
                 break;
             }
         }
@@ -404,10 +420,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
         i++;
         if (i == rx_ring->count)
             i = 0;
-        bi = &rx_ring->rx_buffer_info[i];
     }
 
 no_buffers:
+    rx_ring->rx_stats.alloc_rx_buff_failed++;
     if (rx_ring->next_to_use != i)
         ixgbevf_release_rx_desc(rx_ring, i);
 }
@@ -424,8 +440,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
                                 int budget)
 {
-    struct ixgbevf_adapter *adapter = q_vector->adapter;
-    struct pci_dev *pdev = adapter->pdev;
     union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
     struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
     struct sk_buff *skb;
@@ -451,7 +465,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
         rx_buffer_info->skb = NULL;
 
         if (rx_buffer_info->dma) {
-            dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+            dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                              rx_ring->rx_buf_len,
                              DMA_FROM_DEVICE);
             rx_buffer_info->dma = 0;
@@ -471,7 +485,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
         if (!(staterr & IXGBE_RXD_STAT_EOP)) {
             skb->next = next_buffer->skb;
             IXGBE_CB(skb->next)->prev = skb;
-            adapter->non_eop_descs++;
+            rx_ring->rx_stats.non_eop_descs++;
             goto next_desc;
         }
 
@@ -503,7 +517,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
          * source pruning.
         */
         if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
-            ether_addr_equal(adapter->netdev->dev_addr,
+            ether_addr_equal(rx_ring->netdev->dev_addr,
                              eth_hdr(skb)->h_source)) {
             dev_kfree_skb_irq(skb);
             goto next_desc;
@@ -516,8 +530,7 @@ next_desc:
 
         /* return some buffers to hardware, one at a time is too slow */
         if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
-            ixgbevf_alloc_rx_buffers(adapter, rx_ring,
-                                     cleaned_count);
+            ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
             cleaned_count = 0;
         }
 
@@ -532,11 +545,11 @@ next_desc:
     cleaned_count = ixgbevf_desc_unused(rx_ring);
 
     if (cleaned_count)
-        ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+        ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
 
     u64_stats_update_begin(&rx_ring->syncp);
-    rx_ring->total_packets += total_rx_packets;
-    rx_ring->total_bytes += total_rx_bytes;
+    rx_ring->stats.packets += total_rx_packets;
+    rx_ring->stats.bytes += total_rx_bytes;
     u64_stats_update_end(&rx_ring->syncp);
     q_vector->rx.total_packets += total_rx_packets;
     q_vector->rx.total_bytes += total_rx_bytes;
@@ -641,9 +654,9 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
         found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
 #ifdef BP_EXTENDED_STATS
         if (found)
-            ring->bp_cleaned += found;
+            ring->stats.cleaned += found;
         else
-            ring->bp_misses++;
+            ring->stats.misses++;
 #endif
         if (found)
             break;
@@ -1317,7 +1330,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
     IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
 
     ixgbevf_rx_desc_queue_enable(adapter, ring);
-    ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
+    ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
 }
 
 /**
@@ -1633,13 +1646,10 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
-                                  struct ixgbevf_ring *rx_ring)
+static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 {
-    struct pci_dev *pdev = adapter->pdev;
     unsigned long size;
     unsigned int i;
 
@@ -1652,7 +1662,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
         rx_buffer_info = &rx_ring->rx_buffer_info[i];
         if (rx_buffer_info->dma) {
-            dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+            dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                              rx_ring->rx_buf_len,
                              DMA_FROM_DEVICE);
             rx_buffer_info->dma = 0;
@@ -1677,11 +1687,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 /**
  * ixgbevf_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
-                                  struct ixgbevf_ring *tx_ring)
+static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 {
     struct ixgbevf_tx_buffer *tx_buffer_info;
     unsigned long size;
@@ -1711,7 +1719,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_rx_queues; i++)
-        ixgbevf_clean_rx_ring(adapter, adapter->rx_ring[i]);
+        ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -1723,7 +1731,7 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_tx_queues; i++)
-        ixgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]);
+        ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
@@ -2275,10 +2283,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
     for (i = 0; i < adapter->num_rx_queues; i++) {
         adapter->hw_csum_rx_error +=
             adapter->rx_ring[i]->hw_csum_rx_error;
-        adapter->hw_csum_rx_good +=
-            adapter->rx_ring[i]->hw_csum_rx_good;
         adapter->rx_ring[i]->hw_csum_rx_error = 0;
-        adapter->rx_ring[i]->hw_csum_rx_good = 0;
     }
 }
 
@@ -2426,17 +2431,13 @@ pf_has_reset:
 
 /**
  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
 **/
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
-                               struct ixgbevf_ring *tx_ring)
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
 {
-    struct pci_dev *pdev = adapter->pdev;
-
-    ixgbevf_clean_tx_ring(adapter, tx_ring);
+    ixgbevf_clean_tx_ring(tx_ring);
 
     vfree(tx_ring->tx_buffer_info);
     tx_ring->tx_buffer_info = NULL;
@@ -2445,7 +2446,7 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
     if (!tx_ring->desc)
         return;
 
-    dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+    dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
                       tx_ring->dma);
 
     tx_ring->desc = NULL;
@@ -2463,20 +2464,17 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
 
     for (i = 0; i < adapter->num_tx_queues; i++)
         if (adapter->tx_ring[i]->desc)
-            ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+            ixgbevf_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
 **/
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
-                               struct ixgbevf_ring *tx_ring)
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 {
-    struct pci_dev *pdev = adapter->pdev;
     int size;
 
     size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2488,7 +2486,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 
     tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
     tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-    tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+    tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
                                        &tx_ring->dma, GFP_KERNEL);
     if (!tx_ring->desc)
         goto err;
@@ -2518,7 +2516,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
     int i, err = 0;
 
     for (i = 0; i < adapter->num_tx_queues; i++) {
-        err = ixgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]);
+        err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
         if (!err)
             continue;
         hw_dbg(&adapter->hw,
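Both setup paths above size the descriptor array as count * sizeof(descriptor) and round the result up to a 4 KiB multiple before dma_alloc_coherent(), mirroring the kernel's ALIGN(size, 4096). A standalone userspace sketch of that size math (the 16-byte stand-in struct and the ring count are illustrative only):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) /* a must be a power of two */

struct adv_tx_desc { /* 16-byte stand-in for union ixgbe_adv_tx_desc */
    unsigned long long buffer_addr;
    unsigned int cmd_type_len;
    unsigned int olinfo_status;
};

int main(void)
{
    unsigned long count = 1000; /* deliberately not a power of two */
    unsigned long size = count * sizeof(struct adv_tx_desc);

    printf("raw size: %lu bytes\n", size);                /* 16000 */
    printf("aligned:  %lu bytes\n", ALIGN(size, 4096UL)); /* 16384 */
    return 0;
}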
@@ -2531,37 +2529,34 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring: rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
 **/
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
-                               struct ixgbevf_ring *rx_ring)
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
 {
-    struct pci_dev *pdev = adapter->pdev;
     int size;
 
     size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
     rx_ring->rx_buffer_info = vzalloc(size);
     if (!rx_ring->rx_buffer_info)
-        goto alloc_failed;
+        goto err;
 
     /* Round up to nearest 4K */
     rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
     rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-    rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+    rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
                                        &rx_ring->dma, GFP_KERNEL);
 
-    if (!rx_ring->desc) {
-        vfree(rx_ring->rx_buffer_info);
-        rx_ring->rx_buffer_info = NULL;
-        goto alloc_failed;
-    }
+    if (!rx_ring->desc)
+        goto err;
 
     return 0;
-alloc_failed:
+err:
+    vfree(rx_ring->rx_buffer_info);
+    rx_ring->rx_buffer_info = NULL;
+    dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
     return -ENOMEM;
 }
 
@@ -2580,7 +2575,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
     int i, err = 0;
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
-        err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
+        err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
         if (!err)
             continue;
         hw_dbg(&adapter->hw,
@@ -2592,22 +2587,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
 **/
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
-                               struct ixgbevf_ring *rx_ring)
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
 {
-    struct pci_dev *pdev = adapter->pdev;
-
-    ixgbevf_clean_rx_ring(adapter, rx_ring);
+    ixgbevf_clean_rx_ring(rx_ring);
 
     vfree(rx_ring->rx_buffer_info);
     rx_ring->rx_buffer_info = NULL;
 
-    dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+    dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
                       rx_ring->dma);
 
     rx_ring->desc = NULL;
@@ -2625,7 +2616,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
 
     for (i = 0; i < adapter->num_rx_queues; i++)
         if (adapter->rx_ring[i]->desc)
-            ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+            ixgbevf_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -2789,8 +2780,10 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
 }
 
 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
-                       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+                       struct ixgbevf_tx_buffer *first,
+                       u8 *hdr_len)
 {
+    struct sk_buff *skb = first->skb;
     u32 vlan_macip_lens, type_tucmd;
     u32 mss_l4len_idx, l4len;
 
@@ -2815,12 +2808,17 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
                                          IPPROTO_TCP, 0);
         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+        first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+                           IXGBE_TX_FLAGS_CSUM |
+                           IXGBE_TX_FLAGS_IPV4;
     } else if (skb_is_gso_v6(skb)) {
         ipv6_hdr(skb)->payload_len = 0;
         tcp_hdr(skb)->check =
             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                              &ipv6_hdr(skb)->daddr,
                              0, IPPROTO_TCP, 0);
+        first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+                           IXGBE_TX_FLAGS_CSUM;
     }
 
     /* compute header lengths */
@@ -2828,6 +2826,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
     *hdr_len += l4len;
     *hdr_len = skb_transport_offset(skb) + l4len;
 
+    /* update gso size and bytecount with header size */
+    first->gso_segs = skb_shinfo(skb)->gso_segs;
+    first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
     /* mss_l4len_id: use 1 as index for TSO */
     mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
     mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -2836,7 +2838,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
     /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
     vlan_macip_lens = skb_network_header_len(skb);
     vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
-    vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+    vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
     ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd,
                         mss_l4len_idx);
@@ -2844,9 +2846,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
     return 1;
 }
 
-static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
-                            struct sk_buff *skb, u32 tx_flags)
+static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
+                            struct ixgbevf_tx_buffer *first)
 {
+    struct sk_buff *skb = first->skb;
     u32 vlan_macip_lens = 0;
     u32 mss_l4len_idx = 0;
     u32 type_tucmd = 0;
@@ -2867,7 +2870,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
             if (unlikely(net_ratelimit())) {
                 dev_warn(tx_ring->dev,
                          "partial checksum but proto=%x!\n",
-                         skb->protocol);
+                         first->protocol);
             }
             break;
         }
@@ -2895,184 +2898,190 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
             }
             break;
         }
+
+        /* update TX checksum flag */
+        first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
     }
 
     /* vlan_macip_lens: MACLEN, VLAN tag */
     vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
-    vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+    vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
     ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd,
                         mss_l4len_idx);
-
-    return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
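Both ixgbevf_tso() and ixgbevf_tx_csum() above finish by packing vlan_macip_lens for the context descriptor: the network (IP) header length sits in the low bits, the MAC header offset is shifted up by IXGBE_ADVTXD_MACLEN_SHIFT, and the VLAN tag rides in the top 16 bits carried over from tx_flags. A standalone userspace sketch of that packing (constants copied from the diff; the header lengths and tag are example values):

#include <stdio.h>

#define IXGBE_ADVTXD_MACLEN_SHIFT  9
#define IXGBE_TX_FLAGS_VLAN_MASK   0xffff0000
#define IXGBE_TX_FLAGS_VLAN_SHIFT  16

int main(void)
{
    unsigned int net_hdr_len = 20;  /* IPv4 header */
    unsigned int mac_hdr_off = 14;  /* Ethernet header */
    unsigned int vlan_tag = 42;
    unsigned int tx_flags = vlan_tag << IXGBE_TX_FLAGS_VLAN_SHIFT;
    unsigned int vlan_macip_lens;

    vlan_macip_lens = net_hdr_len;                              /* IPLEN */
    vlan_macip_lens |= mac_hdr_off << IXGBE_ADVTXD_MACLEN_SHIFT; /* MACLEN */
    vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;      /* VLAN */

    printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens); /* 0x002a1c14 */
    return 0;
}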
 
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
-                          struct sk_buff *skb, u32 tx_flags)
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
 {
-    struct ixgbevf_tx_buffer *tx_buffer_info;
-    unsigned int len;
-    unsigned int total = skb->len;
-    unsigned int offset = 0, size;
-    int count = 0;
-    unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-    unsigned int f;
-    int i;
+    /* set type for advanced descriptor with frame checksum insertion */
+    __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+                                  IXGBE_ADVTXD_DCMD_IFCS |
+                                  IXGBE_ADVTXD_DCMD_DEXT);
 
-    i = tx_ring->next_to_use;
+    /* set HW vlan bit if vlan is present */
+    if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+        cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-    len = min(skb_headlen(skb), total);
-    while (len) {
-        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-        size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-        tx_buffer_info->length = size;
-        tx_buffer_info->mapped_as_page = false;
-        tx_buffer_info->dma = dma_map_single(tx_ring->dev,
-                                             skb->data + offset,
-                                             size, DMA_TO_DEVICE);
-        if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
-            goto dma_error;
+    /* set segmentation enable bits for TSO/FSO */
+    if (tx_flags & IXGBE_TX_FLAGS_TSO)
+        cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
-        len -= size;
-        total -= size;
-        offset += size;
-        count++;
-        i++;
-        if (i == tx_ring->count)
-            i = 0;
-    }
+    return cmd_type;
+}
 
-    for (f = 0; f < nr_frags; f++) {
-        const struct skb_frag_struct *frag;
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+                                     u32 tx_flags, unsigned int paylen)
+{
+    __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-        frag = &skb_shinfo(skb)->frags[f];
-        len = min((unsigned int)skb_frag_size(frag), total);
-        offset = 0;
+    /* enable L4 checksum for TSO and TX checksum offload */
+    if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+        olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
-        while (len) {
-            tx_buffer_info = &tx_ring->tx_buffer_info[i];
-            size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+    /* enable IPv4 checksum for TSO */
+    if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+        olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 
-            tx_buffer_info->length = size;
-            tx_buffer_info->dma =
-                skb_frag_dma_map(tx_ring->dev, frag,
-                                 offset, size, DMA_TO_DEVICE);
-            if (dma_mapping_error(tx_ring->dev,
-                                  tx_buffer_info->dma))
-                goto dma_error;
-            tx_buffer_info->mapped_as_page = true;
+    /* use index 1 context for TSO/FSO/FCOE */
+    if (tx_flags & IXGBE_TX_FLAGS_TSO)
+        olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
 
-            len -= size;
-            total -= size;
-            offset += size;
-            count++;
-            i++;
-            if (i == tx_ring->count)
-                i = 0;
-        }
-        if (total == 0)
-            break;
-    }
+    /* Check Context must be set if Tx switch is enabled, which it
+     * always is for case where virtual functions are running
+     */
+    olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 
-    if (i == 0)
-        i = tx_ring->count - 1;
-    else
-        i = i - 1;
-    tx_ring->tx_buffer_info[i].skb = skb;
+    tx_desc->read.olinfo_status = olinfo_status;
+}
 
-    return count;
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+                           struct ixgbevf_tx_buffer *first,
+                           const u8 hdr_len)
+{
+    dma_addr_t dma;
+    struct sk_buff *skb = first->skb;
+    struct ixgbevf_tx_buffer *tx_buffer;
+    union ixgbe_adv_tx_desc *tx_desc;
+    struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+    unsigned int data_len = skb->data_len;
+    unsigned int size = skb_headlen(skb);
+    unsigned int paylen = skb->len - hdr_len;
+    u32 tx_flags = first->tx_flags;
+    __le32 cmd_type;
+    u16 i = tx_ring->next_to_use;
 
-dma_error:
-    dev_err(tx_ring->dev, "TX DMA map failed\n");
+    tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 
-    /* clear timestamp and dma mappings for failed tx_buffer_info map */
-    tx_buffer_info->dma = 0;
-    count--;
+    ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+    cmd_type = ixgbevf_tx_cmd_type(tx_flags);
 
-    /* clear timestamp and dma mappings for remaining portion of packet */
-    while (count >= 0) {
-        count--;
-        i--;
-        if (i < 0)
-            i += tx_ring->count;
-        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-        ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-    }
+    dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+    if (dma_mapping_error(tx_ring->dev, dma))
+        goto dma_error;
 
-    return count;
-}
+    /* record length, and DMA address */
+    dma_unmap_len_set(first, len, size);
+    dma_unmap_addr_set(first, dma, dma);
 
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
-                             int count, unsigned int first, u32 paylen,
-                             u8 hdr_len)
-{
-    union ixgbe_adv_tx_desc *tx_desc = NULL;
-    struct ixgbevf_tx_buffer *tx_buffer_info;
-    u32 olinfo_status = 0, cmd_type_len = 0;
-    unsigned int i;
+    tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
-    u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+    for (;;) {
+        while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+            tx_desc->read.cmd_type_len =
+                cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
 
-    cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+            i++;
+            tx_desc++;
+            if (i == tx_ring->count) {
+                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+                i = 0;
+            }
 
-    cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+            dma += IXGBE_MAX_DATA_PER_TXD;
+            size -= IXGBE_MAX_DATA_PER_TXD;
 
-    if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-        cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+            tx_desc->read.buffer_addr = cpu_to_le64(dma);
+            tx_desc->read.olinfo_status = 0;
+        }
 
-    if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-        olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+        if (likely(!data_len))
+            break;
 
-    if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-        cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+        tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 
-        /* use index 1 context for tso */
-        olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-        if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-            olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-    }
+        i++;
+        tx_desc++;
+        if (i == tx_ring->count) {
+            tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+            i = 0;
+        }
 
-    /*
-     * Check Context must be set if Tx switch is enabled, which it
-     * always is for case where virtual functions are running
-     */
-    olinfo_status |= IXGBE_ADVTXD_CC;
+        size = skb_frag_size(frag);
+        data_len -= size;
 
-    olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+        dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+                               DMA_TO_DEVICE);
+        if (dma_mapping_error(tx_ring->dev, dma))
+            goto dma_error;
 
-    i = tx_ring->next_to_use;
-    while (count--) {
-        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-        tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
-        tx_desc->read.cmd_type_len =
-            cpu_to_le32(cmd_type_len | tx_buffer_info->length);
-        tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-        i++;
-        if (i == tx_ring->count)
-            i = 0;
+        tx_buffer = &tx_ring->tx_buffer_info[i];
+        dma_unmap_len_set(tx_buffer, len, size);
+        dma_unmap_addr_set(tx_buffer, dma, dma);
+
+        tx_desc->read.buffer_addr = cpu_to_le64(dma);
+        tx_desc->read.olinfo_status = 0;
+
+        frag++;
     }
 
-    tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+    /* write last descriptor with RS and EOP bits */
+    cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+    tx_desc->read.cmd_type_len = cmd_type;
 
-    tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+    /* set the timestamp */
+    first->time_stamp = jiffies;
 
-    /* Force memory writes to complete before letting h/w
-     * know there are new descriptors to fetch. (Only
-     * applicable for weak-ordered memory model archs,
-     * such as IA-64).
+    /* Force memory writes to complete before letting h/w know there
+     * are new descriptors to fetch. (Only applicable for weak-ordered
+     * memory model archs, such as IA-64).
+     *
+     * We also need this memory barrier (wmb) to make certain all of the
+     * status bits have been updated before next_to_watch is written.
     */
     wmb();
 
-    tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
+    /* set next_to_watch value indicating a packet is present */
+    first->next_to_watch = tx_desc;
+
+    i++;
+    if (i == tx_ring->count)
+        i = 0;
+
+    tx_ring->next_to_use = i;
+
+    /* notify HW of packet */
+    writel(i, tx_ring->tail);
+
+    return;
+dma_error:
+    dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+    /* clear dma mappings for failed tx_buffer_info map */
+    for (;;) {
+        tx_buffer = &tx_ring->tx_buffer_info[i];
+        ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+        if (tx_buffer == first)
+            break;
+        if (i == 0)
+            i = tx_ring->count;
+        i--;
+    }
+    tx_ring->next_to_use = i;
 }
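The new ixgbevf_tx_map() above emits one data descriptor per IXGBE_MAX_DATA_PER_TXD (1 << 14) bytes of a mapped span, which is the same quantity TXD_USE_COUNT() in the transmit path sizes for up front. A standalone userspace walk-through of that split (buffer length is arbitrary; TXD_USE_COUNT is reproduced from the driver's definition as I understand it):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
#define IXGBE_MAX_TXD_PWR      14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
#define TXD_USE_COUNT(s)       DIV_ROUND_UP((s), IXGBE_MAX_DATA_PER_TXD)

int main(void)
{
    unsigned int size = 40000; /* e.g. a large TSO head fragment */
    unsigned int dma = 0;      /* pretend DMA offset */
    unsigned int descs = 0;

    while (size > IXGBE_MAX_DATA_PER_TXD) {
        printf("desc %u: offset %u, len %u\n",
               descs++, dma, IXGBE_MAX_DATA_PER_TXD);
        dma += IXGBE_MAX_DATA_PER_TXD;
        size -= IXGBE_MAX_DATA_PER_TXD;
    }
    printf("desc %u: offset %u, len %u (last)\n", descs++, dma, size);
    printf("used %u descriptors, TXD_USE_COUNT predicted %u\n",
           descs, (unsigned int)TXD_USE_COUNT(40000));
    return 0;
}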
 
 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-    struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
     netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
     /* Herbert's original patch had:
      *  smp_mb__after_netif_stop_queue();
@@ -3086,7 +3095,8 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
     /* A reprieve! - use start_queue because it doesn't call schedule */
     netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-    ++adapter->restart_queue;
+    ++tx_ring->tx_stats.restart_queue;
+
     return 0;
 }
 
@@ -3100,22 +3110,23 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
     struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+    struct ixgbevf_tx_buffer *first;
     struct ixgbevf_ring *tx_ring;
-    unsigned int first;
-    unsigned int tx_flags = 0;
-    u8 hdr_len = 0;
-    int r_idx = 0, tso;
+    int tso;
+    u32 tx_flags = 0;
     u16 count = TXD_USE_COUNT(skb_headlen(skb));
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
     unsigned short f;
 #endif
+    u8 hdr_len = 0;
     u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+
     if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
         dev_kfree_skb(skb);
         return NETDEV_TX_OK;
     }
 
-    tx_ring = adapter->tx_ring[r_idx];
+    tx_ring = adapter->tx_ring[skb->queue_mapping];
 
     /*
      * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3131,38 +3142,41 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         count += skb_shinfo(skb)->nr_frags;
 #endif
     if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
-        adapter->tx_busy++;
+        tx_ring->tx_stats.tx_busy++;
         return NETDEV_TX_BUSY;
     }
 
+    /* record the location of the first descriptor for this packet */
+    first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+    first->skb = skb;
+    first->bytecount = skb->len;
+    first->gso_segs = 1;
+
     if (vlan_tx_tag_present(skb)) {
         tx_flags |= vlan_tx_tag_get(skb);
         tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
         tx_flags |= IXGBE_TX_FLAGS_VLAN;
     }
 
-    first = tx_ring->next_to_use;
+    /* record initial flags and protocol */
+    first->tx_flags = tx_flags;
+    first->protocol = vlan_get_protocol(skb);
 
-    if (skb->protocol == htons(ETH_P_IP))
-        tx_flags |= IXGBE_TX_FLAGS_IPV4;
-    tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
-    if (tso < 0) {
-        dev_kfree_skb_any(skb);
-        return NETDEV_TX_OK;
-    }
+    tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+    if (tso < 0)
+        goto out_drop;
+    else
+        ixgbevf_tx_csum(tx_ring, first);
 
-    if (tso)
-        tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
-    else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
-        tx_flags |= IXGBE_TX_FLAGS_CSUM;
+    ixgbevf_tx_map(tx_ring, first, hdr_len);
 
-    ixgbevf_tx_queue(tx_ring, tx_flags,
-                     ixgbevf_tx_map(tx_ring, skb, tx_flags),
-                     first, skb->len, hdr_len);
+    ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-    writel(tx_ring->next_to_use, tx_ring->tail);
+    return NETDEV_TX_OK;
 
-    ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out_drop:
+    dev_kfree_skb_any(first->skb);
+    first->skb = NULL;
 
     return NETDEV_TX_OK;
 }
@@ -3331,8 +3345,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
         ring = adapter->rx_ring[i];
         do {
             start = u64_stats_fetch_begin_bh(&ring->syncp);
-            bytes = ring->total_bytes;
-            packets = ring->total_packets;
+            bytes = ring->stats.bytes;
+            packets = ring->stats.packets;
         } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
         stats->rx_bytes += bytes;
         stats->rx_packets += packets;
@@ -3342,8 +3356,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
         ring = adapter->tx_ring[i];
         do {
             start = u64_stats_fetch_begin_bh(&ring->syncp);
-            bytes = ring->total_bytes;
-            packets = ring->total_packets;
+            bytes = ring->stats.bytes;
+            packets = ring->stats.packets;
         } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
         stats->tx_bytes += bytes;
         stats->tx_packets += packets;
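The stop/wake pairing visible in the last two hunks is the standard lockless Tx flow-control protocol: the xmit path stops the subqueue and re-checks free descriptors after a barrier, while the clean path publishes next_to_clean, issues smp_mb(), and wakes the subqueue only if it is still stopped. A condensed kernel-style sketch of the two halves (demo_* wrappers are hypothetical; the DOWN-state check and carrier test from the driver are omitted):

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)

/* xmit side: stop first, then re-check after the barrier */
static int demo_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
    netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
    /* the stop must be visible before we re-read the free-descriptor
     * count that the cleanup path may have just updated */
    smp_mb();

    if (likely(ixgbevf_desc_unused(tx_ring) < size))
        return -EBUSY;

    /* a reprieve: cleanup freed descriptors between check and stop */
    netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
    return 0;
}

/* clean side: after publishing next_to_clean, wake a stopped queue */
static void demo_maybe_wake_tx(struct ixgbevf_ring *tx_ring)
{
    if (ixgbevf_desc_unused(tx_ring) < TX_WAKE_THRESHOLD)
        return;

    /* make sure a concurrent stopper sees the new next_to_clean */
    smp_mb();

    if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index))
        netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
}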