Diffstat (limited to 'drivers/net/ethernet/intel')
82 files changed, 4934 insertions, 2089 deletions
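A recurring theme in the hunks below is the conversion of the drivers' NAPI poll routines (e1000_clean, e1000e_poll, fm10k_poll) from napi_complete() to napi_complete_done(), which additionally reports how many packets the poll actually processed back to the networking core. As editorial context, here is a minimal sketch of the resulting shape of such a poll handler; the example_* names are placeholders for this illustration, not functions from the diff:

static int example_poll(struct napi_struct *napi, int budget)
{
	/* process at most 'budget' received packets (placeholder helper) */
	int work_done = example_clean_rx(napi, budget);

	/* if the full budget was used, stay in polling mode */
	if (work_done < budget) {
		/* leave polling mode, telling the core how much was done */
		napi_complete_done(napi, work_done);
		example_irq_enable(napi);	/* placeholder helper */
	}

	return work_done;
}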
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 4270ad2d4ddf..83e557c7f279 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -559,8 +559,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } static void e1000_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 45c8c864104e..b1af0d613caa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -3900,10 +3900,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, return E1000_SUCCESS; } - /* If eeprom is not yet detected, do so now */ - if (eeprom->word_size == 0) - e1000_init_eeprom_params(hw); - /* A check for invalid values: offset too large, too many words, and * not enough words. */ @@ -4074,10 +4070,6 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, return E1000_SUCCESS; } - /* If eeprom is not yet detected, do so now */ - if (eeprom->word_size == 0) - e1000_init_eeprom_params(hw); - /* A check for invalid values: offset too large, too many words, and * not enough words. */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 74dc15055971..fd7be860c201 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3820,7 +3820,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) if (work_done < budget) { if (likely(adapter->itr_setting & 3)) e1000_set_itr(adapter); - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->flags)) e1000_irq_enable(adapter); } diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index ad6daa656d3e..6cab1f30d41e 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -648,8 +648,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } static void e1000_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index faf4b3f3d0b5..0a854a47d31a 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2693,7 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) if (work_done < weight) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) ew32(IMS, adapter->rx_ring->ims_val); @@ -6952,6 +6952,7 @@ static const struct net_device_ops e1000e_netdev_ops = { #endif .ndo_set_features = e1000_set_features, .ndo_fix_features = e1000_fix_features, + .ndo_features_check = passthru_features_check, }; /** diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index c8c8c5baefda..14440200499b 100644 --- 
a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -101,12 +101,19 @@ struct fm10k_tx_queue_stats { u64 csum_err; u64 tx_busy; u64 tx_done_old; + u64 csum_good; }; struct fm10k_rx_queue_stats { u64 alloc_failed; u64 csum_err; u64 errors; + u64 csum_good; + u64 switch_errors; + u64 drops; + u64 pp_errors; + u64 link_errors; + u64 length_errors; }; struct fm10k_ring { @@ -251,6 +258,7 @@ struct fm10k_intfc { #define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2) #define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3) #define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4) +#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5) int xcast_mode; /* Tx fast path data */ @@ -277,6 +285,17 @@ struct fm10k_intfc { u64 rx_drops_nic; u64 rx_overrun_pf; u64 rx_overrun_vf; + + /* Debug Statistics */ + u64 hw_sm_mbx_full; + u64 hw_csum_tx_good; + u64 hw_csum_rx_good; + u64 rx_switch_errors; + u64 rx_drops; + u64 rx_pp_errors; + u64 rx_link_errors; + u64 rx_length_errors; + u32 tx_timeout_count; /* RX */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index f45b4d71adb8..5304bc1fbecd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -37,7 +37,8 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos) } static void *fm10k_dbg_desc_seq_next(struct seq_file *s, - void __always_unused *v, loff_t *pos) + void __always_unused *v, + loff_t *pos) { struct fm10k_ring *ring = s->private; @@ -45,7 +46,7 @@ static void *fm10k_dbg_desc_seq_next(struct seq_file *s, } static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s, - __always_unused void *v) + void __always_unused *v) { /* Do nothing. */ } @@ -175,7 +176,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) return; /* Generate a folder for each q_vector */ - sprintf(name, "q_vector.%03d", q_vector->v_idx); + snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx); q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); if (!q_vector->dbg_q_vector) @@ -185,7 +186,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) for (i = 0; i < q_vector->tx.count; i++) { struct fm10k_ring *ring = &q_vector->tx.ring[i]; - sprintf(name, "tx_ring.%03d", ring->queue_index); + snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index); debugfs_create_file(name, 0600, q_vector->dbg_q_vector, ring, @@ -196,7 +197,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) for (i = 0; i < q_vector->rx.count; i++) { struct fm10k_ring *ring = &q_vector->rx.ring[i]; - sprintf(name, "rx_ring.%03d", ring->queue_index); + snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index); debugfs_create_file(name, 0600, q_vector->dbg_q_vector, ring, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index c6dc9683429e..2ce0eba5e040 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -76,19 +76,22 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("mac_rules_used", hw.swapi.mac.used), FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), - FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy), - FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped), - FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages), - FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords), - FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages), - 
FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords), - FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err), - FM10K_STAT("tx_hang_count", tx_timeout_count), FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), }; +static const struct fm10k_stats fm10k_gstrings_debug_stats[] = { + FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full), + FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good), + FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good), + FM10K_STAT("rx_switch_errors", rx_switch_errors), + FM10K_STAT("rx_drops", rx_drops), + FM10K_STAT("rx_pp_errors", rx_pp_errors), + FM10K_STAT("rx_link_errors", rx_link_errors), + FM10K_STAT("rx_length_errors", rx_length_errors), +}; + static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { FM10K_STAT("timeout", stats.timeout.count), FM10K_STAT("ur", stats.ur.count), @@ -100,14 +103,33 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), }; +#define FM10K_MBX_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \ + .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { + FM10K_MBX_STAT("mbx_tx_busy", tx_busy), + FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped), + FM10K_MBX_STAT("mbx_tx_messages", tx_messages), + FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords), + FM10K_MBX_STAT("mbx_rx_messages", rx_messages), + FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords), + FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err), +}; + #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) +#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats) #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) +#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) #define FM10K_QUEUE_STATS_LEN(_n) \ ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ - FM10K_NETDEV_STATS_LEN) + FM10K_NETDEV_STATS_LEN + \ + FM10K_MBX_STATS_LEN) static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = { "Mailbox test (on/offline)" @@ -120,47 +142,97 @@ enum fm10k_self_test_types { FM10K_TEST_MAX = FM10K_TEST_LEN }; -static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) +enum { + FM10K_PRV_FLAG_DEBUG_STATS, + FM10K_PRV_FLAG_LEN, +}; + +static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { + "debug-statistics", +}; + +static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_iov_data *iov_data = interface->iov_data; char *p = (char *)data; unsigned int i; + unsigned int j; - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *fm10k_gstrings_test, - FM10K_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_net_stats[i].stat_string, + for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_global_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + if (interface->flags & FM10K_FLAG_DEBUG_STATS) { + for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_debug_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - memcpy(p, 
fm10k_gstrings_global_stats[i].stat_string, + } + + for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + if (interface->hw.mac.type != fm10k_mac_vf) { + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + } - if (interface->hw.mac.type != fm10k_mac_vf) { - for (i = 0; i < FM10K_PF_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, - ETH_GSTRING_LEN); + if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { + for (i = 0; i < iov_data->num_vfs; i++) { + for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { + snprintf(p, + ETH_GSTRING_LEN, + "vf_%u_%s", i, + fm10k_gstrings_mbx_stats[j].stat_string); p += ETH_GSTRING_LEN; } } + } - for (i = 0; i < interface->hw.mac.max_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < interface->hw.mac.max_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } +} + +static void fm10k_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + char *p = (char *)data; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *fm10k_gstrings_test, + FM10K_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + fm10k_get_stat_strings(dev, data); + break; + case ETH_SS_PRIV_FLAGS: + memcpy(p, fm10k_prv_flags, + FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN); break; } } @@ -168,6 +240,7 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) static int fm10k_get_sset_count(struct net_device *dev, int sset) { struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_iov_data *iov_data = interface->iov_data; struct fm10k_hw *hw = &interface->hw; int stats_len = FM10K_STATIC_STATS_LEN; @@ -180,7 +253,16 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) if (hw->mac.type != fm10k_mac_vf) stats_len += FM10K_PF_STATS_LEN; + if (interface->flags & FM10K_FLAG_DEBUG_STATS) { + stats_len += FM10K_DEBUG_STATS_LEN; + + if (iov_data) + stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs; + } + return stats_len; + case ETH_SS_PRIV_FLAGS: + return FM10K_PRV_FLAG_LEN; default: return -EOPNOTSUPP; } @@ -192,6 +274,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, { const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; struct net_device_stats *net_stats = &netdev->stats; char *p; int i, j; @@ -211,13 +294,47 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - if (interface->hw.mac.type != fm10k_mac_vf) + if (interface->flags & FM10K_FLAG_DEBUG_STATS) { + for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { + p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + } + + for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { + p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + if (interface->hw.mac.type != fm10k_mac_vf) { for (i = 0; i < FM10K_PF_STATS_LEN; i++) { p = (char *)interface + fm10k_gstrings_pf_stats[i].stat_offset; *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + } + + if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { + for (i = 0; i < iov_data->num_vfs; i++) { + struct fm10k_vf_info *vf_info; + vf_info = &iov_data->vf_info[i]; + + /* skip stats if we don't have a vf info */ + if (!vf_info) { + data += FM10K_MBX_STATS_LEN; + continue; + } + + for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { + p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset; + *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + } for (i = 0; i < interface->hw.mac.max_queues; i++) { struct fm10k_ring *ring; @@ -398,10 +515,6 @@ static void fm10k_get_drvinfo(struct net_device *dev, sizeof(info->version) - 1); strncpy(info->bus_info, pci_name(interface->pdev), sizeof(info->bus_info) - 1); - - info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS); - - info->regdump_len = fm10k_get_regs_len(dev); } static void fm10k_get_pauseparam(struct net_device *dev, @@ -881,6 +994,33 @@ static void fm10k_self_test(struct net_device *dev, eth_test->flags |= ETH_TEST_FL_FAILED; } +static u32 fm10k_get_priv_flags(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + u32 priv_flags = 0; + + if (interface->flags & FM10K_FLAG_DEBUG_STATS) + priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS; + + return priv_flags; +} + +static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN)) + return -EINVAL; + + if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS)) + interface->flags |= FM10K_FLAG_DEBUG_STATS; + else + interface->flags &= ~FM10K_FLAG_DEBUG_STATS; + + return 0; +} + + static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; @@ -1094,6 +1234,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = { .get_regs = fm10k_get_regs, .get_regs_len = fm10k_get_regs_len, .self_test = fm10k_self_test, + .get_priv_flags = fm10k_get_priv_flags, + .set_priv_flags = fm10k_set_priv_flags, .get_rxfh_indir_size = fm10k_get_reta_size, .get_rxfh_key_size = fm10k_get_rssrk_size, .get_rxfh = fm10k_get_rssh, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 94571e6e790c..acfb8b1f88a7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -137,8 +137,11 @@ process_mbx: } /* guarantee we have free space in the SM mailbox */ - if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) + if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) { + /* keep track of how many times this occurs */ + interface->hw_sm_mbx_full++; break; + } /* cleanup mailbox and process received messages */ mbx->ops.process(hw, mbx); @@ -228,9 +231,6 @@ int fm10k_iov_resume(struct pci_dev *pdev) hw->iov.ops.set_lport(hw, vf_info, i, FM10K_VF_FLAG_MULTI_CAPABLE); - /* assign our default vid to the VF following reset */ - 
vf_info->sw_vid = hw->mac.default_vid; - /* mailbox is disconnected so we don't send a message */ hw->iov.ops.assign_default_mac_vlan(hw, vf_info); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index b5b2925103ec..e76a44cf330c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -398,6 +398,8 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring, return; skb->ip_summed = CHECKSUM_UNNECESSARY; + + ring->rx_stats.csum_good++; } #define FM10K_RSS_L4_TYPES_MASK \ @@ -497,8 +499,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, if (rx_desc->w.vlan) { u16 vid = le16_to_cpu(rx_desc->w.vlan); - if (vid != rx_ring->vid) + if ((vid & VLAN_VID_MASK) != rx_ring->vid) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + else if (vid & VLAN_PRIO_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid & VLAN_PRIO_MASK); } fm10k_type_trans(rx_ring, rx_desc, skb); @@ -553,6 +558,18 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, { if (unlikely((fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_RXE)))) { +#define FM10K_TEST_RXD_BIT(rxd, bit) \ + ((rxd)->w.csum_err & cpu_to_le16(bit)) + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR)) + rx_ring->rx_stats.switch_errors++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR)) + rx_ring->rx_stats.drops++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR)) + rx_ring->rx_stats.pp_errors++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY)) + rx_ring->rx_stats.link_errors++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG)) + rx_ring->rx_stats.length_errors++; dev_kfree_skb_any(skb); rx_ring->rx_stats.errors++; return true; @@ -576,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } -static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, - struct fm10k_ring *rx_ring, - int budget) +static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, + struct fm10k_ring *rx_ring, + int budget) { struct sk_buff *skb = rx_ring->skb; unsigned int total_bytes = 0, total_packets = 0; @@ -645,7 +662,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, q_vector->rx.total_packets += total_packets; q_vector->rx.total_bytes += total_bytes; - return total_packets < budget; + return total_packets; } #define VXLAN_HLEN (sizeof(struct udphdr) + 8) @@ -878,6 +895,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, /* update TX checksum flag */ first->tx_flags |= FM10K_TX_FLAGS_CSUM; + tx_ring->tx_stats.csum_good++; no_csum: /* populate Tx descriptor header size and mss */ @@ -1079,9 +1097,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_tx_buffer *first; int tso; u32 tx_flags = 0; -#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD unsigned short f; -#endif u16 count = TXD_USE_COUNT(skb_headlen(skb)); /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, @@ -1089,12 +1105,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, * + 2 desc gap to keep tail from touching head * otherwise try next time */ -#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); -#else - count += skb_shinfo(skb)->nr_frags; -#endif + if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; @@ -1409,7 +1422,7 @@ static int 
fm10k_poll(struct napi_struct *napi, int budget) struct fm10k_q_vector *q_vector = container_of(napi, struct fm10k_q_vector, napi); struct fm10k_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; fm10k_for_each_ring(ring, q_vector->tx) @@ -1423,16 +1436,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - fm10k_for_each_ring(ring, q_vector->rx) - clean_complete &= fm10k_clean_rx_irq(q_vector, ring, - per_ring_budget); + fm10k_for_each_ring(ring, q_vector->rx) { + int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); + + work_done += work; + clean_complete &= !!(work < per_ring_budget); + } /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable the q_vector */ fm10k_qv_enable(q_vector); @@ -1892,7 +1908,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) u32 reta, base; /* If the netdev is initialized we have to maintain table if possible */ - if (interface->netdev->reg_state) { + if (interface->netdev->reg_state != NETREG_UNINITIALIZED) { for (i = FM10K_RETA_SIZE; i--;) { reta = interface->reta[i]; if ((((reta << 24) >> 24) < rss_i) && diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 1a4b52637de9..af09a1b272e6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -129,8 +129,8 @@ static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) * fm10k_fifo_drop_all - Drop all messages in FIFO * @fifo: pointer to FIFO * - * This function resets the head pointer to drop all messages in the FIFO, - * and ensure the FIFO is empty. + * This function resets the head pointer to drop all messages in the FIFO and + * ensure the FIFO is empty. **/ static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo) { @@ -899,6 +899,27 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** + * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header + * @mbx: pointer to mailbox + * + * This function creates a fake disconnect header for loading into remote + * mailbox header. The primary purpose is to prevent errors on immediate + * start up after mbx->connect. 
+ **/ +static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** * fm10k_mbx_create_error_msg - Generate a error message * @mbx: pointer to mailbox * @err: local error encountered @@ -1046,9 +1067,26 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, **/ static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) { + u16 len, head, ack; + /* reset our outgoing max size back to Rx limits */ mbx->max_size = mbx->rx.size - 1; + /* update mbx->pulled to account for tail_len and ack */ + head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD); + ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + mbx->pulled += mbx->tail_len - ack; + + /* now drop any messages which have started or finished transmitting */ + while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) { + len = fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + if (mbx->pulled >= len) + mbx->pulled -= len; + else + mbx->pulled = 0; + } + /* just do a quick resysnc to start of message */ mbx->pushed = 0; mbx->pulled = 0; @@ -1418,8 +1456,10 @@ static s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) /* Place mbx in ready to connect state */ mbx->state = FM10K_STATE_CONNECT; + fm10k_mbx_reset_work(mbx); + /* initialize header of remote mailbox */ - fm10k_mbx_create_disconnect_hdr(mbx); + fm10k_mbx_create_fake_disconnect_hdr(mbx); fm10k_write_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr); /* enable interrupt and notify other party of new message */ @@ -1725,7 +1765,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, mbx->state = FM10K_STATE_CLOSED; mbx->remote = 0; fm10k_mbx_reset_work(mbx); - fm10k_mbx_update_max_size(mbx, 0); + fm10k_fifo_drop_all(&mbx->tx); fm10k_write_reg(hw, mbx->mbmem_reg, 0); } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 99228bf46c12..639263d5e833 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -758,6 +758,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_hw *hw = &interface->hw; s32 err; + int i; /* updates do not apply to VLAN 0 */ if (!vid) @@ -775,8 +776,25 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (!set) clear_bit(vid, interface->active_vlans); - /* if default VLAN is already present do nothing */ - if (vid == hw->mac.default_vid) + /* disable the default VID on ring if we have an active VLAN */ + for (i = 0; i < interface->num_rx_queues; i++) { + struct fm10k_ring *rx_ring = interface->rx_ring[i]; + u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); + + if (test_bit(rx_vid, interface->active_vlans)) + rx_ring->vid |= FM10K_VLAN_CLEAR; + else + rx_ring->vid &= ~FM10K_VLAN_CLEAR; + } + + /* Do not remove default VID related entries from VLAN and MAC tables */ + if (!set && vid == hw->mac.default_vid) + return 0; + + /* Do not throw an error if the interface is down. 
We will sync once + * we come up + */ + if (test_bit(__FM10K_DOWN, &interface->state)) return 0; fm10k_mbx_lock(interface); @@ -996,21 +1014,6 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) int xcast_mode; u16 vid, glort; - /* restore our address if perm_addr is set */ - if (hw->mac.type == fm10k_mac_vf) { - if (is_valid_ether_addr(hw->mac.perm_addr)) { - ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); - ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; - } - - if (hw->mac.vlan_override) - netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; - else - netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; - } - /* record glort for this interface */ glort = interface->glort; @@ -1045,7 +1048,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) vid, true, 0); } - /* update xcast mode before syncronizing addresses */ + /* update xcast mode before synchronizing addresses */ hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); /* synchronize all of the addresses */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index ce53ff25f88d..74be792f3f1b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -170,6 +170,21 @@ static void fm10k_reinit(struct fm10k_intfc *interface) /* reassociate interrupts */ fm10k_mbx_request_irq(interface); + /* update hardware address for VFs if perm_addr has changed */ + if (hw->mac.type == fm10k_mac_vf) { + if (is_valid_ether_addr(hw->mac.perm_addr)) { + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); + ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); + netdev->addr_assign_type &= ~NET_ADDR_RANDOM; + } + + if (hw->mac.vlan_override) + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + } + /* reset clock */ fm10k_ts_reset(interface); @@ -259,8 +274,6 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) * @interface: board private structure * * This function will process both the upstream and downstream mailboxes. - * It is necessary for us to hold the rtnl_lock while doing this as the - * mailbox accesses are protected by this lock. 
**/ static void fm10k_mbx_subtask(struct fm10k_intfc *interface) { @@ -315,6 +328,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface) { struct net_device_stats *net_stats = &interface->netdev->stats; struct fm10k_hw *hw = &interface->hw; + u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0; + u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0; + u64 rx_link_errors = 0; u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0; u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0; u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0; @@ -334,6 +350,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface) tx_csum_errors += tx_ring->tx_stats.csum_err; bytes += tx_ring->stats.bytes; pkts += tx_ring->stats.packets; + hw_csum_tx_good += tx_ring->tx_stats.csum_good; } interface->restart_queue = restart_queue; @@ -341,6 +358,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface) net_stats->tx_bytes = bytes; net_stats->tx_packets = pkts; interface->tx_csum_errors = tx_csum_errors; + interface->hw_csum_tx_good = hw_csum_tx_good; + /* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { struct fm10k_ring *rx_ring = interface->rx_ring[i]; @@ -350,12 +369,24 @@ void fm10k_update_stats(struct fm10k_intfc *interface) alloc_failed += rx_ring->rx_stats.alloc_failed; rx_csum_errors += rx_ring->rx_stats.csum_err; rx_errors += rx_ring->rx_stats.errors; + hw_csum_rx_good += rx_ring->rx_stats.csum_good; + rx_switch_errors += rx_ring->rx_stats.switch_errors; + rx_drops += rx_ring->rx_stats.drops; + rx_pp_errors += rx_ring->rx_stats.pp_errors; + rx_link_errors += rx_ring->rx_stats.link_errors; + rx_length_errors += rx_ring->rx_stats.length_errors; } net_stats->rx_bytes = bytes; net_stats->rx_packets = pkts; interface->alloc_failed = alloc_failed; interface->rx_csum_errors = rx_csum_errors; + interface->hw_csum_rx_good = hw_csum_rx_good; + interface->rx_switch_errors = rx_switch_errors; + interface->rx_drops = rx_drops; + interface->rx_pp_errors = rx_pp_errors; + interface->rx_link_errors = rx_link_errors; + interface->rx_length_errors = rx_length_errors; hw->mac.ops.update_hw_stats(hw, &interface->stats); @@ -483,7 +514,7 @@ static void fm10k_service_task(struct work_struct *work) interface = container_of(work, struct fm10k_intfc, service_task); - /* tasks always capable of running, but must be rtnl protected */ + /* tasks run even when interface is down */ fm10k_mbx_subtask(interface); fm10k_detach_subtask(interface); fm10k_reset_subtask(interface); @@ -663,6 +694,10 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* assign default VLAN to queue */ ring->vid = hw->mac.default_vid; + /* if we have an active VLAN, disable default VID */ + if (test_bit(hw->mac.default_vid, interface->active_vlans)) + ring->vid |= FM10K_VLAN_CLEAR; + /* Map interrupt */ if (ring->q_vector) { rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); @@ -861,10 +896,12 @@ void fm10k_netpoll(struct net_device *netdev) #endif #define FM10K_ERR_MSG(type) case (type): error = #type; break -static void fm10k_print_fault(struct fm10k_intfc *interface, int type, +static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, struct fm10k_fault *fault) { struct pci_dev *pdev = interface->pdev; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_iov_data *iov_data = interface->iov_data; char *error; switch (type) { @@ -918,6 +955,30 @@ static void fm10k_print_fault(struct fm10k_intfc *interface, 
int type, "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n", error, fault->address, fault->specinfo, PCI_SLOT(fault->func), PCI_FUNC(fault->func)); + + /* For VF faults, clear out the respective LPORT, reset the queue + * resources, and then reconnect to the mailbox. This allows the + * VF in question to resume behavior. For transient faults that are + * the result of non-malicious behavior this will log the fault and + * allow the VF to resume functionality. Obviously for malicious VFs + * they will be able to attempt malicious behavior again. In this + * case, the system administrator will need to step in and manually + * remove or disable the VF in question. + */ + if (fault->func && iov_data) { + int vf = fault->func - 1; + struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf]; + + hw->iov.ops.reset_lport(hw, vf_info); + hw->iov.ops.reset_resources(hw, vf_info); + + /* reset_lport disables the VF, so re-enable it */ + hw->iov.ops.set_lport(hw, vf_info, vf, + FM10K_VF_FLAG_MULTI_CAPABLE); + + /* reset_resources will disconnect from the mbx */ + vf_info->mbx.ops.connect(hw, &vf_info->mbx); + } } static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) @@ -941,7 +1002,7 @@ static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) continue; } - fm10k_print_fault(interface, type, &fault); + fm10k_handle_fault(interface, type, &fault); } } @@ -1705,22 +1766,86 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, static void fm10k_slot_warn(struct fm10k_intfc *interface) { - struct device *dev = &interface->pdev->dev; + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; struct fm10k_hw *hw = &interface->hw; + int max_gts = 0, expected_gts = 0; + + if (pcie_get_minimum_link(interface->pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { + dev_warn(&interface->pdev->dev, + "Unable to determine PCI Express bandwidth.\n"); + return; + } + + switch (speed) { + case PCIE_SPEED_2_5GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 2 * width; + break; + case PCIE_SPEED_5_0GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 4 * width; + break; + case PCIE_SPEED_8_0GT: + /* 128b/130b encoding has less than 2% impact on throughput */ + max_gts = 8 * width; + break; + default: + dev_warn(&interface->pdev->dev, + "Unable to determine PCI Express bandwidth.\n"); + return; + } + + dev_info(&interface->pdev->dev, + "PCI Express bandwidth of %dGT/s available\n", + max_gts); + dev_info(&interface->pdev->dev, + "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n", + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + "Unknown"), + hw->bus.width, + (speed == PCIE_SPEED_2_5GT ? "20%" : + speed == PCIE_SPEED_5_0GT ? "20%" : + speed == PCIE_SPEED_8_0GT ? "<2%" : + "Unknown"), + (hw->bus.payload == fm10k_bus_payload_128 ? "128B" : + hw->bus.payload == fm10k_bus_payload_256 ? "256B" : + hw->bus.payload == fm10k_bus_payload_512 ? 
"512B" : + "Unknown")); - if (hw->mac.ops.is_slot_appropriate(hw)) + switch (hw->bus_caps.speed) { + case fm10k_bus_speed_2500: + /* 8b/10b encoding reduces max throughput by 20% */ + expected_gts = 2 * hw->bus_caps.width; + break; + case fm10k_bus_speed_5000: + /* 8b/10b encoding reduces max throughput by 20% */ + expected_gts = 4 * hw->bus_caps.width; + break; + case fm10k_bus_speed_8000: + /* 128b/130b encoding has less than 2% impact on throughput */ + expected_gts = 8 * hw->bus_caps.width; + break; + default: + dev_warn(&interface->pdev->dev, + "Unable to determine expected PCI Express bandwidth.\n"); return; + } - dev_warn(dev, - "For optimal performance, a %s %s slot is recommended.\n", - (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" : - hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" : - "x8"), - (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : - hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : - "8.0GT/s")); - dev_warn(dev, - "A slot with more lanes and/or higher speed is suggested.\n"); + if (max_gts < expected_gts) { + dev_warn(&interface->pdev->dev, + "This device requires %dGT/s of bandwidth for optimal performance.\n", + expected_gts); + dev_warn(&interface->pdev->dev, + "A %sslot with x%d lanes is suggested.\n", + (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), + hw->bus_caps.width); + } } /** @@ -1739,7 +1864,6 @@ static int fm10k_probe(struct pci_dev *pdev, { struct net_device *netdev; struct fm10k_intfc *interface; - struct fm10k_hw *hw; int err; err = pci_enable_device_mem(pdev); @@ -1783,7 +1907,6 @@ static int fm10k_probe(struct pci_dev *pdev, interface->netdev = netdev; interface->pdev = pdev; - hw = &interface->hw; interface->uc_addr = ioremap(pci_resource_start(pdev, 0), FM10K_UC_ADDR_SIZE); @@ -1825,24 +1948,12 @@ static int fm10k_probe(struct pci_dev *pdev, /* Register PTP interface */ fm10k_ptp_register(interface); - /* print bus type/speed/width info */ - dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n", - (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" : - hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : - hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : - "Unknown"), - (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" : - hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" : - hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" : - "Unknown"), - (hw->bus.payload == fm10k_bus_payload_128 ? "128B" : - hw->bus.payload == fm10k_bus_payload_256 ? "256B" : - hw->bus.payload == fm10k_bus_payload_512 ? 
"512B" : - "Unknown")); - /* print warning for non-optimal configurations */ fm10k_slot_warn(interface); + /* report MAC address for logging */ + dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); + /* enable SR-IOV after registering netdev to enforce PF/VF ordering */ fm10k_iov_configure(pdev, 0); @@ -1983,6 +2094,16 @@ static int fm10k_resume(struct pci_dev *pdev) if (err) return err; + /* assume host is not ready, to prevent race with watchdog in case we + * actually don't have connection to the switch + */ + interface->host_ready = false; + fm10k_watchdog_host_not_ready(interface); + + /* clear the service task disable bit to allow service task to start */ + clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + fm10k_service_event_schedule(interface); + /* restore SR-IOV interface */ fm10k_iov_resume(pdev); @@ -2010,6 +2131,15 @@ static int fm10k_suspend(struct pci_dev *pdev, fm10k_iov_suspend(pdev); + /* the watchdog tasks may read registers, which will appear like a + * surprise-remove event once the PCI device is disabled. This will + * cause us to close the netdevice, so we don't retain the open/closed + * state post-resume. Prevent this by disabling the service task while + * suspended, until we actually resume. + */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + cancel_work_sync(&interface->service_task); + rtnl_lock(); if (netif_running(netdev)) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 3ca0233b3ea2..8c0bdc4e4edd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -59,6 +59,11 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) return FM10K_ERR_DMA_PENDING; + /* verify the switch is ready for reset */ + reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2); + if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY)) + goto out; + /* Inititate data path reset */ reg |= FM10K_DMA_CTRL_DATAPATH_RESET; fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); @@ -72,6 +77,7 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) if (!(reg & FM10K_IP_NOTINRESET)) err = FM10K_ERR_RESET_FAILED; +out: return err; } @@ -185,19 +191,6 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) } /** - * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU - * @hw: pointer to hardware structure - * - * Looks at the PCIe bus info to confirm whether or not this slot can support - * the necessary bandwidth for this device. - **/ -static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw) -{ - return (hw->bus.speed == hw->bus_caps.speed) && - (hw->bus.width == hw->bus_caps.width); -} - -/** * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table * @hw: pointer to hardware structure * @vid: VLAN ID to add to table @@ -1162,6 +1155,24 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, } /** + * fm10k_iov_select_vid - Select correct default VID + * @hw: Pointer to hardware structure + * @vid: VID to correct + * + * Will report an error if VID is out of range. For VID = 0, it will return + * either the pf_vid or sw_vid depending on which one is set. + */ +static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +{ + if (!vid) + return vf_info->pf_vid ? 
vf_info->pf_vid : vf_info->sw_vid; + else if (vf_info->pf_vid && vid != vf_info->pf_vid) + return FM10K_ERR_PARAM; + else + return vid; +} + +/** * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF * @hw: Pointer to hardware structure * @results: Pointer array to message, results[0] is pointer to message @@ -1175,9 +1186,10 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; - int err = 0; u8 mac[ETH_ALEN]; u32 *result; + int err = 0; + bool set; u16 vlan; u32 vid; @@ -1193,19 +1205,21 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, if (err) return err; - /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ - if (!vid || (vid == FM10K_VLAN_CLEAR)) { - if (vf_info->pf_vid) - vid |= vf_info->pf_vid; - else - vid |= vf_info->sw_vid; - } else if (vid != vf_info->pf_vid) { + /* verify upper 16 bits are zero */ + if (vid >> 16) return FM10K_ERR_PARAM; - } + + set = !(vid & FM10K_VLAN_CLEAR); + vid &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vid); + if (err < 0) + return err; + else + vid = err; /* update VSI info for VF in regards to VLAN table */ - err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, - !(vid & FM10K_VLAN_CLEAR)); + err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); } if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { @@ -1221,19 +1235,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, memcmp(mac, vf_info->mac, ETH_ALEN)) return FM10K_ERR_PARAM; - /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ - if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { - if (vf_info->pf_vid) - vlan |= vf_info->pf_vid; - else - vlan |= vf_info->sw_vid; - } else if (vf_info->pf_vid) { - return FM10K_ERR_PARAM; - } + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + else + vlan = err; /* notify switch of request for new unicast address */ - err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan, - !(vlan & FM10K_VLAN_CLEAR), 0); + err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, + mac, vlan, set, 0); } if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { @@ -1248,19 +1261,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) return FM10K_ERR_PARAM; - /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ - if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { - if (vf_info->pf_vid) - vlan |= vf_info->pf_vid; - else - vlan |= vf_info->sw_vid; - } else if (vf_info->pf_vid) { - return FM10K_ERR_PARAM; - } + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + else + vlan = err; /* notify switch of request for new multicast address */ - err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan, - !(vlan & FM10K_VLAN_CLEAR)); + err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, + mac, vlan, set); } return err; @@ -1849,7 +1861,6 @@ static struct fm10k_mac_ops mac_ops_pf = { .init_hw = &fm10k_init_hw_pf, .start_hw = &fm10k_start_hw_generic, .stop_hw = &fm10k_stop_hw_generic, - .is_slot_appropriate = &fm10k_is_slot_appropriate_pf, .update_vlan = &fm10k_update_vlan_pf, .read_mac_addr = &fm10k_read_mac_addr_pf, .update_uc_addr = &fm10k_update_uc_addr_pf, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h 
b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 2a17d82fa37d..318a212f0a78 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -521,7 +521,6 @@ struct fm10k_mac_ops { s32 (*stop_hw)(struct fm10k_hw *); s32 (*get_bus_info)(struct fm10k_hw *); s32 (*get_host_state)(struct fm10k_hw *, bool *); - bool (*is_slot_appropriate)(struct fm10k_hw *); s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); s32 (*read_mac_addr)(struct fm10k_hw *); s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, @@ -763,6 +762,12 @@ enum fm10k_rxdesc_xc { #define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */ #define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */ +#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */ +#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */ +#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */ +#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */ +#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */ + struct fm10k_ftag { __be16 swpri_type_user; __be16 vlan; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 94f0f6a146d9..36c8b0aa08fd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -131,19 +131,6 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) return 0; } -/** - * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU - * @hw: pointer to hardware structure - * - * Looks at the PCIe bus info to confirm whether or not this slot can support - * the necessary bandwidth for this device. Since the VF has no control over - * the "slot" it is in, always indicate that the slot is appropriate. 
- **/ -static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw) -{ - return true; -} - /* This structure defines the attibutes to be parsed below */ const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), @@ -552,7 +539,6 @@ static struct fm10k_mac_ops mac_ops_vf = { .init_hw = &fm10k_init_hw_vf, .start_hw = &fm10k_start_hw_generic, .stop_hw = &fm10k_stop_hw_vf, - .is_slot_appropriate = &fm10k_is_slot_appropriate_vf, .update_vlan = &fm10k_update_vlan_vf, .read_mac_addr = &fm10k_read_mac_addr_vf, .update_uc_addr = &fm10k_update_uc_addr_vf, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e7462793d48d..4dd3e26129b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -71,7 +71,6 @@ #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 -#define I40E_MAX_REGISTER 0x800000 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) #define I40E_DEFAULT_NUM_DESCRIPTORS 512 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 @@ -94,19 +93,26 @@ #endif /* I40E_FCOE */ #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 -#define I40E_AQ_WORK_LIMIT 32 +#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 -#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) +#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) /* Ethtool Private Flags */ #define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0) +#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) +#define I40E_PRIV_FLAGS_FD_ATR BIT(2) +#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 #define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) +#define I40E_OEM_VER_BUILD_MASK 0xffff +#define I40E_OEM_VER_PATCH_MASK 0xff +#define I40E_OEM_VER_BUILD_SHIFT 8 +#define I40E_OEM_VER_SHIFT 24 /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 @@ -243,7 +249,6 @@ struct i40e_pf { struct pci_dev *pdev; struct i40e_hw hw; unsigned long state; - unsigned long link_check_timeout; struct msix_entry *msix_entries; bool fc_autoneg_status; @@ -305,7 +310,6 @@ struct i40e_pf { #ifdef I40E_FCOE #define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ -#define I40E_FLAG_IN_NETPOLL BIT_ULL(12) #define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) #define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15) @@ -327,8 +331,11 @@ struct i40e_pf { #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33) #define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34) #define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35) +#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37) #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) +#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) +#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -409,6 +416,9 @@ struct i40e_pf { /* These are only valid in NPAR modes */ u32 npar_max_bw; u32 npar_min_bw; + + u32 ioremap_len; + u32 fd_inv; }; struct i40e_mac_filter { @@ -460,6 +470,8 @@ struct i40e_vsi { #define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; + /* Per VSI lock to protect elements/list (MAC filter) */ + spinlock_t mac_filter_list_lock; struct list_head 
mac_filter_list; /* VSI stats */ @@ -474,6 +486,7 @@ struct i40e_vsi { #endif u32 tx_restart; u32 tx_busy; + u64 tx_linearize; u32 rx_buf_failed; u32 rx_page_failed; @@ -489,6 +502,7 @@ struct i40e_vsi { */ u16 rx_itr_setting; u16 tx_itr_setting; + u16 int_rate_limit; /* value in usecs */ u16 rss_table_size; u16 rss_size; @@ -534,6 +548,7 @@ struct i40e_vsi { u16 idx; /* index in pf->vsi[] */ u16 veb_idx; /* index of VEB parent */ struct kobject *kobj; /* sysfs object */ + bool current_isup; /* Sync 'link up' logging */ /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); @@ -564,6 +579,8 @@ struct i40e_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; bool arm_wb_state; +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 should adjust ITR */ } ____cacheline_internodealigned_in_smp; /* lan device */ @@ -573,22 +590,29 @@ struct i40e_device { }; /** - * i40e_fw_version_str - format the FW and NVM version strings + * i40e_nvm_version_str - format the NVM version strings * @hw: ptr to the hardware info **/ -static inline char *i40e_fw_version_str(struct i40e_hw *hw) +static inline char *i40e_nvm_version_str(struct i40e_hw *hw) { static char buf[32]; + u32 full_ver; + u8 ver, patch; + u16 build; + + full_ver = hw->nvm.oem_ver; + ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); + build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) + & I40E_OEM_VER_BUILD_MASK); + patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); snprintf(buf, sizeof(buf), - "f%d.%d.%05d a%d.%d n%x.%02x e%x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, - hw->aq.api_maj_ver, hw->aq.api_min_ver, + "%x.%02x 0x%x %d.%d.%d", (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - (hw->nvm.eetrack & 0xffffff)); + hw->nvm.eetrack, ver, build, patch); return buf; } @@ -667,7 +691,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, bool is_vf, bool is_netdev); void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev); -int i40e_sync_vsi_filters(struct i40e_vsi *vsi); +int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); @@ -700,7 +724,24 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {} static inline void i40e_dbg_init(void) {} static inline void i40e_dbg_exit(void) {} #endif /* CONFIG_DEBUG_FS*/ -void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); +/** + * i40e_irq_dynamic_enable - Enable default interrupt generation settings + * @vsi: pointer to a vsi + * @vector: enable a particular Hw Interrupt vector, without base_vector + **/ +static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val); + /* skip the flush */ +} + void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); @@ -739,7 +780,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt); u8 i40e_get_fcoe_tc_map(struct i40e_pf 
*pf); void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi); void i40e_fcoe_vsi_setup(struct i40e_pf *pf); -int i40e_init_pf_fcoe(struct i40e_pf *pf); +void i40e_init_pf_fcoe(struct i40e_pf *pf); int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi); void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi); int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, @@ -771,4 +812,5 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); +void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 62488a67149d..0ff8f01e57ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! 
*/ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -482,8 +482,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.asq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_asq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.asq.head, 0); @@ -492,16 +496,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) wr32(hw, hw->aq.asq.bal, 0); wr32(hw, hw->aq.asq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.asq_mutex); - hw->aq.asq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_asq_bufs(hw); +shutdown_asq_out: mutex_unlock(&hw->aq.asq_mutex); - return ret_code; } @@ -515,8 +516,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.arq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.arq_mutex); + + if (hw->aq.arq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_arq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.arq.head, 0); @@ -525,16 +530,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) wr32(hw, hw->aq.arq.bal, 0); wr32(hw, hw->aq.arq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.arq_mutex); - hw->aq.arq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_arq_bufs(hw); +shutdown_arq_out: mutex_unlock(&hw->aq.arq_mutex); - return ret_code; } @@ -551,8 +553,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) **/ i40e_status i40e_init_adminq(struct i40e_hw *hw) { - i40e_status ret_code; + u16 cfg_ptr, oem_hi, oem_lo; u16 eetrack_lo, eetrack_hi; + i40e_status ret_code; int retry = 0; /* verify input for valid configuration */ @@ -611,6 +614,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); + i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), + &oem_hi); + i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), + &oem_lo); + hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; @@ -657,6 +666,9 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) /* destroy the locks */ + if (hw->nvm_buff.va) + i40e_free_virt_mem(hw, &hw->nvm_buff); + return ret_code; } @@ -678,8 +690,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "%s: ntc %d head %d.\n", __func__, ntc, - rd32(hw, hw->aq.asq.head)); + "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { I40E_ADMINQ_CALLBACK cb_func = @@ -742,19 +753,23 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, u16 retval = 0; u32 val = 0; - val = rd32(hw, hw->aq.asq.head); - if (val >= hw->aq.num_asq_entries) { + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: head overrun at %d\n", val); + "AQTX: Admin queue not initialized.\n"); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } - if (hw->aq.asq.count == 0) { + 
hw->aq.asq_last_status = I40E_AQ_RC_OK; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: Admin queue not initialized.\n"); + "AQTX: head overrun at %d\n", val); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); @@ -779,8 +794,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, desc->flags &= ~cpu_to_le16(details->flags_dis); desc->flags |= cpu_to_le16(details->flags_ena); - mutex_lock(&hw->aq.asq_mutex); - if (buff_size > hw->aq.asq_buf_size) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -889,6 +902,10 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, "AQTX: desc and buffer writeback:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + /* save writeback aq if requested */ + if (details->wb_desc) + *details->wb_desc = *desc_on_ring; + /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { @@ -900,7 +917,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); -asq_send_command_exit: return status; } @@ -1023,6 +1039,19 @@ clean_arq_element_err: i40e_release_nvm(hw); hw->aq.nvm_release_on_done = false; } + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } } return ret_code; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 28e519a50de4..12fbbddea299 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -69,6 +69,7 @@ struct i40e_asq_cmd_details { u16 flags_dis; bool async; bool postpone; + struct i40e_aq_desc *wb_desc; }; #define I40E_ADMINQ_DETAILS(R, i) \ @@ -108,9 +109,10 @@ struct i40e_adminq_info { /** * i40e_aq_rc_to_posix - convert errors to user-land codes - * aq_rc: AdminQ error code to convert + * aq_ret: AdminQ handler error code can override aq_rc + * aq_rc: AdminQ firmware error code to convert **/ -static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) +static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) return -EAGAIN; - if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) return -ERANGE; + return aq_to_posix[aq_rc]; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 95d23bfbcbf1..6584b6cd73fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1722,11 +1722,13 @@ struct i40e_aqc_get_link_status { u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; -#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; @@ 
-2062,6 +2064,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); #define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) #define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 #define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) + #define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 #define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) #define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 @@ -2070,10 +2073,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) #define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) -#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA +#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB #define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) #define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 #define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) + +/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with + * word boundary layout issues, which the Linux compilers silently deal + * with by adding padding, making the actual struct larger than designed. + * However, the FW compiler for the NIC is less lenient and complains + * about the struct. Hence, the struct defined here has an extra byte in + * fields reserved3 and reserved4 to directly acknowledge that padding, + * and the new length is used in the length check macro. + */ struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; @@ -2081,9 +2093,9 @@ struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved2; u8 oper_tc_bw[8]; u8 oper_pfc_en; - u8 reserved3; + u8 reserved3[2]; __le16 oper_app_prio; - u8 reserved4; + u8 reserved4[2]; __le16 tlv_status; }; @@ -2120,6 +2132,13 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 #define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 114dc6450183..2d74c6e4d7b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -51,7 +51,9 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_20G_KR2: + case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_SFP_X722: @@ -85,7 +87,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ -char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: @@ -145,7 +147,7 @@ char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ -char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) +const char 
*i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) { switch (stat_err) { case 0: @@ -329,25 +331,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -441,9 +429,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); - status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; @@ -518,8 +503,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); @@ -961,6 +944,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) else hw->pf_id = (u8)(func_rid & 0x7); + if (hw->mac.type == I40E_MAC_X722) + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE; + status = i40e_init_nvm(hw); return status; } @@ -1038,7 +1024,7 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (flags & I40E_AQC_LAN_ADDR_VALID) - memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac)); + ether_addr_copy(mac_addr, addrs.pf_lan_mac); return status; } @@ -1061,7 +1047,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) return status; if (flags & I40E_AQC_PORT_ADDR_VALID) - memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac)); + ether_addr_copy(mac_addr, addrs.port_mac); else status = I40E_ERR_INVALID_MAC_ADDR; @@ -1119,7 +1105,7 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr) return status; if (flags & I40E_AQC_SAN_ADDR_VALID) - memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac)); + ether_addr_copy(mac_addr, addrs.pf_san_mac); else status = I40E_ERR_INVALID_MAC_ADDR; @@ -1260,7 +1246,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - for (cnt = 0; cnt < grst_del + 2; cnt++) { + for (cnt = 0; cnt < grst_del + 10; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; @@ -1620,6 +1606,9 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) status = I40E_ERR_UNKNOWN_PHY; + if (report_init) + hw->phy.phy_types = 
le32_to_cpu(abilities->phy_type); + return status; } @@ -1720,14 +1709,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; @@ -2238,27 +2227,54 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, /** * i40e_get_link_status - get status of the HW network link * @hw: pointer to the hw struct + * @link_up: pointer to bool (true/false = linkup/linkdown) * - * Returns true if link is up, false if link is down. + * Variable link_up true if link is up, false if link is down. + * The variable link_up is invalid if returned value of status != 0 * * Side effect: LinkStatusEvent reporting becomes enabled **/ -bool i40e_get_link_status(struct i40e_hw *hw) +i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) { i40e_status status = 0; - bool link_status = false; if (hw->phy.get_link_info) { - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) - goto i40e_get_link_status_exit; + i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", + status); } - link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + + return status; +} + +/** + * i40e_update_link_info - update status of the HW network link + * @hw: pointer to the hw struct + **/ +i40e_status i40e_update_link_info(struct i40e_hw *hw) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + i40e_status status = 0; + + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + if (status) + return status; + + if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) { + status = i40e_aq_get_phy_capabilities(hw, false, false, + &abilities, NULL); + if (status) + return status; -i40e_get_link_status_exit: - return link_status; + memcpy(hw->phy.link_info.module_type, &abilities.module_type, + sizeof(hw->phy.link_info.module_type)); + } + + return status; } /** @@ -2365,6 +2381,7 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, *vebs_free = le16_to_cpu(cmd_resp->vebs_free); if (floating) { u16 flags = le16_to_cpu(cmd_resp->veb_flags); + if (flags & I40E_AQC_ADD_VEB_FLOATING) *floating = true; else @@ -3779,7 +3796,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } if (mac_addr) - memcpy(cmd->mac, mac_addr, ETH_ALEN); + ether_addr_copy(cmd->mac, mac_addr); cmd->etype = cpu_to_le16(ethtype); cmd->flags = cpu_to_le16(flags); @@ -3798,6 +3815,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control + * @hw: pointer to the hw struct + * @seid: VSI seid to add ethertype filter from + **/ +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 seid) +{ + u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; + i40e_status status; + + status =
i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, + seid, 0, true, NULL, + NULL); + if (status) + hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); +} + +/** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 90de46aef557..2691277c0055 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -292,6 +292,190 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, } /** + * i40e_parse_cee_pgcfg_tlv + * @tlv: CEE DCBX PG CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses CEE DCBX PG CFG TLV + **/ +static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + etscfg = &dcbcfg->etscfg; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + etscfg->willing = 1; + + etscfg->cbs = 0; + /* Priority Group Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* PG Percentage Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = buf[offset++]; + + /* Number of TCs supported (1 octet) */ + etscfg->maxtcs = buf[offset]; +} + +/** + * i40e_parse_cee_pfccfg_tlv + * @tlv: CEE DCBX PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses CEE DCBX PFC CFG TLV + **/ +static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + dcbcfg->pfc.willing = 1; + + /* ------------------------ + * | PFC Enable | PFC TCs | + * ------------------------ + * | 1 octet | 1 octet | + */ + dcbcfg->pfc.pfcenable = buf[0]; + dcbcfg->pfc.pfccap = buf[1]; +} + +/** + * i40e_parse_cee_app_tlv + * @tlv: CEE DCBX APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses CEE DCBX APP PRIO TLV + **/ +static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 length, typelength, offset = 0; + struct i40e_cee_app_prio *app; + u8 i, up, selector; + + typelength = ntohs(tlv->hdr.typelen); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + + dcbcfg->numapps = length / sizeof(*app); + if (!dcbcfg->numapps) + return; + + for (i = 0; i < dcbcfg->numapps; i++) { + app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); + for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { + if (app->prio_map & BIT(up)) + break; + } + dcbcfg->app[i].priority = up; + + /* Get Selector from lower 2 bits, and convert to IEEE */ + 
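A worked example of the nibble unpacking i40e_parse_cee_pgcfg_tlv() performs above: each octet of the CEE Priority Group table carries two 4-bit priority-to-PG assignments, high nibble first. The DEMO_* mask and shift values are assumptions chosen to match the layout diagram in the comment, not the driver's defines.

#include <stdio.h>

#define DEMO_PGID_PRIO_1_MASK	0xF0	/* assumed: high nibble, even prio */
#define DEMO_PGID_PRIO_1_SHIFT	4
#define DEMO_PGID_PRIO_0_MASK	0x0F	/* assumed: low nibble, odd prio */

int main(void)
{
	unsigned char buf[4] = { 0x01, 0x23, 0x45, 0x67 };
	unsigned char prioritytable[8];
	int i;

	for (i = 0; i < 4; i++) {
		prioritytable[i * 2] = (buf[i] & DEMO_PGID_PRIO_1_MASK) >>
				       DEMO_PGID_PRIO_1_SHIFT;
		prioritytable[i * 2 + 1] = buf[i] & DEMO_PGID_PRIO_0_MASK;
	}
	for (i = 0; i < 8; i++)		/* with this input: prio N -> PG N */
		printf("prio %d -> PG %u\n", i, prioritytable[i]);
	return 0;
}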
selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); + if (selector == I40E_CEE_APP_SEL_ETHTYPE) + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + else if (selector == I40E_CEE_APP_SEL_TCPIP) + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + else + /* Keep selector as it is for unknown types */ + dcbcfg->app[i].selector = selector; + + dcbcfg->app[i].protocolid = ntohs(app->protocol); + /* Move to next app */ + offset += sizeof(*app); + } +} + +/** + * i40e_parse_cee_tlv + * @tlv: CEE DCBX TLV + * @dcbcfg: Local store to update DCBX config data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 len, tlvlen, sublen, typelength; + struct i40e_cee_feat_tlv *sub_tlv; + u8 subtype, feat_tlv_count = 0; + u32 ouisubtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + /* Return if not CEE DCBX */ + if (subtype != I40E_CEE_DCBX_TYPE) + return; + + typelength = ntohs(tlv->typelength); + tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + len = sizeof(tlv->typelength) + sizeof(ouisubtype) + + sizeof(struct i40e_cee_ctrl_tlv); + /* Return if no CEE DCBX Feature TLVs */ + if (tlvlen <= len) + return; + + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len); + while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) { + typelength = ntohs(sub_tlv->hdr.typelen); + sublen = (u16)((typelength & + I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + switch (subtype) { + case I40E_CEE_SUBTYPE_PG_CFG: + i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_PFC_CFG: + i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_APP_PRI: + i40e_parse_cee_app_tlv(sub_tlv, dcbcfg); + break; + default: + return; /* Invalid Sub-type return */ + } + feat_tlv_count++; + /* Move to next sub TLV */ + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv + + sizeof(sub_tlv->hdr.typelen) + + sublen); + } +} + +/** * i40e_parse_org_tlv * @tlv: Organization specific TLV * @dcbcfg: Local store to update ETS REC data @@ -312,6 +496,9 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, case I40E_IEEE_8021QAZ_OUI: i40e_parse_ieee_tlv(tlv, dcbcfg); break; + case I40E_CEE_DCBX_OUI: + i40e_parse_cee_tlv(tlv, dcbcfg); + break; default: break; } @@ -502,15 +689,18 @@ static void i40e_cee_to_dcb_config( /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. 
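The sub-TLV loop in i40e_parse_cee_tlv() above follows the standard LLDP walk: split a 16-bit big-endian typelen word into a 7-bit type and a 9-bit length, dispatch on the type, then advance by the header size plus the payload length. Below is a self-contained sketch of that walk, with a total-length bounds check standing in for the driver's fixed feature-TLV count; the 2-byte header is a simplification of the driver's 4-byte i40e_cee_tlv_hdr.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define DEMO_TLV_LEN_MASK	0x01FF	/* low 9 bits: payload length */
#define DEMO_TLV_TYPE_SHIFT	9	/* high 7 bits: TLV type */

static void demo_walk_tlvs(const uint8_t *p, size_t total)
{
	uint16_t typelen, len, type;
	size_t off = 0;

	while (off + 2 <= total) {
		memcpy(&typelen, p + off, sizeof(typelen));
		typelen = ntohs(typelen);
		len = typelen & DEMO_TLV_LEN_MASK;
		type = typelen >> DEMO_TLV_TYPE_SHIFT;

		if (off + 2 + len > total)
			break;		/* truncated TLV: stop, don't overrun */
		(void)type;		/* dispatch on type/subtype here */
		off += 2 + len;		/* header + payload */
	}
}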
+ */ for (i = 0; i < 4; i++) { tc = (u8)((cee_cfg->oper_prio_tc[i] & - I40E_CEE_PGID_PRIO_1_MASK) >> - I40E_CEE_PGID_PRIO_1_SHIFT); - dcbcfg->etscfg.prioritytable[i*2] = tc; - tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); - dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + dcbcfg->etscfg.prioritytable[i * 2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) @@ -531,37 +721,85 @@ static void i40e_cee_to_dcb_config( dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; - status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> - I40E_AQC_CEE_APP_STATUS_SHIFT; + i = 0; + status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >> + I40E_AQC_CEE_FCOE_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; - /* Add APPs if Error is False and Oper/Sync is True */ + /* Add FCoE APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { - /* CEE operating configuration supports FCoE/iSCSI/FIP only */ - dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; - /* FCoE APP */ - dcbcfg->app[0].priority = + dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> I40E_AQC_CEE_APP_FCOE_SHIFT; - dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; - dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE; + i++; + } + status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >> + I40E_AQC_CEE_ISCSI_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; + /* Add iSCSI APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { /* iSCSI APP */ - dcbcfg->app[1].priority = + dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> I40E_AQC_CEE_APP_ISCSI_SHIFT; - dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; - dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI; + i++; + } + status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >> + I40E_AQC_CEE_FIP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add FIP APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { /* FIP APP */ - dcbcfg->app[2].priority = + dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> I40E_AQC_CEE_APP_FIP_SHIFT; - dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; - dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP; + i++; } + dcbcfg->numapps = i; +} + +/** + * i40e_get_ieee_dcb_config + * @hw: pointer to the hw struct + * + * Get IEEE mode DCB configuration from the Firmware + **/ +static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw) +{ + i40e_status ret = 0; + + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = 0; + +out: + return ret; } /** @@ -579,7 +817,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) /* If Firmware version < v4.33 IEEE only */ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) - goto ieee; + return i40e_get_ieee_dcb_config(hw); /* If Firmware version == v4.33 use old CEE struct */ if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { @@ -608,16 +846,14 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) /* CEE mode not enabled try querying IEEE data */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) - goto ieee; - else + return i40e_get_ieee_dcb_config(hw); + + if (ret) goto out; -ieee: - /* IEEE mode */ - hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; - /* Get Local DCB Config */ + /* Get CEE DCB Desired Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, - &hw->local_dcbx_config); + &hw->desired_dcbx_config); if (ret) goto out; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 50fc894a4cde..92d01042c1f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -44,6 +44,15 @@ #define I40E_IEEE_SUBTYPE_PFC_CFG 11 #define I40E_IEEE_SUBTYPE_APP_PRI 12 +#define I40E_CEE_DCBX_OUI 0x001b21 +#define I40E_CEE_DCBX_TYPE 2 + +#define I40E_CEE_SUBTYPE_CTRL 1 +#define I40E_CEE_SUBTYPE_PG_CFG 2 +#define I40E_CEE_SUBTYPE_PFC_CFG 3 +#define I40E_CEE_SUBTYPE_APP_PRI 4 + +#define I40E_CEE_MAX_FEAT_TYPE 3 /* Defines for LLDP TLV header */ #define I40E_LLDP_TLV_LEN_SHIFT 0 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) @@ -98,6 +107,36 @@ struct i40e_lldp_org_tlv { __be32 ouisubtype; u8 tlvinfo[1]; }; + +struct i40e_cee_tlv_hdr { + __be16 typelen; + u8 operver; + u8 maxver; +}; + +struct i40e_cee_ctrl_tlv { + struct i40e_cee_tlv_hdr hdr; + __be32 seqno; + __be32 ackno; +}; + +struct i40e_cee_feat_tlv { + struct i40e_cee_tlv_hdr hdr; + u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ +#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 +#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 +#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 + u8 subtype; + u8 tlvinfo[1]; +}; + +struct i40e_cee_app_prio { + __be16 protocol; + u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ +#define I40E_CEE_APP_SELECTOR_MASK 0x03 + __be16 lower_oui; 
+ u8 prio_map; +}; #pragma pack() i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 1c51f736a8d0..886e667f2f1c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -236,14 +236,13 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf, struct i40e_dcb_app_priority_table *app) { int v, err; + for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && pf->vsi[v]->netdev) { err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); - if (err) - dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", - __func__, pf->vsi[v]->seid, - err, app->selector, - app->protocolid, app->priority); + dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", + pf->vsi[v]->seid, err, app->selector, + app->protocolid, app->priority); } } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d7c15d17faa6..d4b7af9a2fc8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -404,82 +404,82 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", - (long unsigned int)nstat->rx_packets, - (long unsigned int)nstat->rx_bytes, - (long unsigned int)nstat->rx_errors, - (long unsigned int)nstat->rx_dropped); + (unsigned long int)nstat->rx_packets, + (unsigned long int)nstat->rx_bytes, + (unsigned long int)nstat->rx_errors, + (unsigned long int)nstat->rx_dropped); dev_info(&pf->pdev->dev, " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", - (long unsigned int)nstat->tx_packets, - (long unsigned int)nstat->tx_bytes, - (long unsigned int)nstat->tx_errors, - (long unsigned int)nstat->tx_dropped); + (unsigned long int)nstat->tx_packets, + (unsigned long int)nstat->tx_bytes, + (unsigned long int)nstat->tx_errors, + (unsigned long int)nstat->tx_dropped); dev_info(&pf->pdev->dev, " net_stats: multicast = %lu, collisions = %lu\n", - (long unsigned int)nstat->multicast, - (long unsigned int)nstat->collisions); + (unsigned long int)nstat->multicast, + (unsigned long int)nstat->collisions); dev_info(&pf->pdev->dev, " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", - (long unsigned int)nstat->rx_length_errors, - (long unsigned int)nstat->rx_over_errors, - (long unsigned int)nstat->rx_crc_errors); + (unsigned long int)nstat->rx_length_errors, + (unsigned long int)nstat->rx_over_errors, + (unsigned long int)nstat->rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", - (long unsigned int)nstat->rx_frame_errors, - (long unsigned int)nstat->rx_fifo_errors, - (long unsigned int)nstat->rx_missed_errors); + (unsigned long int)nstat->rx_frame_errors, + (unsigned long int)nstat->rx_fifo_errors, + (unsigned long int)nstat->rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", - (long unsigned int)nstat->tx_aborted_errors, - (long unsigned int)nstat->tx_carrier_errors, - (long unsigned int)nstat->tx_fifo_errors); + (unsigned long int)nstat->tx_aborted_errors, + (unsigned long int)nstat->tx_carrier_errors, + 
(unsigned long int)nstat->tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", - (long unsigned int)nstat->tx_heartbeat_errors, - (long unsigned int)nstat->tx_window_errors); + (unsigned long int)nstat->tx_heartbeat_errors, + (unsigned long int)nstat->tx_window_errors); dev_info(&pf->pdev->dev, " net_stats: rx_compressed = %lu, tx_compressed = %lu\n", - (long unsigned int)nstat->rx_compressed, - (long unsigned int)nstat->tx_compressed); + (unsigned long int)nstat->rx_compressed, + (unsigned long int)nstat->tx_compressed); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_packets, - (long unsigned int)vsi->net_stats_offsets.rx_bytes, - (long unsigned int)vsi->net_stats_offsets.rx_errors, - (long unsigned int)vsi->net_stats_offsets.rx_dropped); + (unsigned long int)vsi->net_stats_offsets.rx_packets, + (unsigned long int)vsi->net_stats_offsets.rx_bytes, + (unsigned long int)vsi->net_stats_offsets.rx_errors, + (unsigned long int)vsi->net_stats_offsets.rx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", - (long unsigned int)vsi->net_stats_offsets.tx_packets, - (long unsigned int)vsi->net_stats_offsets.tx_bytes, - (long unsigned int)vsi->net_stats_offsets.tx_errors, - (long unsigned int)vsi->net_stats_offsets.tx_dropped); + (unsigned long int)vsi->net_stats_offsets.tx_packets, + (unsigned long int)vsi->net_stats_offsets.tx_bytes, + (unsigned long int)vsi->net_stats_offsets.tx_errors, + (unsigned long int)vsi->net_stats_offsets.tx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: multicast = %lu, collisions = %lu\n", - (long unsigned int)vsi->net_stats_offsets.multicast, - (long unsigned int)vsi->net_stats_offsets.collisions); + (unsigned long int)vsi->net_stats_offsets.multicast, + (unsigned long int)vsi->net_stats_offsets.collisions); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_length_errors, - (long unsigned int)vsi->net_stats_offsets.rx_over_errors, - (long unsigned int)vsi->net_stats_offsets.rx_crc_errors); + (unsigned long int)vsi->net_stats_offsets.rx_length_errors, + (unsigned long int)vsi->net_stats_offsets.rx_over_errors, + (unsigned long int)vsi->net_stats_offsets.rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_frame_errors, - (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors, - (long unsigned int)vsi->net_stats_offsets.rx_missed_errors); + (unsigned long int)vsi->net_stats_offsets.rx_frame_errors, + (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors, + (unsigned long int)vsi->net_stats_offsets.rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors, - (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors, - (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors); + (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors, + (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors, + (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_heartbeat_errors = %lu, 
tx_window_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors, - (long unsigned int)vsi->net_stats_offsets.tx_window_errors); + (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors, + (unsigned long int)vsi->net_stats_offsets.tx_window_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_compressed, - (long unsigned int)vsi->net_stats_offsets.tx_compressed); + (unsigned long int)vsi->net_stats_offsets.rx_compressed, + (unsigned long int)vsi->net_stats_offsets.tx_compressed); dev_info(&pf->pdev->dev, " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", vsi->tx_restart, vsi->tx_busy, @@ -487,6 +487,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + if (!rx_ring) continue; @@ -527,7 +528,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i, dma = 0x%08lx\n", i, rx_ring->size, - (long unsigned int)rx_ring->dma); + (unsigned long int)rx_ring->dma); dev_info(&pf->pdev->dev, " rx_rings[%i]: vsi = %p, q_vector = %p\n", i, rx_ring->vsi, @@ -535,6 +536,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) continue; @@ -573,7 +575,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " tx_rings[%i]: size = %i, dma = 0x%08lx\n", i, tx_ring->size, - (long unsigned int)tx_ring->dma); + (unsigned long int)tx_ring->dma); dev_info(&pf->pdev->dev, " tx_rings[%i]: vsi = %p, q_vector = %p\n", i, tx_ring->vsi, @@ -743,6 +745,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) ring = &(hw->aq.asq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, @@ -755,6 +758,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) ring = &(hw->aq.arq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, @@ -949,24 +953,6 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) } } -/** - * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR - * @pf: the PF that would be altered - * @flag: flag that needs enabling or disabling - * @enable: Enable/disable FD SD/ATR - **/ -static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) -{ - if (enable) { - pf->flags |= flag; - } else { - pf->flags &= ~flag; - pf->auto_disable_flags |= flag; - } - dev_info(&pf->pdev->dev, "requesting a PF reset\n"); - i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); -} - #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum @@ -1038,7 +1024,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf); } else if (strncmp(cmd_buf, "del vsi", 7) == 0) { - sscanf(&cmd_buf[7], "%i", &vsi_seid); + cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, + 
"del vsi: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n", @@ -1145,8 +1137,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, ma, vlan, false, false); - ret = i40e_sync_vsi_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + ret = i40e_sync_vsi_filters(vsi, true); if (f && !ret) dev_info(&pf->pdev->dev, "add macaddr: %pM vlan=%d added to VSI %d\n", @@ -1182,8 +1176,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, ma, vlan, false, false); - ret = i40e_sync_vsi_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + ret = i40e_sync_vsi_filters(vsi, true); if (!ret) dev_info(&pf->pdev->dev, "del macaddr: %pM vlan=%d removed from VSI %d\n", @@ -1488,6 +1484,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; + cnt = sscanf(&cmd_buf[4], "%i", &address); if (cnt != 1) { dev_info(&pf->pdev->dev, "read <reg>\n"); @@ -1495,9 +1492,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } /* check the range on address */ - if (address >= I40E_MAX_REGISTER) { - dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n", - address); + if (address > (pf->ioremap_len - sizeof(u32))) { + dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n", + address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); goto command_write_done; } @@ -1507,6 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, "write", 5) == 0) { u32 address, value; + cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); if (cnt != 2) { dev_info(&pf->pdev->dev, "write <reg> <value>\n"); @@ -1514,9 +1512,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } /* check the range on address */ - if (address >= I40E_MAX_REGISTER) { - dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n", - address); + if (address > (pf->ioremap_len - sizeof(u32))) { + dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n", + address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); goto command_write_done; } wr32(&pf->hw, address, value); @@ -1528,6 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); if (cnt == 0) { int i; + for (i = 0; i < pf->num_alloc_vsi; i++) i40e_vsi_reset_stats(pf->vsi[i]); dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); @@ -1726,8 +1725,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE); for (i = 0; i < packet_len; i++) { - sscanf(&asc_packet[j], "%2hhx ", - &raw_packet[i]); + cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]); + if (!cnt) + break; j += 3; } dev_info(&pf->pdev->dev, "FD raw packet dump\n"); @@ -1745,16 +1745,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp, raw_packet = NULL; kfree(asc_packet); asc_packet = NULL; - } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { - i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); - } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { - i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) { 
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_get_current_fd_count(pf)); } else if (strncmp(cmd_buf, "lldp", 4) == 0) { if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; + ret = i40e_aq_stop_lldp(&pf->hw, false, NULL); if (ret) { dev_info(&pf->pdev->dev, @@ -1779,6 +1776,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, #endif /* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { int ret; + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, I40E_ETH_P_LLDP, 0, @@ -1807,6 +1805,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, u16 llen, rlen; int ret; u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; @@ -1833,6 +1832,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, u16 llen, rlen; int ret; u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; @@ -1858,6 +1858,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, buff = NULL; } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, true, NULL); if (ret) { @@ -1868,6 +1869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) { int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL); if (ret) { @@ -1969,8 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n"); dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); - dev_info(&pf->pdev->dev, " fd-atr off\n"); - dev_info(&pf->pdev->dev, " fd-atr on\n"); dev_info(&pf->pdev->dev, " fd current cnt"); dev_info(&pf->pdev->dev, " lldp start\n"); dev_info(&pf->pdev->dev, " lldp stop\n"); @@ -2105,6 +2105,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, } } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { int mtu; + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i", &vsi_seid, &mtu); if (cnt != 2) { @@ -2220,7 +2221,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf) create_failed: dev_info(dev, "debugfs dir/file for %s failed\n", name); debugfs_remove_recursive(pf->i40e_dbg_pf); - return; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h new file mode 100644 index 000000000000..c601ca4a610c --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -0,0 +1,55 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e972b5ecbf0b..3f385ffe420f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -87,11 +87,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), + I40E_VSI_STAT("tx_linearize", tx_linearize), }; -static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, - struct ethtool_rxnfc *cmd); - /* These PF_STATs might look like duplicates of some NETDEV_STATs, * but they are separate. 
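The I40E_VSI_STAT entries above use the usual name-plus-offset stat table, which the ethtool stats path later walks by adding each stored offset to the stats struct base. A standalone sketch of that pattern follows; the struct, field, and table names are invented for the example.

#include <stdio.h>
#include <stddef.h>

struct demo_stats {
	unsigned long rx_packets;
	unsigned long tx_linearize;
};

struct demo_stat_desc {
	const char *name;
	size_t offset;
};

#define DEMO_STAT(n, f) { n, offsetof(struct demo_stats, f) }

static const struct demo_stat_desc demo_table[] = {
	DEMO_STAT("rx_packets", rx_packets),
	DEMO_STAT("tx_linearize", tx_linearize),
};

int main(void)
{
	struct demo_stats s = { 12, 3 };
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
		const char *base = (const char *)&s;
		unsigned long v = *(const unsigned long *)
				  (base + demo_table[i].offset);

		printf("%s = %lu\n", demo_table[i].name, v);
	}
	return 0;
}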
This device supports Virtualization, and * as such might have several netdevs supporting VMDq and FCoE going @@ -229,10 +227,12 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "NPAR", + "LinkPolling", + "flow-director-atr", + "veb-stats", }; -#define I40E_PRIV_FLAGS_STR_LEN \ - (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) +#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) /** * i40e_partition_setting_complaint - generic complaint for MFP restriction @@ -253,7 +253,8 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) **/ static void i40e_get_settings_link_up(struct i40e_hw *hw, struct ethtool_cmd *ecmd, - struct net_device *netdev) + struct net_device *netdev, + struct i40e_pf *pf) { struct i40e_link_status *hw_link_info = &hw->phy.link_info; u32 link_speed = hw_link_info->link_speed; @@ -272,65 +273,49 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_40GBASE_AOC: ecmd->supported = SUPPORTED_40000baseCR4_Full; break; - case I40E_PHY_TYPE_40GBASE_KR4: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_40000baseKR4_Full; - break; case I40E_PHY_TYPE_40GBASE_SR4: ecmd->supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: ecmd->supported = SUPPORTED_40000baseLR4_Full; break; - case I40E_PHY_TYPE_20GBASE_KR2: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_20000baseKR2_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_20000baseKR2_Full; - break; - case I40E_PHY_TYPE_10GBASE_KX4: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseKX4_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseKX4_Full; - break; - case I40E_PHY_TYPE_10GBASE_KR: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseKR_Full; - break; case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; + ecmd->supported = SUPPORTED_10000baseT_Full; + if (hw_link_info->module_type[2] & + I40E_MODULE_TYPE_1000BASE_SX || + hw_link_info->module_type[2] & + I40E_MODULE_TYPE_1000BASE_LX) { + ecmd->supported |= SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & + I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - break; - case I40E_PHY_TYPE_1000BASE_KX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseKX_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseKX_Full; break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_1000BASE_T: - case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; + SUPPORTED_1000baseT_Full; ecmd->advertising = ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_T_OPTICAL: + ecmd->supported = SUPPORTED_Autoneg 
| + SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_100BASE_TX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) ecmd->advertising |= ADVERTISED_100baseT_Full; break; @@ -350,12 +335,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; + SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - ecmd->advertising |= ADVERTISED_100baseT_Full; + if (pf->hw.mac.type == I40E_MAC_X722) { + ecmd->supported |= SUPPORTED_100baseT_Full; + if (hw_link_info->requested_speeds & + I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + } + break; + /* Backplane is set based on supported phy types in get_settings + * so don't set anything here but don't warn either + */ + case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_1000BASE_KX: break; default: /* if we got here and link is up something bad is afoot */ @@ -394,64 +391,73 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, * Reports link settings that can be determined when link is down **/ static void i40e_get_settings_link_down(struct i40e_hw *hw, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd, + struct i40e_pf *pf) { - struct i40e_link_status *hw_link_info = &hw->phy.link_info; + enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types; /* link is down and the driver needs to fall back on - * device ID to determine what kinds of info to display, - * it's mostly a guess that may change when link is up + * supported phy types to figure out what info to display */ - switch (hw->device_id) { - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - /* pluggable QSFP */ - ecmd->supported = SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseLR4_Full; - ecmd->advertising = ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseLR4_Full; - break; - case I40E_DEV_ID_KX_B: - /* backplane 40G */ - ecmd->supported = SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_40000baseKR4_Full; - break; - case I40E_DEV_ID_KX_C: - /* backplane 10G */ - ecmd->supported = SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_10000baseKR_Full; - break; - case I40E_DEV_ID_10G_BASE_T: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - /* Figure out what has been requested */ - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->supported = 0x0; + ecmd->advertising = 0x0; + if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + if (pf->hw.mac.type == I40E_MAC_X722) { + ecmd->supported |= SUPPORTED_100baseT_Full; ecmd->advertising |= ADVERTISED_100baseT_Full; - break; - case 
I40E_DEV_ID_20G_KR2: - /* backplane 20G */ - ecmd->supported = SUPPORTED_20000baseKR2_Full; - ecmd->advertising = ADVERTISED_20000baseKR2_Full; - break; - default: - /* all the rest are 10G/1G */ - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - /* Figure out what has been requested */ - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - break; + } } + if (phy_types & I40E_CAP_PHY_TYPE_XAUI || + phy_types & I40E_CAP_PHY_TYPE_XFI || + phy_types & I40E_CAP_PHY_TYPE_SFI || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) + ecmd->supported |= SUPPORTED_10000baseT_Full; + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || + phy_types & I40E_CAP_PHY_TYPE_XLPPI || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) + ecmd->supported |= SUPPORTED_40000baseCR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; + } + if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && + !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_100baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) + ecmd->supported |= SUPPORTED_40000baseSR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) + ecmd->supported |= SUPPORTED_40000baseLR4_Full; /* With no link speed and duplex are unknown */ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); @@ -475,12 +481,43 @@ static int i40e_get_settings(struct net_device *netdev, bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; if (link_up) - i40e_get_settings_link_up(hw, ecmd, netdev); + i40e_get_settings_link_up(hw, ecmd, netdev, pf); else - i40e_get_settings_link_down(hw, ecmd); + i40e_get_settings_link_down(hw, ecmd, pf); /* Now set the settings that don't rely on link being up/down */ + /* For backplane, supported and advertised are only reliant on the + * phy types the NVM specifies are supported. 
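i40e_get_settings_link_down() is rebuilt above around the NVM-reported phy_types capability bitmask rather than guessing from the device ID: each capability bit ORs in the matching supported/advertised modes. A simplified model of that mapping; the DEMO_CAP_*/DEMO_SUP_* bit values and the autoneg choices per type are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define DEMO_CAP_1G_T		(1u << 0)
#define DEMO_CAP_10G_T		(1u << 1)
#define DEMO_CAP_40G_CR4	(1u << 2)

#define DEMO_SUP_AUTONEG	0x1u
#define DEMO_SUP_1G		0x2u
#define DEMO_SUP_10G		0x4u
#define DEMO_SUP_40G_CR4	0x8u

static uint32_t demo_caps_to_supported(uint32_t phy_types)
{
	uint32_t supported = 0;

	if (phy_types & DEMO_CAP_1G_T)
		supported |= DEMO_SUP_AUTONEG | DEMO_SUP_1G;
	if (phy_types & DEMO_CAP_10G_T)
		supported |= DEMO_SUP_AUTONEG | DEMO_SUP_10G;
	if (phy_types & DEMO_CAP_40G_CR4)	/* example mode w/o autoneg */
		supported |= DEMO_SUP_40G_CR4;
	return supported;
}

int main(void)
{
	printf("supported = 0x%x\n",
	       demo_caps_to_supported(DEMO_CAP_10G_T | DEMO_CAP_40G_CR4));
	return 0;
}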
+ */ + if (hw->device_id == I40E_DEV_ID_KX_B || + hw->device_id == I40E_DEV_ID_KX_C || + hw->device_id == I40E_DEV_ID_20G_KR2 || + hw->device_id == I40E_DEV_ID_20G_KR2_A) { + ecmd->supported = SUPPORTED_Autoneg; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { + ecmd->supported |= SUPPORTED_40000baseKR4_Full; + ecmd->advertising |= ADVERTISED_40000baseKR4_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { + ecmd->supported |= SUPPORTED_20000baseKR2_Full; + ecmd->advertising |= ADVERTISED_20000baseKR2_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { + ecmd->supported |= SUPPORTED_10000baseKR_Full; + ecmd->advertising |= ADVERTISED_10000baseKR_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { + ecmd->supported |= SUPPORTED_10000baseKX4_Full; + ecmd->advertising |= ADVERTISED_10000baseKX4_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { + ecmd->supported |= SUPPORTED_1000baseKX_Full; + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + } + } + /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -580,6 +617,14 @@ static int i40e_set_settings(struct net_device *netdev, hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; + if (hw->device_id == I40E_DEV_ID_KX_B || + hw->device_id == I40E_DEV_ID_KX_C || + hw->device_id == I40E_DEV_ID_20G_KR2 || + hw->device_id == I40E_DEV_ID_20G_KR2_A) { + netdev_info(netdev, "Changing settings is not supported on backplane.\n"); + return -EOPNOTSUPP; + } + /* get our own copy of the bits to check against */ memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); i40e_get_settings(netdev, &safe_ecmd); @@ -616,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev, /* Check autoneg */ if (autoneg == AUTONEG_ENABLE) { - /* If autoneg is not supported, return error */ - if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { - netdev_info(netdev, "Autoneg not supported on this phy\n"); - return -EINVAL; - } /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_AN; change = true; } } else { - /* If autoneg is supported 10GBASE_T is the only phy that - * can disable it, so otherwise return error - */ - if (safe_ecmd.supported & SUPPORTED_Autoneg && - hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { - netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); - return -EINVAL; - } /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { + /* If autoneg is supported 10GBASE_T is the only PHY + * that can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg && + hw->phy.link_info.phy_type != + I40E_PHY_TYPE_10GBASE_T) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; change = true; @@ -664,6 +712,13 @@ static int i40e_set_settings(struct net_device *netdev, advertise & ADVERTISED_40000baseLR4_Full) config.link_speed |= I40E_LINK_SPEED_40GB; + /* If speed didn't get set, set it to what it currently is. 
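Two behaviors from the i40e_set_settings() rework above, shown in isolation: the autoneg support checks now fire only when the autoneg state would actually change, and a zero link_speed (possible when autoneg is off and nothing is advertised) falls back to the current speed so the PHY config is never submitted with speed 0. All names below are illustrative.

#include <stdbool.h>

/* returns 0 on success, -1 if the requested change is invalid */
static int demo_build_phy_config(bool want_an, bool have_an,
				 bool phy_supports_an,
				 unsigned int advertised_speed,
				 unsigned int current_speed,
				 unsigned int *config_speed)
{
	/* only reject enabling autoneg when it would actually change state */
	if (want_an && !have_an && !phy_supports_an)
		return -1;

	/* if advertising selected no speed, carry the current one forward */
	*config_speed = advertised_speed ? advertised_speed : current_speed;
	return 0;
}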
+ * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + if (!config.link_speed) + config.link_speed = abilities.link_speed; + if (change || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; @@ -680,7 +735,7 @@ static int i40e_set_settings(struct net_device *netdev, /* Tell the OS link is going down, the link will go * back up when fw says it is ready asynchronously */ - netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n"); + i40e_print_link_message(vsi, false); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -694,11 +749,11 @@ static int i40e_set_settings(struct net_device *netdev, return -EAGAIN; } - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) - netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n", - i40e_stat_str(hw, status), - i40e_aq_str(hw, hw->aq.asq_last_status)); + netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); } else { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -824,7 +879,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, /* Tell the OS link is going down, the link will go back up when fw * says it is ready asynchronously */ - netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n"); + i40e_print_link_message(vsi, false); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -948,9 +1003,7 @@ static int i40e_get_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && - ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || - (hw->debug_mask & I40E_DEBUG_NVM))) + if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1054,10 +1107,7 @@ static int i40e_set_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && - ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && - hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || - (hw->debug_mask & I40E_DEBUG_NVM))) + if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1077,11 +1127,10 @@ static void i40e_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, i40e_driver_version_str, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw), + strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw), sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1166,6 +1215,11 @@ static int i40e_set_ringparam(struct net_device *netdev, /* clone ring and setup updated count */ tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated in the + * setup call + */ + tx_rings[i].desc = NULL; + tx_rings[i].rx_bi = NULL; err = 
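/* the setup call below allocates fresh desc and bi arrays */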
i40e_setup_tx_descriptors(&tx_rings[i]); if (err) { while (i) { @@ -1196,6 +1250,11 @@ static int i40e_set_ringparam(struct net_device *netdev, /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_count; + /* the desc and bi pointers will be reallocated in the + * setup call + */ + rx_rings[i].desc = NULL; + rx_rings[i].rx_bi = NULL; err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) { while (i) { @@ -1263,7 +1322,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { int len = I40E_PF_STATS_LEN(netdev); - if (pf->lan_veb != I40E_NO_VEB) + if ((pf->lan_veb != I40E_NO_VEB) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) len += I40E_VEB_STATS_TOTAL; return len; } else { @@ -1336,14 +1396,22 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; - if (pf->lan_veb != I40E_NO_VEB) { + if ((pf->lan_veb != I40E_NO_VEB) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { struct i40e_veb *veb = pf->veb[pf->lan_veb]; + for (j = 0; j < I40E_VEB_STATS_LEN; j++) { p = (char *)veb; p += i40e_gstrings_veb_stats[j].stat_offset; data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { + data[i++] = veb->tc_stats.tc_tx_packets[j]; + data[i++] = veb->tc_stats.tc_tx_bytes[j]; + data[i++] = veb->tc_stats.tc_rx_packets[j]; + data[i++] = veb->tc_stats.tc_rx_bytes[j]; + } } for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { p = (char *)pf + i40e_gstrings_stats[j].stat_offset; @@ -1409,7 +1477,8 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; - if (pf->lan_veb != I40E_NO_VEB) { + if ((pf->lan_veb != I40E_NO_VEB) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { for (i = 0; i < I40E_VEB_STATS_LEN; i++) { snprintf(p, ETH_GSTRING_LEN, "veb.%s", i40e_gstrings_veb_stats[i].stat_string); @@ -1504,9 +1573,18 @@ static int i40e_link_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + i40e_status status; + bool link_up = false; netif_info(pf, hw, netdev, "link test\n"); - if (i40e_get_link_status(&pf->hw)) + status = i40e_get_link_status(&pf->hw, &link_up); + if (status) { + netif_err(pf, drv, netdev, "link query timed out, please retry test\n"); + *data = 1; + return *data; + } + + if (link_up) *data = 0; else *data = 1; @@ -1575,7 +1653,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf) int i; for (i = 0; i < pf->num_alloc_vfs; i++) - if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE) + if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states)) return true; return false; } @@ -1782,6 +1860,14 @@ static int i40e_get_coalesce(struct net_device *netdev, ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + /* we use the _usecs_high to store/set the interrupt rate limit + * that the hardware supports, that almost but not quite + * fits the original intent of the ethtool variable, + * the rx_coalesce_usecs_high limits total interrupts + * per second from both tx/rx sources. 
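+ * Both _usecs_high fields below therefore report the same per-VSI
+ * value, vsi->int_rate_limit.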
+ */ + ec->rx_coalesce_usecs_high = vsi->int_rate_limit; + ec->tx_coalesce_usecs_high = vsi->int_rate_limit; return 0; } @@ -1800,6 +1886,17 @@ static int i40e_set_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; + /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ + if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { + netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n"); + return -EINVAL; + } + vector = vsi->base_vector; if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { @@ -1813,6 +1910,8 @@ static int i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } + vsi->int_rate_limit = ec->rx_coalesce_usecs_high; + if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { vsi->tx_itr_setting = ec->tx_coalesce_usecs; @@ -1837,11 +1936,14 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + q_vector = vsi->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr); + wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); i40e_flush(hw); } @@ -2604,10 +2706,51 @@ static u32 i40e_get_priv_flags(struct net_device *dev) ret_flags |= pf->hw.func_caps.npar_enable ? I40E_PRIV_FLAGS_NPAR_FLAG : 0; + ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ? + I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0; + ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ? + I40E_PRIV_FLAGS_FD_ATR : 0; + ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? 
+ I40E_PRIV_FLAGS_VEB_STATS : 0; return ret_flags; } +/** + * i40e_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int i40e_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) + pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; + else + pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED; + + /* allow the user to control the state of the Flow + * Director ATR (Application Targeted Routing) feature + * of the driver + */ + if (flags & I40E_PRIV_FLAGS_FD_ATR) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + } else { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + } + + if (flags & I40E_PRIV_FLAGS_VEB_STATS) + pf->flags |= I40E_FLAG_VEB_STATS_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + + return 0; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_settings = i40e_get_settings, .set_settings = i40e_set_settings, @@ -2644,6 +2787,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_channels = i40e_set_channels, .get_ts_info = i40e_get_ts_info, .get_priv_flags = i40e_get_priv_flags, + .set_priv_flags = i40e_set_priv_flags, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 5ea75dd537d6..fe5d9bf3ed6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -272,10 +272,8 @@ out: /** * i40e_fcoe_sw_init - sets up the HW for FCoE * @pf: pointer to PF - * - * Returns 0 if FCoE is supported otherwise the error code **/ -int i40e_init_pf_fcoe(struct i40e_pf *pf) +void i40e_init_pf_fcoe(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; @@ -286,14 +284,14 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) pf->fcoe_hmc_filt_num = 0; if (!pf->hw.func_caps.fcoe) { - dev_info(&pf->pdev->dev, "FCoE capability is disabled\n"); - return 0; + dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n"); + return; } if (!pf->hw.func_caps.dcb) { dev_warn(&pf->pdev->dev, "Hardware is not DCB capable not enabling FCoE.\n"); - return 0; + return; } /* enable FCoE hash filter */ @@ -326,7 +324,6 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) wr32(hw, I40E_GLFCOE_RCTL, val); dev_info(&pf->pdev->dev, "FCoE is supported.\n"); - return 0; } /** @@ -1519,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) * same PCI function. 
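* The dev_port assignment just below exists for exactly that reason.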
*/ netdev->dev_port = 1; + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* use san mac */ ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index fa371a2a40c6..79ae7beeafe5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -431,9 +431,8 @@ exit_sd_error: pd_idx1 = max(pd_idx, ((j - 1) * I40E_HMC_MAX_BP_COUNT)); pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); - for (i = pd_idx1; i < pd_lmt1; i++) { + for (i = pd_idx1; i < pd_lmt1; i++) i40e_remove_pd_bp(hw, info->hmc_info, i); - } i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); break; case I40E_SD_TYPE_DIRECT: diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2fdf978ae6a5..b825f978d441 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 9 +#define DRV_VERSION_BUILD 46 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -75,10 +75,13 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ {0, } }; @@ -213,10 +216,10 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, ret = i; pile->search_hint = i + j; break; - } else { - /* not enough, so skip over it and continue looking */ - i += j; } + + /* not enough, so skip over it and continue looking */ + i += j; } return ret; @@ -299,25 +302,69 @@ static void i40e_tx_timeout(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + struct i40e_ring *tx_ring = NULL; + unsigned int i, hung_queue = 0; + u32 head, val; pf->tx_timeout_count++; + /* find the stopped queue the same way the stack does */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start ? 
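/* GNU ?: keeps the queue's own stamp when it is nonzero */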
: netdev->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + hung_queue = i; + break; + } + } + + if (i == netdev->num_tx_queues) { + netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); + } else { + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (hung_queue == + vsi->tx_rings[i]->queue_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + } + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) - pf->tx_timeout_recovery_level = 1; + pf->tx_timeout_recovery_level = 1; /* reset after some time */ + else if (time_before(jiffies, + (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ + + if (tx_ring) { + head = i40e_get_head(tx_ring); + /* Read interrupt register */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + val = rd32(&pf->hw, + I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + + tx_ring->vsi->base_vector - 1)); + else + val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); + + netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + vsi->seid, hung_queue, tx_ring->next_to_clean, + head, tx_ring->next_to_use, + readl(tx_ring->tail), val); + } + pf->tx_timeout_last_recovery = jiffies; - netdev_info(netdev, "tx_timeout recovery level %d\n", - pf->tx_timeout_recovery_level); + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); switch (pf->tx_timeout_recovery_level) { - case 0: - /* disable and re-enable queues for the VSI */ - if (in_interrupt()) { - set_bit(__I40E_REINIT_REQUESTED, &pf->state); - set_bit(__I40E_REINIT_REQUESTED, &vsi->state); - } else { - i40e_vsi_reinit_locked(vsi); - } - break; case 1: set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); break; @@ -329,10 +376,9 @@ static void i40e_tx_timeout(struct net_device *netdev) break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); - set_bit(__I40E_DOWN_REQUESTED, &pf->state); - set_bit(__I40E_DOWN_REQUESTED, &vsi->state); break; } + i40e_service_event_schedule(pf); pf->tx_timeout_recovery_level++; } @@ -431,6 +477,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( stats->tx_errors = vsi_stats->tx_errors; stats->tx_dropped = vsi_stats->tx_dropped; stats->rx_errors = vsi_stats->rx_errors; + stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; @@ -456,11 +503,11 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); if (vsi->rx_rings && vsi->rx_rings[0]) { for (i = 0; i < vsi->num_queue_pairs; i++) { - memset(&vsi->rx_rings[i]->stats, 0 , + memset(&vsi->rx_rings[i]->stats, 0, sizeof(vsi->rx_rings[i]->stats)); - memset(&vsi->rx_rings[i]->rx_stats, 0 , + memset(&vsi->rx_rings[i]->rx_stats, 0, sizeof(vsi->rx_rings[i]->rx_stats)); - memset(&vsi->tx_rings[i]->stats, 0 , + memset(&vsi->tx_rings[i]->stats, 0, sizeof(vsi->tx_rings[i]->stats)); memset(&vsi->tx_rings[i]->tx_stats, 0, sizeof(vsi->tx_rings[i]->tx_stats)); @@ -754,7 +801,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf) struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw *hw = &pf->hw; u64 xoff = 0; - u16 i, v; if ((hw->fc.current_mode != I40E_FC_FULL) && (hw->fc.current_mode != I40E_FC_RX_PAUSE)) @@ 
-769,18 +815,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf) if (!(nsd->link_xoff_rx - xoff)) return; - /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - - if (!vsi || !vsi->tx_rings[0]) - continue; - - for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *ring = vsi->tx_rings[i]; - clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); - } - } } /** @@ -796,7 +830,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; struct i40e_dcbx_config *dcb_cfg; struct i40e_hw *hw = &pf->hw; - u16 i, v; + u16 i; u8 tc; dcb_cfg = &hw->local_dcbx_config; @@ -809,6 +843,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { u64 prio_xoff = nsd->priority_xoff_rx[i]; + i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_rx[i], @@ -821,23 +856,6 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) tc = dcb_cfg->etscfg.prioritytable[i]; xoff[tc] = true; } - - /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - - if (!vsi || !vsi->tx_rings[0]) - continue; - - for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *ring = vsi->tx_rings[i]; - - tc = ring->dcb_tc; - if (xoff[tc]) - clear_bit(__I40E_HANG_CHECK_ARMED, - &ring->state); - } - } } /** @@ -862,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) u32 rx_page, rx_buf; u64 bytes, packets; unsigned int start; + u64 tx_linearize; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -880,7 +899,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) */ rx_b = rx_p = 0; tx_b = tx_p = 0; - tx_restart = tx_busy = 0; + tx_restart = tx_busy = tx_linearize = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -897,6 +916,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_p += packets; tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; + tx_linearize += p->tx_stats.tx_linearize; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -913,6 +933,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rcu_read_unlock(); vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; + vsi->tx_linearize = tx_linearize; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1256,7 +1277,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) * so we have to go through all the list in order to make sure */ list_for_each_entry(f, &vsi->mac_filter_list, list) { - if (f->vlan >= 0) + if (f->vlan >= 0 || vsi->info.pvid) return true; } @@ -1334,6 +1355,9 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. **/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1392,6 +1416,9 @@ add_filter_out: * @vlan: the vlan * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. 
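+ * Callers bracket it with spin_lock_bh()/spin_unlock_bh() on that lock.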
**/ void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1419,6 +1446,7 @@ void i40e_del_filter(struct i40e_vsi *vsi, } else { /* make sure we don't remove a filter in use by VF or netdev */ int min_f = 0; + min_f += (f->is_vf ? 1 : 0); min_f += (f->is_netdev ? 1 : 0); @@ -1477,6 +1505,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; + ret = i40e_aq_mac_address_write(&vsi->back->hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); @@ -1496,8 +1525,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { @@ -1508,13 +1539,15 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false); if (f) f->is_laa = true; + spin_unlock_bh(&vsi->mac_filter_list_lock); } - i40e_sync_vsi_filters(vsi); + i40e_sync_vsi_filters(vsi, false); ether_addr_copy(netdev->dev_addr, addr->sa_data); return 0; @@ -1684,6 +1717,8 @@ static void i40e_set_rx_mode(struct net_device *netdev) struct netdev_hw_addr *mca; struct netdev_hw_addr *ha; + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { if (!i40e_find_mac(vsi, uca->addr, false, true)) { @@ -1709,37 +1744,29 @@ static void i40e_set_rx_mode(struct net_device *netdev) /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - bool found = false; if (!f->is_netdev) continue; - if (is_multicast_ether_addr(f->macaddr)) { - netdev_for_each_mc_addr(mca, netdev) { - if (ether_addr_equal(mca->addr, f->macaddr)) { - found = true; - break; - } - } - } else { - netdev_for_each_uc_addr(uca, netdev) { - if (ether_addr_equal(uca->addr, f->macaddr)) { - found = true; - break; - } - } + netdev_for_each_mc_addr(mca, netdev) + if (ether_addr_equal(mca->addr, f->macaddr)) + goto bottom_of_search_loop; - for_each_dev_addr(netdev, ha) { - if (ether_addr_equal(ha->addr, f->macaddr)) { - found = true; - break; - } - } - } - if (!found) - i40e_del_filter( - vsi, f->macaddr, I40E_VLAN_ANY, false, true); + netdev_for_each_uc_addr(uca, netdev) + if (ether_addr_equal(uca->addr, f->macaddr)) + goto bottom_of_search_loop; + + for_each_dev_addr(netdev, ha) + if (ether_addr_equal(ha->addr, f->macaddr)) + goto bottom_of_search_loop; + + /* f->macaddr wasn't found in uc, mc, or ha list so delete it */ + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true); + +bottom_of_search_loop: + continue; } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { @@ -1749,20 +1776,96 @@ static void i40e_set_rx_mode(struct net_device *netdev) } /** + * i40e_mac_filter_entry_clone - Clones a MAC filter entry + * @src: source MAC filter entry to be clones + * + * Returns the pointer to newly cloned MAC filter entry or NULL + * in case of error + **/ +static struct i40e_mac_filter *i40e_mac_filter_entry_clone( + struct 
i40e_mac_filter *src) +{ + struct i40e_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + *f = *src; + + INIT_LIST_HEAD(&f->list); + + return f; +} + +/** + * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * @from: Pointer to list which contains MAC filter entries - changes to + * those entries need to be undone. + * + * MAC filter entries from list were slated to be removed from the device. + **/ +static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, + struct list_head *from) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, from, list) { + f->changed = true; + /* Move the element back into the MAC filter list */ + list_move_tail(&f->list, &vsi->mac_filter_list); + } +} + +/** + * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * + * MAC filter entries from list were slated to be added to the device. + **/ +static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed && f->counter) + f->changed = true; + } +} + +/** + * i40e_cleanup_add_list - Delete the elements from the add list and release + * their memory + * @add_list: Pointer to list which contains MAC filter entries + **/ +static void i40e_cleanup_add_list(struct list_head *add_list) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, add_list, list) { + list_del(&f->list); + kfree(f); + } +} + +/** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI + * @grab_rtnl: whether RTNL needs to be grabbed * * Push any outstanding VSI filter changes through the AdminQ. 
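* Changed entries are first staged onto temporary lists under
* mac_filter_list_lock; the AdminQ del/add calls then run with the lock
* dropped, and the undo helpers above restore the list if staging fails.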
* * Returns 0 or error value **/ -int i40e_sync_vsi_filters(struct i40e_vsi *vsi) +int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) { - struct i40e_mac_filter *f, *ftmp; + struct list_head tmp_del_list, tmp_add_list; + struct i40e_mac_filter *f, *ftmp, *fclone; bool promisc_forced_on = false; bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; + bool err_cond = false; i40e_status ret = 0; struct i40e_pf *pf; int num_add = 0; @@ -1783,17 +1886,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi->current_netdev_flags = vsi->netdev->flags; } + INIT_LIST_HEAD(&tmp_del_list); + INIT_LIST_HEAD(&tmp_add_list); + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; - filter_list_len = pf->hw.aq.asq_buf_size / - sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_remove_macvlan_element_data), - GFP_KERNEL); - if (!del_list) - return -ENOMEM; - + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->changed) continue; @@ -1801,6 +1900,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) if (f->counter != 0) continue; f->changed = false; + + /* Move the element into temporary del_list */ + list_move_tail(&f->list, &tmp_del_list); + } + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter == 0) + continue; + f->changed = false; + + /* Clone MAC filter entry and add into temporary list */ + fclone = i40e_mac_filter_entry_clone(f); + if (!fclone) { + err_cond = true; + break; + } + list_add_tail(&fclone->list, &tmp_add_list); + } + + /* if failed to clone MAC filter entry - undo */ + if (err_cond) { + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + } + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (err_cond) + i40e_cleanup_add_list(&tmp_add_list); + } + + /* Now process 'del_list' outside the lock */ + if (!list_empty(&tmp_del_list)) { + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_remove_macvlan_element_data), + GFP_KERNEL); + if (!del_list) { + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo VSI's MAC filter entry element updates */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; + } + + list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { cmd_flags = 0; /* add to delete list */ @@ -1813,10 +1964,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) del_list[num_del].flags = cmd_flags; num_del++; - /* unlink from filter list */ - list_del(&f->list); - kfree(f); - /* flush a full buffer */ if (num_del == filter_list_len) { ret = i40e_aq_remove_macvlan(&pf->hw, @@ -1827,12 +1974,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) memset(del_list, 0, sizeof(*del_list)); if (ret && aq_err != I40E_AQ_RC_ENOENT) - dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, aq_err)); + dev_err(&pf->pdev->dev, + "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } + /* Release memory for MAC filter entries which were + * synced up 
with HW. + */ + list_del(&f->list); + kfree(f); } + if (num_del) { ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); @@ -1848,6 +2001,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) kfree(del_list); del_list = NULL; + } + + if (!list_empty(&tmp_add_list)) { /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / @@ -1855,16 +2011,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) add_list = kcalloc(filter_list_len, sizeof(struct i40e_aqc_add_macvlan_element_data), GFP_KERNEL); - if (!add_list) + if (!add_list) { + /* Purge element from temporary lists */ + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo add filter entries from VSI MAC filter list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; + } - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; + list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { - if (f->counter == 0) - continue; - f->changed = false; add_happened = true; cmd_flags = 0; @@ -1891,7 +2050,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) break; memset(add_list, 0, sizeof(*add_list)); } + /* Entries from tmp_add_list were cloned from MAC + * filter list, hence clean those cloned entries + */ + list_del(&f->list); + kfree(f); } + if (num_add) { ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, add_list, num_add, NULL); @@ -1920,6 +2085,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; + cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, vsi->seid, @@ -1934,6 +2100,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { bool cur_promisc; + cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); @@ -1945,7 +2112,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - i40e_do_reset_safe(pf, + if (grab_rtnl) + i40e_do_reset_safe(pf, + BIT(__I40E_PF_RESET_REQUESTED)); + else + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); } } else { @@ -1996,7 +2167,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) - i40e_sync_vsi_filters(pf->vsi[v]); + i40e_sync_vsi_filters(pf->vsi[v], true); } } @@ -2137,6 +2308,9 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(vsi->netdev); + /* Locked once because all functions invoked below iterates list*/ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) { add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, is_vf, is_netdev); @@ -2144,6 +2318,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2154,6 +2329,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2175,6 +2351,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) 
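/* each -ENOMEM path below now drops mac_filter_list_lock before returning */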
dev_info(&vsi->back->pdev->dev, "Could not add filter 0 for %pM\n", vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2183,27 +2360,33 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ if (vid > 0 && !vsi->info.pvid) { list_for_each_entry(f, &vsi->mac_filter_list, list) { - if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev)) { - i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); - add_f = i40e_add_filter(vsi, f->macaddr, - 0, is_vf, is_netdev); - if (!add_f) { - dev_info(&vsi->back->pdev->dev, - "Could not add filter 0 for %pM\n", - f->macaddr); - return -ENOMEM; - } + if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev)) + continue; + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, + 0, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filter because that + * function will lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; - return i40e_sync_vsi_filters(vsi); + return i40e_sync_vsi_filters(vsi, false); } /** @@ -2223,6 +2406,9 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(netdev); + /* Locked once because all functions invoked below iterate the list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); @@ -2253,6 +2439,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2261,21 +2448,27 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) list_for_each_entry(f, &vsi->mac_filter_list, list) { i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); + is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filter because that + * function will lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; - return i40e_sync_vsi_filters(vsi); + return i40e_sync_vsi_filters(vsi, false); } /** @@ -2609,8 +2802,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); i40e_flush(hw); - clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); - /* cache tail off for easier writes later */ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); @@ -2882,11 +3073,9 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi) static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - struct i40e_q_vector *q_vector; struct i40e_hw *hw = &pf->hw; u16 vector; int i, q; - u32 val; u32 qp; /* The interrupt indexing is offset by 1 in the PFINT_ITRn @@ -2896,7 +3085,9 @@ static void 
i40e_vsi_configure_msix(struct i40e_vsi *vsi) qp = vsi->base_queue; vector = vsi->base_vector; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - q_vector = vsi->q_vectors[i]; + struct i40e_q_vector *q_vector = vsi->q_vectors[i]; + + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), @@ -2905,10 +3096,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); + wr32(hw, I40E_PFINT_RATEN(vector - 1), + INTRL_USEC_TO_REG(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; q++) { + u32 val; + val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | @@ -2988,6 +3183,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) u32 val; /* set the ITR configuration */ + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); @@ -3046,24 +3242,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) } /** - * i40e_irq_dynamic_enable - Enable default interrupt generation settings - * @vsi: pointer to a vsi - * @vector: enable a particular Hw Interrupt vector - **/ -void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 val; - - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - /* skip the flush */ -} - -/** * i40e_irq_dynamic_disable - Disable default interrupt generation settings * @vsi: pointer to a vsi * @vector: disable a particular Hw Interrupt vector @@ -3091,7 +3269,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -3136,8 +3314,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector); if (err) { dev_info(&pf->pdev->dev, - "%s: request_irq failed, error: %d\n", - __func__, err); + "MSIX request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* assign the mask for this irq */ @@ -3202,8 +3379,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - for (i = vsi->base_vector; - i < (vsi->num_q_vectors + vsi->base_vector); i++) + for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { i40e_irq_dynamic_enable_icr0(pf); @@ -3262,9 +3438,12 @@ static irqreturn_t i40e_intr(int irq, void *data) /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* temporarily disable queue cause for NAPI processing */ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); + qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_RQCTL(0), qval); @@ -3273,7 +3452,7 @@ static irqreturn_t i40e_intr(int irq, void *data) wr32(hw, 
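/* the Tx queue cause was masked the same way in the context elided above */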
I40E_QINT_TQCTL(0), qval); if (!test_bit(__I40E_DOWN, &pf->state)) - napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); + napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { @@ -3434,10 +3613,9 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) i += tx_ring->count; tx_ring->next_to_clean = i; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { - i40e_irq_dynamic_enable(vsi, - tx_ring->q_vector->v_idx + vsi->base_vector); - } + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) + i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); + return budget > 0; } @@ -3575,14 +3753,12 @@ static void i40e_netpoll(struct net_device *netdev) if (test_bit(__I40E_DOWN, &vsi->state)) return; - pf->flags |= I40E_FLAG_IN_NETPOLL; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { i40e_intr(pf->pdev->irq, netdev); } - pf->flags &= ~I40E_FLAG_IN_NETPOLL; } #endif @@ -3663,9 +3839,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) ret = i40e_pf_txq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, - "%s: VSI seid %d Tx ring %d %sable timeout\n", - __func__, vsi->seid, pf_q, - (enable ? "en" : "dis")); + "VSI seid %d Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); break; } } @@ -3741,9 +3916,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) ret = i40e_pf_rxq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, - "%s: VSI seid %d Rx ring %d %sable timeout\n", - __func__, vsi->seid, pf_q, - (enable ? "en" : "dis")); + "VSI seid %d Rx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); break; } } @@ -4038,17 +4212,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && vsi->type == I40E_VSI_FCOE) { dev_dbg(&vsi->back->pdev->dev, - "%s: VSI seid %d skipping FCoE VSI disable\n", - __func__, vsi->seid); + "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); return; } set_bit(__I40E_NEEDS_RESTART, &vsi->state); - if (vsi->netdev && netif_running(vsi->netdev)) { + if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - } else { + else i40e_vsi_close(vsi); - } } /** @@ -4113,8 +4285,8 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) ret = i40e_pf_txq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, - "%s: VSI seid %d Tx ring %d disable timeout\n", - __func__, vsi->seid, pf_q); + "VSI seid %d Tx ring %d disable timeout\n", + vsi->seid, pf_q); return ret; } } @@ -4146,6 +4318,108 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) } #endif + +/** + * i40e_detect_recover_hung_queue - Function to detect and recover a hung queue + * @q_idx: TX queue number + * @vsi: Pointer to VSI struct + * + * This function checks the specified queue for the given VSI and detects a + * hung condition. It sets a hung bit since this is a two step process: before + * the next run of the service task, napi_poll may run and reset the 'hung' + * bit for the respective q_vector; if not, the hung condition remains + * unchanged and during the subsequent run this function issues a SW + * interrupt to recover from the hung condition. 
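+ * The interrupt is forced via i40e_force_wb() only when descriptors are
+ * still pending and the vector's INTENA bit reads clear.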
+ **/ +static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring = NULL; + struct i40e_pf *pf; + u32 head, val, tx_pending; + int i; + + pf = vsi->back; + + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (q_idx == vsi->tx_rings[i]->queue_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + + if (!tx_ring) + return; + + /* Read interrupt register */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + val = rd32(&pf->hw, + I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + + tx_ring->vsi->base_vector - 1)); + else + val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); + + head = i40e_get_head(tx_ring); + + tx_pending = i40e_get_tx_pending(tx_ring); + + /* Interrupts are disabled and TX pending is non-zero: + * trigger the SW interrupt (don't wait). Worst case there + * will be one extra interrupt that finds no work because the + * queues were already cleaned. + */ + if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) + i40e_force_wb(vsi, tx_ring->q_vector); +} + +/** + * i40e_detect_recover_hung - Function to detect and recover hung queues + * @pf: pointer to PF struct + * + * The LAN VSI has a netdev and the netdev has TX queues. This function + * checks each of those TX queues for a hang and triggers recovery by + * issuing a SW interrupt. + **/ +static void i40e_detect_recover_hung(struct i40e_pf *pf) +{ + struct net_device *netdev; + struct i40e_vsi *vsi; + int i; + + /* Only for LAN VSI */ + vsi = pf->vsi[pf->lan_vsi]; + + if (!vsi) + return; + + /* Make sure VSI state is not DOWN/RECOVERY_PENDING */ + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return; + + /* Make sure type is MAIN VSI */ + if (vsi->type != I40E_VSI_MAIN) + return; + + netdev = vsi->netdev; + if (!netdev) + return; + + /* Bail out if netif_carrier is not OK */ + if (!netif_carrier_ok(netdev)) + return; + + /* Go through the netdev's TX queues */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + + q = netdev_get_tx_queue(netdev, i); + if (q) + i40e_detect_recover_hung_queue(i, vsi); + } +} + /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP * @pf: pointer to PF @@ -4745,11 +5019,14 @@ out: * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message */ -static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) +void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { - char speed[SPEED_SIZE] = "Unknown"; - char fc[FC_SIZE] = "RX/TX"; + char *speed = "Unknown"; + char *fc = "Unknown"; + if (vsi->current_isup == isup) + return; + vsi->current_isup = isup; if (!isup) { netdev_info(vsi->netdev, "NIC Link is Down\n"); return; @@ -4766,19 +5043,19 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) switch (vsi->back->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: - strlcpy(speed, "40 Gbps", SPEED_SIZE); + speed = "40 G"; break; case I40E_LINK_SPEED_20GB: - strncpy(speed, "20 Gbps", SPEED_SIZE); + speed = "20 G"; break; case I40E_LINK_SPEED_10GB: - strlcpy(speed, "10 Gbps", SPEED_SIZE); + speed = "10 G"; break; case I40E_LINK_SPEED_1GB: - strlcpy(speed, "1000 Mbps", SPEED_SIZE); + speed = "1000 M"; break; case I40E_LINK_SPEED_100MB: - strncpy(speed, "100 Mbps", SPEED_SIZE); + speed = "100 M"; break; default: break; @@ -4786,20 +5063,20 @@ static void i40e_print_link_message(struct 
i40e_vsi *vsi, bool isup) switch (vsi->back->hw.fc.current_mode) { case I40E_FC_FULL: - strlcpy(fc, "RX/TX", FC_SIZE); + fc = "RX/TX"; break; case I40E_FC_TX_PAUSE: - strlcpy(fc, "TX", FC_SIZE); + fc = "TX"; break; case I40E_FC_RX_PAUSE: - strlcpy(fc, "RX", FC_SIZE); + fc = "RX"; break; default: - strlcpy(fc, "None", FC_SIZE); + fc = "None"; break; } - netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n", + netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", speed, fc); } @@ -5218,15 +5495,13 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) "VSI reinit requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { i40e_vsi_reinit_locked(pf->vsi[v]); clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); } } - - /* no further action needed, so return now */ - return; } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; @@ -5234,6 +5509,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) dev_info(&pf->pdev->dev, "VSI down requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { set_bit(__I40E_DOWN, &vsi->state); @@ -5241,13 +5517,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); } } - - /* no further action needed, so return now */ - return; } else { dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); - return; } } @@ -5303,8 +5575,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } - dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, - need_reconfig); + dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); return need_reconfig; } @@ -5331,16 +5602,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); - dev_dbg(&pf->pdev->dev, - "%s: LLDP event mib bridge type 0x%x\n", __func__, type); + dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; dev_dbg(&pf->pdev->dev, - "%s: LLDP event mib type %s\n", __func__, - type ? "remote" : "local"); + "LLDP event mib type %s\n", type ? 
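/* nonzero masked type denotes a remote MIB update */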
"remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, @@ -5525,7 +5794,9 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) **/ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) { + struct i40e_fdir_filter *filter; u32 fcnt_prog, fcnt_avail; + struct hlist_node *node; if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) return; @@ -5554,6 +5825,18 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); } } + + /* if hw had a problem adding a filter, delete it */ + if (pf->fd_inv > 0) { + hlist_for_each_entry_safe(filter, node, + &pf->fdir_filter_list, fdir_node) { + if (filter->fd_id == pf->fd_inv) { + hlist_del(&filter->fdir_node); + kfree(filter); + pf->fdir_pf_active_filters--; + } + } + } } #define I40E_MIN_FD_FLUSH_INTERVAL 10 @@ -5573,49 +5856,51 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) return; - if (time_after(jiffies, pf->fd_flush_timestamp + - (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { - /* If the flush is happening too quick and we have mostly - * SB rules we should not re-enable ATR for some time. - */ - min_flush_time = pf->fd_flush_timestamp - + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); - fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; + if (!time_after(jiffies, pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) + return; - if (!(time_after(jiffies, min_flush_time)) && - (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { - if (I40E_DEBUG_FD & pf->hw.debug_mask) - dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); - disable_atr = true; - } + /* If the flush is happening too quick and we have mostly SB rules we + * should not re-enable ATR for some time. 
+ */ + min_flush_time = pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); + fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; - pf->fd_flush_timestamp = jiffies; - pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; - /* flush all filters */ - wr32(&pf->hw, I40E_PFQF_CTL_1, - I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); - i40e_flush(&pf->hw); - pf->fd_flush_cnt++; - pf->fd_add_err = 0; - do { - /* Check FD flush status every 5-6msec */ - usleep_range(5000, 6000); - reg = rd32(&pf->hw, I40E_PFQF_CTL_1); - if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) - break; - } while (flush_wait_retry--); - if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { - dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); - } else { - /* replay sideband filters */ - i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); - if (!disable_atr) - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); - if (I40E_DEBUG_FD & pf->hw.debug_mask) - dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); - } + if (!(time_after(jiffies, min_flush_time)) && + (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + disable_atr = true; + } + + pf->fd_flush_timestamp = jiffies; + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + /* flush all filters */ + wr32(&pf->hw, I40E_PFQF_CTL_1, + I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + i40e_flush(&pf->hw); + pf->fd_flush_cnt++; + pf->fd_add_err = 0; + do { + /* Check FD flush status every 5-6msec */ + usleep_range(5000, 6000); + reg = rd32(&pf->hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } while (flush_wait_retry--); + if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { + dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); + } else { + /* replay sideband filters */ + i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + if (!disable_atr) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } + } /** @@ -5723,15 +6008,23 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) **/ static void i40e_link_event(struct i40e_pf *pf) { - bool new_link, old_link; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 new_link_speed, old_link_speed; + i40e_status status; + bool new_link, old_link; /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); - new_link = i40e_get_link_status(&pf->hw); + + status = i40e_get_link_status(&pf->hw, &new_link); + if (status) { + dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", + status); + return; + } + old_link_speed = pf->hw.phy.link_info_old.link_speed; new_link_speed = pf->hw.phy.link_info.link_speed; @@ -5760,68 +6053,6 @@ static void i40e_link_event(struct i40e_pf *pf) } /** - * i40e_check_hang_subtask - Check for hung queues and dropped interrupts - * @pf: board private structure - * - * Set the per-queue flags to request a check for stuck queues in the irq - * clean functions, then force interrupts to be sure the irq clean is called. 
- **/ -static void i40e_check_hang_subtask(struct i40e_pf *pf) -{ - int i, v; - - /* If we're down or resetting, just bail */ - if (test_bit(__I40E_DOWN, &pf->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) - return; - - /* for each VSI/netdev - * for each Tx queue - * set the check flag - * for each q_vector - * force an interrupt - */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - int armed = 0; - - if (!pf->vsi[v] || - test_bit(__I40E_DOWN, &vsi->state) || - (vsi->netdev && !netif_carrier_ok(vsi->netdev))) - continue; - - for (i = 0; i < vsi->num_queue_pairs; i++) { - set_check_for_tx_hang(vsi->tx_rings[i]); - if (test_bit(__I40E_HANG_CHECK_ARMED, - &vsi->tx_rings[i]->state)) - armed++; - } - - if (armed) { - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { - wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, - (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | - I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | - I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); - } else { - u16 vec = vsi->base_vector - 1; - u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | - I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | - I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); - for (i = 0; i < vsi->num_q_vectors; i++, vec++) - wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(vec), val); - } - i40e_flush(&vsi->back->hw); - } - } -} - -/** * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ @@ -5840,8 +6071,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - i40e_check_hang_subtask(pf); - i40e_link_event(pf); + if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) + i40e_link_event(pf); /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to @@ -5850,10 +6081,12 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_update_stats(pf->vsi[i]); - /* Update the stats for the active switching components */ - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i]) - i40e_update_veb_stats(pf->veb[i]); + if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { + /* Update the stats for the active switching components */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i]) + i40e_update_veb_stats(pf->veb[i]); + } i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); } @@ -6164,8 +6397,9 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; - dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", - veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + if (pf->hw.debug_mask & I40E_DEBUG_LAN) + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); if (veb->bridge_mode & BRIDGE_MODE_VEPA) i40e_disable_pf_switch_lb(pf); else @@ -6232,6 +6466,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (pf->vsi[v]->veb_idx == veb->idx) { struct i40e_vsi *vsi = pf->vsi[v]; + vsi->uplink_seid = veb->seid; ret = i40e_add_vsi(vsi); if (ret) { @@ -6296,12 +6531,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf) } } while (err); - if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || - (pf->hw.aq.fw_maj_ver < 2)) { - pf->hw.func_caps.num_msix_vectors++; - pf->hw.func_caps.num_msix_vectors_vf++; - } - if (pf->hw.debug_mask & I40E_DEBUG_USER) dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", @@ -6514,9 +6743,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } #endif /* CONFIG_I40E_DCB */ #ifdef I40E_FCOE - ret = i40e_init_pf_fcoe(pf); - if (ret) - dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret); + i40e_init_pf_fcoe(pf); #endif /* do basic switch setup */ @@ -6538,9 +6765,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* make sure our flow control settings are restored */ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); if (ret) - dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only @@ -6610,6 +6837,15 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. + */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); @@ -6808,6 +7044,7 @@ static void i40e_service_task(struct work_struct *work) return; } + i40e_detect_recover_hung(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); @@ -6991,6 +7228,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->idx = vsi_idx; vsi->rx_itr_setting = pf->rx_itr_default; vsi->tx_itr_setting = pf->tx_itr_default; + vsi->int_rate_limit = 0; vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 
pf->rss_table_size : 64; vsi->netdev_registered = false; @@ -7009,6 +7247,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); + /* Initialize VSI lock */ + spin_lock_init(&vsi->mac_filter_list_lock); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; goto unlock_pf; @@ -7566,7 +7806,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) "Cannot set RSS key, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - return ret; + goto config_rss_aq_out; } if (vsi->type == I40E_VSI_MAIN) @@ -7580,6 +7820,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); +config_rss_aq_out: + kfree(rss_lut); return ret; } @@ -7854,6 +8096,7 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | + I40E_FLAG_LINK_POLLING_ENABLED | I40E_FLAG_MSIX_ENABLED; if (iommu_present(&pci_bus_type)) @@ -7896,12 +8139,12 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - } else { + if (pf->flags & I40E_FLAG_MFP_ENABLED && + pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); - } + else + pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = @@ -7911,12 +8154,11 @@ static int i40e_sw_init(struct i40e_pf *pf) if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; + pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } #ifdef I40E_FCOE - err = i40e_init_pf_fcoe(pf); - if (err) - dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err); + i40e_init_pf_fcoe(pf); #endif /* I40E_FCOE */ #ifdef CONFIG_PCI_IOV @@ -7940,6 +8182,9 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; + /* By default FW has this off for performance reasons */ + pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); @@ -8119,9 +8364,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev, pf->vxlan_ports[idx] = 0; pf->pending_vxlan_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", - ntohs(port)); } else { netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); @@ -8273,13 +8515,15 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, * @seq: RTNL message seq # * @dev: the netdev being configured * @filter_mask: unused + * @nlflags: netlink flags passed in * * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. 
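The i40e_config_rss_aq() change above converts an early return into a jump to a common exit label so the rss_lut allocation is also freed on the error path. The same single-exit shape, reduced to a standalone sketch; set_key() and set_lut() are hypothetical stand-ins for the two admin-queue calls.

#include <stdlib.h>
#include <string.h>

static int set_key(void) { return 0; }
static int set_lut(const unsigned char *lut, size_t n)
{
	(void)lut; (void)n;
	return 0;
}

/* Both failure and success fall through one cleanup label, so the
 * buffer can never leak.
 */
static int configure_rss(size_t lut_size)
{
	int ret;
	unsigned char *lut = malloc(lut_size);

	if (!lut)
		return -1;
	memset(lut, 0, lut_size);

	ret = set_key();
	if (ret)
		goto out;	/* was an early return, leaking lut */

	ret = set_lut(lut, lut_size);
out:
	free(lut);
	return ret;
}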
**/ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask, int nlflags) + u32 __always_unused filter_mask, + int nlflags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8308,7 +8552,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff - * @netdev: This physical port's netdev + * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t i40e_features_check(struct sk_buff *skb, @@ -8389,6 +8633,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | @@ -8396,6 +8641,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_SCTP_CSUM | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | @@ -8421,17 +8667,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * default a MAC-VLAN filter that accepts any tagged packet * which must be replaced by a normal filter. */ - if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); + } } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", pf->vsi[pf->lan_vsi]->netdev->name); random_ether_addr(mac_addr); + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -8487,12 +8742,22 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) return 1; veb = pf->veb[vsi->veb_idx]; + if (!veb) { + dev_info(&pf->pdev->dev, + "There is no veb associated with the bridge\n"); + return -ENOENT; + } + /* Uplink is a bridge in VEPA mode */ - if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + if (veb->bridge_mode & BRIDGE_MODE_VEPA) { return 0; + } else { + /* Uplink is a bridge in VEB mode */ + return 1; + } - /* Uplink is a bridge in VEB mode */ - return 1; + /* VEPA is now default bridge, so return 0 */ + return 0; } /** @@ -8505,10 +8770,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; - struct i40e_mac_filter *f, *ftmp; + u8 laa_macaddr[ETH_ALEN]; + bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; + struct i40e_mac_filter *f, *ftmp; + u8 enabled_tc = 0x1; /* TC0 enabled */ int f_count = 0; @@ -8680,32 +8948,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) vsi->id = ctxt.vsi_number; } + spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { f->changed = true; f_count++; + /* Expected to have only one MAC filter entry for LAA in list */ if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - struct 
i40e_aqc_remove_macvlan_element_data element; + ether_addr_copy(laa_macaddr, f->macaddr); + found_laa_mac_filter = true; + } + } + spin_unlock_bh(&vsi->mac_filter_list_lock); - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, f->macaddr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - ret = i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - if (ret) { - /* some older FW has a different default */ - element.flags |= - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - } + if (found_laa_mac_filter) { + struct i40e_aqc_remove_macvlan_element_data element; - i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - f->macaddr, NULL); + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, laa_macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); } + + i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + laa_macaddr, NULL); } + if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; pf->flags |= I40E_FLAG_FILTER_SYNC; @@ -8768,10 +9045,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); } + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, f->is_vf, f->is_netdev); - i40e_sync_vsi_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + + i40e_sync_vsi_filters(vsi, false); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); @@ -8996,8 +9276,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, if (veb) { if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { dev_info(&vsi->back->pdev->dev, - "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", - __func__); + "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; } /* We come up by default in VEPA mode if SRIOV is not @@ -9647,6 +9926,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) } else { /* force a reset of TC and queue layout configurations */ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; + pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); @@ -9670,7 +9950,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + i40e_update_link_info(&pf->hw); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -9788,8 +10068,14 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) } pf->queues_left = queues_left; + dev_dbg(&pf->pdev->dev, + "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", + pf->hw.func_caps.num_tx_qp, + !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), + pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps, + pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); #ifdef I40E_FCOE - dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); + dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); #endif } @@ -9857,12 +10143,19 @@ static void i40e_print_features(struct i40e_pf *pf) } if (pf->flags & I40E_FLAG_DCB_CAPABLE) buf += sprintf(buf, "DCB "); +#if 
IS_ENABLED(CONFIG_VXLAN) + buf += sprintf(buf, "VxLAN "); +#endif if (pf->flags & I40E_FLAG_PTP) buf += sprintf(buf, "PTP "); #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) buf += sprintf(buf, "FCOE "); #endif + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + buf += sprintf(buf, "VEB "); + else + buf += sprintf(buf, "VEPA "); BUG_ON(buf > (string + INFO_STRING_LEN)); dev_info(&pf->pdev->dev, "%s\n", string); @@ -9883,14 +10176,15 @@ static void i40e_print_features(struct i40e_pf *pf) static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct i40e_aq_get_phy_abilities_resp abilities; - unsigned long ioremap_len; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; + u16 wol_nvm_bits; u16 link_status; - int err = 0; + int err; u32 len; u32 i; + u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) @@ -9936,15 +10230,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &pf->hw; hw->back = pf; - ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), - I40E_MAX_CSR_SPACE); + pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), + I40E_MAX_CSR_SPACE); - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len); + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", (unsigned int)pci_resource_start(pdev, 0), - (unsigned int)pci_resource_len(pdev, 0), err); + pf->ioremap_len, err); goto err_ioremap; } hw->vendor_id = pdev->vendor; @@ -10002,7 +10296,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->hw.fc.requested_mode = I40E_FC_NONE; err = i40e_init_adminq(hw); - dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); + + /* provide nvm, fw, api versions */ + dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + i40e_nvm_version_str(hw)); + if (err) { dev_info(&pdev->dev, "The driver for the device stopped because the NVM image is newer than expected. 
You must install the most recent version of the network driver.\n"); @@ -10102,10 +10402,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&pf->service_task, i40e_service_task); clear_bit(__I40E_SERVICE_SCHED, &pf->state); pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; - pf->link_check_timeout = jiffies; - /* WoL defaults to disabled */ - pf->wol_en = false; + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) + pf->wol_en = false; + else + pf->wol_en = true; device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); /* set up the main switch operations */ @@ -10146,6 +10449,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } + + /* Make sure flow control is set according to current settings */ + err = i40e_set_fc(hw, &set_fc_aq_fail, true); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_phy_cap\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on set_phy_config\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_link_info\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { @@ -10236,37 +10558,82 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_fcoe_vsi_setup(pf); #endif - /* Get the negotiated link width and speed from PCI config space */ - pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); +#define PCI_SPEED_SIZE 8 +#define PCI_WIDTH_SIZE 8 + /* Devices on the IOSF bus do not have this information + * and will report PCI Gen 1 x 1 by default so don't bother + * checking them. + */ + if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { + char speed[PCI_SPEED_SIZE] = "Unknown"; + char width[PCI_WIDTH_SIZE] = "Unknown"; - i40e_set_pci_config_data(hw, link_status); + /* Get the negotiated link width and speed from PCI config + * space + */ + pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, + &link_status); + + i40e_set_pci_config_data(hw, link_status); + + switch (hw->bus.speed) { + case i40e_bus_speed_8000: + strncpy(speed, "8.0", PCI_SPEED_SIZE); break; + case i40e_bus_speed_5000: + strncpy(speed, "5.0", PCI_SPEED_SIZE); break; + case i40e_bus_speed_2500: + strncpy(speed, "2.5", PCI_SPEED_SIZE); break; + default: + break; + } + switch (hw->bus.width) { + case i40e_bus_width_pcie_x8: + strncpy(width, "8", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x4: + strncpy(width, "4", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x2: + strncpy(width, "2", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x1: + strncpy(width, "1", PCI_WIDTH_SIZE); break; + default: + break; + } - dev_info(&pdev->dev, "PCI-Express: %s %s\n", - (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : - hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : - hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : - "Unknown"), - (hw->bus.width == i40e_bus_width_pcie_x8 ? 
"Width x8" : - hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : - hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : - hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" : - "Unknown")); + dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", + speed, width); - if (hw->bus.width < i40e_bus_width_pcie_x8 || - hw->bus.speed < i40e_bus_speed_8000) { - dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); - dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + if (hw->bus.width < i40e_bus_width_pcie_x8 || + hw->bus.speed < i40e_bus_speed_8000) { + dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); + dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + } } /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) - dev_info(&pf->pdev->dev, - "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* get the supported phy types from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); + if (err) + dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type); + + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. 
+ */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* print a string summarizing features */ i40e_print_features(pf); @@ -10314,6 +10681,7 @@ err_dma: static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; i40e_status ret_code; int i; @@ -10321,6 +10689,10 @@ static void i40e_remove(struct pci_dev *pdev) i40e_ptp_stop(pf); + /* Disable RSS in hw */ + wr32(hw, I40E_PFQF_HENA(0), 0); + wr32(hw, I40E_PFQF_HENA(1), 0); + /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); @@ -10437,7 +10809,7 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) int err; u32 reg; - dev_info(&pdev->dev, "%s\n", __func__); + dev_dbg(&pdev->dev, "%s\n", __func__); if (pci_enable_device_mem(pdev)) { dev_info(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); @@ -10477,13 +10849,13 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); - dev_info(&pdev->dev, "%s\n", __func__); + dev_dbg(&pdev->dev, "%s\n", __func__); if (test_bit(__I40E_SUSPENDED, &pf->state)) return; rtnl_lock(); i40e_handle_reset_warning(pf); - rtnl_lock(); + rtnl_unlock(); } /** @@ -10569,9 +10941,7 @@ static int i40e_resume(struct pci_dev *pdev) err = pci_enable_device_mem(pdev); if (err) { - dev_err(&pdev->dev, - "%s: Cannot enable PCI device from suspend\n", - __func__); + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 9b83abc0e774..6100cdd9ad13 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -290,9 +290,18 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - if (hw->mac.type == I40E_MAC_X722) - return i40e_read_nvm_word_aq(hw, offset, data); - return i40e_read_nvm_word_srctl(hw, offset, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + } + return ret_code; } /** @@ -397,9 +406,19 @@ read_nvm_buffer_aq_exit: i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { - if (hw->mac.type == I40E_MAC_X722) - return i40e_read_nvm_buffer_aq(hw, offset, words, data); - return i40e_read_nvm_buffer_srctl(hw, offset, words, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + return ret_code; } /** @@ -418,6 +437,10 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, bool last_command) { i40e_status ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; /* Here we are checking the SR limit only for the flat memory model. 
* We cannot do it for the module-based model, as we did not acquire @@ -443,7 +466,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ - data, last_command, NULL); + data, last_command, &cmd_details); return ret_code; } @@ -461,7 +484,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { - i40e_status ret_code = 0; + i40e_status ret_code; struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; @@ -541,13 +564,16 @@ i40e_calc_nvm_checksum_exit: **/ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code; u16 checksum; + __le16 le_sum; ret_code = i40e_calc_nvm_checksum(hw, &checksum); - if (!ret_code) + if (!ret_code) { + le_sum = cpu_to_le16(checksum); ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, - 1, &checksum, true); + 1, &le_sum, true); + } return ret_code; } @@ -592,25 +618,31 @@ i40e_validate_nvm_checksum_exit: static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *errno); static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno); + int *perrno); static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno); + int *perrno); static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); +static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static inline u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); @@ -620,7 +652,7 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } -static char *i40e_nvm_update_state_str[] = { +static const char * const i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", "I40E_NVMUPD_READ_SNT", @@ -634,6 +666,9 @@ static char *i40e_nvm_update_state_str[] = { "I40E_NVMUPD_CSUM_CON", "I40E_NVMUPD_CSUM_SA", "I40E_NVMUPD_CSUM_LCB", + "I40E_NVMUPD_STATUS", + "I40E_NVMUPD_EXEC_AQ", + "I40E_NVMUPD_GET_AQ_RESULT", }; /** @@ -641,30 +676,60 @@ static char *i40e_nvm_update_state_str[] = { * @hw: pointer to hardware structure * @cmd: pointer to nvm update command * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * Dispatches command depending on what update state is current **/ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { i40e_status status; + enum i40e_nvmupd_cmd upd_cmd; /* assume success */ - *errno = 0; + *perrno = 0; + + /* 
early check for status command and debug msgs */ + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done); + + if (upd_cmd == I40E_NVMUPD_INVALID) { + *perrno = -EFAULT; + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *perrno); + } + + /* a status request returns immediately rather than + * going into the state machine + */ + if (upd_cmd == I40E_NVMUPD_STATUS) { + bytes[0] = hw->nvmupd_state; + return 0; + } switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT: - status = i40e_nvmupd_state_init(hw, cmd, bytes, errno); + status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_READING: - status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno); + status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_WRITING: - status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno); + status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_INIT_WAIT: + case I40E_NVMUPD_STATE_WRITE_WAIT: + status = I40E_ERR_NOT_READY; + *perrno = -EBUSY; break; default: @@ -672,7 +737,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: no such state %d\n", hw->nvmupd_state); status = I40E_NOT_SUPPORTED; - *errno = -ESRCH; + *perrno = -ESRCH; break; } return status; @@ -683,28 +748,28 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * Process legitimate commands of the Init state and conditionally set next * state. Reject all other commands. 
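The dispatcher above answers a STATUS request immediately and, in the new WAIT states, refuses further work with -EBUSY until the pending admin-queue operation completes. A compact model of that control flow; the transitions here are deliberately simplified relative to the driver's full command set (SA/CON/LCB variants, checksum, exec-AQ).

#include <errno.h>

enum upd_state {
	ST_INIT, ST_READING, ST_WRITING, ST_INIT_WAIT, ST_WRITE_WAIT
};
enum upd_cmd { CMD_STATUS, CMD_READ_SNT, CMD_WRITE_SNT, CMD_INVALID };

/* Returns 0 on success, a negative errno otherwise; *state may advance. */
static int nvmupd_dispatch(enum upd_state *state, enum upd_cmd cmd)
{
	if (cmd == CMD_INVALID)
		return -EFAULT;
	if (cmd == CMD_STATUS)	/* answered without entering the machine */
		return 0;

	switch (*state) {
	case ST_INIT:
		*state = (cmd == CMD_READ_SNT) ? ST_READING : ST_INIT_WAIT;
		return 0;
	case ST_READING:
		return (cmd == CMD_READ_SNT) ? 0 : -ESRCH;
	case ST_WRITING:
		return (cmd == CMD_WRITE_SNT) ? 0 : -ESRCH;
	case ST_INIT_WAIT:
	case ST_WRITE_WAIT:
		return -EBUSY;	/* a previous AQ operation is still pending */
	}
	return -ESRCH;
}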
**/ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); } break; @@ -712,10 +777,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); if (status) i40e_release_nvm(hw); else @@ -726,70 +791,83 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_ERA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_erase(hw, cmd, errno); - if (status) + status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); + if (status) { i40e_release_nvm(hw); - else + } else { hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } } break; case I40E_NVMUPD_WRITE_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - if (status) + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { i40e_release_nvm(hw); - else + } else { hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } } break; case I40E_NVMUPD_WRITE_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) i40e_release_nvm(hw); else - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_CSUM_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { - *errno = hw->aq.asq_last_status ? + *perrno = hw->aq.asq_last_status ? 
i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; i40e_release_nvm(hw); } else { hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; + case I40E_NVMUPD_EXEC_AQ: + status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_GET_AQ_RESULT: + status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); + break; + default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in init state\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_ERR_NVM; - *errno = -ESRCH; + *perrno = -ESRCH; break; } return status; @@ -800,28 +878,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands. **/ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { - i40e_status status; + i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: case I40E_NVMUPD_READ_CON: - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_READ_LCB: - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; @@ -831,7 +909,7 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, "NVMUPD: bad cmd %s in reading state.\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; - *errno = -ESRCH; + *perrno = -ESRCH; break; } return status; @@ -842,55 +920,68 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands **/ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { - i40e_status status; + i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; bool retry_attempt = false; - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (!status) + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; break; case I40E_NVMUPD_WRITE_LCB: - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - if (!status) + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { + *perrno = hw->aq.asq_last_status ? 
+ i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } break; case I40E_NVMUPD_CSUM_CON: status = i40e_update_nvm_checksum(hw); if (status) { - *errno = hw->aq.asq_last_status ? + *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_CSUM_LCB: status = i40e_update_nvm_checksum(hw); - if (status) - *errno = hw->aq.asq_last_status ? + if (status) { + *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; - else + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } break; default: @@ -898,7 +989,7 @@ retry: "NVMUPD: bad cmd %s in writing state.\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; - *errno = -ESRCH; + *perrno = -ESRCH; break; } @@ -941,21 +1032,22 @@ retry: * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * Return one of the valid command types or I40E_NVMUPD_INVALID **/ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno) + int *perrno) { enum i40e_nvmupd_cmd upd_cmd; - u8 transaction; + u8 module, transaction; /* anything that doesn't match a recognized case is an error */ upd_cmd = I40E_NVMUPD_INVALID; transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); /* limits on data size */ if ((cmd->data_size < 1) || @@ -963,7 +1055,7 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_validate_command data_size %d\n", cmd->data_size); - *errno = -EFAULT; + *perrno = -EFAULT; return I40E_NVMUPD_INVALID; } @@ -982,6 +1074,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, case I40E_NVM_SA: upd_cmd = I40E_NVMUPD_READ_SA; break; + case I40E_NVM_EXEC: + if (module == 0xf) + upd_cmd = I40E_NVMUPD_STATUS; + else if (module == 0) + upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; } break; @@ -1011,21 +1109,155 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, case (I40E_NVM_CSUM|I40E_NVM_LCB): upd_cmd = I40E_NVMUPD_CSUM_LCB; break; + case I40E_NVM_EXEC: + if (module == 0) + upd_cmd = I40E_NVMUPD_EXEC_AQ; + break; } break; } - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", - i40e_nvm_update_state_str[upd_cmd], - hw->nvmupd_state, - hw->aq.nvm_release_on_done); - if (upd_cmd == I40E_NVMUPD_INVALID) { - *errno = -EFAULT; + return upd_cmd; +} + +/** + * i40e_nvmupd_exec_aq - Run an AQ command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + i40e_status status; 
+ struct i40e_aq_desc *aq_desc; + u32 buff_size = 0; + u8 *buff = NULL; + u32 aq_desc_len; + u32 aq_data_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + aq_desc_len = sizeof(struct i40e_aq_desc); + memset(&hw->nvm_wb_desc, 0, aq_desc_len); + + /* get the aq descriptor */ + if (cmd->data_size < aq_desc_len) { i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_validate_command returns %d errno %d\n", - upd_cmd, *errno); + "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", + cmd->data_size, aq_desc_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; } - return upd_cmd; + aq_desc = (struct i40e_aq_desc *)bytes; + + /* if data buffer needed, make sure it's ready */ + aq_data_len = cmd->data_size - aq_desc_len; + buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); + if (buff_size) { + if (!hw->nvm_buff.va) { + status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, + hw->aq.asq_buf_size); + if (status) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", + status); + } + + if (hw->nvm_buff.va) { + buff = hw->nvm_buff.va; + memcpy(buff, &bytes[aq_desc_len], aq_data_len); + } + } + + /* and away we go! */ + status = i40e_asq_send_command(hw, aq_desc, buff, + buff_size, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_exec_aq err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + int remainder; + u8 *buff; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); + + /* check offset range */ + if (cmd->offset > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", + __func__, cmd->offset, aq_total_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; + } + + /* check copylength range */ + if (cmd->data_size > (aq_total_len - cmd->offset)) { + int new_len = aq_total_len - cmd->offset; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, new_len); + cmd->data_size = new_len; + } + + remainder = cmd->data_size; + if (cmd->offset < aq_desc_len) { + u32 len = aq_desc_len - cmd->offset; + + len = min(len, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", + __func__, cmd->offset, cmd->offset + len); + + buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; + memcpy(bytes, buff, len); + + bytes += len; + remainder -= len; + buff = hw->nvm_buff.va; + } else { + buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); + } + + if (remainder > 0) { + int start_byte = buff - (u8 *)hw->nvm_buff.va; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", + __func__, start_byte, start_byte + remainder); + memcpy(bytes, buff, remainder); + } + + return 0; } /** @@ -1033,14 +1265,15 @@ 
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { + struct i40e_asq_cmd_details cmd_details; i40e_status status; u8 module, transaction; bool last; @@ -1049,8 +1282,11 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, module = i40e_nvmupd_get_module(cmd->config); last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - bytes, last, NULL); + bytes, last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", @@ -1058,7 +1294,7 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read status %d aq %d\n", status, hw->aq.asq_last_status); - *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; @@ -1068,23 +1304,28 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, * i40e_nvmupd_nvm_erase - Erase an NVM module * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno) + int *perrno) { i40e_status status = 0; + struct i40e_asq_cmd_details cmd_details; u8 module, transaction; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - last, NULL); + last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", @@ -1092,7 +1333,7 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase status %d aq %d\n", status, hw->aq.asq_last_status); - *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; @@ -1103,15 +1344,16 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { i40e_status status = 0; + struct i40e_asq_cmd_details cmd_details; u8 module, transaction; bool last; @@ -1119,8 +1361,12 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); + 
memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + status = i40e_aq_update_nvm(hw, module, cmd->offset, - (u16)cmd->data_size, bytes, last, NULL); + (u16)cmd->data_size, bytes, last, + &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", @@ -1128,7 +1374,7 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write status %d aq %d\n", status, hw->aq.asq_last_status); - *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index ad802dd0f67a..5b6feb7edeb1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -35,7 +35,7 @@ #include <linux/highuid.h> /* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include <asm-generic/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-lo-hi.h> /* File to be the magic between shared code and * actual OS primitives diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index dcb72a8ee8e5..bb9d583e5416 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -58,8 +58,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); @@ -258,7 +258,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw); i40e_status i40e_pf_reset(struct i40e_hw *hw); void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); -bool i40e_get_link_status(struct i40e_hw *hw); +i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up); +i40e_status i40e_update_link_info(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, @@ -321,4 +322,6 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 8c40d6ea15fd..565ca7c835bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -618,9 +618,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) /* Attempt to register the clock before enabling the hardware. 
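Among the prototype changes a few hunks up, i40e_get_link_status() moves from returning the link state itself to returning a status code with the state in an out-parameter, so an admin-queue failure is no longer indistinguishable from link-down and the caller in i40e_link_event() can keep its previous state. The shape of that conversion, with placeholder types:

#include <stdbool.h>

/* Hypothetical miniature of the conversion: the hw fields are stand-ins. */
struct hw {
	bool aq_ok;	/* can the admin queue be queried right now? */
	bool link;	/* last state reported by firmware */
};

/* Before: bool get_link_status(struct hw *hw) -- a query failure had
 * to be reported as "link down". After: status return plus
 * out-parameter, so the caller can log the failure and carry on.
 */
static int get_link_status(struct hw *hw, bool *link_up)
{
	if (!hw->aq_ok)
		return -1;	/* *link_up intentionally left untouched */
	*link_up = hw->link;
	return 0;
}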
*/ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); - if (IS_ERR(pf->ptp_clock)) { + if (IS_ERR(pf->ptp_clock)) return PTR_ERR(pf->ptp_clock); - } /* clear the hwtstamp settings here during clock create, instead of * during regular init, so that we can maintain settings across a @@ -675,8 +674,8 @@ void i40e_ptp_init(struct i40e_pf *pf) struct timespec64 ts; u32 regval; - dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, - netdev->name); + if (pf->hw.debug_mask & I40E_DEBUG_LAN) + dev_info(&pf->pdev->dev, "PHC enabled\n"); pf->flags |= I40E_FLAG_PTP; /* Ensure the clocks are running. */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 738aca68f665..635b3ac17877 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -465,10 +465,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id); if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", - rx_desc->wb.qword0.hi_dword.fd_id); + pf->fd_inv); /* Check if the programming error is for ATR. * If so, auto disable ATR and set a state for @@ -601,27 +602,13 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) } /** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - -/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -static u32 i40e_get_tx_pending(struct i40e_ring *ring) +u32 i40e_get_tx_pending(struct i40e_ring *ring) { u32 head, tail; @@ -635,50 +622,6 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) return 0; } -/** - * i40e_check_tx_hang - Is there a hang in the Tx queue - * @tx_ring: the ring of descriptors - **/ -static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) -{ - u32 tx_done = tx_ring->stats.packets; - u32 tx_done_old = tx_ring->tx_stats.tx_done_old; - u32 tx_pending = i40e_get_tx_pending(tx_ring); - struct i40e_pf *pf = tx_ring->vsi->back; - bool ret = false; - - clear_check_for_tx_hang(tx_ring); - - /* Check for a hung queue, but be thorough. This verifies - * that a transmit has been completed since the previous - * check AND there is at least one packet pending. The - * ARMED bit is set to indicate a potential hang. The - * bit is cleared if a pause frame is received to remove - * false hang detection due to PFC or 802.3x frames. By - * requiring this to fail twice we avoid races with - * PFC clearing the ARMED bit and conditions where we - * run the check_tx_hang logic with a transmit completion - * pending but without time to complete it yet. 
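The deleted heuristic above armed a hang check only after two consecutive no-progress observations; the replacement logic in the next hunk instead forces a descriptor write-back whenever a short tail of descriptors is pending under NAPI. Its predicate, isolated into a sketch:

#include <stdbool.h>
#include <stdint.h>

#define WB_STRIDE 0x3	/* hardware writes back every 4th descriptor */

/* True when 1..3 descriptors (less than one full write-back stride)
 * are pending: that tail would otherwise sit unreported while the
 * queue stays in NAPI polling with interrupts suppressed.
 */
static bool should_force_writeback(int budget, uint32_t pending,
				   bool ring_down, bool ring_empty)
{
	return budget && pending / (WB_STRIDE + 1) == 0 && pending != 0 &&
	       !ring_down && !ring_empty;
}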
- */ - if ((tx_done_old == tx_done) && tx_pending) { - /* make sure it is true for two checks in a row */ - ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, - &tx_ring->state); - } else if (tx_done_old == tx_done && - (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { - if (I40E_DEBUG_FLOW & pf->hw.debug_mask) - dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", - tx_pending, tx_ring->queue_index); - pf->tx_sluggish_count++; - } else { - /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_done; - clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } - - return ret; -} - #define WB_STRIDE 0x3 /** @@ -784,42 +727,21 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; - /* check to see if there are any non-cache aligned descriptors - * waiting to be written back, and kick the hardware to force - * them to be written back in case of napi polling - */ - if (budget && - !((i & WB_STRIDE) == WB_STRIDE) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - else - tx_ring->arm_wb = false; - - if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { - /* schedule immediate reset if we believe we hung */ - dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" - " VSI <%d>\n" - " Tx Queue <%d>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n", - tx_ring->vsi->seid, - tx_ring->queue_index, - tx_ring->next_to_use, i); - - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - dev_info(tx_ring->dev, - "tx hang detected on queue %d, reset requested\n", - tx_ring->queue_index); - - /* do not fire the reset immediately, wait for the stack to - * decide we are truly stuck, also prevents every queue from - * simultaneously requesting a reset + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + unsigned int j = 0; + + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. */ + j = i40e_get_tx_pending(tx_ring); - /* the adapter is about to reset, no point in enabling polling */ - budget = 1; + if (budget && + ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; } netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, @@ -851,7 +773,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) * @q_vector: the vector on which to force writeback * **/ -static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; @@ -893,6 +815,8 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. 
The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -901,21 +825,32 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. */ - bytes_per_int = rc->total_bytes / rc->itr; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -928,35 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - if (bytes_per_int <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } + + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. 
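The rework here divides the byte count by the service interval in microseconds (bytes per usec is numerically MB/s) and then steps between latency ranges, with the packet-rate override described in the comment below applied to the Rx side. The classification step alone, with thresholds taken from that comment and simplified types:

#include <stdint.h>

enum latency { LAT_LOWEST, LAT_LOW, LAT_BULK, LAT_ULTRA };

#define RX_ULTRA_PPS 40000u	/* just beyond the low-latency packet rate */

/* Pick the next latency range from throughput and packet rate.
 * The 10 and 20 MB/s thresholds mirror the driver's comment.
 */
static enum latency next_latency(enum latency cur, uint32_t bytes,
				 uint32_t packets, uint32_t usecs)
{
	enum latency next = cur;
	uint32_t mbps;

	if (!usecs || !packets)
		return cur;	/* nothing measured this interval */

	mbps = bytes / usecs;	/* bytes/us is numerically MB/s */

	switch (cur) {
	case LAT_LOWEST:
		if (mbps > 10)
			next = LAT_LOW;
		break;
	case LAT_LOW:
		if (mbps > 20)
			next = LAT_BULK;
		else if (mbps <= 10)
			next = LAT_LOWEST;
		break;
	default:	/* BULK and ULTRA fall back to LOW together */
		if (mbps <= 20)
			next = LAT_LOW;
		break;
	}

	/* Rx-only override for very high small-packet rates. */
	if ((uint64_t)packets * 1000000u / usecs > RX_ULTRA_PPS)
		next = LAT_ULTRA;

	return next;
}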
+ */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) - rc->itr = new_itr; - rc->total_bytes = 0; rc->total_packets = 0; + + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } /** @@ -1002,6 +954,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) if (!dev) return -ENOMEM; + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_bi); bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) @@ -1162,6 +1116,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) struct device *dev = rx_ring->dev; int bi_size; + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_bi); bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) @@ -1342,16 +1298,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; - struct i40e_vsi *vsi = rx_ring->vsi; - u64 flags = vsi->back->flags; if (vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (flags & I40E_FLAG_IN_NETPOLL) - netif_rx(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -1518,7 +1469,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_node_id(); + const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; @@ -1596,6 +1547,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) cleaned_count++; if (rx_hbo || rx_sph) { int len; + if (rx_hbo) len = I40E_RX_HDR_SIZE; else @@ -1781,9 +1733,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? 
- */ continue; } @@ -1828,6 +1777,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_PFINT_DYN_CTLN + /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about @@ -1838,56 +1802,69 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - u16 old_itr; + bool rx = false, tx = false; + u32 rxval, txval; int vector; - u32 val; vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_RX_ITR << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (q_vector->rx.itr << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); - } else { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - } else { - i40e_irq_dynamic_enable(vsi, - q_vector->v_idx + vsi->base_vector); + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_TX_ITR << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (q_vector->tx.itr << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); - } else { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), val); - } else { - i40e_irq_dynamic_enable(vsi, - q_vector->v_idx + vsi->base_vector); + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); + } + + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); + + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + 
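/* Register layout assumed by i40e_buildreg_itr() for the DYN_CTLN
 * registers, sketched from the masks used above (illustrative only):
 * bit 0 INTENA, bit 1 CLEARPBA, bits 4:3 ITR_INDX, bits 16:5 INTERVAL,
 * bit 31 INTENA_MSK. With bit 31 set, hardware ignores the INTENA bit
 * in this write, so the store below updates only the Rx ITR interval.
 */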
wr32(hw, INTREG(vector - 1), rxval); + } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } /** @@ -1908,7 +1885,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; - int cleaned; + int work_done = 0; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1921,24 +1898,34 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb |= ring->arm_wb; + ring->arm_wb = false; } + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { + int cleaned; + if (ring_is_ps_enabled(ring)) cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); else cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + + work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ clean_complete &= (budget_per_ring != cleaned); } /* If work not completed, return budget and polling will return */ if (!clean_complete) { +tx_only: if (arm_wb) i40e_force_wb(vsi, q_vector); return budget; @@ -1948,7 +1935,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) q_vector->arm_wb_state = false; /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { i40e_update_enable_itr(vsi, q_vector); } else { /* Legacy mode */ @@ -2156,6 +2143,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) return -EINVAL; @@ -2199,6 +2187,7 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header + * @cd_type_cmd_tso_mss: ptr to u64 object * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error @@ -2258,6 +2247,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information + * @cd_type_cmd_tso_mss: ptr to u64 object * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ @@ -2300,6 +2290,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, @@ -2324,6 +2315,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; + case IPPROTO_GRE: + l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; + break; default: return; } @@ -2581,6 +2575,9 @@ 
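Taken together, the rewritten i40e_update_enable_itr() issues at most two register writes per poll cycle; a condensed sketch of that sequence, using only names from the hunks above:

	/* 1) if the Rx ITR moved, write it with the enable effect
	 *    masked off (the rxval |= BIT(31) step above)
	 */
	if (rx)
		wr32(hw, INTREG(vector - 1), rxval | BIT(31));
	/* 2) the final write carries the Tx ITR (or ITR_NONE when in
	 *    countdown mode) and, with bit 31 clear, actually
	 *    re-enables the MSI-X vector
	 */
	if (!test_bit(__I40E_DOWN, &vsi->state))
		wr32(hw, INTREG(vector - 1), txval);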
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 td_tag = 0; dma_addr_t dma; u16 gso_segs; + u16 desc_count = 0; + bool tail_bump = true; + bool do_rs = false; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; @@ -2621,6 +2618,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -2640,6 +2639,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -2654,34 +2655,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - /* Place RS bit on last descriptor of any packet that spans across the - * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. - */ - if (((i & WB_STRIDE) != WB_STRIDE) && - (first <= &tx_ring->tx_bi[i]) && - (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << - I40E_TXD_QW1_CMD_SHIFT); - } else { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << - I40E_TXD_QW1_CMD_SHIFT); - } - - netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index), - first->bytecount); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; @@ -2691,15 +2664,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* Algorithm to optimize tail and RS bit setting: + * if xmit_more is supported + * if xmit_more is true + * do not update tail and do not mark RS bit. + * if xmit_more is false and last xmit_more was false + * if every packet spanned less than 4 desc + * then set RS bit on 4th packet and update tail + * on every packet + * else + * update tail and set RS bit on every packet. + * if xmit_more is false and last_xmit_more was true + * update tail and set RS bit. + * + * Optimization: wmb to be issued only in case of tail update. + * Also optimize the Descriptor WB path for RS bit with the same + * algorithm. + * + * Note: If there are less than 4 packets + * pending and interrupts were disabled the service task will + * trigger a force WB. + */ + if (skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) { + tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + tail_bump = false; + } else if (!skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)) && + (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && + (tx_ring->packet_stride < WB_STRIDE) && + (desc_count < WB_STRIDE)) { + tx_ring->packet_stride++; + } else { + tx_ring->packet_stride = 0; + tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + do_rs = true; + } + if (do_rs) + tx_ring->packet_stride = 0; + + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)(do_rs ? 
I40E_TXD_CMD : + I40E_TX_DESC_CMD_EOP) << + I40E_TXD_QW1_CMD_SHIFT); + /* notify HW of packet */ - if (!skb->xmit_more || - netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index))) - writel(i, tx_ring->tail); - else + if (!tail_bump) prefetchw(tx_desc + 1); + if (tail_bump) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, tx_ring->tail); + } + return; dma_error: @@ -2776,6 +2806,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u8 hdr_len = 0; int tsyn; int tso; + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -2808,10 +2839,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - if (i40e_chk_linearize(skb, tx_flags)) + if (i40e_chk_linearize(skb, tx_flags)) { if (skb_linearize(skb)) goto out_drop; - + tx_ring->tx_stats.tx_linearize++; + } skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index f1385a1989fa..6779fb771d6a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -32,11 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -44,6 +47,15 @@ #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) +/* 0x40 is the enable bit for interrupt rate limiting, and must be set if + * the value of the rate limit is non-zero + */ +#define INTRL_ENA BIT(6) +#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) +#define INTRL_USEC_TO_REG(set) ((set) ? 
((set) >> 2) | INTRL_ENA : 0) +#define I40E_INTRL_8K 125 /* 8000 ints/sec */ +#define I40E_INTRL_62K 16 /* 62500 ints/sec */ +#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -79,12 +91,12 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ @@ -165,6 +177,7 @@ struct i40e_tx_buffer { }; unsigned int bytecount; unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; @@ -188,6 +201,7 @@ struct i40e_tx_queue_stats { u64 restart_queue; u64 tx_busy; u64 tx_done_old; + u64 tx_linearize; }; struct i40e_rx_queue_stats { @@ -199,8 +213,6 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_TX_DETECT_HANG, - __I40E_HANG_CHECK_ARMED, __I40E_RX_PS_ENABLED, __I40E_RX_16BYTE_DESC_ENABLED, }; @@ -211,12 +223,6 @@ enum i40e_ring_state_t { set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define clear_ring_ps_enabled(ring) \ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define check_for_tx_hang(ring) \ - test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define set_check_for_tx_hang(ring) \ - set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define clear_check_for_tx_hang(ring) \ - clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) #define ring_is_16byte_desc_enabled(ring) \ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define set_ring_16byte_desc_enabled(ring) \ @@ -264,10 +270,12 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u8 packet_stride; u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) #define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) +#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) /* stats structs */ struct i40e_queue_stats stats; @@ -290,6 +298,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { @@ -326,4 +335,20 @@ int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring); int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags); #endif +void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); +u32 i40e_get_tx_pending(struct i40e_ring *ring); + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} #endif /* _I40E_TXRX_H_ */ diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 4842239ee777..dd2da356d9a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -33,29 +33,7 @@ #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) +#include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) @@ -158,14 +136,14 @@ enum i40e_set_fc_aq_failures { }; enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1, - I40E_VSI_VMDQ2, - I40E_VSI_CTRL, - I40E_VSI_FCOE, - I40E_VSI_MIRROR, - I40E_VSI_SRIOV, - I40E_VSI_FDIR, + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, I40E_VSI_TYPE_UNKNOWN }; @@ -189,16 +167,65 @@ struct i40e_link_status { bool crc_enable; u8 pacing; u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +enum i40e_aq_capabilities_phy_type { + I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), + I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), + I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), + I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), + I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), + I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), + I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), + I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), + I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), + I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), + I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), + I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), + I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), + I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), + I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), + I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), + 
I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), + I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), + I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), + I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), + I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), + I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), + I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), + I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), + I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = + BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), + I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; - u32 autoneg_advertised; - u32 phy_id; - u32 module_type; bool get_link_info; enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + enum i40e_aq_capabilities_phy_type phy_types; }; #define I40E_HW_CAP_MAX_GPIO 30 @@ -287,6 +314,7 @@ struct i40e_nvm_info { bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ @@ -305,12 +333,17 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_CSUM_CON, I40E_NVMUPD_CSUM_SA, I40E_NVMUPD_CSUM_LCB, + I40E_NVMUPD_STATUS, + I40E_NVMUPD_EXEC_AQ, + I40E_NVMUPD_GET_AQ_RESULT, }; enum i40e_nvmupd_state { I40E_NVMUPD_STATE_INIT, I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING + I40E_NVMUPD_STATE_WRITING, + I40E_NVMUPD_STATE_INIT_WAIT, + I40E_NVMUPD_STATE_WRITE_WAIT, }; /* nvm_access definition and its masks/shifts need to be accessible to @@ -329,6 +362,7 @@ enum i40e_nvmupd_state { #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) #define I40E_NVM_ERA 0x4 #define I40E_NVM_CSUM 0x8 +#define I40E_NVM_EXEC 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) @@ -409,6 +443,8 @@ struct i40e_fc_info { #define I40E_APP_PROTOID_FIP 0x8914 #define I40E_APP_SEL_ETHTYPE 0x1 #define I40E_APP_SEL_TCPIP 0x2 +#define I40E_CEE_APP_SEL_ETHTYPE 0x0 +#define I40E_CEE_APP_SEL_TCPIP 0x1 /* CEE or IEEE 802.1Qaz ETS Configuration data */ struct i40e_dcb_ets_config { @@ -439,6 +475,8 @@ struct i40e_dcbx_config { u8 dcbx_mode; #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 + u8 app_mode; +#define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; @@ -492,6 +530,8 @@ struct i40e_hw { /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; + struct i40e_aq_desc nvm_wb_desc; + struct i40e_virt_mem nvm_buff; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ @@ -500,8 +540,12 @@ struct i40e_hw { u16 dcbx_status; /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; - struct i40e_dcbx_config remote_dcbx_config; + struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ + struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ + struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ + +#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) + u64 flags; /* debug mask */ u32 debug_mask; @@ -1024,8 +1068,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ - BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define 
I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1193,6 +1237,8 @@ struct i40e_hw_port_stats { #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 +#define I40E_SR_BOOT_CONFIG_PTR 0x17 +#define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 0f8d4156f8b1..ae879826084b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -81,7 +81,6 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 17, - I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue @@ -151,6 +150,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index d99c116032f3..44462b40f2d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -536,6 +536,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) } if (type == I40E_VSI_SRIOV) { u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; /* If the port VLAN has been configured and then the @@ -546,6 +547,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) */ if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); + + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id ? 
vf->port_vlan_id : -1, true, false); @@ -558,10 +561,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); + spin_unlock_bh(&vsi->mac_filter_list_lock); } /* program mac filter */ - ret = i40e_sync_vsi_filters(vsi); + ret = i40e_sync_vsi_filters(vsi, false); if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -605,6 +609,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /* map PF queues to VF queues */ for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); + reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); total_queue_pairs++; @@ -701,6 +706,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) */ vf->num_queue_pairs = 0; vf->vf_states = 0; + clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); } /** @@ -839,11 +845,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) complete_reset: /* reallocate VF resources to reset the VSI state */ i40e_free_vf_res(vf); - i40e_alloc_vf_res(vf); - i40e_enable_vf_mappings(vf); - set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); - clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); - + if (!i40e_alloc_vf_res(vf)) { + i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); @@ -872,6 +878,11 @@ void i40e_free_vfs(struct i40e_pf *pf) i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], false); + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + false); + /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank * the carpet out from underneath their feet. @@ -933,6 +944,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; pf->num_alloc_vfs = 0; goto err_iov; } @@ -957,8 +969,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* VF resources get allocated during reset */ i40e_reset_vf(&vfs[i], false); - /* enable VF vplan_qtable mappings */ - i40e_enable_vf_mappings(&vfs[i]); } pf->num_alloc_vfs = num_alloc_vfs; @@ -986,24 +996,26 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) int pre_existing_vfs = pci_num_vf(pdev); int err = 0; - if (pf->state & __I40E_TESTING) { + if (test_bit(__I40E_TESTING, &pf->state)) { dev_warn(&pdev->dev, "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); err = -EPERM; goto err_out; } - dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); if (pre_existing_vfs && pre_existing_vfs != num_vfs) i40e_free_vfs(pf); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) goto out; if (num_vfs > pf->num_req_vfs) { + dev_warn(&pdev->dev, "Unable to enable %d VFs. 
Limited to %d VFs due to device resource constraints.\n", + num_vfs, pf->num_req_vfs); err = -EPERM; goto err_out; } + dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); err = i40e_alloc_vfs(pf, num_vfs); if (err) { dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); @@ -1094,6 +1106,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, } } else { vf->num_valid_msgs++; + /* reset the invalid counter, if a valid message is received. */ + vf->num_invalid_msgs = 0; } aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, @@ -1195,16 +1209,22 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) } else { vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; } + + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; if (vf->lan_vsi_idx) { vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; - vfres->vsi_res[i].num_queue_pairs = - pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; - memcpy(vfres->vsi_res[i].default_mac_addr, - vf->default_lan_addr.addr, ETH_ALEN); + vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs; + /* VFs only use TC 0 */ + vfres->vsi_res[i].qset_handle + = le16_to_cpu(vsi->info.qs_handle[0]); + ether_addr_copy(vfres->vsi_res[i].default_mac_addr, + vf->default_lan_addr.addr); i++; } set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); @@ -1582,6 +1602,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + /* Lock once, because all function inside for loop accesses VSI's + * MAC filter list which needs to be protected using same lock. 
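/* The locking pattern this patch introduces throughout the file, in
 * sketch form (vid stands in for whatever VLAN id applies):
 *
 *	spin_lock_bh(&vsi->mac_filter_list_lock);
 *	for (i = 0; i < al->num_elements; i++)
 *		f = i40e_add_filter(vsi, al->list[i].addr, vid,
 *				    true, false);
 *	spin_unlock_bh(&vsi->mac_filter_list_lock);
 *	i40e_sync_vsi_filters(vsi, false);
 *
 * The BH-safe spinlock is taken once around the whole loop rather than
 * per element, and i40e_sync_vsi_filters() runs after the unlock,
 * since pushing the list to hardware may block.
 */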
+ */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; @@ -1600,12 +1625,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to add VF MAC filter\n"); ret = I40E_ERR_PARAM; + spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; } } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi)) + if (i40e_sync_vsi_filters(vsi, false)) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: @@ -1650,13 +1677,15 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + spin_lock_bh(&vsi->mac_filter_list_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) i40e_del_filter(vsi, al->list[i].addr, I40E_VLAN_ANY, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi)) + if (i40e_sync_vsi_filters(vsi, false)) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: @@ -1708,6 +1737,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { /* add new VLAN filter */ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); + if (ret) dev_err(&pf->pdev->dev, "Unable to add VF vlan filter %d, error %d\n", @@ -1759,6 +1789,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); + if (ret) dev_err(&pf->pdev->dev, "Unable to delete VF vlan filter %d, error %d\n", @@ -1870,7 +1901,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, case I40E_VIRTCHNL_OP_UNKNOWN: default: return -EPERM; - break; } /* few more checks */ if ((valid_len != msglen) || (err_msg_format)) { @@ -2049,6 +2079,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } + /* Lock once because below invoked function add/del_filter requires + * mac_filter_list_lock to be held + */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* delete the temporary mac address */ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id ? 
vf->port_vlan_id : -1, @@ -2060,9 +2095,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) list_for_each_entry(f, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ - if (i40e_sync_vsi_filters(vsi)) { + if (i40e_sync_vsi_filters(vsi, false)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); ret = -EIO; goto error_param; @@ -2089,8 +2126,10 @@ error_param: int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos) { + u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + bool is_vsi_in_vlan = false; struct i40e_vsi *vsi; struct i40e_vf *vf; int ret = 0; @@ -2116,12 +2155,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, goto error_pvid; } - if (le16_to_cpu(vsi->info.pvid) == - (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT))) + if (le16_to_cpu(vsi->info.pvid) == vlanprio) /* duplicate request, so just return success */ goto error_pvid; - if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) { + spin_lock_bh(&vsi->mac_filter_list_lock); + is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", vf_id); @@ -2141,7 +2183,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, * MAC addresses deleted. */ if ((!(vlan_id || qos) || - (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) && + vlanprio != le16_to_cpu(vsi->info.pvid)) && vsi->info.pvid) ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); @@ -2156,8 +2198,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, } } if (vlan_id || qos) - ret = i40e_vsi_add_pvid(vsi, - vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); + ret = i40e_vsi_add_pvid(vsi, vlanprio); else i40e_vsi_remove_pvid(vsi); @@ -2310,7 +2351,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, ivi->vf = vf_id; - memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); + ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); ivi->max_tx_rate = vf->tx_rate; ivi->min_tx_rate = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 736f6f08b4f2..da44995def42 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -29,8 +29,6 @@ #include "i40e.h" -#define I40E_MAX_MACVLAN_FILTERS 256 -#define I40E_MAX_VLAN_FILTERS 256 #define I40E_MAX_VLANID 4095 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 @@ -98,7 +96,8 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ - u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */ + /* num of continuous malformed or invalid msgs detected */ + u64 num_invalid_msgs; u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. 
capabilities */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 929d47152bf2..fd123ca60761 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -469,8 +469,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.asq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_asq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.asq.head, 0); @@ -479,16 +483,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) wr32(hw, hw->aq.asq.bal, 0); wr32(hw, hw->aq.asq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.asq_mutex); - hw->aq.asq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_asq_bufs(hw); +shutdown_asq_out: mutex_unlock(&hw->aq.asq_mutex); - return ret_code; } @@ -502,8 +503,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.arq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.arq_mutex); + + if (hw->aq.arq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_arq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.arq.head, 0); @@ -512,16 +517,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) wr32(hw, hw->aq.arq.bal, 0); wr32(hw, hw->aq.arq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.arq_mutex); - hw->aq.arq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_arq_bufs(hw); +shutdown_arq_out: mutex_unlock(&hw->aq.arq_mutex); - return ret_code; } @@ -596,6 +598,9 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) /* destroy the locks */ + if (hw->nvm_buff.va) + i40e_free_virt_mem(hw, &hw->nvm_buff); + return ret_code; } @@ -617,8 +622,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "%s: ntc %d head %d.\n", __func__, ntc, - rd32(hw, hw->aq.asq.head)); + "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { I40E_ADMINQ_CALLBACK cb_func = @@ -682,19 +686,23 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, u16 retval = 0; u32 val = 0; - val = rd32(hw, hw->aq.asq.head); - if (val >= hw->aq.num_asq_entries) { + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: head overrun 
at %d\n", val); + "AQTX: Admin queue not initialized.\n"); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } - if (hw->aq.asq.count == 0) { + hw->aq.asq_last_status = I40E_AQ_RC_OK; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: Admin queue not initialized.\n"); + "AQTX: head overrun at %d\n", val); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); @@ -719,8 +727,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, desc->flags &= ~cpu_to_le16(details->flags_dis); desc->flags |= cpu_to_le16(details->flags_ena); - mutex_lock(&hw->aq.asq_mutex); - if (buff_size > hw->aq.asq_buf_size) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -830,6 +836,10 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + /* save writeback aq if requested */ + if (details->wb_desc) + *details->wb_desc = *desc_on_ring; + /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { @@ -841,7 +851,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); -asq_send_command_exit: return status; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index ef43d68f67b3..a3eae5d9a2bd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -69,6 +69,7 @@ struct i40e_asq_cmd_details { u16 flags_dis; bool async; bool postpone; + struct i40e_aq_desc *wb_desc; }; #define I40E_ADMINQ_DETAILS(R, i) \ @@ -108,9 +109,10 @@ struct i40e_adminq_info { /** * i40e_aq_rc_to_posix - convert errors to user-land codes - * aq_rc: AdminQ error code to convert + * aq_ret: AdminQ handler error code can override aq_rc + * aq_rc: AdminQ firmware error code to convert **/ -static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) +static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) return -EAGAIN; - if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) return -ERANGE; + return aq_to_posix[aq_rc]; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index c8022092d369..fcb9ef34cc7a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1719,11 +1719,13 @@ struct i40e_aqc_get_link_status { u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; -#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index d45d0ae6bd3b..72b1942a94aa 100644 --- 
a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -51,7 +51,9 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_20G_KR2: + case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_SFP_X722: @@ -85,7 +87,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ -char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: @@ -145,7 +147,7 @@ char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ -char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) +const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) { switch (stat_err) { case 0: @@ -329,25 +331,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -441,9 +429,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); - status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; @@ -518,8 +503,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); @@ -990,10 +973,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 
1 : 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == I40E_VSI_SRIOV) { - memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr, - ETH_ALEN); - memcpy(hw->mac.addr, vsi_res->default_mac_addr, - ETH_ALEN); + ether_addr_copy(hw->mac.perm_addr, + vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, + vsi_res->default_mac_addr); } vsi_res++; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h new file mode 100644 index 000000000000..e6a39c9862e8 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -0,0 +1,55 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h index 21a91b14bf81..5e314fd3c016 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -34,7 +34,7 @@ #include <linux/pci.h> /* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include <asm-generic/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-lo-hi.h> /* File to be the magic between shared code and * actual OS primitives diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 55ae4b0f8192..cbd9a1b078ab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -60,8 +60,8 @@ void 
i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); +const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); @@ -101,4 +101,6 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 7e91d825c760..47e9a90d6b10 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -140,65 +140,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } -/** - * i40e_get_tx_pending - how many tx descriptors not processed - * @tx_ring: the ring of descriptors - * - * Since there is no access to the ring head register - * in XL710, we need to use our local copies - **/ -static u32 i40e_get_tx_pending(struct i40e_ring *ring) -{ - u32 head, tail; - - head = i40e_get_head(ring); - tail = readl(ring->tail); - - if (head != tail) - return (head < tail) ? - tail - head : (tail + ring->count - head); - - return 0; -} - -/** - * i40e_check_tx_hang - Is there a hang in the Tx queue - * @tx_ring: the ring of descriptors - **/ -static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) -{ - u32 tx_done = tx_ring->stats.packets; - u32 tx_done_old = tx_ring->tx_stats.tx_done_old; - u32 tx_pending = i40e_get_tx_pending(tx_ring); - bool ret = false; - - clear_check_for_tx_hang(tx_ring); - - /* Check for a hung queue, but be thorough. This verifies - * that a transmit has been completed since the previous - * check AND there is at least one packet pending. The - * ARMED bit is set to indicate a potential hang. The - * bit is cleared if a pause frame is received to remove - * false hang detection due to PFC or 802.3x frames. By - * requiring this to fail twice we avoid races with - * PFC clearing the ARMED bit and conditions where we - * run the check_tx_hang logic with a transmit completion - * pending but without time to complete it yet. 
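/* For the record, the pending computation being deleted here handled
 * ring wraparound; e.g. on a 512-entry ring with head == 500 and
 * tail == 10 after wrap:
 *
 *	pending = tail + ring->count - head = 10 + 512 - 500 = 22
 *
 * descriptors fetched by hardware but not yet written back. The PF
 * driver keeps an equivalent i40e_get_tx_pending() (exported earlier
 * in this patch); only the VF-side hang heuristics go away.
 */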
- */ - if ((tx_done_old == tx_done) && tx_pending) { - /* make sure it is true for two checks in a row */ - ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, - &tx_ring->state); - } else if (tx_done_old == tx_done && - (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { - /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_done; - clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } - - return ret; -} - #define WB_STRIDE 0x3 /** @@ -304,36 +245,15 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + /* check to see if there are any non-cache aligned descriptors + * waiting to be written back, and kick the hardware to force + * them to be written back in case of napi polling + */ if (budget && !((i & WB_STRIDE) == WB_STRIDE) && !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; - else - tx_ring->arm_wb = false; - - if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { - /* schedule immediate reset if we believe we hung */ - dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" - " VSI <%d>\n" - " Tx Queue <%d>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n", - tx_ring->vsi->seid, - tx_ring->queue_index, - tx_ring->next_to_use, i); - - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - dev_info(tx_ring->dev, - "tx hang detected on queue %d, resetting adapter\n", - tx_ring->queue_index); - - tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); - - /* the adapter is about to reset, no point in enabling stuff */ - return true; - } netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), @@ -355,16 +275,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } } - return budget > 0; + return !!budget; } /** - * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors + * i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors * @vsi: the VSI we care about * @q_vector: the vector on which to force writeback * **/ -static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; @@ -398,6 +318,8 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -406,21 +328,32 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * testing data as well as attempting to minimize response time * while increasing bulk throughput. 
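/* As on the PF side, the boolean return is new: callers only rewrite
 * the ITR register when the value actually changed, and
 * i40e_update_enable_itr() below merges the two directions with
 * max(tx.itr, rx.itr) so Rx and Tx share one adaptive value.
 */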
**/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttle rate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. */ - bytes_per_int = rc->total_bytes / rc->itr; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -433,35 +366,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - if (bytes_per_int <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } + + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. + */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) - rc->itr = new_itr; - rc->total_bytes = 0; rc->total_packets = 0; + + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } /* @@ -822,16 +772,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; - struct i40e_vsi *vsi = rx_ring->vsi; - u64 flags = vsi->back->flags; if (vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (flags & I40E_FLAG_IN_NETPOLL) - netif_rx(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -997,7 +942,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_node_id(); + const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; @@ -1067,6 +1012,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) cleaned_count++; if (rx_hbo || rx_sph) { int len; + if (rx_hbo) len = I40E_RX_HDR_SIZE; else @@ -1240,9 +1186,6 @@ static int
i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? - */ continue; } @@ -1274,6 +1217,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_VFINT_DYN_CTLN1 + /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about @@ -1284,55 +1242,67 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - u16 old_itr; + bool rx = false, tx = false; + u32 rxval, txval; int vector; - u32 val; vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_RX_ITR << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (q_vector->rx.itr << - I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); - } else { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); - } else { - i40evf_irq_enable_queues(vsi->back, 1 - << q_vector->v_idx); + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_TX_ITR << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (q_vector->tx.itr << - I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); + } + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); - } else { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); - } else { - i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx)); + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* 
set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + wr32(hw, INTREG(vector - 1), rxval); + } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } /** @@ -1353,7 +1323,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; - int cleaned; + int work_done = 0; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1366,26 +1336,36 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb |= ring->arm_wb; + ring->arm_wb = false; } + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { + int cleaned; + if (ring_is_ps_enabled(ring)) cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); else cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + + work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ clean_complete &= (budget_per_ring != cleaned); } /* If work not completed, return budget and polling will return */ if (!clean_complete) { +tx_only: if (arm_wb) - i40e_force_wb(vsi, q_vector); + i40evf_force_wb(vsi, q_vector); return budget; } @@ -1393,7 +1373,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) q_vector->arm_wb_state = false; /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete(napi); + napi_complete_done(napi, work_done); i40e_update_enable_itr(vsi, q_vector); return 0; } @@ -1437,6 +1417,7 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) return -EINVAL; @@ -1979,6 +1960,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u32 td_cmd = 0; u8 hdr_len = 0; int tso; + if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -2006,10 +1988,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - if (i40e_chk_linearize(skb, tx_flags)) + if (i40e_chk_linearize(skb, tx_flags)) { if (skb_linearize(skb)) goto out_drop; - + tx_ring->tx_stats.tx_linearize++; + } skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 9a30f5d8c089..ebc1bf77f036 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -32,11 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 
0x003E #define I40E_ITR_4K 0x007A -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -44,6 +47,15 @@ #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) +/* 0x40 is the enable bit for interrupt rate limiting, and must be set if + * the value of the rate limit is non-zero + */ +#define INTRL_ENA BIT(6) +#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) +#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) +#define I40E_INTRL_8K 125 /* 8000 ints/sec */ +#define I40E_INTRL_62K 16 /* 62500 ints/sec */ +#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -79,16 +91,16 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ - I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -164,6 +176,7 @@ struct i40e_tx_buffer { }; unsigned int bytecount; unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; @@ -187,6 +200,7 @@ struct i40e_tx_queue_stats { u64 restart_queue; u64 tx_busy; u64 tx_done_old; + u64 tx_linearize; }; struct i40e_rx_queue_stats { @@ -198,8 +212,6 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_TX_DETECT_HANG, - __I40E_HANG_CHECK_ARMED, __I40E_RX_PS_ENABLED, __I40E_RX_16BYTE_DESC_ENABLED, }; @@ -210,12 +222,6 @@ enum i40e_ring_state_t { set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define clear_ring_ps_enabled(ring) \ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define check_for_tx_hang(ring) \ - test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define set_check_for_tx_hang(ring) \ - set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define clear_check_for_tx_hang(ring) \ - clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) #define ring_is_16byte_desc_enabled(ring) \ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define set_ring_16byte_desc_enabled(ring) \ @@ -287,6 +293,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 24a2693869a1..301fe2b6dd03 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -33,29 +33,7 @@ #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) +#include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) @@ -158,14 +136,14 @@ enum i40e_set_fc_aq_failures { }; enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1, - I40E_VSI_VMDQ2, - I40E_VSI_CTRL, - I40E_VSI_FCOE, - I40E_VSI_MIRROR, - I40E_VSI_SRIOV, - I40E_VSI_FDIR, + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, I40E_VSI_TYPE_UNKNOWN }; @@ -189,16 +167,65 @@ struct i40e_link_status { bool crc_enable; u8 pacing; u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define 
I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +enum i40e_aq_capabilities_phy_type { + I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), + I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), + I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), + I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), + I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), + I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), + I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), + I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), + I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), + I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), + I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), + I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), + I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), + I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), + I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), + I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), + I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), + I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), + I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), + I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), + I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), + I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), + I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), + I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), + I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = + BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), + I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; - u32 autoneg_advertised; - u32 phy_id; - u32 module_type; bool get_link_info; enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + enum i40e_aq_capabilities_phy_type phy_types; }; #define I40E_HW_CAP_MAX_GPIO 30 @@ -286,6 +313,7 @@ struct i40e_nvm_info { bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ @@ -304,12 +332,17 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_CSUM_CON, I40E_NVMUPD_CSUM_SA, I40E_NVMUPD_CSUM_LCB, + I40E_NVMUPD_STATUS, + I40E_NVMUPD_EXEC_AQ, + I40E_NVMUPD_GET_AQ_RESULT, }; enum i40e_nvmupd_state { I40E_NVMUPD_STATE_INIT, I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING + I40E_NVMUPD_STATE_WRITING, + I40E_NVMUPD_STATE_INIT_WAIT, + I40E_NVMUPD_STATE_WRITE_WAIT, }; /* nvm_access definition and its masks/shifts need to be accessible to @@ -328,6 +361,7 @@ enum i40e_nvmupd_state { #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) #define I40E_NVM_ERA 0x4 #define I40E_NVM_CSUM 0x8 +#define I40E_NVM_EXEC 0xf #define 
I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) @@ -486,6 +520,8 @@ struct i40e_hw { /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; + struct i40e_aq_desc nvm_wb_desc; + struct i40e_virt_mem nvm_buff; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ @@ -494,8 +530,9 @@ struct i40e_hw { u16 dcbx_status; /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; - struct i40e_dcbx_config remote_dcbx_config; + struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ + struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ + struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ /* debug mask */ u32 debug_mask; @@ -1018,8 +1055,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ - BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1162,6 +1199,7 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index e6db20e8a395..9f7b279b9d9c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -81,7 +81,6 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 17, - I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue @@ -151,6 +150,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 3817cbbf45e6..22fc3d49c4b9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -48,10 +48,6 @@ #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "i40evf: " -#define DPRINTK(nlevel, klevel, fmt, args...) \ - ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ - printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ - __func__ , ## args))) /* dummy struct to make common code less painful */ struct i40e_vsi { @@ -70,6 +66,7 @@ struct i40e_vsi { */ u16 rx_itr_setting; u16 tx_itr_setting; + u16 qs_handle; }; /* How many Rx Buffers do we bundle into one write to the hardware ? 
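The interrupt rate-limit encoding added to i40e_txrx.h above stores the limit in 4 usec register units with bit 6 as the enable flag. A quick round trip with the predefined 8K value, restated as a sanity check (the helper is illustrative, not driver code):

static u8 intrl_usec_to_reg(u32 usecs)
{
	/* INTRL_ENA is BIT(6) == 0x40, matching the header comment:
	 *   125 usec (8000 ints/sec) -> (125 >> 2) | 0x40 = 0x5F
	 *   decoding 0x5F gives (0x5F & ~0x40) << 2 = 124 usec;
	 * the 1 usec lost to 4 usec granularity is harmless, the
	 * limit is approximate by design.
	 */
	return usecs ? (usecs >> 2) | INTRL_ENA : 0;
}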
*/ @@ -90,7 +87,7 @@ struct i40e_vsi { #define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_AQ_BUF_SIZE 4096 #define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */ +#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -115,6 +112,8 @@ struct i40e_q_vector { struct i40e_ring_container tx; u32 ring_mask; u8 num_ringpairs; /* total number of ring pairs in vector */ +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 or 1 update ITR */ int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; bool arm_wb_state; @@ -214,7 +213,6 @@ struct i40evf_adapter { #define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) #define I40EVF_FLAG_RX_PS_CAPABLE BIT(2) #define I40EVF_FLAG_RX_PS_ENABLED BIT(3) -#define I40EVF_FLAG_IN_NETPOLL BIT(4) #define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_MQ_CAPABLE BIT(6) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) @@ -223,10 +221,10 @@ struct i40evf_adapter { #define I40EVF_FLAG_RESET_NEEDED BIT(10) #define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) +#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) /* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index e85849b9ff98..d962164dfb0f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.3.5" +#define DRV_VERSION "1.3.33" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation."; @@ -282,6 +282,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) /** * i40evf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure + * @flush: boolean value whether to run rd32() **/ void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) { @@ -305,15 +306,14 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 val; - u32 ena_mask; /* handle non-queue interrupts */ - val = rd32(hw, I40E_VFINT_ICR01); - ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + rd32(hw, I40E_VFINT_ICR01); + rd32(hw, I40E_VFINT_ICR0_ENA1); - val = rd32(hw, I40E_VFINT_DYN_CTL01); - val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; + val = rd32(hw, I40E_VFINT_DYN_CTL01) | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, val); /* schedule work on the private workqueue */ @@ -334,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -357,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.ring = rx_ring; 
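The itr_countdown field initialized below is what makes the new ITR math trustworthy: a fresh value is computed only once per ITR_COUNTDOWN_START (100) polls, so total_bytes and total_packets are averaged over a whole window rather than a single interrupt. A condensed sketch of that gate (illustrative; the real logic lives in i40e_update_enable_itr() above):

/* Recompute ITR only when the per-vector countdown expires. */
static bool itr_update_due(struct i40e_q_vector *q_vector)
{
	if (q_vector->itr_countdown > 0) {
		q_vector->itr_countdown--;
		return false;		/* keep smoothing */
	}
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	return true;
}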
q_vector->rx.count++; q_vector->rx.latency_range = I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; } /** @@ -377,6 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.ring = tx_ring; q_vector->tx.count++; q_vector->tx.latency_range = I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->num_ringpairs++; q_vector->ring_mask |= BIT(t_idx); } @@ -444,6 +446,29 @@ out: return err; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * i40evf_netpoll - A Polling 'interrupt' handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. + **/ +static void i40evf_netpoll(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + int i; + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &adapter->vsi.state)) + return; + + for (i = 0; i < q_vectors; i++) + i40evf_msix_clean_rings(0, adapter->q_vector[i]); +} + +#endif /** * i40evf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure @@ -489,8 +514,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vector); if (err) { dev_info(&adapter->pdev->dev, - "%s: request_irq failed, error: %d\n", - __func__, err); + "Request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* assign the mask for this irq */ @@ -731,6 +755,8 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); + if (!VLAN_ALLOWED(adapter)) + return -EIO; if (i40evf_add_vlan(adapter, vid) == NULL) return -ENOMEM; return 0; @@ -746,8 +772,11 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); - i40evf_del_vlan(adapter, vid); - return 0; + if (VLAN_ALLOWED(adapter)) { + i40evf_del_vlan(adapter, vid); + return 0; + } + return -EIO; } /** @@ -837,6 +866,15 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0; + if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) + return -EPERM; + + f = i40evf_find_filter(adapter, hw->mac.addr); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + } + f = i40evf_add_filter(adapter, addr->sa_data); if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); @@ -856,6 +894,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct i40evf_mac_filter *f, *ftmp; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; + struct netdev_hw_addr *ha; int count = 50; /* add addr if not already in the filter list */ @@ -877,29 +916,27 @@ static void i40evf_set_rx_mode(struct net_device *netdev) } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { - bool found = false; - - if (is_multicast_ether_addr(f->macaddr)) { - netdev_for_each_mc_addr(mca, netdev) { - if (ether_addr_equal(mca->addr, f->macaddr)) { - found = true; - break; - } - } - } else { - netdev_for_each_uc_addr(uca, netdev) { - if (ether_addr_equal(uca->addr, f->macaddr)) { - found = true; - break; - } - } - if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) - found = true; - } - if (!found) { - f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; - } + 
netdev_for_each_mc_addr(mca, netdev) + if (ether_addr_equal(mca->addr, f->macaddr)) + goto bottom_of_search_loop; + + netdev_for_each_uc_addr(uca, netdev) + if (ether_addr_equal(uca->addr, f->macaddr)) + goto bottom_of_search_loop; + + for_each_dev_addr(netdev, ha) + if (ether_addr_equal(ha->addr, f->macaddr)) + goto bottom_of_search_loop; + + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) + goto bottom_of_search_loop; + + /* f->macaddr wasn't found in uc, mc, or ha list so delete it */ + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + +bottom_of_search_loop: + continue; } clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -1111,6 +1148,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; + if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; adapter->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -1165,7 +1204,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; - i40evf_acquire_msix_vectors(adapter, v_budget); + err = i40evf_acquire_msix_vectors(adapter, v_budget); out: adapter->netdev->real_num_tx_queues = pairs; @@ -1421,16 +1460,16 @@ static void i40evf_watchdog_task(struct work_struct *work) struct i40evf_adapter, watchdog_task); struct i40e_hw *hw = &adapter->hw; - uint32_t rstat_val; + u32 reg_val; if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) goto restart_watchdog; if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((rstat_val == I40E_VFR_VFACTIVE) || - (rstat_val == I40E_VFR_COMPLETED)) { + reg_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((reg_val == I40E_VFR_VFACTIVE) || + (reg_val == I40E_VFR_COMPLETED)) { /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. 
Attempting reinit.\n"); adapter->state = __I40EVF_STARTUP; @@ -1455,11 +1494,8 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; /* check for reset */ - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && - (rstat_val != I40E_VFR_VFACTIVE) && - (rstat_val != I40E_VFR_COMPLETED)) { + reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK; + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) { adapter->state = __I40EVF_RESETTING; adapter->flags |= I40EVF_FLAG_RESET_PENDING; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); @@ -1573,8 +1609,9 @@ static void i40evf_reset_task(struct work_struct *work) reset_task); struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; + struct i40evf_vlan_filter *vlf; struct i40evf_mac_filter *f; - uint32_t rstat_val; + u32 reg_val; int i = 0, err; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, @@ -1595,12 +1632,11 @@ static void i40evf_reset_task(struct work_struct *work) /* poll until we see the reset actually happen */ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((rstat_val != I40E_VFR_VFACTIVE) && - (rstat_val != I40E_VFR_COMPLETED)) + reg_val = rd32(hw, I40E_VF_ARQLEN1) & + I40E_VF_ARQLEN1_ARQENABLE_MASK; + if (!reg_val) break; - usleep_range(500, 1000); + usleep_range(5000, 10000); } if (i == I40EVF_RESET_WAIT_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); @@ -1609,21 +1645,21 @@ static void i40evf_reset_task(struct work_struct *work) /* wait until the reset is complete and the PF is responding to us */ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if (rstat_val == I40E_VFR_VFACTIVE) + reg_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (reg_val == I40E_VFR_VFACTIVE) break; msleep(I40EVF_RESET_WAIT_MS); } /* extra wait to make sure minimum wait is met */ msleep(I40EVF_RESET_WAIT_MS); if (i == I40EVF_RESET_WAIT_COUNT) { - struct i40evf_mac_filter *f, *ftmp; + struct i40evf_mac_filter *ftmp; struct i40evf_vlan_filter *fv, *fvtmp; /* reset never finished */ dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", - rstat_val); + reg_val); adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; if (netif_running(adapter->netdev)) { @@ -1697,8 +1733,8 @@ continue_reset: f->add = true; } /* re-add all VLAN filters */ - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - f->add = true; + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->add = true; } adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; @@ -1853,8 +1889,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) if (!err) continue; dev_err(&adapter->pdev->dev, - "%s: Allocation for Tx Queue %u failed\n", - __func__, i); + "Allocation for Tx Queue %u failed\n", i); break; } @@ -1881,8 +1916,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) if (!err) continue; dev_err(&adapter->pdev->dev, - "%s: Allocation for Rx Queue %u failed\n", - __func__, i); + "Allocation for Rx Queue %u failed\n", i); break; } return err; @@ -2041,6 +2075,9 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_tx_timeout = i40evf_tx_timeout, .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = 
i40evf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i40evf_netpoll, +#endif }; /** @@ -2089,7 +2126,10 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features; + netdev->vlan_features = netdev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; @@ -2118,6 +2158,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); adapter->vsi.netdev = adapter->netdev; + adapter->vsi.qs_handle = adapter->vsi_res->qset_handle; return 0; } @@ -2246,10 +2287,13 @@ static void i40evf_init_task(struct work_struct *work) if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", adapter->hw.mac.addr); - random_ether_addr(adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = &i40evf_watchdog_timer; @@ -2265,6 +2309,9 @@ static void i40evf_init_task(struct work_struct *work) if (err) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; if (!RSS_AQ(adapter)) i40evf_configure_rss(adapter); err = i40evf_request_misc_irq(adapter); @@ -2300,8 +2347,7 @@ static void i40evf_init_task(struct work_struct *work) } return; restart: - schedule_delayed_work(&adapter->init_task, - msecs_to_jiffies(50)); + schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; err_register: @@ -2314,11 +2360,14 @@ err_alloc: err: /* Things went into the weeds, so try again later */ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { - dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n"); + dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - return; /* do not reschedule */ + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; + schedule_delayed_work(&adapter->init_task, HZ * 5); + return; } - schedule_delayed_work(&adapter->init_task, HZ * 3); + schedule_delayed_work(&adapter->init_task, HZ); } /** @@ -2434,7 +2483,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); - schedule_delayed_work(&adapter->init_task, 10); + schedule_delayed_work(&adapter->init_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); return 0; @@ -2510,6 +2560,7 @@ static int i40evf_resume(struct pci_dev *pdev) rtnl_lock(); err = i40evf_set_interrupt_capability(adapter); if (err) { + rtnl_unlock(); dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); return err; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c 
b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index d4eb1a5e7d42..32e620e1eb5c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -156,7 +156,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | - I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + I40E_VIRTCHNL_VF_OFFLOAD_VLAN | + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) @@ -234,8 +235,8 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; @@ -288,8 +289,8 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; @@ -313,8 +314,8 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; @@ -341,8 +342,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; @@ -393,8 +394,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); return; } list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -410,8 +411,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_ether_addr_list) + (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -453,8 +453,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we 
already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", + adapter->current_op); return; } list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -470,8 +470,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_ether_addr_list) + (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -513,8 +512,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", + adapter->current_op); return; } @@ -531,8 +530,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_vlan_filter_list) + (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -572,8 +570,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", + adapter->current_op); return; } @@ -590,8 +588,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_vlan_filter_list) + (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -629,8 +626,8 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; @@ -720,17 +717,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } break; default: - dev_err(&adapter->pdev->dev, - "%s: Unknown event %d from pf\n", - __func__, vpe->event); + dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", + vpe->event); break; } return; } if (v_retval) { - dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n", - __func__, v_retval, - i40evf_stat_str(&adapter->hw, v_retval), v_opcode); + dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", + v_retval, 
i40evf_stat_str(&adapter->hw, v_retval), + v_opcode); } switch (v_opcode) { case I40E_VIRTCHNL_OP_GET_STATS: { @@ -756,6 +752,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, sizeof(struct i40e_virtchnl_vsi_resource); memcpy(adapter->vf_res, msg, min(msglen, len)); i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); i40evf_process_config(adapter); } break; diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 212d668dabb3..1a2f1cc44b28 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -444,8 +444,8 @@ struct igb_adapter { struct ptp_pin_desc sdp_config[IGB_N_SDP]; struct { - struct timespec start; - struct timespec period; + struct timespec64 start; + struct timespec64 period; } perout[IGB_N_PEROUT]; char fw_version[32]; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 74262768b09b..2529bc625de4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -842,10 +842,6 @@ static void igb_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IGB_STATS_LEN; - drvinfo->testinfo_len = IGB_TEST_LEN; - drvinfo->regdump_len = igb_get_regs_len(netdev); - drvinfo->eedump_len = igb_get_eeprom_len(netdev); } static void igb_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e174fbbdba40..ea7b09887245 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -151,7 +151,7 @@ static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA */ static int igb_poll(struct napi_struct *, int); static bool igb_clean_tx_irq(struct igb_q_vector *); -static bool igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); @@ -2986,6 +2986,9 @@ static int igb_sw_init(struct igb_adapter *adapter) } #endif /* CONFIG_PCI_IOV */ + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGB_FLAG_HAS_MSIX; + igb_probe_vfs(adapter); igb_init_queue_configuration(adapter); @@ -5389,7 +5392,7 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ptp_clock_event event; - struct timespec ts; + struct timespec64 ts; u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); if (tsicr & TSINTR_SYS_WRAP) { @@ -5409,10 +5412,11 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) if (tsicr & TSINTR_TT0) { spin_lock(&adapter->tmreg_lock); - ts = timespec_add(adapter->perout[0].start, - adapter->perout[0].period); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + /* u32 conversion of tv_sec is safe until y2106 */ wr32(E1000_TRGTTIML0, ts.tv_nsec); - wr32(E1000_TRGTTIMH0, ts.tv_sec); + wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); tsauxc = rd32(E1000_TSAUXC); tsauxc |= TSAUXC_EN_TT0; wr32(E1000_TSAUXC, tsauxc); @@ -5423,10 +5427,10 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) if (tsicr & TSINTR_TT1) { 
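Target time 1 below is re-armed the same way as target time 0 above: the period is added with timespec64_add() and the seconds are truncated into the 32-bit TRGTTIMH register, which stays correct until 2106. The shared pattern, as an illustrative helper that is not part of the patch:

/* next = start + period; seconds truncated to the 32-bit register */
static void igb_rearm_perout(struct igb_adapter *adapter, int idx)
{
	struct e1000_hw *hw = &adapter->hw;
	struct timespec64 next = timespec64_add(adapter->perout[idx].start,
						adapter->perout[idx].period);

	wr32(idx ? E1000_TRGTTIML1 : E1000_TRGTTIML0, next.tv_nsec);
	wr32(idx ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)next.tv_sec);
}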
spin_lock(&adapter->tmreg_lock); - ts = timespec_add(adapter->perout[1].start, - adapter->perout[1].period); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); wr32(E1000_TRGTTIML1, ts.tv_nsec); - wr32(E1000_TRGTTIMH1, ts.tv_sec); + wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); tsauxc = rd32(E1000_TSAUXC); tsauxc |= TSAUXC_EN_TT1; wr32(E1000_TSAUXC, tsauxc); @@ -6360,6 +6364,7 @@ static int igb_poll(struct napi_struct *napi, int budget) struct igb_q_vector, napi); bool clean_complete = true; + int work_done = 0; #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) @@ -6368,15 +6373,19 @@ static int igb_poll(struct napi_struct *napi, int budget) if (q_vector->tx.ring) clean_complete = igb_clean_tx_irq(q_vector); - if (q_vector->rx.ring) - clean_complete &= igb_clean_rx_irq(q_vector, budget); + if (q_vector->rx.ring) { + int cleaned = igb_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + clean_complete &= (cleaned < budget); + } /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* If not enough Rx work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); igb_ring_irq_enable(q_vector); return 0; @@ -6900,7 +6909,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, skb->protocol = eth_type_trans(skb, rx_ring->netdev); } -static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) +static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { struct igb_ring *rx_ring = q_vector->rx.ring; struct sk_buff *skb = rx_ring->skb; @@ -6974,7 +6983,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); - return total_packets < budget; + return total_packets; } static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 5982f28d521a..c44df87c38de 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -143,7 +143,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter, * sub-nanosecond resolution. 
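Further below, the clock-output path halves the requested period (the SDP pin toggles once per half cycle) by round-tripping through nanoseconds; a 10 ms period, for instance, becomes 10000000 >> 1 = 5000000 ns, a 5 ms half cycle. In miniature (illustrative helper):

/* Halve a period via ns; mirrors the timespec64_to_ns /
 * ns_to_timespec64 round trip used for the frequency-output setup.
 */
static struct timespec64 igb_half_period(struct timespec64 period)
{
	s64 ns = timespec64_to_ns(&period);

	return ns_to_timespec64(ns >> 1);
}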
*/ wr32(E1000_SYSTIML, ts->tv_nsec); - wr32(E1000_SYSTIMH, ts->tv_sec); + wr32(E1000_SYSTIMH, (u32)ts->tv_sec); } /** @@ -479,7 +479,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, struct e1000_hw *hw = &igb->hw; u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; unsigned long flags; - struct timespec ts; + struct timespec64 ts; int use_freq = 0, pin = -1; s64 ns; @@ -523,14 +523,14 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, } ts.tv_sec = rq->perout.period.sec; ts.tv_nsec = rq->perout.period.nsec; - ns = timespec_to_ns(&ts); + ns = timespec64_to_ns(&ts); ns = ns >> 1; if (on && ns <= 70000000LL) { if (ns < 8LL) return -EINVAL; use_freq = 1; } - ts = ns_to_timespec(ns); + ts = ns_to_timespec64(ns); if (rq->perout.index == 1) { if (use_freq) { tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index c6996feb1cb4..b74ce53d7b52 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -196,8 +196,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = igbvf_get_regs_len(netdev); - drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); } static void igbvf_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 686fa7184179..297af801f051 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1211,7 +1211,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget) /* If not enough Rx work done, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->requested_itr & 3) igbvf_set_itr(adapter); @@ -2615,6 +2615,7 @@ static const struct net_device_ops igbvf_netdev_ops = { .ndo_poll_controller = igbvf_netpoll, #endif .ndo_set_features = igbvf_set_features, + .ndo_features_check = passthru_features_check, }; /** diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index b311e9e710d2..d2b29b490ae0 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -479,9 +479,6 @@ ixgb_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IXGB_STATS_LEN; - drvinfo->regdump_len = ixgb_get_regs_len(netdev); - drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); } static void diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index edf1fb913209..1d2174526a4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -152,9 +152,17 @@ struct vf_data_storage { u16 vlan_count; u8 spoofchk_enabled; bool rss_query_enabled; + u8 trusted; + int xcast_mode; unsigned int vf_api; }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + struct vf_macvlans { struct list_head l; int vf; @@ -539,8 +547,7 @@ struct hwmon_buff { #define IXGBE_MIN_RSC_ITR 24 #define IXGBE_100K_ITR 40 #define IXGBE_20K_ITR 200 -#define IXGBE_10K_ITR 400 -#define IXGBE_8K_ITR 500 +#define IXGBE_12K_ITR 336 /* 
ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, @@ -595,6 +602,7 @@ struct ixgbe_mac_addr { /* default to trying for four seconds */ #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ /* board specific private data structure */ struct ixgbe_adapter { @@ -708,6 +716,7 @@ struct ixgbe_adapter { u32 link_speed; bool link_up; + unsigned long sfp_poll_time; unsigned long link_check_timeout; struct timer_list service_timer; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index dd7062fed61a..a39afcf03e2c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -44,9 +44,8 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); +static void +ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); @@ -109,6 +108,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; + mac->ops.set_rate_select_speed = + ixgbe_set_hard_rate_select_speed; } else { if ((mac->ops.get_media_type(hw) == ixgbe_media_type_backplane) && @@ -646,176 +648,32 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) } /** - * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set * - * Set the link speed in the AUTOC register and restarts link. - **/ -static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) + * Set module link speed via RS0/RS1 rate select pins. + */ +static void +ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { - s32 status = 0; - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - u32 speedcnt = 0; u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - u32 i = 0; - bool link_up = false; - bool autoneg = false; - - /* Mask off requested but non-supported speeds */ - status = hw->mac.ops.get_link_capabilities(hw, &link_speed, - &autoneg); - if (status != 0) - return status; - - speed &= link_speed; - - /* - * Try each speed one by one, highest priority first. We do this in - * software because 10gb fiber doesn't support speed autonegotiation. 
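On these multispeed SFP+ modules the rate-select input is wired to SDP5, so "setting the module link speed" is just driving one GPIO: high selects the 10G signal path, low selects 1G. The encoding that the new ixgbe_set_hard_rate_select_speed() below writes into ESDP, sketched on its own (the helper name here is illustrative):

/* SDP5_DIR makes SDP5 an output; the SDP5 level is the rate select. */
static u32 esdp_for_speed(u32 esdp_reg, bool want_10g)
{
	esdp_reg |= IXGBE_ESDP_SDP5_DIR;
	if (want_10g)
		esdp_reg |= IXGBE_ESDP_SDP5;	/* 10G analog path */
	else
		esdp_reg &= ~IXGBE_ESDP_SDP5;	/* 1G analog path */
	return esdp_reg;
}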
- */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - speedcnt++; - highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - break; - case ixgbe_media_type_fiber_qsfp: - /* QSFP module automatically detects MAC link speed */ - break; - default: - hw_dbg(hw, "Unexpected media type.\n"); - break; - } - - /* Allow module to change analog characteristics (1G->10G) */ - msleep(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg_wait_to_complete); - if (status != 0) - return status; - - /* Flap the tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); - - /* - * Wait for the controller to acquire link. Per IEEE 802.3ap, - * Section 73.10.2, we may have to wait up to 500ms if KR is - * attempted. 82599 uses the same timing for 10g SFI. - */ - for (i = 0; i < 5; i++) { - /* Wait for the link partner to also set speed */ - msleep(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) - return status; - - if (link_up) - goto out; - } - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - speedcnt++; - if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) - highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - esdp_reg &= ~IXGBE_ESDP_SDP5; - esdp_reg |= IXGBE_ESDP_SDP5_DIR; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - break; - case ixgbe_media_type_fiber_qsfp: - /* QSFP module automatically detects MAC link speed */ - break; - default: - hw_dbg(hw, "Unexpected media type.\n"); - break; - } - - /* Allow module to change analog characteristics (10G->1G) */ - msleep(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg_wait_to_complete); - if (status != 0) - return status; - - /* Flap the tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); - /* Wait for the link partner to also set speed */ - msleep(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if (link_up) - goto out; + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + break; + case IXGBE_LINK_SPEED_1GB_FULL: + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + break; + default: + hw_dbg(hw, "Invalid fixed module speed\n"); + return; } - /* - * We didn't get link. Configure back to the highest speed we tried, - * (if there was more than one). We call ourselves back with just the - * single highest speed that the user requested. 
- */ - if (speedcnt > 1) - status = ixgbe_setup_mac_link_multispeed_fiber(hw, - highest_link_speed, - autoneg_wait_to_complete); - -out: - /* Set autoneg_advertised value based on input link speed */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - return status; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); } /** @@ -1766,6 +1624,16 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + /* also use it for SCTP */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + break; + default: + break; + } + /* store source and destination IP masks (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ~input_mask->formatted.src_ip[0]); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 3f56a8080118..ce61b36b94f1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -297,13 +297,13 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) /* Setup flow control */ ret_val = ixgbe_setup_fc(hw); - if (!ret_val) - return 0; + if (ret_val) + return ret_val; /* Clear adapter stopped flag */ hw->adapter_stopped = false; - return ret_val; + return 0; } /** @@ -2164,10 +2164,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) /* * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark - * to the maximum FCRTH value. This allows the Tx - * switch to function even under heavy Rx workloads. + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. */ - fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); @@ -2476,6 +2477,9 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + if (hw->mac.type >= ixgbe_mac_X550) + return 0; + /* * Before proceeding, make sure that the PCIe block does not have * transactions pending. @@ -3920,3 +3924,213 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw) fwsm &= IXGBE_FWSM_MODE_MASK; return fwsm == IXGBE_FWSM_FW_MODE_PT; } + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restart link. + */ +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 status = 0; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + + /* Mask off requested but non-supported speeds */ + status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); + if (status) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first.
We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status) + return status; + + if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.set_rate_select_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type\n"); + break; + } + + /* Allow module to change analog characteristics (1G->10G) */ + msleep(40); + + status = hw->mac.ops.setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status) + return status; + + /* Flap the Tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status) + return status; + + if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type\n"); + break; + } + + /* Allow module to change analog characteristics (10G->1G) */ + msleep(40); + + status = hw->mac.ops.setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status) + return status; + + /* Flap the Tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_set_soft_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the soft rate select. + */ +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one-bit mask, so the value equals the mask */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + hw_dbg(hw, "Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); + return; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); + return; + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2f779f35dc4f..a0044e4a8b90 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -135,6 +135,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); #define IXGBE_FAILED_READ_REG 0xffffffffU #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 3b932fe64ab6..23277ab153b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -259,7 +259,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); } else { - reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads.
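ixgbe_set_soft_rate_select_speed() is the SFF-8472 counterpart of the hard RS0/RS1 pin toggling on 82599: it rewrites the soft rate-select bit in the module's control byte over I2C. A sketch of how a MAC init path is expected to pair it with the now-shared multispeed logic; the 82599 wiring matches the hunk earlier in this patch, while using the soft variant for a MAC without the ESDP pins is an assumption here, and the function name is illustrative:

/* Sketch: pairing the shared multispeed-fiber setup with a per-MAC
 * rate-select implementation. Only the exported ixgbe_* symbols are
 * real; the init function itself is hypothetical.
 */
static void example_init_mac_link_ops(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	if (hw->phy.multispeed_fiber) {
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		/* hard RS0/RS1 pins on 82599; soft (I2C) rate select is
		 * the assumed choice for MACs without the ESDP pins */
		mac->ops.set_rate_select_speed =
				ixgbe_set_soft_rate_select_speed;
	}
}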
+ */ + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ab2edc8e7703..d681273bd39d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -943,9 +943,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IXGBE_STATS_LEN; - drvinfo->testinfo_len = IXGBE_TEST_LEN; - drvinfo->regdump_len = ixgbe_get_regs_len(netdev); } static void ixgbe_get_ringparam(struct net_device *netdev, @@ -2286,7 +2283,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, adapter->tx_itr_setting = ec->tx_coalesce_usecs; if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_10K_ITR; + tx_itr_param = IXGBE_12K_ITR; else tx_itr_param = adapter->tx_itr_setting; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 68e1e757ecef..f3168bcc7d87 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -866,7 +866,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, if (txr_count && !rxr_count) { /* tx only vector */ if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_10K_ITR; + q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 63b2cfe9416b..47395ff5d908 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -79,7 +79,7 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.0.1-k" +#define DRV_VERSION "4.2.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2015 Intel Corporation."; @@ -137,6 +137,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, /* required last entry */ {0, } }; @@ -1244,9 +1245,12 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, int cpu) { struct ixgbe_hw *hw = &adapter->hw; - u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + u32 txctrl = 0; u16 reg_offset; + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + txctrl = dca3_get_tag(tx_ring->dev, cpu); + switch (hw->mac.type) { case ixgbe_mac_82598EB: reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); @@ -1278,9 +1282,11 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, int cpu) { struct ixgbe_hw *hw = &adapter->hw; - u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); + u32 rxctrl = 0; u8 reg_idx = rx_ring->reg_idx; + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + rxctrl = dca3_get_tag(rx_ring->dev, cpu); switch (hw->mac.type) { case ixgbe_mac_82599EB: @@ -1297,6 +1303,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, * which will cause the DCA tag to be cleared. 
*/ rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_DCA_EN | IXGBE_DCA_RXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); @@ -1326,11 +1333,13 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { int i; - if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) - return; - /* always use CB2 mode, difference is masked in the CB driver */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_MODE_CB2); + else + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); for (i = 0; i < adapter->num_q_vectors; i++) { adapter->q_vector[i]->cpu = -1; @@ -1353,7 +1362,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) break; if (dca_add_requester(dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - ixgbe_setup_dca(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_MODE_CB2); break; } /* Fall Through since DCA is disabled. */ @@ -1361,7 +1371,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { dca_remove_requester(dev); adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); } break; } @@ -2261,7 +2272,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, /* simple throttlerate management * 0-10MB/s lowest (100000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) */ /* what was last interrupt timeslice? */ timepassed_us = q_vector->itr >> 2; @@ -2350,7 +2361,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) new_itr = IXGBE_20K_ITR; break; case bulk_latency: - new_itr = IXGBE_8K_ITR; + new_itr = IXGBE_12K_ITR; break; default: break; @@ -2495,17 +2506,27 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; + u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); + + if (!ixgbe_is_sfp(hw)) + return; - if (eicr & IXGBE_EICR_GPI_SDP2(hw)) { + /* Later MACs use a different SDP */ + if (hw->mac.type >= ixgbe_mac_X540) + eicr_mask = IXGBE_EICR_GPI_SDP0_X540; + + if (eicr & eicr_mask) { /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw)); + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; ixgbe_service_event_schedule(adapter); } } - if (eicr & IXGBE_EICR_GPI_SDP1(hw)) { + if (adapter->hw.mac.type == ixgbe_mac_82599EB && + (eicr & IXGBE_EICR_GPI_SDP1(hw))) { /* Clear the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { @@ -2622,6 +2643,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP) + mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; mask |= IXGBE_EIMS_ECC; @@ -2752,7 +2775,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring; - int
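The replacement of IXGBE_8K_ITR/IXGBE_10K_ITR with a single IXGBE_12K_ITR of 336 follows directly from how the driver encodes ITR: the stored value is the interrupt interval in 0.25 usec units (see the timepassed_us = q_vector->itr >> 2 line above), and only bits 15:3 reach the hardware, so useful values are multiples of 8. A sketch of the decode direction, assuming that encoding (the helper name is illustrative):

/* Decode an ITR setting back to an interrupt rate.
 * 336 -> 84 us -> ~11904 interrupts/s, i.e. the "12K" bulk rate.
 */
static u32 example_itr_to_irqs_per_sec(u32 itr)
{
	u32 interval_us = itr >> 2;	/* stored in 0.25 us units */

	return 1000000 / interval_us;	/* itr settings are >= 8 */
}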
per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; #ifdef CONFIG_IXGBE_DCA @@ -2773,9 +2796,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - ixgbe_for_each_ring(ring, q_vector->rx) - clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, - per_ring_budget) < per_ring_budget); + ixgbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + work_done += cleaned; + clean_complete &= (cleaned < per_ring_budget); + } ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ @@ -2783,7 +2810,7 @@ return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->rx_itr_setting & 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -3700,14 +3727,20 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), adapter->num_vfs); - /* Ensure LLDP is set for Ethertype Antispoofing if we will be + /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be * calling set_ethertype_anti_spoofing for each VF in loop below */ - if (hw->mac.ops.set_ethertype_anti_spoofing) + if (hw->mac.ops.set_ethertype_anti_spoofing) { IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), - (IXGBE_ETQF_FILTER_EN | /* enable filter */ - IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */ - IXGBE_ETH_P_LLDP)); /* LLDP eth type */ + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETH_P_LLDP)); + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + ETH_P_PAUSE)); + } /* For VFs that have spoof checking turned off */ for (i = 0; i < adapter->num_vfs; i++) { @@ -3777,8 +3810,6 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: case ixgbe_mac_82598EB: /* * For VMDq support of different descriptor types or */ rdrxctl |= IXGBE_RDRXCTL_MVMEN; break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (adapter->num_vfs) + rdrxctl |= IXGBE_RDRXCTL_PSP; + /* fall through for older HW */ case ixgbe_mac_82599EB: case ixgbe_mac_X540: /* Disable RSC for ACK packets */ @@ -4767,6 +4803,12 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) break; } +#ifdef CONFIG_IXGBE_DCA + /* configure DCA */ + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) + ixgbe_setup_dca(adapter); +#endif /* CONFIG_IXGBE_DCA */ + #ifdef IXGBE_FCOE /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); @@ -4793,6 +4835,7 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; } /** @@ -4883,9 +4926,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_82599EB: gpie |= IXGBE_SDP0_GPIEN_8259X; break; - case ixgbe_mac_X540: - gpie |= IXGBE_EIMS_TS; - break; default: break; } @@ -4895,9 +4935,15 @@ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) gpie |= IXGBE_SDP1_GPIEN(hw); - if (hw->mac.type == ixgbe_mac_82599EB) { -
gpie |= IXGBE_SDP1_GPIEN_8259X; - gpie |= IXGBE_SDP2_GPIEN_8259X; + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; + break; + case ixgbe_mac_X550EM_x: + gpie |= IXGBE_SDP0_GPIEN_X540; + break; + default: + break; } IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); @@ -5220,11 +5266,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); - -#ifdef CONFIG_IXGBE_DCA - /* since we reset the hardware DCA settings were cleared */ - ixgbe_setup_dca(adapter); -#endif } /** @@ -5270,7 +5311,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; adapter->max_q_vectors = MAX_Q_VECTORS_82599; adapter->atr_sample_rate = 20; fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); @@ -5296,7 +5336,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) switch (hw->mac.type) { case ixgbe_mac_82598EB: adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; @@ -6692,10 +6731,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) return; + if (adapter->sfp_poll_time && + time_after(adapter->sfp_poll_time, jiffies)) + return; /* If not yet time to poll for SFP */ + /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return; + adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; + err = hw->phy.ops.identify_sfp(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) goto sfp_out; @@ -8362,6 +8407,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, + .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB @@ -8695,8 +8741,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; - if (err == IXGBE_ERR_SFP_NOT_PRESENT && - hw->mac.type == ixgbe_mac_82598EB) { + if (err == IXGBE_ERR_SFP_NOT_PRESENT) { err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); @@ -9008,7 +9053,8 @@ static void ixgbe_remove(struct pci_dev *pdev) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; dca_remove_requester(&pdev->dev); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); } #endif @@ -9019,12 +9065,12 @@ static void ixgbe_remove(struct pci_dev *pdev) /* remove the added san mac */ ixgbe_del_sanmac_netdev(netdev); - if (netdev->reg_state == NETREG_REGISTERED) - unregister_netdev(netdev); - #ifdef CONFIG_PCI_IOV ixgbe_disable_sriov(adapter); #endif + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index b1e4703ff2a5..8daa95f74548 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -102,6 +102,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 597d0b1c2370..fb8673d63806 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -100,16 +100,17 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) } /** - * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value + * @lock: true to take and release the semaphore * * Returns an error code on error. - **/ -s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) + */ +static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 10; @@ -124,7 +125,7 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ~csum; do { - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ @@ -157,13 +158,15 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, if (ixgbe_clock_out_i2c_bit(hw, false)) goto fail; ixgbe_i2c_stop(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); *val = (high_bits << 8) | low_bits; return 0; fail: ixgbe_i2c_bus_clear(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read combined error - Retry.\n"); @@ -175,17 +178,49 @@ fail: } /** - * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + */ +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error.
+ */ +s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * @lock: true to take and release the semaphore + * + * Returns an error code on error. - **/ -s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) + */ +static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val, bool lock) { + u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 1; int retry = 0; u8 reg_high; @@ -197,6 +232,8 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); csum = ~csum; do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) @@ -217,10 +254,14 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, if (ixgbe_out_i2c_byte_ack(hw, csum)) goto fail; ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); return 0; fail: ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte write combined error - Retry.\n"); @@ -232,6 +273,36 @@ fail: } /** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + */ +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error.
+ */ +s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure * @@ -1100,6 +1171,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) return IXGBE_ERR_SFP_NOT_PRESENT; } + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); @@ -1107,9 +1181,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (status) goto err_read_i2c_eeprom; - /* LAN ID is needed for sfp_type determination */ - hw->mac.ops.set_lan_id(hw); - if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; @@ -1159,7 +1230,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type = ixgbe_sfp_type_lr; else hw->phy.sfp_type = ixgbe_sfp_type_unknown; - } else if (hw->mac.type == ixgbe_mac_82599EB) { + } else { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = @@ -1660,26 +1731,46 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * ixgbe_is_sfp_probe - Returns true if SFP is being detected + * @hw: pointer to hardware structure + * @offset: eeprom offset to be read + * @addr: I2C address to be read + */ +static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +{ + if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && + offset == IXGBE_SFF_IDENTIFIER && + hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return true; + return false; +} + +/** + * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @data: value read + * @lock: true if to take and release semaphore * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. 
- **/ -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) + */ +static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) { s32 status; u32 max_retry = 10; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; + + if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) + max_retry = IXGBE_SFP_DETECT_RETRIES; + *data = 0; do { - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); @@ -1721,12 +1812,16 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, goto fail; ixgbe_i2c_stop(hw); - break; + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; fail: ixgbe_i2c_bus_clear(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(100); + if (lock) { + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(100); + } retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read error - Retrying.\n"); @@ -1735,29 +1830,60 @@ fail: } while (retry < max_retry); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return status; } /** - * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * ixgbe_read_i2c_byte_generic - Reads 8-bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_read_i2c_byte_generic_unlocked - Reads 8-bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_write_i2c_byte_generic_int - Writes 8-bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * @lock: true to take and release the semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address.
- **/ -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) + */ +static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data, bool lock) { s32 status; u32 max_retry = 1; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; do { @@ -1788,7 +1914,9 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, goto fail; ixgbe_i2c_stop(hw); - break; + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; fail: ixgbe_i2c_bus_clear(hw); @@ -1799,21 +1927,57 @@ fail: hw_dbg(hw, "I2C byte write error.\n"); } while (retry < max_retry); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); return status; } /** + * ixgbe_write_i2c_byte_generic - Writes 8-bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_write_i2c_byte_generic_unlocked - Writes 8-bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** * ixgbe_i2c_start - Sets I2C start condition * @hw: pointer to hardware structure * * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. **/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl |= IXGBE_I2C_BB_EN(hw); + /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); ixgbe_raise_i2c_clk(hw, &i2cctl); @@ -1838,10 +2002,15 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware.
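The I2C refactoring in this file follows one pattern throughout: the real transfer moves into a static _int helper taking a bool lock, and the exported locked/unlocked entry points become one-line wrappers, so callers that already hold the SWFW semaphore (such as the CS4227 code later in this patch) can reuse the same bus engine without deadlocking. A generic sketch of the shape, with hypothetical names:

/* Hypothetical illustration of the locked/unlocked wrapper pattern. */
static s32 example_xfer_int(struct ixgbe_hw *hw, u8 reg, u8 *val, bool lock)
{
	u32 mask = hw->phy.phy_semaphore_mask;
	s32 status = 0;

	if (lock && hw->mac.ops.acquire_swfw_sync(hw, mask))
		return IXGBE_ERR_SWFW_SYNC;

	/* ... the actual bit-banged transfer goes here ... */

	if (lock)
		hw->mac.ops.release_swfw_sync(hw, mask);
	return status;
}

s32 example_xfer(struct ixgbe_hw *hw, u8 reg, u8 *val)
{
	return example_xfer_int(hw, reg, val, true);
}

s32 example_xfer_unlocked(struct ixgbe_hw *hw, u8 reg, u8 *val)
{
	return example_xfer_int(hw, reg, val, false);
}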
**/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN(hw); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); @@ -1854,6 +2023,13 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) /* bus free time between stop and start (4.7us)*/ udelay(IXGBE_I2C_T_BUF); + + if (bb_en_bit || data_oe_bit || clk_oe_bit) { + i2cctl &= ~bb_en_bit; + i2cctl |= data_oe_bit | clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } } /** @@ -1868,6 +2044,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) s32 i; bool bit = false; + *data = 0; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); *data |= bit << i; @@ -1901,6 +2078,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) /* Release SDA line (set high) */ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); @@ -1915,15 +2093,21 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) **/ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); s32 status = 0; u32 i = 0; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 timeout = 10; bool ack = true; + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } ixgbe_raise_i2c_clk(hw, &i2cctl); - /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); @@ -1961,7 +2145,14 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ @@ -2016,13 +2207,20 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. **/ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); u32 i = 0; u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; + if (clk_oe_bit) { + *i2cctl |= clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + } + for (i = 0; i < timeout; i++) { *i2cctl |= IXGBE_I2C_CLK_OUT(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); @@ -2042,11 +2240,13 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on X550 hardware. **/ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); @@ -2062,13 +2262,17 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. 
**/ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + if (data) *i2cctl |= IXGBE_I2C_DATA_OUT(hw); else *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw); + *i2cctl &= ~data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); @@ -2076,6 +2280,14 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + if (!data) /* Can't verify data in this case */ + return 0; + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + /* Verify data was set correctly */ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { @@ -2092,9 +2304,19 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. **/ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + udelay(IXGBE_I2C_T_FALL); + } + if (*i2cctl & IXGBE_I2C_DATA_IN(hw)) return true; return false; @@ -2109,10 +2331,11 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) **/ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 i2cctl; u32 i; ixgbe_i2c_start(hw); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); ixgbe_set_i2c_data(hw, &i2cctl, 1); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index e45988c4dad5..5abd66c84d00 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -66,6 +66,9 @@ #define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 @@ -78,9 +81,29 @@ #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 #define IXGBE_CS4227 0xBE /* CS4227 address */ -#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_RESET_PENDING 0x1357 +#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 +#define IXGBE_CS4227_RETRIES 15 +#define IXGBE_CS4227_EFUSE_STATUS 0x0181 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to set speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to set EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to set speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EEPROM_STATUS 0x5001 +#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 +#define IXGBE_CS4227_SPEED_1G 0x8000 +#define IXGBE_CS4227_SPEED_10G 0 #define IXGBE_CS4227_EDC_MODE_CX1 0x0002 #define IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define 
IXGBE_PE 0xE0 /* Port expander addr */ +#define IXGBE_PE_OUTPUT 1 /* Output reg offset */ +#define IXGBE_PE_CONFIG 3 /* Config reg offset */ +#define IXGBE_PE_BIT1 (1 << 1) /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 @@ -109,6 +132,8 @@ #define IXGBE_I2C_T_SU_STO 4 #define IXGBE_I2C_T_BUF 5 +#define IXGBE_SFP_DETECT_RETRIES 2 + #define IXGBE_TN_LASI_STATUS_REG 0x9005 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 @@ -154,8 +179,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, @@ -164,6 +193,10 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val); s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val); #endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 1d17b5872dd1..fcd8b27a0ccb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -116,6 +116,12 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * we want to disable the querying by default. 
*/ adapter->vfinfo[i].rss_query_enabled = 0; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } return 0; @@ -1001,6 +1007,59 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF supports the required mailbox API */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + } + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case IXGBEVF_XCAST_MODE_NONE: + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + enable = 0; + break; + case IXGBEVF_XCAST_MODE_MULTI: + disable = IXGBE_VMOLR_MPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; + break; + case IXGBEVF_XCAST_MODE_ALLMULTI: + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr &= ~disable; + vmolr |= enable; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -1063,6 +1122,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_RSS_KEY: retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); break; + case IXGBE_VF_UPDATE_XCAST_MODE: + retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1124,6 +1186,17 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); } +static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, vf); +} + void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1416,6 +1489,28 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, return 0; } +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ?
"" : "not "); + + return 0; +} + int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1430,5 +1525,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; + ivi->trusted = adapter->vfinfo[vf].trusted; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 2c197e6d1fe7..dad925706f4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -49,6 +49,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 63689192b149..995f03107eac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -402,6 +402,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FDIRSIP4M 0x0EE40 #define IXGBE_FDIRTCPM 0x0EE44 #define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRSCTPM 0x0EE78 #define IXGBE_FDIRIP6M 0x0EE74 #define IXGBE_FDIRM 0x0EE70 @@ -1192,6 +1193,7 @@ struct ixgbe_thermal_sensor_data { /* RDRXCTL Bit Masks */ #define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ #define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad small packet */ #define IXGBE_RDRXCTL_MVMEN 0x00000020 #define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ #define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ @@ -1750,6 +1752,9 @@ enum { * FCoE (0x8906): Filter 2 * 1588 (0x88f7): Filter 3 * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 */ #define IXGBE_ETQF_FILTER_EAPOL 0 #define IXGBE_ETQF_FILTER_FCOE 2 @@ -1757,6 +1762,7 @@ enum { #define IXGBE_ETQF_FILTER_FIP 4 #define IXGBE_ETQF_FILTER_LLDP 5 #define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 /* VLAN Control Bit Masks */ #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ @@ -1948,6 +1954,7 @@ enum { #define IXGBE_GSSR_SW_MNG_SM 0x0400 #define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ #define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF /* FW Status register bitmask */ #define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ @@ -3255,9 +3262,11 @@ struct ixgbe_mac_operations { void (*flap_tx_laser)(struct ixgbe_hw *); void (*stop_link_on_d3)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); /* Packet Buffer Manipulation */ void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); @@ -3328,6 +3337,10 @@ struct ixgbe_phy_operations { s32 
(*set_phy_power)(struct ixgbe_hw *, bool on); s32 (*enter_lplu)(struct ixgbe_hw *); s32 (*handle_lasi)(struct ixgbe_hw *hw); + s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *value); + s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 value); }; struct ixgbe_eeprom_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 4e758435ece8..c1d4584f6469 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -567,19 +567,25 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) **/ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { - u32 swfw_sync; - u32 swmask = mask; - u32 fwmask = mask << 5; - u32 hwmask = 0; + u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; + u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; + u32 fwmask = swmask << 5; u32 timeout = 200; + u32 hwmask = 0; + u32 swfw_sync; u32 i; - if (swmask == IXGBE_GSSR_EEP_SM) + if (swmask & IXGBE_GSSR_EEP_SM) hwmask = IXGBE_GSSR_FLASH_SM; + /* SW only mask does not have FW bit pair */ + if (mask & IXGBE_GSSR_SW_MNG_SM) + swmask |= IXGBE_GSSR_SW_MNG_SM; + + swmask |= swi2c_mask; + fwmask |= swi2c_mask << 2; for (i = 0; i < timeout; i++) { - /* - * SW NVM semaphore bit is used for access to all + /* SW NVM semaphore bit is used for access to all * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_swfw_sync_semaphore(hw)) @@ -590,39 +596,56 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) swfw_sync |= swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - break; - } else { - /* - * Firmware currently using resource (fwmask), - * hardware currently using resource (hwmask), - * or other software thread currently using - * resource (swmask) - */ - ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); + usleep_range(5000, 6000); + return 0; } + /* Firmware currently using resource (fwmask), hardware + * currently using resource (hwmask), or other software + * thread currently using resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 10000); } - /* - * If the resource is not released by the FW/HW the SW can assume that - * the FW/HW malfunctions. In that case the SW should sets the - * SW bit(s) of the requested resource(s) while ignoring the - * corresponding FW/HW bits in the SW_FW_SYNC register. - */ - if (i >= timeout) { - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); - if (swfw_sync & (fwmask | hwmask)) { - if (ixgbe_get_swfw_sync_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; + /* Failed to get SW only semaphore */ + if (swmask == IXGBE_GSSR_SW_MNG_SM) { + hw_dbg(hw, "Failed to get SW only semaphore\n"); + return IXGBE_ERR_SWFW_SYNC; + } - swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); - ixgbe_release_swfw_sync_semaphore(hw); - } + /* If the resource is not released by the FW/HW the SW can assume that + * the FW/HW malfunctions. In that case the SW should set the SW bit(s) + * of the requested resource(s) while ignoring the corresponding FW/HW + * bits in the SW_FW_SYNC register. 
+ */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + if (swfw_sync & (fwmask | hwmask)) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 6000); + return 0; } + /* If the resource is not released by other SW the SW can assume that + * the other SW malfunctions. In that case the SW should clear all SW + * flags that it does not own and then repeat the whole process once + * again. + */ + if (swfw_sync & swmask) { + u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM; + + if (swi2c_mask) + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_release_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_semaphore(hw); + return IXGBE_ERR_SWFW_SYNC; + } + ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); - return 0; + return IXGBE_ERR_SWFW_SYNC; } /** @@ -635,9 +658,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) **/ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { + u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); u32 swfw_sync; - u32 swmask = mask; + if (mask & IXGBE_GSSR_I2C_MASK) + swmask |= mask & IXGBE_GSSR_I2C_MASK; ixgbe_get_swfw_sync_semaphore(hw); swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); @@ -645,7 +670,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); + usleep_range(5000, 6000); } /** @@ -686,6 +711,11 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) usleep_range(50, 100); } + /* Release semaphores and return error if SW NVM semaphore + * was not granted because we do not have access to the EEPROM + */ + hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n"); + ixgbe_release_swfw_sync_semaphore(hw); return IXGBE_ERR_EEPROM; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9fe9445cd73b..ebe0ac950b14 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -56,6 +56,292 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } +/** + * ixgbe_read_cs4227 - Read CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: pointer to receive value read + * + * Returns status code + */ +static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +{ + return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, + value); +} + +/** + * ixgbe_write_cs4227 - Write CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write to register + * + * Returns status code + */ +static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +{ + return hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, + value); +} + +/** + * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register + * @hw: pointer to hardware structure + * @reg: the register to check + * + * Performs a diagnostic on a register in the CS4227 chip. Returns an error + * if it is not operating correctly. + * This function assumes that the caller has acquired the proper semaphore. 
+ */ +static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg) +{ + s32 status; + u32 retry; + u16 reg_val; + + reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1; + status = ixgbe_write_cs4227(hw, reg, reg_val); + if (status) + return status; + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + msleep(IXGBE_CS4227_CHECK_DELAY); + reg_val = 0xFFFF; + ixgbe_read_cs4227(hw, reg, &reg_val); + if (!reg_val) + break; + } + if (reg_val) { + hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg); + return status; + } + + return 0; +} + +/** + * ixgbe_get_cs4227_status - Return CS4227 status + * @hw: pointer to hardware structure + * + * Performs a diagnostic on the CS4227 chip. Returns an error if it is + * not operating correctly. + * This function assumes that the caller has acquired the proper semaphore. + */ +static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw) +{ + s32 status; + u16 value = 0; + + /* Exit if the diagnostic has already been performed. */ + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + if (status) + return status; + if (value == IXGBE_CS4227_RESET_COMPLETE) + return 0; + + /* Check port 0. */ + status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB); + if (status) + return status; + + status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB); + if (status) + return status; + + /* Check port 1. */ + status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB + + (1 << 12)); + if (status) + return status; + + return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB + + (1 << 12)); +} + +/** + * ixgbe_read_pe - Read register from port expander + * @hw: pointer to hardware structure + * @reg: register number to read + * @value: pointer to receive read value + * + * Returns status code + */ +static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) +{ + s32 status; + + status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); + if (status) + hw_err(hw, "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_write_pe - Write register to port expander + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write + * + * Returns status code + */ +static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) +{ + s32 status; + + status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, + value); + if (status) + hw_err(hw, "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_reset_cs4227 - Reset CS4227 using port expander + * @hw: pointer to hardware structure + * + * This function assumes that the caller has acquired the proper semaphore. + * Returns error code + */ +static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) +{ + s32 status; + u32 retry; + u16 value; + u8 reg; + + /* Trigger hard reset.
*/ + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg); + if (status) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); + if (status) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100); + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + /* Wait for the reset to complete. */ + msleep(IXGBE_CS4227_RESET_DELAY); + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, + &value); + if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK) + break; + msleep(IXGBE_CS4227_CHECK_DELAY); + } + if (retry == IXGBE_CS4227_RETRIES) { + hw_err(hw, "CS4227 reset did not complete\n"); + return IXGBE_ERR_PHY; + } + + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); + if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { + hw_err(hw, "CS4227 EEPROM did not load successfully\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** + * ixgbe_check_cs4227 - Check CS4227 and reset as needed + * @hw: pointer to hardware structure + */ +static void ixgbe_check_cs4227(struct ixgbe_hw *hw) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + s32 status; + u16 value; + u8 retry; + + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d\n", status); + msleep(IXGBE_CS4227_CHECK_DELAY); + continue; + } + + /* Get status of reset flow. */ + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + if (!status && value == IXGBE_CS4227_RESET_COMPLETE) + goto out; + + if (status || value != IXGBE_CS4227_RESET_PENDING) + break; + + /* Reset is pending. Wait and check again. */ + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(IXGBE_CS4227_CHECK_DELAY); + } + /* If still pending, assume other instance failed. */ + if (retry == IXGBE_CS4227_RETRIES) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d\n", status); + return; + } + } + + /* Reset the CS4227. */ + status = ixgbe_reset_cs4227(hw); + if (status) { + hw_err(hw, "CS4227 reset failed: %d", status); + goto out; + } + + /* Reset takes so long, temporarily release semaphore in case the + * other driver instance is waiting for the reset indication. + */ + ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_PENDING); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(10000, 12000); + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d", status); + return; + } + + /* Is the CS4227 working correctly? */ + status = ixgbe_get_cs4227_status(hw); + if (status) { + hw_err(hw, "CS4227 status failed: %d", status); + goto out; + } + + /* Record completion for next time.
*/ + status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_COMPLETE); + +out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(hw->eeprom.semaphore_delay); +} + /** ixgbe_identify_phy_x550em - Get PHY type based on device id * @hw: pointer to hardware structure * @@ -68,7 +354,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); - + ixgbe_check_cs4227(hw); return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; @@ -910,6 +1196,96 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) } /** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * @hw: pointer to hardware structure + * @linear: true if SFP module is linear + */ +static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +{ + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + *linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + *linear = false; + break; + case ixgbe_sfp_type_unknown: + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return 0; +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. + * @hw: pointer to hardware structure + * + * Configures the extern PHY and the integrated KR PHY for SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + s32 status; + u16 slice, value; + bool setup_linear = false; + + /* Check if SFP module is supported and linear */ + status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * there is no reason to configure CS4227 and SFP not present error is + * not accepted in the setup MAC link flow. + */ + if (status == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (status) + return status; + + /* Configure CS4227 LINE side to 10G SR. */ + slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); + value = IXGBE_CS4227_SPEED_10G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, + value); + + /* Configure CS4227 for HOST connection rate then type. */ + slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); + value = speed & IXGBE_LINK_SPEED_10GB_FULL ? + IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, + value); + + slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, + value); + + /* If internal link mode is XFI, then setup XFI internal link. 
*/ + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) + status = ixgbe_setup_ixfi_x550em(hw, &speed); + + return status; +} + +/** * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed * @hw: pointer to hardware structure * @speed: new link speed @@ -1003,6 +1379,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; break; case ixgbe_media_type_copper: mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; @@ -1018,53 +1398,18 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) */ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) { - bool setup_linear; - u16 reg_slice, edc_mode; - s32 ret_val; + s32 status; + bool linear; - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_unknown: - return 0; - case ixgbe_sfp_type_not_present: - return IXGBE_ERR_SFP_NOT_PRESENT; - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - setup_linear = true; - break; - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - case ixgbe_sfp_type_da_act_lmt_core0: - case ixgbe_sfp_type_da_act_lmt_core1: - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - setup_linear = false; - break; - default: - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + if (status) + return status; ixgbe_init_mac_link_ops_X550em(hw); hw->phy.ops.reset = NULL; - /* The CS4227 slice address is the base address + the port-pair reg - * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. - */ - reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); - - if (setup_linear) - edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; - else - edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - - /* Configure CS4227 for connection type. 
*/ - ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, - edc_mode); - - if (ret_val) - ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, - edc_mode); - - return ret_val; + return 0; } /** ixgbe_get_link_capabilities_x550em - Determines link capabilities @@ -1272,7 +1617,7 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) if (status) return status; - if (lsc) + if (lsc && phy->ops.setup_internal_link) return phy->ops.setup_internal_link(hw); return 0; @@ -1927,6 +2272,62 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); } +/** + * ixgbe_set_mux - Set mux for port 1 access with CS4227 + * @hw: pointer to hardware structure + * @state: set mux if 1, clear if 0 + */ +static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) +{ + u32 esdp; + + if (!hw->bus.lan_id) + return; + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (state) + esdp |= IXGBE_ESDP_SDP1; + else + esdp &= ~IXGBE_ESDP_SDP1; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and sets the I2C MUX + */ +static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + s32 status; + + status = ixgbe_acquire_swfw_sync_X540(hw, mask); + if (status) + return status; + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 1); + + return 0; +} + +/** + * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and sets the I2C MUX + */ +static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 0); + + ixgbe_release_swfw_sync_X540(hw, mask); +} + #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ @@ -1964,8 +2365,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, &ixgbe_set_source_address_pruning_X550, \ .set_ethertype_anti_spoofing = \ &ixgbe_set_ethertype_anti_spoofing_X550, \ - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ - .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ @@ -1985,6 +2384,8 @@ static struct ixgbe_mac_operations mac_ops_X550 = { .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .setup_sfp = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, }; static struct ixgbe_mac_operations mac_ops_X550EM_x = { @@ -1997,7 +2398,8 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = { .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, .get_bus_info = &ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, - + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, + .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, }; #define X550_COMMON_EEP \ @@ -2039,14 +2441,17 @@ static struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY .init = NULL, .identify = &ixgbe_identify_phy_generic, - .read_i2c_combined = &ixgbe_read_i2c_combined_generic, - .write_i2c_combined = &ixgbe_write_i2c_combined_generic, }; static 
struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, + .read_i2c_combined = &ixgbe_read_i2c_combined_generic, + .write_i2c_combined = &ixgbe_write_i2c_combined_generic, + .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, + .write_i2c_combined_unlocked = + &ixgbe_write_i2c_combined_generic_unlocked, }; static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 04c7ec8446e0..ec3147279621 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -471,6 +471,12 @@ enum ixgbevf_boards { board_X550EM_x_vf, }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + extern const struct ixgbevf_info ixgbevf_82599_vf_info; extern const struct ixgbevf_info ixgbevf_X540_vf_info; extern const struct ixgbevf_info ixgbevf_X550_vf_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 149a0b4489be..592ff237d692 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1008,7 +1008,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) container_of(napi, struct ixgbevf_q_vector, napi); struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; ixgbevf_for_each_ring(ring, q_vector->tx) @@ -1027,10 +1027,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - ixgbevf_for_each_ring(ring, q_vector->rx) - clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, - per_ring_budget) - < per_ring_budget); + ixgbevf_for_each_ring(ring, q_vector->rx) { + int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget); + work_done += cleaned; + clean_complete &= (cleaned < per_ring_budget); + } #ifdef CONFIG_NET_RX_BUSY_POLL ixgbevf_qv_unlock_napi(q_vector); @@ -1040,7 +1042,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->rx_itr_setting & 1) ixgbevf_set_itr(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && @@ -1892,9 +1894,17 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + unsigned int flags = netdev->flags; + int xcast_mode; + + xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : + (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 
+ IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; spin_lock_bh(&adapter->mbx_lock); + hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode); + /* reprogram multicast list */ hw->mac.ops.update_mc_addr_list(hw, netdev); @@ -3896,6 +3906,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbevf_netpoll, #endif + .ndo_features_check = passthru_features_check, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 82f44e06e5fc..340cdd469455 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -112,6 +112,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d1339b050627..427f3605cbfc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -469,6 +469,46 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** + * ixgbevf_update_xcast_mode - Update Multicast mode + * @hw: pointer to the HW structure + * @netdev: pointer to net device structure + * @xcast_mode: new multicast mode + * + * Updates the Multicast Mode of VF. + **/ +static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, + struct net_device *netdev, int xcast_mode) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + switch (hw->api_version) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; + msgbuf[1] = xcast_mode; + + err = mbx->ops.write_posted(hw, msgbuf, 2); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2); + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + return 0; +} + +/** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID @@ -727,6 +767,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .check_link = ixgbevf_check_mac_link_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .update_xcast_mode = ixgbevf_update_xcast_mode, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index d40f036b6df0..ef9f7736b4dc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -63,6 +63,7 @@ struct ixgbe_mac_operations { s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *);