Diffstat (limited to 'drivers/net/ethernet'): 85 files changed, 2396 insertions, 1117 deletions
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 9142b473175c..c53384d41c96 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -252,7 +252,7 @@ static int el3_isa_id_sequence(__be16 *phys_addr) for (i = 0; i < el3_cards; i++) { struct el3_private *lp = netdev_priv(el3_devs[i]); if (lp->type == EL3_PNP && - ether_addr_equal(phys_addr, el3_devs[i]->dev_addr)) { + ether_addr_equal((u8 *)phys_addr, el3_devs[i]->dev_addr)) { if (el3_debug > 3) pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n", phys_addr[0] & 0xff, phys_addr[0] >> 8, diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h index b1fae36360d2..a75092d584cc 100644 --- a/drivers/net/ethernet/amd/amd8111e.h +++ b/drivers/net/ethernet/amd/amd8111e.h @@ -751,7 +751,7 @@ struct amd8111e_priv{ const char *name; struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */ struct net_device* amd8111e_net_dev; /* ptr to associated net_device */ - /* Transmit and recive skbs */ + /* Transmit and receive skbs */ struct sk_buff *tx_skbuff[NUM_TX_BUFFERS]; struct sk_buff *rx_skbuff[NUM_RX_BUFFERS]; /* Transmit and receive dma mapped addr */ diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index d71103dbf2cd..8fc93c5f6abc 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -106,6 +106,9 @@ struct alx_priv { u16 msg_enable; bool msi; + + /* protects hw.stats */ + spinlock_t stats_lock; }; extern const struct ethtool_ops alx_ethtool_ops; diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 45b36507abc1..08e22df2a300 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -46,6 +46,66 @@ #include "reg.h" #include "hw.h" +/* The order of these strings must match the order of the fields in + * struct alx_hw_stats + * See hw.h + */ +static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", + "rx_bcast_packets", + "rx_mcast_packets", + "rx_pause_packets", + "rx_ctrl_packets", + "rx_fcs_errors", + "rx_length_errors", + "rx_bytes", + "rx_runt_packets", + "rx_fragments", + "rx_64B_or_less_packets", + "rx_65B_to_127B_packets", + "rx_128B_to_255B_packets", + "rx_256B_to_511B_packets", + "rx_512B_to_1023B_packets", + "rx_1024B_to_1518B_packets", + "rx_1519B_to_mtu_packets", + "rx_oversize_packets", + "rx_rxf_ov_drop_packets", + "rx_rrd_ov_drop_packets", + "rx_align_errors", + "rx_bcast_bytes", + "rx_mcast_bytes", + "rx_address_errors", + "tx_packets", + "tx_bcast_packets", + "tx_mcast_packets", + "tx_pause_packets", + "tx_exc_defer_packets", + "tx_ctrl_packets", + "tx_defer_packets", + "tx_bytes", + "tx_64B_or_less_packets", + "tx_65B_to_127B_packets", + "tx_128B_to_255B_packets", + "tx_256B_to_511B_packets", + "tx_512B_to_1023B_packets", + "tx_1024B_to_1518B_packets", + "tx_1519B_to_mtu_packets", + "tx_single_collision", + "tx_multiple_collisions", + "tx_late_collision", + "tx_abort_collision", + "tx_underrun", + "tx_trd_eop", + "tx_length_errors", + "tx_trunc_packets", + "tx_bcast_bytes", + "tx_mcast_bytes", + "tx_update", +}; + +#define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats) + + static u32 alx_get_supported_speeds(struct alx_hw *hw) { u32 supported = SUPPORTED_10baseT_Half | @@ -201,6 +261,44 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data) alx->msg_enable = data; } +static void 
alx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *estats, u64 *data) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + spin_lock(&alx->stats_lock); + + alx_update_hw_stats(hw); + BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) < + ALX_NUM_STATS * sizeof(u64)); + memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64)); + + spin_unlock(&alx->stats_lock); +} + +static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats)); + break; + default: + WARN_ON(1); + break; + } +} + +static int alx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ALX_NUM_STATS; + default: + return -EINVAL; + } +} + const struct ethtool_ops alx_ethtool_ops = { .get_settings = alx_get_settings, .set_settings = alx_set_settings, @@ -209,4 +307,7 @@ const struct ethtool_ops alx_ethtool_ops = { .get_msglevel = alx_get_msglevel, .set_msglevel = alx_set_msglevel, .get_link = ethtool_op_get_link, + .get_strings = alx_get_strings, + .get_sset_count = alx_get_sset_count, + .get_ethtool_stats = alx_get_ethtool_stats, }; diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index 1e8c24a3cb4e..7712f068f6d4 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -1050,3 +1050,61 @@ bool alx_get_phy_info(struct alx_hw *hw) return true; } + +void alx_update_hw_stats(struct alx_hw *hw) +{ + /* RX stats */ + hw->stats.rx_ok += alx_read_mem32(hw, ALX_MIB_RX_OK); + hw->stats.rx_bcast += alx_read_mem32(hw, ALX_MIB_RX_BCAST); + hw->stats.rx_mcast += alx_read_mem32(hw, ALX_MIB_RX_MCAST); + hw->stats.rx_pause += alx_read_mem32(hw, ALX_MIB_RX_PAUSE); + hw->stats.rx_ctrl += alx_read_mem32(hw, ALX_MIB_RX_CTRL); + hw->stats.rx_fcs_err += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR); + hw->stats.rx_len_err += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR); + hw->stats.rx_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT); + hw->stats.rx_runt += alx_read_mem32(hw, ALX_MIB_RX_RUNT); + hw->stats.rx_frag += alx_read_mem32(hw, ALX_MIB_RX_FRAG); + hw->stats.rx_sz_64B += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B); + hw->stats.rx_sz_127B += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B); + hw->stats.rx_sz_255B += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B); + hw->stats.rx_sz_511B += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B); + hw->stats.rx_sz_1023B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B); + hw->stats.rx_sz_1518B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B); + hw->stats.rx_sz_max += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX); + hw->stats.rx_ov_sz += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ); + hw->stats.rx_ov_rxf += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF); + hw->stats.rx_ov_rrd += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD); + hw->stats.rx_align_err += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR); + hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT); + hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT); + hw->stats.rx_err_addr += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR); + + /* TX stats */ + hw->stats.tx_ok += alx_read_mem32(hw, ALX_MIB_TX_OK); + hw->stats.tx_bcast += alx_read_mem32(hw, ALX_MIB_TX_BCAST); + hw->stats.tx_mcast += alx_read_mem32(hw, ALX_MIB_TX_MCAST); + hw->stats.tx_pause += alx_read_mem32(hw, ALX_MIB_TX_PAUSE); + hw->stats.tx_exc_defer += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER); + hw->stats.tx_ctrl += alx_read_mem32(hw, 
ALX_MIB_TX_CTRL); + hw->stats.tx_defer += alx_read_mem32(hw, ALX_MIB_TX_DEFER); + hw->stats.tx_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT); + hw->stats.tx_sz_64B += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B); + hw->stats.tx_sz_127B += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B); + hw->stats.tx_sz_255B += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B); + hw->stats.tx_sz_511B += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B); + hw->stats.tx_sz_1023B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B); + hw->stats.tx_sz_1518B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B); + hw->stats.tx_sz_max += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX); + hw->stats.tx_single_col += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL); + hw->stats.tx_multi_col += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL); + hw->stats.tx_late_col += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL); + hw->stats.tx_abort_col += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL); + hw->stats.tx_underrun += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN); + hw->stats.tx_trd_eop += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP); + hw->stats.tx_len_err += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR); + hw->stats.tx_trunc += alx_read_mem32(hw, ALX_MIB_TX_TRUNC); + hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT); + hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT); + + hw->stats.update += alx_read_mem32(hw, ALX_MIB_UPDATE); +} diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h index 96f3b4381e17..15548802d6f8 100644 --- a/drivers/net/ethernet/atheros/alx/hw.h +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -381,6 +381,73 @@ struct alx_rrd { ALX_ISR_RX_Q6 | \ ALX_ISR_RX_Q7) +/* Statistics counters collected by the MAC + * + * The order of the fields must match the strings in alx_gstrings_stats + * All stats fields should be u64 + * See ethtool.c + */ +struct alx_hw_stats { + /* rx */ + u64 rx_ok; /* good RX packets */ + u64 rx_bcast; /* good RX broadcast packets */ + u64 rx_mcast; /* good RX multicast packets */ + u64 rx_pause; /* RX pause frames */ + u64 rx_ctrl; /* RX control packets other than pause frames */ + u64 rx_fcs_err; /* RX packets with bad FCS */ + u64 rx_len_err; /* RX packets with length != actual size */ + u64 rx_byte_cnt; /* good bytes received. 
FCS is NOT included */ + u64 rx_runt; /* RX packets < 64 bytes with good FCS */ + u64 rx_frag; /* RX packets < 64 bytes with bad FCS */ + u64 rx_sz_64B; /* 64 byte RX packets */ + u64 rx_sz_127B; /* 65-127 byte RX packets */ + u64 rx_sz_255B; /* 128-255 byte RX packets */ + u64 rx_sz_511B; /* 256-511 byte RX packets */ + u64 rx_sz_1023B; /* 512-1023 byte RX packets */ + u64 rx_sz_1518B; /* 1024-1518 byte RX packets */ + u64 rx_sz_max; /* 1519 byte to MTU RX packets */ + u64 rx_ov_sz; /* truncated RX packets, size > MTU */ + u64 rx_ov_rxf; /* frames dropped due to RX FIFO overflow */ + u64 rx_ov_rrd; /* frames dropped due to RRD overflow */ + u64 rx_align_err; /* alignment errors */ + u64 rx_bc_byte_cnt; /* RX broadcast bytes, excluding FCS */ + u64 rx_mc_byte_cnt; /* RX multicast bytes, excluding FCS */ + u64 rx_err_addr; /* packets dropped due to address filtering */ + + /* tx */ + u64 tx_ok; /* good TX packets */ + u64 tx_bcast; /* good TX broadcast packets */ + u64 tx_mcast; /* good TX multicast packets */ + u64 tx_pause; /* TX pause frames */ + u64 tx_exc_defer; /* TX packets deferred excessively */ + u64 tx_ctrl; /* TX control frames, excluding pause frames */ + u64 tx_defer; /* TX packets deferred */ + u64 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */ + u64 tx_sz_64B; /* 64 byte TX packets */ + u64 tx_sz_127B; /* 65-127 byte TX packets */ + u64 tx_sz_255B; /* 128-255 byte TX packets */ + u64 tx_sz_511B; /* 256-511 byte TX packets */ + u64 tx_sz_1023B; /* 512-1023 byte TX packets */ + u64 tx_sz_1518B; /* 1024-1518 byte TX packets */ + u64 tx_sz_max; /* 1519 byte to MTU TX packets */ + u64 tx_single_col; /* packets TX after a single collision */ + u64 tx_multi_col; /* packets TX after multiple collisions */ + u64 tx_late_col; /* TX packets with late collisions */ + u64 tx_abort_col; /* TX packets aborted w/excessive collisions */ + u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun + */ + u64 tx_trd_eop; /* reads beyond the EOP into the next frame + * when TRD was not written timely + */ + u64 tx_len_err; /* TX packets where length != actual size */ + u64 tx_trunc; /* TX packets truncated due to size > MTU */ + u64 tx_bc_byte_cnt; /* broadcast bytes transmitted, excluding FCS */ + u64 tx_mc_byte_cnt; /* multicast bytes transmitted, excluding FCS */ + u64 update; +}; + + /* maximum interrupt vectors for msix */ #define ALX_MAX_MSIX_INTRS 16 @@ -424,6 +491,9 @@ struct alx_hw { /* PHY link patch flag */ bool lnk_patch; + + /* cumulated stats from the hardware (registers are cleared on read) */ + struct alx_hw_stats stats; }; static inline int alx_hw_revision(struct alx_hw *hw) @@ -491,6 +561,7 @@ bool alx_phy_configured(struct alx_hw *hw); void alx_configure_basic(struct alx_hw *hw); void alx_disable_rss(struct alx_hw *hw); bool alx_get_phy_info(struct alx_hw *hw); +void alx_update_hw_stats(struct alx_hw *hw); static inline u32 alx_speed_to_ethadv(int speed, u8 duplex) { diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c3c4c266b846..e92ffd6e1c15 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1166,10 +1166,60 @@ static void alx_poll_controller(struct net_device *netdev) } #endif +static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *net_stats) +{ + struct alx_priv *alx = netdev_priv(dev); + struct alx_hw_stats *hw_stats = &alx->hw.stats; + + spin_lock(&alx->stats_lock); + + 
alx_update_hw_stats(&alx->hw); + + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_single_col + + hw_stats->tx_multi_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_ov_sz + + hw_stats->rx_ov_rrd + + hw_stats->rx_align_err + + hw_stats->rx_ov_rxf; + + net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_dropped = hw_stats->rx_ov_rrd; + + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; + + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + + spin_unlock(&alx->stats_lock); + + return net_stats; +} + static const struct net_device_ops alx_netdev_ops = { .ndo_open = alx_open, .ndo_stop = alx_stop, .ndo_start_xmit = alx_start_xmit, + .ndo_get_stats64 = alx_get_stats64, .ndo_set_rx_mode = alx_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = alx_set_mac_address, diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h index e4358c98bc4e..af006b44b2a6 100644 --- a/drivers/net/ethernet/atheros/alx/reg.h +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -404,15 +404,59 @@ /* MIB */ #define ALX_MIB_BASE 0x1700 + #define ALX_MIB_RX_OK (ALX_MIB_BASE + 0) +#define ALX_MIB_RX_BCAST (ALX_MIB_BASE + 4) +#define ALX_MIB_RX_MCAST (ALX_MIB_BASE + 8) +#define ALX_MIB_RX_PAUSE (ALX_MIB_BASE + 12) +#define ALX_MIB_RX_CTRL (ALX_MIB_BASE + 16) +#define ALX_MIB_RX_FCS_ERR (ALX_MIB_BASE + 20) +#define ALX_MIB_RX_LEN_ERR (ALX_MIB_BASE + 24) +#define ALX_MIB_RX_BYTE_CNT (ALX_MIB_BASE + 28) +#define ALX_MIB_RX_RUNT (ALX_MIB_BASE + 32) +#define ALX_MIB_RX_FRAG (ALX_MIB_BASE + 36) +#define ALX_MIB_RX_SZ_64B (ALX_MIB_BASE + 40) +#define ALX_MIB_RX_SZ_127B (ALX_MIB_BASE + 44) +#define ALX_MIB_RX_SZ_255B (ALX_MIB_BASE + 48) +#define ALX_MIB_RX_SZ_511B (ALX_MIB_BASE + 52) +#define ALX_MIB_RX_SZ_1023B (ALX_MIB_BASE + 56) +#define ALX_MIB_RX_SZ_1518B (ALX_MIB_BASE + 60) +#define ALX_MIB_RX_SZ_MAX (ALX_MIB_BASE + 64) +#define ALX_MIB_RX_OV_SZ (ALX_MIB_BASE + 68) +#define ALX_MIB_RX_OV_RXF (ALX_MIB_BASE + 72) +#define ALX_MIB_RX_OV_RRD (ALX_MIB_BASE + 76) +#define ALX_MIB_RX_ALIGN_ERR (ALX_MIB_BASE + 80) +#define ALX_MIB_RX_BCCNT (ALX_MIB_BASE + 84) +#define ALX_MIB_RX_MCCNT (ALX_MIB_BASE + 88) #define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92) + #define ALX_MIB_TX_OK (ALX_MIB_BASE + 96) +#define ALX_MIB_TX_BCAST (ALX_MIB_BASE + 100) +#define ALX_MIB_TX_MCAST (ALX_MIB_BASE + 104) +#define ALX_MIB_TX_PAUSE (ALX_MIB_BASE + 108) +#define ALX_MIB_TX_EXC_DEFER (ALX_MIB_BASE + 112) +#define ALX_MIB_TX_CTRL (ALX_MIB_BASE + 116) +#define ALX_MIB_TX_DEFER (ALX_MIB_BASE + 120) +#define ALX_MIB_TX_BYTE_CNT (ALX_MIB_BASE + 124) +#define ALX_MIB_TX_SZ_64B (ALX_MIB_BASE + 128) +#define ALX_MIB_TX_SZ_127B (ALX_MIB_BASE + 132) +#define ALX_MIB_TX_SZ_255B (ALX_MIB_BASE + 136) +#define ALX_MIB_TX_SZ_511B (ALX_MIB_BASE + 140) +#define ALX_MIB_TX_SZ_1023B 
(ALX_MIB_BASE + 144) +#define ALX_MIB_TX_SZ_1518B (ALX_MIB_BASE + 148) +#define ALX_MIB_TX_SZ_MAX (ALX_MIB_BASE + 152) +#define ALX_MIB_TX_SINGLE_COL (ALX_MIB_BASE + 156) +#define ALX_MIB_TX_MULTI_COL (ALX_MIB_BASE + 160) +#define ALX_MIB_TX_LATE_COL (ALX_MIB_BASE + 164) +#define ALX_MIB_TX_ABORT_COL (ALX_MIB_BASE + 168) +#define ALX_MIB_TX_UNDERRUN (ALX_MIB_BASE + 172) +#define ALX_MIB_TX_TRD_EOP (ALX_MIB_BASE + 176) +#define ALX_MIB_TX_LEN_ERR (ALX_MIB_BASE + 180) +#define ALX_MIB_TX_TRUNC (ALX_MIB_BASE + 184) +#define ALX_MIB_TX_BCCNT (ALX_MIB_BASE + 188) #define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192) +#define ALX_MIB_UPDATE (ALX_MIB_BASE + 196) -#define ALX_RX_STATS_BIN ALX_MIB_RX_OK -#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR -#define ALX_TX_STATS_BIN ALX_MIB_TX_OK -#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT #define ALX_ISR 0x1600 #define ALX_ISR_DIS BIT(31) diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 29801750f239..4d3258dd0a88 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1500,31 +1500,40 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev) struct net_device_stats *net_stats = &netdev->stats; atl1c_update_hw_stats(adapter); - net_stats->rx_packets = hw_stats->rx_ok; - net_stats->tx_packets = hw_stats->tx_ok; net_stats->rx_bytes = hw_stats->rx_byte_cnt; net_stats->tx_bytes = hw_stats->tx_byte_cnt; net_stats->multicast = hw_stats->rx_mcast; net_stats->collisions = hw_stats->tx_1_col + - hw_stats->tx_2_col * 2 + - hw_stats->tx_late_col + hw_stats->tx_abort_col; - net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + - hw_stats->rx_len_err + hw_stats->rx_sz_ov + - hw_stats->rx_rrd_ov + hw_stats->rx_align_err; + hw_stats->tx_2_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + + hw_stats->rx_align_err + + hw_stats->rx_rxf_ov; + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; net_stats->rx_length_errors = hw_stats->rx_len_err; net_stats->rx_crc_errors = hw_stats->rx_fcs_err; net_stats->rx_frame_errors = hw_stats->rx_align_err; - net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + net_stats->rx_dropped = hw_stats->rx_rrd_ov; - net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; - net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + - hw_stats->tx_underrun + hw_stats->tx_trunc; net_stats->tx_fifo_errors = hw_stats->tx_underrun; net_stats->tx_aborted_errors = hw_stats->tx_abort_col; net_stats->tx_window_errors = hw_stats->tx_late_col; + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + return net_stats; } diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 7a73f3a9fcb5..d5c2d3e912e5 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1177,32 +1177,40 @@ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev) struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; struct net_device_stats *net_stats = &netdev->stats; - net_stats->rx_packets = hw_stats->rx_ok; 
- net_stats->tx_packets = hw_stats->tx_ok; net_stats->rx_bytes = hw_stats->rx_byte_cnt; net_stats->tx_bytes = hw_stats->tx_byte_cnt; net_stats->multicast = hw_stats->rx_mcast; net_stats->collisions = hw_stats->tx_1_col + - hw_stats->tx_2_col * 2 + - hw_stats->tx_late_col + hw_stats->tx_abort_col; + hw_stats->tx_2_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + + hw_stats->rx_align_err + + hw_stats->rx_rxf_ov; - net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + - hw_stats->rx_len_err + hw_stats->rx_sz_ov + - hw_stats->rx_rrd_ov + hw_stats->rx_align_err; net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; net_stats->rx_length_errors = hw_stats->rx_len_err; net_stats->rx_crc_errors = hw_stats->rx_fcs_err; net_stats->rx_frame_errors = hw_stats->rx_align_err; - net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + net_stats->rx_dropped = hw_stats->rx_rrd_ov; - net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; - net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + - hw_stats->tx_underrun + hw_stats->tx_trunc; net_stats->tx_fifo_errors = hw_stats->tx_underrun; net_stats->tx_aborted_errors = hw_stats->tx_abort_col; net_stats->tx_window_errors = hw_stats->tx_late_col; + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + return net_stats; } diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 55d86ecdacfe..287272dd69da 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -1678,33 +1678,42 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) struct net_device *netdev = adapter->netdev; struct stats_msg_block *smb = adapter->smb.smb; + u64 new_rx_errors = smb->rx_frag + + smb->rx_fcs_err + + smb->rx_len_err + + smb->rx_sz_ov + + smb->rx_rxf_ov + + smb->rx_rrd_ov + + smb->rx_align_err; + u64 new_tx_errors = smb->tx_late_col + + smb->tx_abort_col + + smb->tx_underrun + + smb->tx_trunc; + /* Fill out the OS statistics structure */ - adapter->soft_stats.rx_packets += smb->rx_ok; - adapter->soft_stats.tx_packets += smb->tx_ok; + adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors; + adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors; adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; adapter->soft_stats.multicast += smb->rx_mcast; - adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + - smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); + adapter->soft_stats.collisions += smb->tx_1_col + + smb->tx_2_col + + smb->tx_late_col + + smb->tx_abort_col; /* Rx Errors */ - adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + - smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + - smb->rx_rrd_ov + smb->rx_align_err); + adapter->soft_stats.rx_errors += new_rx_errors; adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; adapter->soft_stats.rx_length_errors += smb->rx_len_err; adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; adapter->soft_stats.rx_frame_errors += smb->rx_align_err; - adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + - smb->rx_rxf_ov); 
adapter->soft_stats.rx_pause += smb->rx_pause; adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; adapter->soft_stats.rx_trunc += smb->rx_sz_ov; /* Tx Errors */ - adapter->soft_stats.tx_errors += (smb->tx_late_col + - smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); + adapter->soft_stats.tx_errors += new_tx_errors; adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; adapter->soft_stats.tx_window_errors += smb->tx_late_col; @@ -1718,23 +1727,18 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) adapter->soft_stats.tx_trunc += smb->tx_trunc; adapter->soft_stats.tx_pause += smb->tx_pause; - netdev->stats.rx_packets = adapter->soft_stats.rx_packets; - netdev->stats.tx_packets = adapter->soft_stats.tx_packets; netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes; netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes; netdev->stats.multicast = adapter->soft_stats.multicast; netdev->stats.collisions = adapter->soft_stats.collisions; netdev->stats.rx_errors = adapter->soft_stats.rx_errors; - netdev->stats.rx_over_errors = - adapter->soft_stats.rx_missed_errors; netdev->stats.rx_length_errors = adapter->soft_stats.rx_length_errors; netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; netdev->stats.rx_frame_errors = adapter->soft_stats.rx_frame_errors; netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; - netdev->stats.rx_missed_errors = - adapter->soft_stats.rx_missed_errors; + netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov; netdev->stats.tx_errors = adapter->soft_stats.tx_errors; netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; netdev->stats.tx_aborted_errors = @@ -1743,6 +1747,9 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) adapter->soft_stats.tx_window_errors; netdev->stats.tx_carrier_errors = adapter->soft_stats.tx_carrier_errors; + + netdev->stats.rx_packets = adapter->soft_stats.rx_packets; + netdev->stats.tx_packets = adapter->soft_stats.tx_packets; } static void atl1_update_mailbox(struct atl1_adapter *adapter) @@ -1872,7 +1879,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) adapter->rx_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ - adapter->netdev->stats.rx_dropped++; + adapter->soft_stats.rx_dropped++; break; } diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h index 3bf79a56220d..34a58cd846a0 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.h +++ b/drivers/net/ethernet/atheros/atlx/atl1.h @@ -666,6 +666,7 @@ struct atl1_sft_stats { u64 rx_errors; u64 rx_length_errors; u64 rx_crc_errors; + u64 rx_dropped; u64 rx_frame_errors; u64 rx_fifo_errors; u64 rx_missed_errors; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 7f968a9b52c8..0297a79a38e1 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1518,14 +1518,12 @@ static int bgmac_probe(struct bcma_device *core) err = bgmac_mii_register(bgmac); if (err) { bgmac_err(bgmac, "Cannot register MDIO\n"); - err = -ENOTSUPP; goto err_dma_free; } err = register_netdev(bgmac->net_dev); if (err) { bgmac_err(bgmac, "Cannot register net device\n"); - err = -ENOTSUPP; goto err_mii_unregister; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index eb105abcf0e7..391f29ef6d2e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h 
@@ -520,10 +520,12 @@ struct bnx2x_fastpath { #define BNX2X_FP_STATE_IDLE 0 #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ -#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ -#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ +#define BNX2X_FP_STATE_DISABLED (1 << 2) +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ +#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) -#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) /* protect state */ spinlock_t lock; @@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) { bool rc = true; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); if (fp->state & BNX2X_FP_LOCKED) { WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); fp->state |= BNX2X_FP_STATE_NAPI_YIELD; @@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) /* we don't care if someone yielded */ fp->state = BNX2X_FP_STATE_NAPI; } - spin_unlock(&fp->lock); + spin_unlock_bh(&fp->lock); return rc; } @@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) { bool rc = false; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); WARN_ON(fp->state & (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; - spin_unlock(&fp->lock); + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); return rc; } @@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; spin_unlock_bh(&fp->lock); return rc; } @@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) /* true if a socket is polling, even if it did not get the lock */ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { - WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); + WARN_ON(!(fp->state & BNX2X_FP_OWNED)); return fp->state & BNX2X_FP_USER_PEND; } + +/* false if fp is currently owned */ +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + int rc = true; + + spin_lock_bh(&fp->lock); + if (fp->state & BNX2X_FP_OWNED) + rc = false; + fp->state |= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); + + return rc; +} #else static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) { @@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { return false; } +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + return true; +} #endif /* CONFIG_NET_RX_BUSY_POLL */ /* Use 2500 as a mini-jumbo MTU for FCoE */ @@ -1542,6 +1566,7 @@ struct bnx2x { #define NO_ISCSI_FLAG (1 << 14) #define NO_FCOE_FLAG (1 << 15) #define BC_SUPPORTS_PFC_STATS (1 << 17) +#define TX_SWITCHING (1 << 18) #define BC_SUPPORTS_FCOE_FEATURES (1 << 19) #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) @@ -2057,7 +2082,6 @@ int bnx2x_del_all_macs(struct 
bnx2x *bp, void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, u8 vf_valid, int fw_sb_id, int igu_sb_id); -u32 bnx2x_get_pretend_reg(struct bnx2x *bp); int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 282ebf61f530..9d7419e0390b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -30,6 +30,43 @@ #include "bnx2x_init.h" #include "bnx2x_sp.h" +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem(struct bnx2x *bp); +static int bnx2x_poll(struct napi_struct *napi, int budget); + +static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) +{ + int i; + + /* Add NAPI objects */ + for_each_rx_queue_cnic(bp, i) { + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } +} + +static void bnx2x_add_all_napi(struct bnx2x *bp) +{ + int i; + + /* Add NAPI objects */ + for_each_eth_queue(bp, i) { + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } +} + +static int bnx2x_calc_num_queues(struct bnx2x *bp) +{ + return bnx2x_num_queues ? + min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) : + min_t(int, netif_get_num_default_rss_queues(), + BNX2X_MAX_QUEUES(bp)); +} + /** * bnx2x_move_fp - move content of the fastpath structure. * @@ -145,7 +182,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) } } -int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ +int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ /* free skb in the packet ring at pos idx * return idx of last bd freed @@ -160,6 +197,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, struct sk_buff *skb = tx_buf->skb; u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; int nbd; + u16 split_bd_len = 0; /* prefetch skb end pointer to speedup dev_kfree_skb() */ prefetch(&skb->end); @@ -167,10 +205,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", txdata->txq_index, idx, tx_buf, skb); - /* unmap first bd */ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; - dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), - BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR @@ -188,12 +223,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - /* ...and the TSO split header bd since they have no mapping */ + /* TSO headers+data bds share a common mapping. 
See bnx2x_tx_split() */ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + split_bd_len = BD_UNMAP_LEN(tx_data_bd); --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); } + /* unmap first bd */ + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd) + split_bd_len, + DMA_TO_DEVICE); + /* now free frags */ while (nbd > 0) { @@ -813,7 +855,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, skb->ip_summed = CHECKSUM_UNNECESSARY; } -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) +static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { struct bnx2x *bp = fp->bp; u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; @@ -1483,7 +1525,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) } } -void bnx2x_free_skbs_cnic(struct bnx2x *bp) +static void bnx2x_free_skbs_cnic(struct bnx2x *bp) { bnx2x_free_tx_skbs_cnic(bp); bnx2x_free_rx_skbs_cnic(bp); @@ -1792,26 +1834,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } void bnx2x_netif_start(struct bnx2x *bp) @@ -1834,7 +1872,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) bnx2x_napi_disable_cnic(bp); } -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct bnx2x *bp = netdev_priv(dev); @@ -2302,16 +2341,16 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) int path = BP_PATH(bp); DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - load_count[path][0]++; - load_count[path][1 + port]++; + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + bnx2x_load_count[path][0]++; + bnx2x_load_count[path][1 + port]++; DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - if (load_count[path][0] == 1) + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + if (bnx2x_load_count[path][0] == 1) return FW_MSG_CODE_DRV_LOAD_COMMON; - else if (load_count[path][1 + port] == 1) + else if (bnx2x_load_count[path][1 + port] == 1) return FW_MSG_CODE_DRV_LOAD_PORT; else return FW_MSG_CODE_DRV_LOAD_FUNCTION; @@ -3069,7 +3108,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) /* * net_device service functions */ -int bnx2x_poll(struct napi_struct *napi, int budget) +static int bnx2x_poll(struct napi_struct *napi, int budget) { int work_done = 0; u8 cos; @@ -4196,7 +4235,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) /* end of fastpath */ } -void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) { int i; for_each_cnic_queue(bp, i) @@ -4410,7 +4449,7 @@ alloc_mem_err: return 0; } -int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) +static int bnx2x_alloc_fp_mem_cnic(struct 
bnx2x *bp) { if (!NO_FCOE(bp)) /* FCoE */ @@ -4423,7 +4462,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) return 0; } -int bnx2x_alloc_fp_mem(struct bnx2x *bp) +static int bnx2x_alloc_fp_mem(struct bnx2x *bp) { int i; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index da8fcaa74495..17d1689aec6b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -26,10 +26,8 @@ #include "bnx2x_sriov.h" /* This is used as a replacement for an MCP if it's not present */ -extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ - -extern int num_queues; -extern int int_mode; +extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ +extern int bnx2x_num_queues; /************************ Macros ********************************/ #define BNX2X_PCI_FREE(x, y, size) \ @@ -417,35 +415,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); * If bp->state is OPEN, should be called with * netif_addr_lock_bh() */ -void bnx2x_set_rx_mode(struct net_device *dev); void bnx2x_set_rx_mode_inner(struct bnx2x *bp); -/** - * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. - * - * @bp: driver handle - * - * If bp->state is OPEN, should be called with - * netif_addr_lock_bh(). - */ -int bnx2x_set_storm_rx_mode(struct bnx2x *bp); - -/** - * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. - * - * @bp: driver handle - * @cl_id: client id - * @rx_mode_flags: rx mode configuration - * @rx_accept_flags: rx accept configuration - * @tx_accept_flags: tx accept configuration (tx switch) - * @ramrod_flags: ramrod configuration - */ -int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, - unsigned long rx_mode_flags, - unsigned long rx_accept_flags, - unsigned long tx_accept_flags, - unsigned long ramrod_flags); - /* Parity errors related */ void bnx2x_set_pf_load(struct bnx2x *bp); bool bnx2x_clear_pf_load(struct bnx2x *bp); @@ -524,7 +495,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); /* select_queue callback */ -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, @@ -564,9 +536,6 @@ int bnx2x_reload_if_running(struct net_device *dev); int bnx2x_change_mac_addr(struct net_device *dev, void *p); -/* NAPI poll Rx part */ -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); - /* NAPI poll Tx part */ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); @@ -577,13 +546,9 @@ int bnx2x_resume(struct pci_dev *pdev); /* Release IRQ vectors */ void bnx2x_free_irq(struct bnx2x *bp); -void bnx2x_free_fp_mem_cnic(struct bnx2x *bp); void bnx2x_free_fp_mem(struct bnx2x *bp); -int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); -int bnx2x_alloc_fp_mem(struct bnx2x *bp); void bnx2x_init_rx_rings(struct bnx2x *bp); void bnx2x_init_rx_rings_cnic(struct bnx2x *bp); -void bnx2x_free_skbs_cnic(struct bnx2x *bp); void bnx2x_free_skbs(struct bnx2x *bp); void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); void bnx2x_netif_start(struct bnx2x *bp); @@ -607,15 +572,6 @@ int bnx2x_enable_msix(struct bnx2x *bp); int bnx2x_enable_msi(struct bnx2x *bp); /** - * bnx2x_poll - NAPI callback - * - * @napi: napi structure - * @budget: - * - */ -int bnx2x_poll(struct 
napi_struct *napi, int budget); - -/** * bnx2x_low_latency_recv - LL callback * * @napi: napi structure @@ -861,30 +817,6 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, sge->addr_lo = 0; } -static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp) -{ - int i; - - /* Add NAPI objects */ - for_each_rx_queue_cnic(bp, i) { - netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, NAPI_POLL_WEIGHT); - napi_hash_add(&bnx2x_fp(bp, i, napi)); - } -} - -static inline void bnx2x_add_all_napi(struct bnx2x *bp) -{ - int i; - - /* Add NAPI objects */ - for_each_eth_queue(bp, i) { - netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, NAPI_POLL_WEIGHT); - napi_hash_add(&bnx2x_fp(bp, i, napi)); - } -} - static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) { int i; @@ -918,14 +850,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp) } } -static inline int bnx2x_calc_num_queues(struct bnx2x *bp) -{ - return num_queues ? - min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : - min_t(int, netif_get_num_default_rss_queues(), - BNX2X_MAX_QUEUES(bp)); -} - static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) { int i, j; @@ -1172,8 +1096,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) return fp->cl_id; } -u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); - static inline void bnx2x_init_txdata(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, __le16 *tx_cons_sb, @@ -1206,47 +1128,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) return bp->igu_base_sb; } -static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) -{ - struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - unsigned long q_type = 0; - - bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); - bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, - BNX2X_FCOE_ETH_CL_ID_IDX); - bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); - bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; - bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; - bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; - bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), - fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, - fp); - - DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); - - /* qZone id equals to FW (per path) client id */ - bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); - /* init shortcut */ - bnx2x_fcoe(bp, ustorm_rx_prods_offset) = - bnx2x_rx_ustorm_prods_offset(fp); - - /* Configure Queue State object */ - __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); - __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); - - /* No multi-CoS for FCoE L2 client */ - BUG_ON(fp->max_cos != 1); - - bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, - &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), - bnx2x_sp_mapping(bp, q_rdata), q_type); - - DP(NETIF_MSG_IFUP, - "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", - fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, - fp->igu_sb_id); -} - static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 11fc79585491..9b6b3d7304b6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -205,6 +205,11 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, (_bank + (_addr & 0xf)), \ _val) +static int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, u8 
notify); +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params); + static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) { u32 val = REG_RD(bp, reg); @@ -1399,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params, udelay(30); } - -static void bnx2x_emac_get_pfc_stat(struct link_params *params, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]) -{ - /* Read pfc statistic */ - struct bnx2x *bp = params->bp; - u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - u32 val_xon = 0; - u32 val_xoff = 0; - - DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n"); - - /* PFC received frames */ - val_xoff = REG_RD(bp, emac_base + - EMAC_REG_RX_PFC_STATS_XOFF_RCVD); - val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT; - val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD); - val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT; - - pfc_frames_received[0] = val_xon + val_xoff; - - /* PFC received sent */ - val_xoff = REG_RD(bp, emac_base + - EMAC_REG_RX_PFC_STATS_XOFF_SENT); - val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT; - val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT); - val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT; - - pfc_frames_sent[0] = val_xon + val_xoff; -} - -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]) -{ - /* Read pfc statistic */ - struct bnx2x *bp = params->bp; - - DP(NETIF_MSG_LINK, "pfc statistic\n"); - - if (!vars->link_up) - return; - - if (vars->mac_type == MAC_TYPE_EMAC) { - DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); - bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, - pfc_frames_received); - } -} /******************************************************************/ /* MAC/PBF section */ /******************************************************************/ @@ -8648,8 +8602,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params, } } -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, - struct link_params *params) +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params) { struct bnx2x *bp = params->bp; u16 edc_mode; @@ -13413,9 +13367,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, * a fault, for example, due to break in the TX side of fiber. 
* ******************************************************************************/ -int bnx2x_check_half_open_conn(struct link_params *params, - struct link_vars *vars, - u8 notify) +static int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, + u8 notify) { struct bnx2x *bp = params->bp; u32 lss_status = 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 4df45234fdc0..389f5f8cb0a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -533,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); int bnx2x_ets_e3b0_config(const struct link_params *params, const struct link_vars *vars, struct bnx2x_ets_params *ets_params); -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]); + void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, u32 chip_id, u32 shmem_base, u32 shmem2_base, u8 port); -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, - struct link_params *params); - void bnx2x_period_func(struct link_params *params, struct link_vars *vars); -int bnx2x_check_half_open_conn(struct link_params *params, - struct link_vars *vars, u8 notify); #endif /* BNX2X_LINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 18498fed520b..cf17b660b4ee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -94,8 +94,8 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); -int num_queues; -module_param(num_queues, int, 0); +int bnx2x_num_queues; +module_param_named(num_queues, bnx2x_num_queues, int, 0); MODULE_PARM_DESC(num_queues, " Set number of queues (default is as a number of CPUs)"); @@ -103,7 +103,7 @@ static int disable_tpa; module_param(disable_tpa, int, 0); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); -int int_mode; +static int int_mode; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); @@ -279,6 +279,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); #define BNX2X_PREV_WAIT_NEEDED 1 static DEFINE_SEMAPHORE(bnx2x_prev_sem); static LIST_HEAD(bnx2x_prev_list); + +/* Forward declaration */ +static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); + /**************************************************************************** * General service functions ****************************************************************************/ @@ -3001,6 +3007,9 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, if (zero_stats) __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + if (bp->flags & TX_SWITCHING) + __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags); + __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); @@ -5857,11 +5866,11 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) } /* called with netif_addr_lock_bh() */ -int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, - unsigned long rx_mode_flags, - unsigned long rx_accept_flags, - unsigned long tx_accept_flags, - unsigned long ramrod_flags) +static int bnx2x_set_q_rx_mode(struct bnx2x 
*bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags) { struct bnx2x_rx_mode_ramrod_params ramrod_param; int rc; @@ -5969,7 +5978,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, } /* called with netif_addr_lock_bh() */ -int bnx2x_set_storm_rx_mode(struct bnx2x *bp) +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp) { unsigned long rx_mode_flags = 0, ramrod_flags = 0; unsigned long rx_accept_flags = 0, tx_accept_flags = 0; @@ -6165,6 +6174,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp) bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); } +static void bnx2x_init_fcoe_fp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + unsigned long q_type = 0; + + bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); + bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, + BNX2X_FCOE_ETH_CL_ID_IDX); + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); + bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; + bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; + bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; + bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, + fp); + + DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); + + /* qZone id equals to FW (per path) client id */ + bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); + /* init shortcut */ + bnx2x_fcoe(bp, ustorm_rx_prods_offset) = + bnx2x_rx_ustorm_prods_offset(fp); + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* No multi-CoS for FCoE L2 client */ + BUG_ON(fp->max_cos != 1); + + bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, + &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + DP(NETIF_MSG_IFUP, + "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); +} + void bnx2x_nic_init_cnic(struct bnx2x *bp) { if (!NO_FCOE(bp)) @@ -8737,16 +8787,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) int path = BP_PATH(bp); DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - load_count[path][0]--; - load_count[path][1 + port]--; + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + bnx2x_load_count[path][0]--; + bnx2x_load_count[path][1 + port]--; DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - if (load_count[path][0] == 0) + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + if (bnx2x_load_count[path][0] == 0) reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; - else if (load_count[path][1 + port] == 0) + else if (bnx2x_load_count[path][1 + port] == 0) reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; else reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; @@ -9772,7 +9822,7 @@ period_task_exit: * Init service functions */ -u32 bnx2x_get_pretend_reg(struct bnx2x *bp) +static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) { u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; @@ -12005,7 +12055,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) } /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ -void bnx2x_set_rx_mode(struct 
net_device *dev) +static void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -12783,8 +12833,6 @@ static int set_is_vf(int chip_id) } } -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); - static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -13859,7 +13907,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) return 0; } -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) +static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; @@ -13909,7 +13957,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) return cp; } -u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) { struct bnx2x *bp = fp->bp; u32 offset = BAR_USTRORM_INTMEM; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 98cccd487fc2..0fb6ff2ac8e3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -355,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->get(vp, 1); } - -static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - if (!mp->get(mp, 1)) - return false; - - if (!vp->get(vp, 1)) { - mp->put(mp, 1); - return false; - } - - return true; -} - static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) { struct bnx2x_credit_pool_obj *mp = o->macs_pool; @@ -400,22 +383,6 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->put(vp, 1); } -static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - if (!mp->put(mp, 1)) - return false; - - if (!vp->put(vp, 1)) { - mp->get(mp, 1); - return false; - } - - return true; -} - /** * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock * @@ -507,22 +474,6 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, } } -/** - * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock - * - * @bp: device handle - * @o: vlan_mac object - * - * @details Notice if a pending execution exists, it would perform it - - * possibly releasing and reclaiming the execution queue lock. 
- */ -void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o) -{ - spin_lock_bh(&o->exe_queue.lock); - __bnx2x_vlan_mac_h_write_unlock(bp, o); - spin_unlock_bh(&o->exe_queue.lock); -} /** * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock @@ -685,25 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp, return 0; } -static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n", - data->vlan_mac.mac, data->vlan_mac.vlan); - - list_for_each_entry(pos, &o->head, link) - if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && - ether_addr_equal_unaligned(data->vlan_mac.mac, pos->u.vlan_mac.mac) && - (data->vlan_mac.is_inner_mac == - pos->u.vlan_mac.is_inner_mac)) - return -EEXIST; - - return 0; -} - /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * bnx2x_check_mac_del(struct bnx2x *bp, @@ -738,26 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem * return NULL; } -static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_vlan_mac_del(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n", - data->vlan_mac.mac, data->vlan_mac.vlan); - - list_for_each_entry(pos, &o->head, link) - if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && - ether_addr_equal_unaligned(data->vlan_mac.mac, pos->u.vlan_mac.mac) && - (data->vlan_mac.is_inner_mac == - pos->u.vlan_mac.is_inner_mac)) - return pos; - - return NULL; -} - /* check_move() callback */ static bool bnx2x_check_move(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *src_o, @@ -809,8 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) return rx_tx_flag; } -void bnx2x_set_mac_in_nig(struct bnx2x *bp, - bool add, unsigned char *dev_addr, int index) +static void bnx2x_set_mac_in_nig(struct bnx2x *bp, + bool add, unsigned char *dev_addr, int index) { u32 wb_data[2]; u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : @@ -1124,97 +1036,6 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, rule_cnt); } -static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, - int rule_idx, int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct eth_classify_rules_ramrod_data *data = - (struct eth_classify_rules_ramrod_data *)(raw->rdata); - int rule_cnt = rule_idx + 1; - union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; - u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; - u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; - - /* Reset the ramrod data buffer for the first rule */ - if (rule_idx == 0) - memset(data, 0, sizeof(*data)); - - /* Set a rule header */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, - &rule_entry->pair.header); - - /* Set VLAN and MAC themselves */ - rule_entry->pair.vlan = cpu_to_le16(vlan); - bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, - &rule_entry->pair.mac_mid, - &rule_entry->pair.mac_lsb, mac); - rule_entry->pair.inner_mac = - cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac); - /* MOVE: Add a rule that will add this MAC to the target Queue */ - if (cmd == BNX2X_VLAN_MAC_MOVE) { - rule_entry++; - rule_cnt++; - - /* Setup ramrod data */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, - elem->cmd_data.vlan_mac.target_obj, - true, CLASSIFY_RULE_OPCODE_PAIR, - &rule_entry->pair.header); - - /* Set a VLAN itself */ - rule_entry->pair.vlan = cpu_to_le16(vlan); - bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, - &rule_entry->pair.mac_mid, - &rule_entry->pair.mac_lsb, mac); - rule_entry->pair.inner_mac = - cpu_to_le16(elem->cmd_data.vlan_mac.u. - vlan_mac.is_inner_mac); - } - - /* Set the ramrod data header */ - /* TODO: take this to the higher level in order to prevent multiple - writing */ - bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, - rule_cnt); -} - -/** - * bnx2x_set_one_vlan_mac_e1h - - * - * @bp: device handle - * @o: bnx2x_vlan_mac_obj - * @elem: bnx2x_exeq_elem - * @rule_idx: rule_idx - * @cam_offset: cam_offset - */ -static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, - int rule_idx, int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct mac_configuration_cmd *config = - (struct mac_configuration_cmd *)(raw->rdata); - /* 57710 and 57711 do not support MOVE command, - * so it's either ADD or DEL - */ - bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
- true : false; - - /* Reset the ramrod data buffer */ - memset(config, 0, sizeof(*config)); - - bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, - cam_offset, add, - elem->cmd_data.vlan_mac.u.vlan_mac.mac, - elem->cmd_data.vlan_mac.u.vlan_mac.vlan, - ETH_VLAN_FILTER_CLASSIFY, config); -} - /** * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element * @@ -1314,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( return NULL; } -static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( - struct bnx2x_exe_queue_obj *o, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_exeq_elem *pos; - struct bnx2x_vlan_mac_ramrod_data *data = - &elem->cmd_data.vlan_mac.u.vlan_mac; - - /* Check pending for execution commands */ - list_for_each_entry(pos, &o->exe_queue, link) - if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, - sizeof(*data)) && - (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) - return pos; - - return NULL; -} - /** * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed * @@ -2239,69 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, } } -void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac_obj, - u8 cl_id, u32 cid, u8 func_id, void *rdata, - dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *macs_pool, - struct bnx2x_credit_pool_obj *vlans_pool) -{ - union bnx2x_qable_obj *qable_obj = - (union bnx2x_qable_obj *)vlan_mac_obj; - - bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, - rdata_mapping, state, pstate, type, - macs_pool, vlans_pool); - - /* CAM pool handling */ - vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; - vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; - /* CAM offset is relevant for 57710 and 57711 chips only which have a - * single CAM for both MACs and VLAN-MAC pairs. So the offset - * will be taken from MACs' pool object only. 
- */ - vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; - vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; - - if (CHIP_IS_E1(bp)) { - BNX2X_ERR("Do not support chips others than E2\n"); - BUG(); - } else if (CHIP_IS_E1H(bp)) { - vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; - vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; - vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; - vlan_mac_obj->check_move = bnx2x_check_move_always_err; - vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &vlan_mac_obj->exe_queue, 1, qable_obj, - bnx2x_validate_vlan_mac, - bnx2x_remove_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_vlan_mac); - } else { - vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; - vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; - vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; - vlan_mac_obj->check_move = bnx2x_check_move; - vlan_mac_obj->ramrod_cmd = - RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &vlan_mac_obj->exe_queue, - CLASSIFY_RULES_COUNT, - qable_obj, bnx2x_validate_vlan_mac, - bnx2x_remove_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_vlan_mac); - } -} - /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ static inline void __storm_memset_mac_filters(struct bnx2x *bp, struct tstorm_eth_mac_filter_config *mac_filters, @@ -4988,6 +4728,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags); data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); + + /* tx switching */ + data->tx_switching_flg = + test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags); + data->tx_switching_change_flg = + test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + &params->update_flags); } static inline int bnx2x_q_send_update(struct bnx2x *bp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 6a53c15c85a3..00d7f214a40a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -448,9 +448,6 @@ enum { BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 }; -void bnx2x_set_mac_in_nig(struct bnx2x *bp, - bool add, unsigned char *dev_addr, int index); - /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ /* RX_MODE ramrod special flags: set in rx_mode_flags field in @@ -770,7 +767,9 @@ enum { BNX2X_Q_UPDATE_DEF_VLAN_EN, BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, - BNX2X_Q_UPDATE_SILENT_VLAN_REM + BNX2X_Q_UPDATE_SILENT_VLAN_REM, + BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + BNX2X_Q_UPDATE_TX_SWITCHING }; /* Allowed Queue states */ @@ -1307,22 +1306,12 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, unsigned long *pstate, bnx2x_obj_type type, struct bnx2x_credit_pool_obj *vlans_pool); -void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac_obj, - u8 cl_id, u32 cid, u8 func_id, void *rdata, - dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *macs_pool, - struct bnx2x_credit_pool_obj *vlans_pool); - int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); int 
bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); -void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o); int bnx2x_config_vlan_mac(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 31ab924600c1..d0e246b8a871 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -799,10 +799,10 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, return -ENOMEM; } -int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, u16 vid, bool add) +static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid, u16 vid, bool add) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); int rc; @@ -3130,6 +3130,60 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, vf->abs_vfid, vf->op_current); } +static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) +{ + struct bnx2x_queue_state_params q_params; + u32 prev_flags; + int i, rc; + + /* Verify changes are needed and record current Tx switching state */ + prev_flags = bp->flags; + if (enable) + bp->flags |= TX_SWITCHING; + else + bp->flags &= ~TX_SWITCHING; + if (prev_flags == bp->flags) + return 0; + + /* Verify state enables the sending of queue ramrods */ + if ((bp->state != BNX2X_STATE_OPEN) || + (bnx2x_get_q_logical_state(bp, + &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != + BNX2X_Q_LOGICAL_STATE_ACTIVE)) + return 0; + + /* send q. update ramrod to configure Tx switching */ + memset(&q_params, 0, sizeof(q_params)); + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + &q_params.params.update.update_flags); + if (enable) + __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, + &q_params.params.update.update_flags); + else + __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, + &q_params.params.update.update_flags); + + /* send the ramrod on all the queues of the PF */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Set the appropriate Queue object */ + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure Tx switching\n"); + return rc; + } + } + + DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? 
"Enabled" : "Disabled"); + return 0; +} + int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); @@ -3157,12 +3211,14 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) bp->requested_nr_virtfn = num_vfs_param; if (num_vfs_param == 0) { + bnx2x_set_pf_tx_switching(bp, false); pci_disable_sriov(dev); return 0; } else { return bnx2x_enable_sriov(bp); } } + #define IGU_ENTRY_SIZE 4 int bnx2x_enable_sriov(struct bnx2x *bp) @@ -3240,6 +3296,11 @@ int bnx2x_enable_sriov(struct bnx2x *bp) */ DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); bnx2x_disable_sriov(bp); + + rc = bnx2x_set_pf_tx_switching(bp, true); + if (rc) + return rc; + rc = pci_enable_sriov(bp->pdev, req_vfs); if (rc) { BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d72ab7e24de0..d9fcca1b5a9d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -665,11 +665,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, struct bnx2x_vfop_filters *macs, int qid, bool drv_only); -int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, u16 vid, bool add); - int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd, @@ -727,13 +722,6 @@ void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); /* Handles an FLR (or VF_DISABLE) notification form the MCP */ void bnx2x_vf_handle_flr_event(struct bnx2x *bp); -void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, - u16 length); -void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, - u16 type, u16 length); -void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv); -void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); - bool bnx2x_tlv_supported(u16 tlvtype); u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, @@ -750,7 +738,6 @@ int bnx2x_vfpf_init(struct bnx2x *bp); void bnx2x_vfpf_close_vf(struct bnx2x *bp); int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading); -int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); int bnx2x_vfpf_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *params); @@ -814,7 +801,6 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } -static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) {return 0; } static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 1b1ad31b4553..3fa6c2a2a5a9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -21,9 +21,11 @@ #include "bnx2x_cmn.h" #include <linux/crc32.h> +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); + /* place a given tlv on the tlv buffer at a given offset */ -void bnx2x_add_tlv(struct bnx2x *bp, void 
*tlvs_list, u16 offset, u16 type, - u16 length) +static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, + u16 offset, u16 type, u16 length) { struct channel_tlv *tl = (struct channel_tlv *)(tlvs_list + offset); @@ -33,8 +35,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, } /* Clear the mailbox and init the header of the first tlv */ -void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, - u16 type, u16 length) +static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, + u16 type, u16 length) { mutex_lock(&bp->vf2pf_mutex); @@ -52,7 +54,8 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, } /* releases the mailbox */ -void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv) +static void bnx2x_vfpf_finalize(struct bnx2x *bp, + struct vfpf_first_tlv *first_tlv) { DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n", first_tlv->tl.type); @@ -85,7 +88,7 @@ static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, } /* list the types and lengths of the tlvs on the buffer */ -void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) +static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) { int i = 1; struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; @@ -633,7 +636,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, return rc; } -int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) { struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index cb05be905def..81e8402a74b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -423,7 +423,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev, * in the Compressed Filter Tuple. 
*/ if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) - ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; + ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; if (tp->port_shift >= 0) ntuple |= (u64)l2t->lport << tp->port_shift; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4ccaf9af6fc9..8d09615da585 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -34,7 +34,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "4.9.224.0u" +#define DRV_VER "10.0.600.0u" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -42,7 +42,7 @@ #define OC_NAME_BE OC_NAME "(be3)" #define OC_NAME_LANCER OC_NAME "(Lancer)" #define OC_NAME_SH OC_NAME "(Skyhawk)" -#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" +#define DRV_DESC "Emulex OneConnect NIC Driver" #define BE_VENDOR_ID 0x19a2 #define EMULEX_VENDOR_ID 0x10df @@ -283,7 +283,6 @@ struct be_rx_compl_info { u32 rss_hash; u16 vlan_tag; u16 pkt_size; - u16 rxq_idx; u16 port; u8 vlanf; u8 num_rcvd; @@ -493,7 +492,7 @@ struct be_adapter { u16 pvid; struct phy_info phy; u8 wol_cap; - bool wol; + bool wol_en; u32 uc_macs; /* Count of secondary UC MAC programmed */ u16 asic_rev; u16 qnq_vid; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 94c35c8d799d..48076a6370c3 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1101,23 +1101,22 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter, OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); - if (lancer_chip(adapter)) { - req->hdr.version = 1; - req->cq_id = cpu_to_le16(cq->id); - - AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, - be_encoded_q_len(mccq->len)); - AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); - AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, - ctxt, cq->id); - AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, - ctxt, 1); - - } else { + if (BEx_chip(adapter)) { AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); + } else { + req->hdr.version = 1; + req->cq_id = cpu_to_le16(cq->id); + + AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id, + ctxt, cq->id); + AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid, + ctxt, 1); } /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ @@ -1187,7 +1186,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter, int status; status = be_cmd_mccq_ext_create(adapter, mccq, cq); - if (status && !lancer_chip(adapter)) { + if (status && BEx_chip(adapter)) { dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " "or newer to avoid conflicting priorities between NIC " "and FCoE traffic"); @@ -2692,6 +2691,13 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, struct be_cmd_resp_get_fn_privileges *resp = embedded_payload(wrb); *privilege = le32_to_cpu(resp->privilege_mask); + + /* In UMC mode FW does not return right privileges. 
+ * Override with correct privilege equivalent to PF. + */ + if (BEx_chip(adapter) && be_is_mc(adapter) && + be_physfn(adapter)) + *privilege = MAX_PRIVILEGES; } err: @@ -2736,7 +2742,8 @@ err: * If pmac_id is returned, pmac_id_valid is returned as true */ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, - bool *pmac_id_valid, u32 *pmac_id, u8 domain) + bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, + u8 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_mac_list *req; @@ -2774,7 +2781,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, req->mac_type = MAC_ADDRESS_TYPE_NETWORK; if (*pmac_id_valid) { req->mac_id = cpu_to_le32(*pmac_id); - req->iface_id = cpu_to_le16(adapter->if_handle); + req->iface_id = cpu_to_le16(if_handle); req->perm_override = 0; } else { req->perm_override = 1; @@ -2827,17 +2834,21 @@ out: return status; } -int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac) +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac, + u32 if_handle, bool active, u32 domain) { - bool active = true; + if (!active) + be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, + if_handle, domain); if (BEx_chip(adapter)) return be_cmd_mac_addr_query(adapter, mac, false, - adapter->if_handle, curr_pmac_id); + if_handle, curr_pmac_id); else /* Fetch the MAC address using pmac_id */ return be_cmd_get_mac_from_list(adapter, mac, &active, - &curr_pmac_id, 0); + &curr_pmac_id, + if_handle, domain); } int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) @@ -2856,7 +2867,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) adapter->if_handle, 0); } else { status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, - NULL, 0); + NULL, adapter->if_handle, 0); } return status; @@ -2917,7 +2928,8 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) int status; status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, - &pmac_id, dom); + &pmac_id, if_id, dom); + if (!status && active_mac) be_cmd_pmac_del(adapter, if_id, pmac_id, dom); @@ -2997,7 +3009,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, ctxt, intf_id); AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); - if (!BEx_chip(adapter)) { + if (!BEx_chip(adapter) && mode) { AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, adapter->hba_port_num); AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); @@ -3028,14 +3040,16 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_acpi_wol_magic_config_v1 *req; - int status; - int payload_len = sizeof(*req); + int status = 0; struct be_dma_mem cmd; if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, CMD_SUBSYSTEM_ETH)) return -EPERM; + if (be_is_wol_excluded(adapter)) + return status; + if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -3060,7 +3074,7 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, - payload_len, wrb, &cmd); + sizeof(*req), wrb, &cmd); req->hdr.version = 1; req->query_options = BE_GET_WOL_CAP; @@ -3070,13 +3084,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; - /* the command could succeed misleadingly on old f/w - * which is not aware of the V1 version. fake an error. 
*/ - if (resp->hdr.response_length < payload_len) { - status = -1; - goto err; - } adapter->wol_cap = resp->wol_settings; + if (adapter->wol_cap & BE_WOL_CAP) + adapter->wol_en = true; } err: mutex_unlock(&adapter->mbox_lock); @@ -3085,6 +3095,76 @@ err: return status; } + +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) +{ + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; + int status; + int i, j; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, + &extfat_cmd.dma); + if (!extfat_cmd.va) + return -ENOMEM; + + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (status) + goto err; + + cfgs = (struct be_fat_conf_params *) + (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); + for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { + u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); + for (j = 0; j < num_modes; j++) { + if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) + cfgs->module[i].trace_lvl[j].dbg_lvl = + cpu_to_le32(level); + } + } + + status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); +err: + pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); + return status; +} + +int be_cmd_get_fw_log_level(struct be_adapter *adapter) +{ + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; + int status, j; + int level = 0; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, + &extfat_cmd.dma); + + if (!extfat_cmd.va) { + dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", + __func__); + goto err; + } + + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (!status) { + cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + + sizeof(struct be_cmd_resp_hdr)); + for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { + if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) + level = cfgs->module[0].trace_lvl[j].dbg_lvl; + } + } + pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); +err: + return level; +} + int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, struct be_dma_mem *cmd) { @@ -3609,6 +3689,40 @@ int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) return status; } +/* Uses MBOX */ +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) +{ + struct be_cmd_req_get_active_profile *req; + struct be_mcc_wrb *wrb; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), + wrb, NULL); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_active_profile *resp = + embedded_payload(wrb); + *profile_id = le16_to_cpu(resp->active_profile_id); + } + +err: + mutex_unlock(&adapter->mbox_lock); + return status; +} + int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, int wrb_payload_size, u16 *cmd_status, u16 *ext_status) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 0075686276aa..fc4e076dc202 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ 
b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -216,6 +216,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_FUNC_CONFIG 160 #define OPCODE_COMMON_GET_PROFILE_CONFIG 164 #define OPCODE_COMMON_SET_PROFILE_CONFIG 165 +#define OPCODE_COMMON_GET_ACTIVE_PROFILE 167 #define OPCODE_COMMON_SET_HSW_CONFIG 153 #define OPCODE_COMMON_GET_FN_PRIVILEGES 170 #define OPCODE_COMMON_READ_OBJECT 171 @@ -452,7 +453,7 @@ struct amap_mcc_context_be { u8 rsvd2[32]; } __packed; -struct amap_mcc_context_lancer { +struct amap_mcc_context_v1 { u8 async_cq_id[16]; u8 ring_size[4]; u8 rsvd0[12]; @@ -476,7 +477,7 @@ struct be_cmd_req_mcc_ext_create { u16 num_pages; u16 cq_id; u32 async_event_bitmap[1]; - u8 context[sizeof(struct amap_mcc_context_be) / 8]; + u8 context[sizeof(struct amap_mcc_context_v1) / 8]; struct phys_addr pages[8]; } __packed; @@ -1097,6 +1098,14 @@ struct be_cmd_resp_query_fw_cfg { u32 function_caps; }; +/* Is BE in a multi-channel mode */ +static inline bool be_is_mc(struct be_adapter *adapter) +{ + return adapter->function_mode & FLEX10_MODE || + adapter->function_mode & VNIC_MODE || + adapter->function_mode & UMC_ENABLED; +} + /******************** RSS Config ****************************************/ /* RSS type Input parameters used to compute RX hash * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4 @@ -1917,6 +1926,17 @@ struct be_cmd_resp_set_profile_config { struct be_cmd_resp_hdr hdr; }; +struct be_cmd_req_get_active_profile { + struct be_cmd_req_hdr hdr; + u32 rsvd; +} __packed; + +struct be_cmd_resp_get_active_profile { + struct be_cmd_resp_hdr hdr; + u16 active_profile_id; + u16 next_profile_id; +} __packed; + struct be_cmd_enable_disable_vf { struct be_cmd_req_hdr hdr; u8 enable; @@ -2037,8 +2057,10 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, u32 vf_num); int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, - bool *pmac_id_active, u32 *pmac_id, u8 domain); -int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac); + bool *pmac_id_active, u32 *pmac_id, + u32 if_handle, u8 domain); +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac, + u32 if_handle, bool active, u32 domain); int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); @@ -2048,6 +2070,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, u16 intf_id, u8 *mode); int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level); +int be_cmd_get_fw_log_level(struct be_adapter *adapter); int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, struct be_dma_mem *cmd); int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, @@ -2063,6 +2087,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 domain); int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain); +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 
08330034d9ef..05be0070f55f 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -713,12 +713,13 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); - if (be_is_wol_supported(adapter)) { + if (adapter->wol_cap & BE_WOL_CAP) { wol->supported |= WAKE_MAGIC; - if (adapter->wol) + if (adapter->wol_en) wol->wolopts |= WAKE_MAGIC; - } else + } else { wol->wolopts = 0; + } memset(&wol->sopass, 0, sizeof(wol->sopass)); } @@ -730,15 +731,15 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; - if (!be_is_wol_supported(adapter)) { + if (!(adapter->wol_cap & BE_WOL_CAP)) { dev_warn(&adapter->pdev->dev, "WOL not supported\n"); return -EOPNOTSUPP; } if (wol->wolopts & WAKE_MAGIC) - adapter->wol = true; + adapter->wol_en = true; else - adapter->wol = false; + adapter->wol_en = false; return 0; } @@ -904,73 +905,21 @@ static u32 be_get_msg_level(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); - if (lancer_chip(adapter)) { - dev_err(&adapter->pdev->dev, "Operation not supported\n"); - return -EOPNOTSUPP; - } - return adapter->msg_enable; } -static void be_set_fw_log_level(struct be_adapter *adapter, u32 level) -{ - struct be_dma_mem extfat_cmd; - struct be_fat_conf_params *cfgs; - int status; - int i, j; - - memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); - extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); - if (!extfat_cmd.va) { - dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", - __func__); - goto err; - } - status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); - if (!status) { - cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + - sizeof(struct be_cmd_resp_hdr)); - for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { - u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); - for (j = 0; j < num_modes; j++) { - if (cfgs->module[i].trace_lvl[j].mode == - MODE_UART) - cfgs->module[i].trace_lvl[j].dbg_lvl = - cpu_to_le32(level); - } - } - status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, - cfgs); - if (status) - dev_err(&adapter->pdev->dev, - "Message level set failed\n"); - } else { - dev_err(&adapter->pdev->dev, "Message level get failed\n"); - } - - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); -err: - return; -} - static void be_set_msg_level(struct net_device *netdev, u32 level) { struct be_adapter *adapter = netdev_priv(netdev); - if (lancer_chip(adapter)) { - dev_err(&adapter->pdev->dev, "Operation not supported\n"); - return; - } - if (adapter->msg_enable == level) return; if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW)) - be_set_fw_log_level(adapter, level & NETIF_MSG_HW ? - FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL); + if (BEx_chip(adapter)) + be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ? 
+ FW_LOG_LEVEL_DEFAULT : + FW_LOG_LEVEL_FATAL); adapter->msg_enable = level; return; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3acf137b5784..6d22d6f439e3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -121,12 +121,6 @@ static const char * const ue_status_hi_desc[] = { "Unknown" }; -/* Is BE in a multi-channel mode */ -static inline bool be_is_mc(struct be_adapter *adapter) { - return (adapter->function_mode & FLEX10_MODE || - adapter->function_mode & VNIC_MODE || - adapter->function_mode & UMC_ENABLED); -} static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { @@ -258,6 +252,12 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + /* Proceed further only if, User provided MAC is different + * from active MAC + */ + if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) + return 0; + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT * privilege or if PF did not provision the new MAC address. * On BE3, this cmd will always fail if the VF doesn't have the @@ -280,7 +280,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) /* Decide if the new MAC is successfully activated only after * querying the FW */ - status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac); + status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac, + adapter->if_handle, true, 0); if (status) goto err; @@ -1442,12 +1443,12 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp) (rxcp->ip_csum || rxcp->ipv6); } -static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo, - u16 frag_idx) +static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *rx_page_info; struct be_queue_info *rxq = &rxo->q; + u16 frag_idx = rxq->tail; rx_page_info = &rxo->page_info_tbl[frag_idx]; BUG_ON(!rx_page_info->page); @@ -1459,6 +1460,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo, rx_page_info->last_page_user = false; } + queue_tail_inc(rxq); atomic_dec(&rxq->used); return rx_page_info; } @@ -1467,15 +1469,13 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo, static void be_rx_compl_discard(struct be_rx_obj *rxo, struct be_rx_compl_info *rxcp) { - struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; u16 i, num_rcvd = rxcp->num_rcvd; for (i = 0; i < num_rcvd; i++) { - page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); put_page(page_info->page); memset(page_info, 0, sizeof(*page_info)); - index_inc(&rxcp->rxq_idx, rxq->len); } } @@ -1486,13 +1486,12 @@ static void be_rx_compl_discard(struct be_rx_obj *rxo, static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, struct be_rx_compl_info *rxcp) { - struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; u16 i, j; u16 hdr_len, curr_frag_len, remaining; u8 *start; - page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); start = page_address(page_info->page) + page_info->page_offset; prefetch(start); @@ -1526,10 +1525,9 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, } /* More frags present for this completion */ - index_inc(&rxcp->rxq_idx, rxq->len); remaining = rxcp->pkt_size - curr_frag_len; for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { - 
page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); curr_frag_len = min(remaining, rx_frag_size); /* Coalesce all frags from the same physical page in one slot */ @@ -1550,7 +1548,6 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, skb->data_len += curr_frag_len; skb->truesize += rx_frag_size; remaining -= curr_frag_len; - index_inc(&rxcp->rxq_idx, rxq->len); page_info->page = NULL; } BUG_ON(j > MAX_SKB_FRAGS); @@ -1598,7 +1595,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info; struct sk_buff *skb = NULL; - struct be_queue_info *rxq = &rxo->q; u16 remaining, curr_frag_len; u16 i, j; @@ -1610,7 +1606,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, remaining = rxcp->pkt_size; for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { - page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); curr_frag_len = min(remaining, rx_frag_size); @@ -1628,7 +1624,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); skb->truesize += rx_frag_size; remaining -= curr_frag_len; - index_inc(&rxcp->rxq_idx, rxq->len); memset(page_info, 0, sizeof(*page_info)); } BUG_ON(j > MAX_SKB_FRAGS); @@ -1663,8 +1658,6 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl, AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl); rxcp->ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); - rxcp->rxq_idx = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl); rxcp->num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); rxcp->pkt_type = @@ -1695,8 +1688,6 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl); rxcp->ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); - rxcp->rxq_idx = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl); rxcp->num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); rxcp->pkt_type = @@ -1914,7 +1905,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) struct be_rx_compl_info *rxcp; struct be_adapter *adapter = rxo->adapter; int flush_wait = 0; - u16 tail; /* Consume pending rx completions. 
* Wait for the flush completion (identified by zero num_rcvd) @@ -1947,9 +1937,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) be_cq_notify(adapter, rx_cq->id, false, 0); /* Then free posted rx buffers that were not used */ - tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; - for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { - page_info = get_rx_page_info(rxo, tail); + while (atomic_read(&rxq->used) > 0) { + page_info = get_rx_page_info(rxo); put_page(page_info->page); memset(page_info, 0, sizeof(*page_info)); } @@ -2884,14 +2873,11 @@ static int be_vfs_mac_query(struct be_adapter *adapter) int status, vf; u8 mac[ETH_ALEN]; struct be_vf_cfg *vf_cfg; - bool active = false; for_all_vfs(adapter, vf_cfg, vf) { - be_cmd_get_mac_from_list(adapter, mac, &active, - &vf_cfg->pmac_id, 0); - - status = be_cmd_mac_addr_query(adapter, mac, false, - vf_cfg->if_handle, 0); + status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id, + mac, vf_cfg->if_handle, + false, vf+1); if (status) return status; memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); @@ -3233,6 +3219,7 @@ static int be_get_resources(struct be_adapter *adapter) /* Routine to query per function resource limits */ static int be_get_config(struct be_adapter *adapter) { + u16 profile_id; int status; status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, @@ -3242,6 +3229,13 @@ static int be_get_config(struct be_adapter *adapter) if (status) return status; + if (be_physfn(adapter)) { + status = be_cmd_get_active_profile(adapter, &profile_id); + if (!status) + dev_info(&adapter->pdev->dev, + "Using profile 0x%x\n", profile_id); + } + status = be_get_resources(adapter); if (status) return status; @@ -3396,11 +3390,6 @@ static int be_setup(struct be_adapter *adapter) goto err; be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0); - /* In UMC mode FW does not return right privileges. - * Override with correct privilege equivalent to PF. - */ - if (be_is_mc(adapter)) - adapter->cmd_privileges = MAX_PRIVILEGES; status = be_mac_setup(adapter); if (status) @@ -3419,6 +3408,8 @@ static int be_setup(struct be_adapter *adapter) be_set_rx_mode(adapter->netdev); + be_cmd_get_acpi_wol_cap(adapter); + be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) @@ -4288,74 +4279,22 @@ static void be_remove(struct pci_dev *pdev) free_netdev(adapter->netdev); } -bool be_is_wol_supported(struct be_adapter *adapter) -{ - return ((adapter->wol_cap & BE_WOL_CAP) && - !be_is_wol_excluded(adapter)) ? 
true : false; -} - -u32 be_get_fw_log_level(struct be_adapter *adapter) -{ - struct be_dma_mem extfat_cmd; - struct be_fat_conf_params *cfgs; - int status; - u32 level = 0; - int j; - - if (lancer_chip(adapter)) - return 0; - - memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); - extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); - - if (!extfat_cmd.va) { - dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", - __func__); - goto err; - } - - status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); - if (!status) { - cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + - sizeof(struct be_cmd_resp_hdr)); - for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { - if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) - level = cfgs->module[0].trace_lvl[j].dbg_lvl; - } - } - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); -err: - return level; -} - static int be_get_initial_config(struct be_adapter *adapter) { - int status; - u32 level; + int status, level; status = be_cmd_get_cntl_attributes(adapter); if (status) return status; - status = be_cmd_get_acpi_wol_cap(adapter); - if (status) { - /* in case of a failure to get wol capabillities - * check the exclusion list to determine WOL capability */ - if (!be_is_wol_excluded(adapter)) - adapter->wol_cap |= BE_WOL_CAP; - } - - if (be_is_wol_supported(adapter)) - adapter->wol = true; - /* Must be a power of 2 or else MODULO will BUG_ON */ adapter->be_get_temp_freq = 64; - level = be_get_fw_log_level(adapter); - adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; + if (BEx_chip(adapter)) { + level = be_cmd_get_fw_log_level(adapter); + adapter->msg_enable = + level <= FW_LOG_LEVEL_DEFAULT ? 
NETIF_MSG_HW : 0; + } adapter->cfg_num_qs = netif_get_num_default_rss_queues(); return 0; @@ -4618,7 +4557,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; - if (adapter->wol) + if (adapter->wol_en) be_setup_wol(adapter, true); be_intr_set(adapter, false); @@ -4674,7 +4613,7 @@ static int be_resume(struct pci_dev *pdev) msecs_to_jiffies(1000)); netif_device_attach(netdev); - if (adapter->wol) + if (adapter->wol_en) be_setup_wol(adapter, false); return 0; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index d3d7ede27ef1..5900dbaec242 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -889,11 +889,9 @@ static int gfar_set_hash_opts(struct gfar_private *priv, static int gfar_check_filer_hardware(struct gfar_private *priv) { - struct gfar __iomem *regs = NULL; + struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 i; - regs = priv->gfargrp[0].regs; - /* Check if we are in FIFO mode */ i = gfar_read(&regs->ecntrl); i &= ECNTRL_FIFM; @@ -927,7 +925,7 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) /* Sets the properties for arbitrary filer rule * to the first 4 Layer 4 Bytes */ - regs->rbifx = 0xC0C1C2C3; + gfar_write(&regs->rbifx, 0xC0C1C2C3); return 0; } @@ -1055,10 +1053,18 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value, struct ethtool_tcpip4_spec *mask, struct filer_table *tab) { - gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); - gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); - gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab); - gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab); + gfar_set_attribute(be32_to_cpu(value->ip4src), + be32_to_cpu(mask->ip4src), + RQFCR_PID_SIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4dst), + be32_to_cpu(mask->ip4dst), + RQFCR_PID_DIA, tab); + gfar_set_attribute(be16_to_cpu(value->pdst), + be16_to_cpu(mask->pdst), + RQFCR_PID_DPT, tab); + gfar_set_attribute(be16_to_cpu(value->psrc), + be16_to_cpu(mask->psrc), + RQFCR_PID_SPT, tab); gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); } @@ -1067,12 +1073,17 @@ static void gfar_set_user_ip(struct ethtool_usrip4_spec *value, struct ethtool_usrip4_spec *mask, struct filer_table *tab) { - gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); - gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4src), + be32_to_cpu(mask->ip4src), + RQFCR_PID_SIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4dst), + be32_to_cpu(mask->ip4dst), + RQFCR_PID_DIA, tab); gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); - gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB, - tab); + gfar_set_attribute(be32_to_cpu(value->l4_4_bytes), + be32_to_cpu(mask->l4_4_bytes), + RQFCR_PID_ARB, tab); } @@ -1139,7 +1150,41 @@ static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask, } } - gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab); + gfar_set_attribute(be16_to_cpu(value->h_proto), + be16_to_cpu(mask->h_proto), + RQFCR_PID_ETY, tab); +} + +static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK; +} + 
+static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK; +} + +static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK; +} + +static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK; +} + +static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule) +{ + return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; +} + +static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule) +{ + return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; } /* Convert a rule to binary filter format of gianfar */ @@ -1153,22 +1198,21 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, u32 old_index = tab->index; /* Check if vlan is wanted */ - if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) { + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) { if (!rule->m_ext.vlan_tci) - rule->m_ext.vlan_tci = 0xFFFF; + rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF); vlan = RQFPR_VLN; vlan_mask = RQFPR_VLN; /* Separate the fields */ - id = rule->h_ext.vlan_tci & VLAN_VID_MASK; - id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK; - cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK; - cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK; - prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> - VLAN_PRIO_SHIFT; - prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> - VLAN_PRIO_SHIFT; + id = vlan_tci_vid(rule); + id_mask = vlan_tci_vidm(rule); + cfi = vlan_tci_cfi(rule); + cfi_mask = vlan_tci_cfim(rule); + prio = vlan_tci_prio(rule); + prio_mask = vlan_tci_priom(rule); if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { vlan |= RQFPR_CFI; @@ -1666,10 +1710,10 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow) for (i = 0; i < sizeof(flow->m_u); i++) flow->m_u.hdata[i] ^= 0xFF; - flow->m_ext.vlan_etype ^= 0xFFFF; - flow->m_ext.vlan_tci ^= 0xFFFF; - flow->m_ext.data[0] ^= ~0; - flow->m_ext.data[1] ^= ~0; + flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF); + flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); } static int gfar_add_cls(struct gfar_private *priv, diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index e006a09ba899..6ba2fd44dbd7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -134,7 +134,7 @@ struct gianfar_ptp_registers { #define REG_SIZE sizeof(struct gianfar_ptp_registers) struct etsects { - struct gianfar_ptp_registers *regs; + struct gianfar_ptp_registers __iomem *regs; spinlock_t lock; /* protects regs */ struct ptp_clock *clock; struct ptp_clock_info caps; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 9fb2eb8cf152..333bb54d0bdf 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -243,6 +243,7 @@ config IXGBEVF config I40E tristate "Intel(R) Ethernet Controller XL710 Family support" + select PTP_1588_CLOCK depends on PCI ---help--- This driver supports Intel(R) Ethernet Controller XL710 Family of diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 6ec1a795f184..e4d2e27078a7 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile 
+++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -40,4 +40,5 @@ i40e-objs := i40e_main.o \ i40e_debugfs.o \ i40e_diag.o \ i40e_txrx.o \ + i40e_ptp.o \ i40e_virtchnl_pf.o diff --git a/drivers/net/ethernet/intel/i40e/Module.symvers b/drivers/net/ethernet/intel/i40e/Module.symvers new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/Module.symvers diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4d4cdbf6fad7..91b00523d3d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -50,6 +50,9 @@ #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> +#include <linux/clocksource.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> #include "i40e_type.h" #include "i40e_prototype.h" #include "i40e_virtchnl.h" @@ -88,8 +91,7 @@ /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 -#define I40E_CURRENT_NVM_VERSION_LO 0x1 - +#define I40E_CURRENT_NVM_VERSION_LO 0x30 /* magic for getting defines into strings */ #define STRINGIFY(foo) #foo @@ -242,6 +244,7 @@ struct i40e_pf { #define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) #define I40E_FLAG_FDIR_ENABLED (u64)(1 << 21) #define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 22) +#define I40E_FLAG_PTP (u64)(1 << 25) #define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) #ifdef CONFIG_I40E_VXLAN #define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) @@ -302,6 +305,20 @@ struct i40e_pf { u32 fcoe_hmc_filt_num; u32 fcoe_hmc_cntx_num; struct i40e_filter_control_settings filter_settings; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct sk_buff *ptp_tx_skb; + struct work_struct ptp_tx_work; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; /* Used to protect the device time registers. 
*/ + u64 ptp_base_adj; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + bool ptp_tx; + bool ptp_rx; }; struct i40e_mac_filter { @@ -566,4 +583,12 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); +void i40e_ptp_rx_hang(struct i40e_vsi *vsi); +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf); +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index); +void i40e_ptp_set_increment(struct i40e_pf *pf); +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +void i40e_ptp_init(struct i40e_pf *pf); +void i40e_ptp_stop(struct i40e_pf *pf); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index c75aa2d07491..a50e6b3479ae 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -128,7 +128,7 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw) /** * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) { @@ -195,7 +195,7 @@ unwind_alloc_arq_bufs: /** * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) { @@ -235,7 +235,7 @@ unwind_alloc_asq_bufs: /** * i40e_free_arq_bufs - Free receive queue buffer info elements - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static void i40e_free_arq_bufs(struct i40e_hw *hw) { @@ -254,7 +254,7 @@ static void i40e_free_arq_bufs(struct i40e_hw *hw) /** * i40e_free_asq_bufs - Free send queue buffer info elements - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static void i40e_free_asq_bufs(struct i40e_hw *hw) { @@ -277,7 +277,7 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw) /** * i40e_config_asq_regs - configure ASQ registers - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * Configure base address and length registers for the transmit queue **/ @@ -304,7 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw) /** * i40e_config_arq_regs - ARQ register configuration - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * Configure base address and length registers for the receive (event queue) **/ @@ -334,7 +334,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw) /** * i40e_init_asq - main initialization routine for ASQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * This is the main initialization routine for the Admin Send Queue * Prior to calling this function, drivers *MUST* set the following fields @@ -391,7 +391,7 @@ init_adminq_exit: /** * i40e_init_arq - initialize ARQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * The main initialization routine for the Admin Receive (Event) Queue. 
* Prior to calling this function, drivers *MUST* set the following fields @@ -448,7 +448,7 @@ init_adminq_exit: /** * i40e_shutdown_asq - shutdown the ASQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Send Queue **/ @@ -479,7 +479,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) /** * i40e_shutdown_arq - shutdown ARQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Receive Queue **/ @@ -510,7 +510,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) /** * i40e_init_adminq - main initialization routine for Admin Queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: @@ -607,7 +607,7 @@ init_adminq_exit: /** * i40e_shutdown_adminq - shutdown routine for the Admin Queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) { @@ -626,7 +626,7 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) /** * i40e_clean_asq - cleans Admin send queue - * @asq: pointer to the adminq send ring + * @hw: pointer to the hardware structure * * returns the number of free desc **/ @@ -922,7 +922,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, "AQRX: Event received with error 0x%X.\n", hw->aq.arq_last_status); } else { - memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc)); + e->desc = *desc; datalen = le16_to_cpu(desc->datalen); e->msg_size = min(datalen, e->msg_size); if (e->msg_buf != NULL && (e->msg_size != 0)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index c009eb47058a..be61a474fd84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1195,8 +1195,8 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { } v4; struct { u8 data[16]; - } v6; - } ipaddr; + } v6; + } ipaddr; __le16 flags; #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 807312bb62a2..aedc71b6f3f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -74,7 +74,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) /** * i40e_debug_aq * @hw: debug mask related to admin queue - * @cap: pointer to adminq command descriptor + * @mask: debug mask + * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer * * Dumps debug log about adminq command with descriptor contents. 
@@ -161,7 +162,6 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, return status; } - /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure @@ -599,8 +599,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, goto aq_get_link_info_exit; /* save off old link status information */ - memcpy(&hw->phy.link_info_old, hw_link_info, - sizeof(struct i40e_link_status)); + hw->phy.link_info_old = *hw_link_info; /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; @@ -630,7 +629,7 @@ aq_get_link_info_exit: /** * i40e_aq_add_vsi * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Add a VSI context to the hardware. @@ -682,7 +681,8 @@ aq_add_vsi_exit: * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -776,7 +776,7 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, /** * i40e_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, @@ -818,7 +818,7 @@ aq_get_vsi_params_exit: /** * i40e_aq_update_vsi_params * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Update a VSI context. 
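Several of the kernel-doc corrections in this file only rename the @parameter tags so they agree with the actual argument names (@cap becomes @desc, @vsi becomes @vsi_ctx, and so on). A generic illustration of the convention with entirely made-up names, not code from the driver: each @name: line must match the prototype, otherwise scripts/kernel-doc reports missing or excess parameter descriptions.

    /* Hypothetical example only; all identifiers are invented. */
    struct example_hw;
    struct example_vsi_context;
    struct example_cmd_details;

    /**
     * example_aq_get_vsi_params - get VSI configuration info
     * @hw: pointer to the hw struct
     * @vsi_ctx: pointer to a vsi context struct
     * @cmd_details: pointer to command details structure or NULL
     **/
    int example_aq_get_vsi_params(struct example_hw *hw,
                                  struct example_vsi_context *vsi_ctx,
                                  struct example_cmd_details *cmd_details);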
@@ -921,7 +921,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, /** * i40e_aq_send_driver_version * @hw: pointer to the hw struct - * @event: driver event: driver ok, start or stop * @dv: driver's major, minor version * @cmd_details: pointer to command details structure or NULL * @@ -1039,10 +1038,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, * @hw: pointer to the hw struct * @veb_seid: the SEID of the VEB to query * @switch_id: the uplink switch id - * @floating_veb: set to true if the VEB is floating + * @floating: set to true if the VEB is floating * @statistic_index: index of the stats counter block for this VEB * @vebs_used: number of VEB's used by function - * @vebs_unallocated: total VEB's not reserved by any function + * @vebs_free: total VEB's not reserved by any function * @cmd_details: pointer to command details structure or NULL * * This retrieves the parameters for a particular VEB, specified by @@ -1179,6 +1178,8 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: vf id to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details @@ -1723,6 +1724,7 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, * @udp_port: the UDP port to add * @header_len: length of the tunneling header length in DWords * @protocol_index: protocol index type + * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 0220b18b2b18..36a5cc89bf0b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1083,7 +1083,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, - "add relay: vsi VSI %d not found\n", vsi_seid); + "add relay: VSI %d not found\n", vsi_seid); goto command_write_done; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b886ee5bd99d..7b87d5115816 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -108,6 +108,8 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_oversize", stats.rx_oversize), I40E_PF_STAT("rx_jabber", stats.rx_jabber), I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), }; #define I40E_QUEUE_STATS_LEN(n) \ @@ -748,7 +750,36 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, static int i40e_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { - return ethtool_op_get_ts_info(dev, info); + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (pf->ptp_clock) + info->phc_index = ptp_clock_index(pf->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << 
HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + return 0; } static int i40e_link_test(struct net_device *netdev, u64 *data) @@ -1634,7 +1665,7 @@ static int i40e_set_channels(struct net_device *dev, * class queue mapping */ new_count = i40e_reconfig_rss_queues(pf, count); - if (new_count > 1) + if (new_count > 0) return 0; else return -EINVAL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 76dfef3e0104..bf2d4cc5b569 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -89,11 +89,9 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, sd_entry->u.pd_table.pd_entry = (struct i40e_hmc_pd_entry *) sd_entry->u.pd_table.pd_entry_virt_mem.va; - memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, - sizeof(struct i40e_dma_mem)); + sd_entry->u.pd_table.pd_page_addr = mem; } else { - memcpy(&sd_entry->u.bp.addr, &mem, - sizeof(struct i40e_dma_mem)); + sd_entry->u.bp.addr = mem; sd_entry->u.bp.sd_pd_index = sd_index; } /* initialize the sd entry */ @@ -164,7 +162,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, if (ret_code) goto exit; - memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem)); + pd_entry->bp.addr = mem; pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 72bf30aba150..0cd4701234f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -116,7 +116,6 @@ struct i40e_hmc_info { * @hw: pointer to our hw struct * @pa: pointer to physical address * @sd_index: segment descriptor index - * @hmc_fn_id: hmc function id * @type: if sd entry is direct or paged **/ #define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ @@ -138,7 +137,6 @@ struct i40e_hmc_info { * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware * @hw: pointer to our hw struct * @sd_index: segment descriptor index - * @hmc_fn_id: hmc function id * @type: if sd entry is direct or paged **/ #define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ @@ -159,7 +157,6 @@ struct i40e_hmc_info { * @hw: pointer to our hw struct * @sd_idx: segment descriptor index * @pd_idx: page descriptor index - * @hmc_fn_id: hmc function id **/ #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ wr32((hw), I40E_PFHMC_PDINV, \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 101ed4107312..d5d98fe2691d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -485,8 +485,7 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, /* Make one big object, a single SD */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); - if ((ret_code) && - (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) + if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) goto try_type_paged; else if (ret_code) goto 
configure_lan_hmc_out; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1b792ee592d0..7bfa78967276 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -38,7 +38,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 25 +#define DRV_VERSION_BUILD 30 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -356,7 +356,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; - if (test_bit(__I40E_DOWN, &vsi->state)) return stats; @@ -1698,6 +1697,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) } /** + * i40e_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + switch (cmd) { + case SIOCGHWTSTAMP: + return i40e_ptp_get_ts_config(pf, ifr); + case SIOCSHWTSTAMP: + return i40e_ptp_set_ts_config(pf, ifr); + default: + return -EOPNOTSUPP; + } +} + +/** * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI * @vsi: the vsi being adjusted **/ @@ -1824,7 +1844,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) return -ENOMEM; } } + } + /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ + if (vid > 0 && !vsi->info.pvid) { list_for_each_entry(f, &vsi->mac_filter_list, list) { if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev)) { @@ -2150,7 +2173,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) tx_ctx.base = (ring->dma / 128); tx_ctx.qlen = ring->count; tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED | - I40E_FLAG_FDIR_ATR_ENABLED)); + I40E_FLAG_FDIR_ATR_ENABLED)); + tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); /* As part of VSI creation/update, FW allocates certain * Tx arbitration queue sets for each TC enabled for @@ -2184,7 +2208,10 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /* Now associate this queue with this PCI function */ - qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + if (vsi->type == I40E_VSI_VMDQ2) + qtx_ctl = I40E_QTX_CTL_VM_QUEUE; + else + qtx_ctl = I40E_QTX_CTL_PF_QUEUE; qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); @@ -2488,6 +2515,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw) I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | + I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | @@ -2831,6 +2859,18 @@ static irqreturn_t i40e_intr(int irq, void *data) dev_info(&pf->pdev->dev, "HMC error interrupt\n"); } + if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { + u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); + + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + i40e_ptp_tx_hwtstamp(pf); + prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK; + } + + wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat); + } + /* If a critical error is pending we have no choice but to reset the * 
device. * Report and mask out any remaining unexpected interrupts. @@ -3008,11 +3048,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) continue; /* turn on/off the queue */ - if (enable) + if (enable) { + wr32(hw, I40E_QTX_HEAD(pf_q), 0); tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; - else + } else { tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; + } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); @@ -3113,7 +3155,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) **/ int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) { - int ret; + int ret = 0; /* do rx first for enable and last for disable */ if (request) { @@ -3122,10 +3164,9 @@ int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) return ret; ret = i40e_vsi_control_tx(vsi, request); } else { - ret = i40e_vsi_control_tx(vsi, request); - if (ret) - return ret; - ret = i40e_vsi_control_rx(vsi, request); + /* Ignore return value, we need to shutdown whatever we can */ + i40e_vsi_control_tx(vsi, request); + i40e_vsi_control_rx(vsi, request); } return ret; @@ -3564,7 +3605,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) /* Get the VSI level BW configuration per TC */ aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, - NULL); + NULL); if (aq_ret) { dev_info(&pf->pdev->dev, "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", @@ -4303,6 +4344,9 @@ static void i40e_link_event(struct i40e_pf *pf) if (pf->vf) i40e_vc_notify_link_state(pf); + + if (pf->flags & I40E_FLAG_PTP) + i40e_ptp_set_increment(pf); } /** @@ -4384,6 +4428,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i]) i40e_update_veb_stats(pf->veb[i]); + + i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); } /** @@ -6032,6 +6078,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, + .ndo_do_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, @@ -7298,6 +7345,8 @@ no_autoneg: ~I40E_PRTDCB_MFLCN_RFCE_MASK); fc_complete: + i40e_ptp_init(pf); + return ret; } @@ -7611,6 +7660,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pf_reset; } + i40e_clear_pxe_mode(hw); err = i40e_get_capabilities(pf); if (err) goto err_adminq_setup; @@ -7801,6 +7851,8 @@ static void i40e_remove(struct pci_dev *pdev) i40e_dbg_pf_exit(pf); + i40e_ptp_stop(pf); + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 37d66c87abc9..e12bf07fd23d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -244,6 +244,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, /** * i40e_calc_nvm_checksum - Calculates and returns the checksum * @hw: pointer to hardware structure + * @checksum: pointer to the checksum * * This function calculate SW Checksum that covers the whole 64kB shadow RAM * except the VPD and PCIe ALT Auto-load modules. 
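The new i40e_ioctl() above routes SIOCGHWTSTAMP/SIOCSHWTSTAMP to the PTP configuration helpers added later in this patch. One way to exercise that path from user space is the standard hwtstamp_config ioctl; this is a hedged sketch (the interface name eth2 is an example and the call needs CAP_NET_ADMIN), not part of the patch:

    /* hwtstamp_cfg.c - build with: cc -Wall hwtstamp_cfg.c; run as root. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(int argc, char **argv)
    {
            const char *ifname = argc > 1 ? argv[1] : "eth2"; /* example name */
            struct hwtstamp_config cfg = {
                    .tx_type = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
            };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;

            if (ioctl(fd, SIOCSHWTSTAMP, &ifr)) {
                    perror("SIOCSHWTSTAMP");
                    return 1;
            }
            /* The driver may broaden the filter (the set_ts_config hunk later
             * in this patch upgrades V2 subtypes to HWTSTAMP_FILTER_PTP_V2_EVENT),
             * so print what was actually applied.
             */
            printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
            close(fd);
            return 0;
    }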
The structure and size of VPD diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index c7c3d8231b36..f8c53a6ec580 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -93,9 +93,9 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 vsi_id, bool set_filter, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c new file mode 100644 index 000000000000..e33ec6c842b7 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -0,0 +1,662 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e.h" +#include <linux/export.h> +#include <linux/ptp_classify.h> + +/* The XL710 timesync is very much like Intel's 82599 design when it comes to + * the fundamental clock design. However, the clock operations are much simpler + * in the XL710 because the device supports a full 64 bits of nanoseconds. + * Because the field is so wide, we can forgo the cycle counter and just + * operate with the nanosecond field directly without fear of overflow. + * + * Much like the 82599, the update period is dependent upon the link speed: + * At 40Gb link or no link, the period is 1.6ns. + * At 10Gb link, the period is multiplied by 2. (3.2ns) + * At 1Gb link, the period is multiplied by 20. (32ns) + * 1588 functionality is not supported at 100Mbps. 
+ */ +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL 0x0333333333ULL +#define I40E_PTP_1GB_INCVAL 0x2000000000ULL + +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PTP_TX_TIMEOUT (HZ * 15) + +/** + * i40e_ptp_read - Read the PHC time from the device + * @pf: Board private structure + * @ts: timespec structure to hold the current time value + * + * This function reads the PRTTSYN_TIME registers and stores them in a + * timespec. However, since the registers are 64 bits of nanoseconds, we must + * convert the result to a timespec before we can return. + **/ +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) +{ + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + /* The timer latches on the lowest register read. */ + lo = rd32(hw, I40E_PRTTSYN_TIME_L); + hi = rd32(hw, I40E_PRTTSYN_TIME_H); + + ns = (((u64)hi) << 32) | lo; + + *ts = ns_to_timespec(ns); +} + +/** + * i40e_ptp_write - Write the PHC time to the device + * @pf: Board private structure + * @ts: timespec structure that holds the new time value + * + * This function writes the PRTTSYN_TIME registers with the user value. Since + * we receive a timespec from the stack, we must convert that timespec into + * nanoseconds before programming the registers. + **/ +static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts) +{ + struct i40e_hw *hw = &pf->hw; + u64 ns = timespec_to_ns(ts); + + /* The timer will not update until the high register is written, so + * write the low register first. + */ + wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32); +} + +/** + * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time + * @hwtstamps: Timestamp structure to update + * @timestamp: Timestamp from the hardware + * + * We need to convert the NIC clock value into a hwtstamp which can be used by + * the upper level timestamping functions. Since the timestamp is simply a 64- + * bit nanosecond value, we can call ns_to_ktime directly to handle this. + **/ +static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps, + u64 timestamp) +{ + memset(hwtstamps, 0, sizeof(*hwtstamps)); + + hwtstamps->hwtstamp = ns_to_ktime(timestamp); +} + +/** + * i40e_ptp_adjfreq - Adjust the PHC frequency + * @ptp: The PTP clock structure + * @ppb: Parts per billion adjustment from the base + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct i40e_hw *hw = &pf->hw; + u64 adj, freq, diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); /* Force any pending update before accessing. */ + adj = ACCESS_ONCE(pf->ptp_base_adj); + + freq = adj; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + if (neg_adj) + adj -= diff; + else + adj += diff; + + wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); + + return 0; +} + +/** + * i40e_ptp_adjtime - Adjust the PHC time + * @ptp: The PTP clock structure + * @delta: Offset in nanoseconds to adjust the PHC time by + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. 
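The INCVAL constants above line up with the 1.6 ns / 3.2 ns / 32 ns periods quoted in the design comment if the increment register is read as a 32.32 fixed-point nanoseconds-per-tick value; that interpretation is an assumption of this sketch, not something the patch states. The snippet decodes the three constants and mirrors the parts-per-billion scaling done in i40e_ptp_adjfreq():

    /* incval_demo.c - build with: cc -Wall incval_demo.c */
    #include <stdio.h>
    #include <stdint.h>

    /* Constants copied from the hunk above. */
    #define I40E_PTP_40GB_INCVAL 0x0199999999ULL
    #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
    #define I40E_PTP_1GB_INCVAL  0x2000000000ULL

    /* Assumption: nanoseconds-per-tick in 32.32 fixed point, so dividing by
     * 2^32 should reproduce the 1.6/3.2/32 ns periods from the comment block.
     */
    static double incval_to_ns(uint64_t incval)
    {
            return (double)incval / 4294967296.0;
    }

    /* Same scaling as i40e_ptp_adjfreq(): adjusted = base +/- base * ppb / 1e9.
     * ppb is kept small here so the 64-bit product cannot overflow in this sketch.
     */
    static uint64_t adjust_ppb(uint64_t base, long ppb)
    {
            uint64_t mag = (uint64_t)(ppb < 0 ? -ppb : ppb);
            uint64_t diff = base * mag / 1000000000ULL;

            return ppb < 0 ? base - diff : base + diff;
    }

    int main(void)
    {
            printf("40G: %.4f ns/tick\n", incval_to_ns(I40E_PTP_40GB_INCVAL));
            printf("10G: %.4f ns/tick\n", incval_to_ns(I40E_PTP_10GB_INCVAL));
            printf(" 1G: %.4f ns/tick\n", incval_to_ns(I40E_PTP_1GB_INCVAL));
            printf("40G +100 ppb: 0x%llx\n",
                   (unsigned long long)adjust_ppb(I40E_PTP_40GB_INCVAL, 100));
            return 0;
    }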
+ **/ +static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct timespec now, then = ns_to_timespec(delta); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + + i40e_ptp_read(pf, &now); + now = timespec_add(now, then); + i40e_ptp_write(pf, (const struct timespec *)&now); + + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_gettime - Get the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure to hold the current time value + * + * Read the device clock and return the correct value on ns, after converting it + * into a timespec struct. + **/ +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_read(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_settime - Set the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + **/ +static int i40e_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_write(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a + * Tx timestamp event has occurred, in order to pass the Tx timestamp value up + * the stack in the skb. + */ +static void i40e_ptp_tx_work(struct work_struct *work) +{ + struct i40e_pf *pf = container_of(work, struct i40e_pf, + ptp_tx_work); + struct i40e_hw *hw = &pf->hw; + u32 prttsyn_stat_0; + + if (!pf->ptp_tx_skb) + return; + + if (time_is_before_jiffies(pf->ptp_tx_start + + I40E_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + pf->tx_hwtstamp_timeouts++; + dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang"); + return; + } + + prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0); + if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK) + i40e_ptp_tx_hwtstamp(pf); + else + schedule_work(&pf->ptp_tx_work); +} + +/** + * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem + * @ptp: The PTP clock structure + * @rq: The requested feature to change + * @on: Enable/disable flag + * + * The XL710 does not support any of the ancillary features of the PHC + * subsystem, so this function may just return. + **/ +static int i40e_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung + * @vsi: The VSI with the rings relevant to 1588 + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. 
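The gettime/settime/adjtime callbacks above are reached from user space through the posix dynamic-clock interface of the character device that ptp_clock_register() creates later in this file. A small sketch of reading that clock, assuming the node is /dev/ptp0 (the real index should come from the driver's timestamping info); the FD_TO_CLOCKID conversion is the same one the kernel's testptp tool uses:

    /* phc_gettime.c - build with: cc -Wall phc_gettime.c */
    #include <stdio.h>
    #include <fcntl.h>
    #include <time.h>
    #include <unistd.h>

    /* Dynamic posix clock id derived from an open PHC file descriptor. */
    #define CLOCKFD 3
    #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

    int main(int argc, char **argv)
    {
            const char *dev = argc > 1 ? argv[1] : "/dev/ptp0"; /* assumed node */
            struct timespec ts;
            int fd = open(dev, O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
                    perror("clock_gettime");
                    return 1;
            }
            printf("%s: %lld.%09ld\n", dev, (long long)ts.tv_sec, ts.tv_nsec);
            close(fd);
            return 0;
    }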
+ **/ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_ring *rx_ring; + unsigned long rx_event; + u32 prttsyn_stat; + int n; + + if (pf->flags & I40E_FLAG_PTP) + return; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + /* Unless all four receive timestamp registers are latched, we are not + * concerned about a possible PTP Rx hang, so just update the timeout + * counter and exit. + */ + if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK << + I40E_PRTTSYN_STAT_1_RXT0_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT1_MASK << + I40E_PRTTSYN_STAT_1_RXT1_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT2_MASK << + I40E_PRTTSYN_STAT_1_RXT2_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT3_MASK << + I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) { + pf->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event. */ + rx_event = pf->last_rx_ptp_check; + for (n = 0; n < vsi->num_queue_pairs; n++) { + rx_ring = vsi->rx_rings[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + pf->last_rx_ptp_check = jiffies; + pf->rx_hwtstamp_cleared++; + dev_warn(&vsi->back->pdev->dev, + "%s: clearing Rx timestamp hang", + __func__); + } +} + +/** + * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp + * @pf: Board private structure + * + * Read the value of the Tx timestamp from the registers, convert it into a + * value consumable by the stack, and store that result into the shhwtstamps + * struct before returning it up the stack. + **/ +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); + hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns); + skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; +} + +/** + * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp + * @pf: Board private structure + * @skb: Particular skb to send timestamp with + * @index: Index into the receive timestamp registers for the timestamp + * + * The XL710 receives a notification in the receive descriptor with an offset + * into the set of RXTIME registers where the timestamp is for that skb. This + * function goes and fetches the receive timestamp from that offset, if a valid + * one exists. The RXTIME registers are in ns, so we must convert the result + * first. + **/ +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) +{ + u32 prttsyn_stat, hi, lo; + struct i40e_hw *hw; + u64 ns; + + /* Since we cannot turn off the Rx timestamp logic if the device is + * doing Tx timestamping, check if Rx timestamping is configured. 
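The Rx timestamp that i40e_ptp_rx_hwtstamp() attaches to the skb reaches applications as an SCM_TIMESTAMPING control message, with the raw hardware value in ts[2]. A hedged user-space sketch of receiving it on the PTP event port, assuming the NIC was already put into a PTP Rx filter mode via SIOCSHWTSTAMP as in the earlier example; port 319 and the buffer sizes are illustrative:

    /* rx_hwts.c - build with: cc -Wall rx_hwts.c; needs privileges for port 319. */
    #include <stdio.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <linux/errqueue.h>

    #ifndef SCM_TIMESTAMPING
    #define SCM_TIMESTAMPING SO_TIMESTAMPING
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
            struct sockaddr_in addr = {
                    .sin_family = AF_INET,
                    .sin_port = htons(319),        /* PTP event port */
            };
            char data[1500], ctrl[512];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
            };
            struct cmsghdr *cm;

            if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
                setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)))
                    return 1;
            if (recvmsg(fd, &msg, 0) < 0)
                    return 1;

            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                    if (cm->cmsg_level == SOL_SOCKET &&
                        cm->cmsg_type == SCM_TIMESTAMPING) {
                            /* ts[2] carries the raw hardware (PHC) timestamp. */
                            struct scm_timestamping *tss =
                                    (struct scm_timestamping *)CMSG_DATA(cm);

                            printf("rx hw ts: %lld.%09ld\n",
                                   (long long)tss->ts[2].tv_sec,
                                   tss->ts[2].tv_nsec);
                    }
            }
            close(fd);
            return 0;
    }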
+ */ + if (!pf->ptp_rx) + return; + + hw = &pf->hw; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + if (!(prttsyn_stat & (1 << index))) + return; + + lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); + hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index)); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns); +} + +/** + * i40e_ptp_set_increment - Utility function to update clock increment rate + * @pf: Board private structure + * + * During a link change, the DMA frequency that drives the 1588 logic will + * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds, + * we must update the increment value per clock tick. + **/ +void i40e_ptp_set_increment(struct i40e_pf *pf) +{ + struct i40e_link_status *hw_link_info; + struct i40e_hw *hw = &pf->hw; + u64 incval; + + hw_link_info = &hw->phy.link_info; + + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + + switch (hw_link_info->link_speed) { + case I40E_LINK_SPEED_10GB: + incval = I40E_PTP_10GB_INCVAL; + break; + case I40E_LINK_SPEED_1GB: + incval = I40E_PTP_1GB_INCVAL; + break; + case I40E_LINK_SPEED_100MB: + dev_warn(&pf->pdev->dev, + "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n", + __func__); + incval = 0; + break; + case I40E_LINK_SPEED_40GB: + default: + incval = I40E_PTP_40GB_INCVAL; + break; + } + + /* Write the new increment value into the increment register. The + * hardware will not update the clock until both registers have been + * written. + */ + wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); + + /* Update the base adjustment value. */ + ACCESS_ONCE(pf->ptp_base_adj) = incval; + smp_mb(); /* Force the above update. */ +} + +/** + * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping + * @pf: Board private structure + * @ifr: ioctl data + * + * Obtain the current hardware timestamping settings as requested. To do this, + * keep a shadow copy of the timestamp settings rather than attempting to + * deconstruct it from the registers. + **/ +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &pf->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping + * @pf: Board private structure + * @ifr: ioctl data + * + * Respond to the user filter requests and make the appropriate hardware + * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping + * logic, so keep track in software of whether to indicate these timestamps + * or not. + * + * It is permissible to "upgrade" the user request to a broader filter, as long + * as the user receives the timestamps they care about and the user is notified + * the filter has been broadened. + **/ +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct i40e_hw *hw = &pf->hw; + struct hwtstamp_config *config = &pf->tstamp_config; + u32 pf_id, tsyntype, regval; + + if (copy_from_user(config, ifr->ifr_data, sizeof(*config))) + return -EFAULT; + + /* Reserved for future extensions. */ + if (config->flags) + return -EINVAL; + + /* Confirm that 1588 is supported on this PF.
*/ + pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> + I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + if (hw->pf_id != pf_id) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + pf->ptp_tx = false; + break; + case HWTSTAMP_TX_ON: + pf->ptp_tx = true; + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pf->ptp_rx = false; + tsyntype = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V1 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V2 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_ALL: + default: + return -ERANGE; + } + + /* Clear out all 1588-related registers to clear and unlatch them. */ + rd32(hw, I40E_PRTTSYN_STAT_0); + rd32(hw, I40E_PRTTSYN_TXTIME_H); + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + + /* Enable/disable the Tx timestamp interrupt based on user input. */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + if (pf->ptp_tx) + regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + else + regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + regval = rd32(hw, I40E_PFINT_ICR0_ENA); + if (pf->ptp_tx) + regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + else + regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, regval); + + /* There is no simple on/off switch for Rx. To "disable" Rx support, + * ignore any received timestamps, rather than turn off the clock. + */ + if (pf->ptp_rx) { + regval = rd32(hw, I40E_PRTTSYN_CTL1); + /* clear everything but the enable bit */ + regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + /* now enable bits for desired Rx timestamps */ + regval |= tsyntype; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + } + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_init - Initialize the 1588 support and register the PHC + * @pf: Board private structure + * + * This function registers the device clock as a PHC. If it is successful, it + * starts the clock in the hardware. + **/ +void i40e_ptp_init(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + + strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name)); + pf->ptp_caps.owner = THIS_MODULE; + pf->ptp_caps.max_adj = 999999999; + pf->ptp_caps.n_ext_ts = 0; + pf->ptp_caps.pps = 0; + pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; + pf->ptp_caps.adjtime = i40e_ptp_adjtime; + pf->ptp_caps.gettime = i40e_ptp_gettime; + pf->ptp_caps.settime = i40e_ptp_settime; + pf->ptp_caps.enable = i40e_ptp_enable; + + /* Attempt to register the clock before enabling the hardware. 
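Together with the i40e_get_ts_info() hunk earlier in this patch, the PHC registered just below is discoverable from user space via ETHTOOL_GET_TS_INFO (the query behind ethtool -T). A small sketch of that lookup; the interface name eth2 is an example, not something from the patch:

    /* tsinfo.c - build with: cc -Wall tsinfo.c */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(int argc, char **argv)
    {
            const char *ifname = argc > 1 ? argv[1] : "eth2"; /* example name */
            struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&info;

            if (ioctl(fd, SIOCETHTOOL, &ifr)) {
                    perror("SIOCETHTOOL");
                    return 1;
            }
            /* phc_index is what the /dev/ptpN sketch earlier should be fed. */
            printf("phc_index=%d tx_types=0x%x rx_filters=0x%x\n",
                   info.phc_index, info.tx_types, info.rx_filters);
            close(fd);
            return 0;
    }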
*/ + pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); + if (IS_ERR(pf->ptp_clock)) { + pf->ptp_clock = NULL; + dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", + __func__); + } else { + struct timespec ts; + u32 regval; + + spin_lock_init(&pf->tmreg_lock); + INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work); + + dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, + netdev->name); + pf->flags |= I40E_FLAG_PTP; + + /* Ensure the clocks are running. */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + regval = rd32(hw, I40E_PRTTSYN_CTL1); + regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + + /* Set the increment value per clock tick. */ + i40e_ptp_set_increment(pf); + + /* reset the tstamp_config */ + memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config)); + + /* Set the clock value. */ + ts = ktime_to_timespec(ktime_get_real()); + i40e_ptp_settime(&pf->ptp_caps, &ts); + } +} + +/** + * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC + * @pf: Board private structure + * + * This function handles the cleanup work required from the initialization by + * clearing out the important information and unregistering the PHC. + **/ +void i40e_ptp_stop(struct i40e_pf *pf) +{ + pf->flags &= ~I40E_FLAG_PTP; + pf->ptp_tx = false; + pf->ptp_rx = false; + + cancel_work_sync(&pf->ptp_tx_work); + if (pf->ptp_tx_skb) { + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + } + + if (pf->ptp_clock) { + ptp_clock_unregister(pf->ptp_clock); + pf->ptp_clock = NULL; + dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, + pf->vsi[pf->lan_vsi]->netdev->name); + } +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 43d88dd66ed4..f57a8f8ca385 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -892,6 +892,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; + /* likely incorrect csum if alternate IP extention headers found */ + if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + return; + /* IP or L4 or outmost IP checksum error */ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | @@ -973,8 +977,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) - >> I40E_RXD_QW1_STATUS_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { union i40e_rx_desc *next_rxd; @@ -1084,6 +1088,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) } skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1422,6 +1433,46 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, } /** + * i40e_tsyn - set up the tsyn context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + 
* @tx_flags: the collected send information + * + * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen + **/ +static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u64 *cd_type_cmd_tso_mss) +{ + struct i40e_pf *pf; + + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return 0; + + /* Tx timestamps cannot be sampled when doing TSO */ + if (tx_flags & I40E_TX_FLAGS_TSO) + return 0; + + /* only timestamp the outbound packet if the user has requested it and + * we are not already transmitting a packet to be timestamped + */ + pf = i40e_netdev_to_pf(tx_ring->netdev); + if (pf->ptp_tx && !pf->ptp_tx_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + pf->ptp_tx_skb = skb_get(skb); + } else { + return 0; + } + + *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << + I40E_TXD_CTX_QW1_CMD_SHIFT; + + pf->ptp_tx_start = jiffies; + schedule_work(&pf->ptp_tx_work); + + return 1; +} + +/** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer * @tx_flags: Tx flags currently set @@ -1797,6 +1848,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; + int tsyn; int tso; if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -1827,6 +1879,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, skb_tx_timestamp(skb); + tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); + + if (tsyn) + tx_flags |= I40E_TX_FLAGS_TSYN; + /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 6f8506c181d9..d5349698e513 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -29,9 +29,8 @@ /* Interrupt Throttling and Rate Limiting (storm control) Goodies */ -#define I40E_MAX_ITR 0x07FF -#define I40E_MIN_ITR 0x0001 -#define I40E_ITR_USEC_RESOLUTION 2 +#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ +#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ #define I40E_MAX_IRATE 0x03F #define I40E_MIN_IRATE 0x001 #define I40E_IRATE_USEC_RESOLUTION 4 @@ -137,6 +136,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FSO (u32)(1 << 7) +#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -249,6 +249,8 @@ struct i40e_ring { u8 atr_sample_rate; u8 atr_count; + unsigned long last_rx_timestamp; + bool ring_active; /* is ring online or not */ /* stats structs */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 12473ad5c8e6..ccfc52debb19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -60,15 +60,6 @@ /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 -/* Check whether address is multicast. This is little-endian specific check.*/ -#define I40E_IS_MULTICAST(address) \ - (bool)(((u8 *)(address))[0] & ((u8)0x01)) - -/* Check whether an address is broadcast. 
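i40e_tsyn() above arms a hardware Tx timestamp only when the skb carries SKBTX_HW_TSTAMP, which user space requests with SO_TIMESTAMPING and then collects from the socket error queue. A hedged sketch of that round trip, assuming HWTSTAMP_TX_ON was configured beforehand (see the SIOCSHWTSTAMP example earlier); the destination 192.0.2.1 is a documentation address standing in for a real peer:

    /* tx_hwts.c - build with: cc -Wall tx_hwts.c */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <poll.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <linux/errqueue.h>

    #ifndef SCM_TIMESTAMPING
    #define SCM_TIMESTAMPING SO_TIMESTAMPING
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
            struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(319) };
            struct pollfd pfd = { .fd = fd, .events = POLLERR };
            char payload[64] = { 0 }, ctrl[512];
            struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
            };
            struct cmsghdr *cm;

            inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* example peer */
            if (fd < 0 ||
                setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) ||
                sendto(fd, payload, sizeof(payload), 0,
                       (struct sockaddr *)&dst, sizeof(dst)) < 0)
                    return 1;

            /* The completed hardware timestamp comes back on the error queue. */
            poll(&pfd, 1, 1000);
            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return 1;
            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                    if (cm->cmsg_level == SOL_SOCKET &&
                        cm->cmsg_type == SCM_TIMESTAMPING) {
                            struct scm_timestamping *tss =
                                    (struct scm_timestamping *)CMSG_DATA(cm);

                            printf("tx hw ts: %lld.%09ld\n",
                                   (long long)tss->ts[2].tv_sec,
                                   tss->ts[2].tv_nsec);
                    }
            }
            close(fd);
            return 0;
    }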
*/ -#define I40E_IS_BROADCAST(address) \ - ((((u8 *)(address))[0] == ((u8)0xff)) && \ - (((u8 *)(address))[1] == ((u8)0xff))) - /* Switch from mc to the 2usec global time (this is the GTIME resolution) */ #define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2) @@ -84,6 +75,7 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); /* bitfields for Tx queue mapping in QTX_CTL */ #define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 /* debug masks - set these bits in hw->debug_mask to control output */ @@ -508,7 +500,9 @@ enum i40e_rx_desc_status_bits { I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, - I40E_RX_DESC_STATUS_UDP_0_SHIFT = 16 + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18 }; #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 51a4f6125437..44bb3597d736 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -386,8 +386,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) vf->lan_vsi_index = vsi->idx; vf->lan_vsi_id = vsi->id; dev_info(&pf->pdev->dev, - "LAN VSI index %d, VSI id %d\n", - vsi->idx, vsi->id); + "VF %d assigned LAN VSI index %d, VSI id %d\n", + vf->vf_id, vsi->idx, vsi->id); /* If the port VLAN has been configured and then the * VF driver was removed then the VSI port VLAN * configuration was destroyed. Check if there is @@ -1525,9 +1525,13 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } for (i = 0; i < al->num_elements; i++) { - ret = i40e_check_vf_permission(vf, al->list[i].addr); - if (ret) + if (is_broadcast_ether_addr(al->list[i].addr) || + is_zero_ether_addr(al->list[i].addr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", + al->list[i].addr); + ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; + } } vsi = pf->vsi[vsi_id]; @@ -1788,7 +1792,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, local_vf_id, v_opcode, msglen); return ret; } - wr32(hw, I40E_VFGEN_RSTAT1(local_vf_id), I40E_VFR_VFACTIVE); + switch (v_opcode) { case I40E_VIRTCHNL_OP_VERSION: ret = i40e_vc_get_version_msg(vf); @@ -2072,6 +2076,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, goto error_pvid; } + if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) + dev_err(&pf->pdev->dev, + "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", + vf_id); + + /* Check for condition where there was already a port VLAN ID + * filter set and now it is being deleted by setting it to zero. + * Before deleting all the old VLAN filters we must add new ones + * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our + * MAC addresses deleted. 
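The VF MAC validation hunk above rejects broadcast and all-zero addresses using the kernel's is_broadcast_ether_addr()/is_zero_ether_addr() helpers. A trivial user-space restatement of those two predicates, for illustration only (the etherdevice.h versions are what the driver actually calls):

    /* mac_check.c - build with: cc -Wall mac_check.c */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static bool mac_is_broadcast(const uint8_t a[6])
    {
            static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

            return memcmp(a, bcast, 6) == 0;
    }

    static bool mac_is_zero(const uint8_t a[6])
    {
            static const uint8_t zero[6];

            return memcmp(a, zero, 6) == 0;
    }

    int main(void)
    {
            const uint8_t vf_mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 }; /* example */

            printf("valid VF MAC: %s\n",
                   mac_is_broadcast(vf_mac) || mac_is_zero(vf_mac) ? "no" : "yes");
            return 0;
    }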
+ */ + if (!(vlan_id || qos) && vsi->info.pvid) + ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); + if (vsi->info.pvid) { /* kill old VLAN */ ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & @@ -2100,6 +2118,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, vsi->back->hw.aq.asq_last_status); goto error_pvid; } + /* Kill non-vlan MAC filters - ignore error return since + * there might not be any non-vlan MAC filters. + */ + i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); } if (ret) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 8f2b3b2a2a90..ffdb01d853db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -21,6 +21,8 @@ * ******************************************************************************/ +#include <linux/prefetch.h> + #include "i40evf.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 49531cd18987..3a4373f689eb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -585,6 +585,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; } +static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value) +{ + writel(value, ring->tail); +} + #define IXGBE_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) #define IXGBE_TX_DESC(R, i) \ @@ -742,6 +747,7 @@ struct ixgbe_adapter { #ifdef IXGBE_FCOE struct ixgbe_fcoe fcoe; #endif /* IXGBE_FCOE */ + u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; u16 bd_number; @@ -798,6 +804,7 @@ enum ixgbe_state_t { __IXGBE_TESTING, __IXGBE_RESETTING, __IXGBE_DOWN, + __IXGBE_REMOVING, __IXGBE_SERVICE_SCHED, __IXGBE_IN_SFP_INIT, __IXGBE_PTP_RUNNING, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index d259dc76604e..f2e3919750ec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -124,24 +124,65 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); -#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define IXGBE_FAILED_READ_REG 0xffffffffU -#ifndef writeq -#define writeq(val, addr) writel((u32) (val), addr); \ - writel((u32) (val >> 32), (addr + 4)); -#endif +static inline bool ixgbe_removed(void __iomem *addr) +{ + return unlikely(!addr); +} -#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg); -#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) +static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); -#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\ - writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) + if (ixgbe_removed(reg_addr)) + return; + writel(value, reg_addr + reg); +} +#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value)) -#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\ - readl((a)->hw_addr + (reg) + ((offset) << 2))) +#ifndef writeq +#define writeq writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel((u32)val, addr); + 
writel((u32)(val >> 32), addr + 4); +} +#endif -#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) +static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + + if (ixgbe_removed(reg_addr)) + return; + writeq(value, reg_addr + reg); +} +#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value)) + +static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u32 value; + + if (ixgbe_removed(reg_addr)) + return IXGBE_FAILED_READ_REG; + value = readl(reg_addr + reg); + if (unlikely(value == IXGBE_FAILED_READ_REG)) + ixgbe_check_remove(hw, reg); + return value; +} +#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg)) + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ + ixgbe_write_reg((a), (reg) + ((offset) << 2), (value)) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) \ + ixgbe_read_reg((a), (reg) + ((offset) << 2)) + +#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS) #define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 4e7c9b098b58..043307024c4a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1342,61 +1342,61 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + if (ixgbe_removed(adapter->hw.hw_addr)) { + *data = 1; + return 1; + } for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { - before = readl(adapter->hw.hw_addr + reg); - writel((test_pattern[pat] & write), - (adapter->hw.hw_addr + reg)); - val = readl(adapter->hw.hw_addr + reg); + before = ixgbe_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); + val = ixgbe_read_reg(&adapter->hw, reg); if (val != (test_pattern[pat] & write & mask)) { e_err(drv, "pattern test reg %04X failed: got " "0x%08X expected 0x%08X\n", reg, val, (test_pattern[pat] & write & mask)); *data = reg; - writel(before, adapter->hw.hw_addr + reg); - return 1; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; } - writel(before, adapter->hw.hw_addr + reg); + ixgbe_write_reg(&adapter->hw, reg, before); } - return 0; + return false; } static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 val, before; - before = readl(adapter->hw.hw_addr + reg); - writel((write & mask), (adapter->hw.hw_addr + reg)); - val = readl(adapter->hw.hw_addr + reg); + + if (ixgbe_removed(adapter->hw.hw_addr)) { + *data = 1; + return 1; + } + before = ixgbe_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, write & mask); + val = ixgbe_read_reg(&adapter->hw, reg); if ((write & mask) != (val & mask)) { e_err(drv, "set/check reg %04X test failed: got 0x%08X " "expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; - writel(before, (adapter->hw.hw_addr + reg)); - return 1; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; } - writel(before, (adapter->hw.hw_addr + reg)); - return 0; + ixgbe_write_reg(&adapter->hw, reg, before); + return false; } -#define REG_PATTERN_TEST(reg, mask, write) \ - do { \ - if (reg_pattern_test(adapter, data, reg, mask, write)) \ - return 1; \ - } while (0) \ - - -#define REG_SET_AND_CHECK(reg, mask, write) \ - do 
{ \ - if (reg_set_and_check(adapter, data, reg, mask, write)) \ - return 1; \ - } while (0) \ - static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) { const struct ixgbe_reg_test *test; u32 value, before, after; u32 i, toggle; + if (ixgbe_removed(adapter->hw.hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return 1; + } switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: toggle = 0x7FFFF3FF; @@ -1419,10 +1419,10 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) * tests. Some bits are read-only, some toggle, and some * are writeable on newer MACs. */ - before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); - value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); - after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; + before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); + value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); + ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); + after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; if (value != after) { e_err(drv, "failed STATUS register test got: 0x%08X " "expected: 0x%08X\n", after, value); @@ -1430,7 +1430,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) return 1; } /* restore previous status */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); + ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); /* * Perform the remainder of the register test, looping through @@ -1438,38 +1438,47 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) */ while (test->reg) { for (i = 0; i < test->array_len; i++) { + bool b = false; + switch (test->test_type) { case PATTERN_TEST: - REG_PATTERN_TEST(test->reg + (i * 0x40), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); break; case SET_READ_TEST: - REG_SET_AND_CHECK(test->reg + (i * 0x40), - test->mask, - test->write); + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); break; case WRITE_NO_TEST: - writel(test->write, - (adapter->hw.hw_addr + test->reg) - + (i * 0x40)); + ixgbe_write_reg(&adapter->hw, + test->reg + (i * 0x40), + test->write); break; case TABLE32_TEST: - REG_PATTERN_TEST(test->reg + (i * 4), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); break; case TABLE64_TEST_LO: - REG_PATTERN_TEST(test->reg + (i * 8), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); break; case TABLE64_TEST_HI: - REG_PATTERN_TEST((test->reg + 4) + (i * 8), - test->mask, - test->write); + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); break; } + if (b) + return 1; } test++; } @@ -1954,6 +1963,15 @@ static void ixgbe_diag_test(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); bool if_running = netif_running(netdev); + if (ixgbe_removed(adapter->hw.hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } set_bit(__IXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { struct ixgbe_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cc06854296a3..3ca59d21d0b2 
100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -278,10 +278,41 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) { if (!test_bit(__IXGBE_DOWN, &adapter->state) && + !test_bit(__IXGBE_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) schedule_work(&adapter->service_task); } +static void ixgbe_remove_adapter(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + ixgbe_service_event_schedule(adapter); +} + +void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned IXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == IXGBE_STATUS) { + ixgbe_remove_adapter(hw); + return; + } + value = ixgbe_read_reg(hw, IXGBE_STATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbe_remove_adapter(hw); +} + static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) { BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); @@ -1314,7 +1345,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) * such as IA-64). */ wmb(); - writel(val, rx_ring->tail); + ixgbe_write_tail(rx_ring, val); } static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, @@ -2969,7 +3000,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ring->count * sizeof(union ixgbe_adv_tx_desc)); IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); - ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); + ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); /* * set WTHRESH to encourage burst writeback, it should not be set @@ -3308,6 +3339,8 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; + if (ixgbe_removed(hw->hw_addr)) + return; /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) @@ -3332,6 +3365,8 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; + if (ixgbe_removed(hw->hw_addr)) + return; rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; @@ -3372,7 +3407,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ring->count * sizeof(union ixgbe_adv_rx_desc)); IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); - ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); + ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); ixgbe_configure_srrctl(adapter, ring); ixgbe_configure_rscctl(adapter, ring); @@ -4572,6 +4607,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); + smp_mb__before_clear_bit(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); @@ -4656,6 +4692,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; int err; + if (ixgbe_removed(hw->hw_addr)) + return; /* lock SFP init bit to prevent race conditions with the watchdog */ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); 
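The ixgbe hunks above teach every register accessor that a PCIe read returning all ones may mean the adapter was surprise-removed, and confirm it by re-reading the status register before giving up. A stand-alone model of that check; the register offsets and the simulated device below are invented for the demo:

    /* removal_check.c - build with: cc -Wall removal_check.c */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FAILED_READ_REG 0xffffffffu
    #define REG_STATUS      0x0008          /* stands in for IXGBE_STATUS */

    static bool removed;                    /* stands in for hw->hw_addr == NULL */

    /* Pretend MMIO read: a surprise-removed PCIe device returns all ones. */
    static uint32_t mmio_read(uint32_t reg)
    {
            (void)reg;
            return removed ? FAILED_READ_REG : 0x00080000;
    }

    static uint32_t read_reg(uint32_t reg)
    {
            uint32_t value = mmio_read(reg);

            if (value == FAILED_READ_REG) {
                    /* Re-reading the status register distinguishes a register
                     * that legitimately reads as all ones from a missing
                     * device; checking for REG_STATUS first avoids recursion.
                     */
                    if (reg == REG_STATUS ||
                        mmio_read(REG_STATUS) == FAILED_READ_REG)
                            printf("adapter removed, stop touching hardware\n");
            }
            return value;
    }

    int main(void)
    {
            printf("0x%08x\n", read_reg(0x0100));
            removed = true;
            printf("0x%08x\n", read_reg(0x0100));
            return 0;
    }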
@@ -4783,7 +4821,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) int i; /* signal that we are down to the interrupt handler */ - set_bit(__IXGBE_DOWN, &adapter->state); + if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ /* disable receives */ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); @@ -5874,8 +5913,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) u64 eics = 0; int i; - /* If we're down or resetting, just bail */ + /* If we're down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; @@ -6122,8 +6162,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) **/ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) { - /* if interface is down do nothing */ + /* if interface is down, removing or resetting, do nothing */ if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; @@ -6341,8 +6382,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; - /* If we're already down or resetting, just bail */ + /* If we're already down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; @@ -6362,6 +6404,15 @@ static void ixgbe_service_task(struct work_struct *work) struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, service_task); + if (ixgbe_removed(adapter->hw.hw_addr)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + ixgbe_down(adapter); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } ixgbe_reset_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); @@ -6693,7 +6744,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, tx_ring->next_to_use = i; /* notify HW of packet */ - writel(i, tx_ring->tail); + ixgbe_write_tail(tx_ring, i); return; dma_error: @@ -6827,12 +6878,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } -#ifdef IXGBE_FCOE -static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { + struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; +#ifdef IXGBE_FCOE struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; int txq; +#endif + + if (fwd_adapter) + return skb->queue_mapping + fwd_adapter->tx_base_queue; + +#ifdef IXGBE_FCOE /* * only execute the code below if protocol is FCoE @@ -6858,9 +6917,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) txq -= f->indices; return txq + f->offset; +#else + return __netdev_pick_tx(dev, skb); +#endif } -#endif netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) @@ -7629,27 +7690,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } -static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, - struct net_device *dev, - void *priv) -{ - struct ixgbe_fwd_adapter *fwd_adapter = priv; - unsigned int queue; - struct ixgbe_ring *tx_ring; - - queue = skb->queue_mapping + fwd_adapter->tx_base_queue; - tx_ring = 
fwd_adapter->real_adapter->tx_ring[queue]; - - return __ixgbe_xmit_frame(skb, dev, tx_ring); -} - static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, -#ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, -#endif .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -7689,7 +7734,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, }; /** @@ -7881,6 +7925,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; if (!hw->hw_addr) { err = -EIO; goto err_ioremap; @@ -8189,7 +8234,7 @@ err_register: err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; - iounmap(hw->hw_addr); + iounmap(adapter->io_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: @@ -8217,7 +8262,7 @@ static void ixgbe_remove(struct pci_dev *pdev) ixgbe_dbg_adapter_exit(adapter); - set_bit(__IXGBE_DOWN, &adapter->state); + set_bit(__IXGBE_REMOVING, &adapter->state); cancel_work_sync(&adapter->service_task); @@ -8256,7 +8301,7 @@ static void ixgbe_remove(struct pci_dev *pdev) kfree(adapter->ixgbe_ieee_ets); #endif - iounmap(adapter->hw.hw_addr); + iounmap(adapter->io_addr); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index d4a64e665398..cc3101afd29f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -27,8 +27,7 @@ #include <linux/pci.h> #include <linux/delay.h> -#include "ixgbe_type.h" -#include "ixgbe_common.h" +#include "ixgbe.h" #include "ixgbe_mbx.h" /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 39217e5ff7dc..132557c318f8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -29,7 +29,7 @@ #include <linux/delay.h> #include <linux/sched.h> -#include "ixgbe_common.h" +#include "ixgbe.h" #include "ixgbe_phy.h" static void ixgbe_i2c_start(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 359f6e60320d..0558c7139f38 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -631,11 +631,14 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; u32 reg, reg_offset, vf_shift; u32 msgbuf[4] = {0, 0, 0, 0}; u8 *addr = (u8 *)(&msgbuf[1]); + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + int i; e_info(probe, "VF Reset msg received from vf %d\n", vf); @@ -654,6 +657,17 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) reg |= 1 << vf_shift; IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + /* force drop enable for all VF Rx queues */ + for (i = vf * q_per_pool; i < ((vf + 1) * 
q_per_pool); i++) { + /* flush previous write */ + IXGBE_WRITE_FLUSH(hw); + + /* indicate to hardware that we want to set drop enable */ + reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; + reg |= i << IXGBE_QDE_IDX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); + } + /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); reg |= 1 << vf_shift; @@ -684,6 +698,15 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) reg |= (1 << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + /* + * Reset the VFs TDWBAL and TDWBAH registers + * which are not cleared by an FLR + */ + for (i = 0; i < q_per_pool; i++) { + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); + } + /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET; if (!is_zero_ether_addr(vf_mac)) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 7c19e969576f..0d39cfc4a3bf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1980,9 +1980,10 @@ enum { #define IXGBE_FWSM_TS_ENABLED 0x1 /* Queue Drop Enable */ -#define IXGBE_QDE_ENABLE 0x00000001 -#define IXGBE_QDE_IDX_MASK 0x00007F00 -#define IXGBE_QDE_IDX_SHIFT 8 +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 +#define IXGBE_QDE_WRITE 0x00010000 #define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ #define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ @@ -2173,6 +2174,14 @@ enum { #define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) #define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) #define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +/* Translated register #defines */ +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) + +#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) enum ixgbe_fdir_pballoc_type { IXGBE_FDIR_PBALLOC_NONE = 0, diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 974a007c4277..8f9266c64c75 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -618,7 +618,8 @@ ltq_etop_set_multicast_list(struct net_device *dev) } static u16 -ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) +ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { /* we are currently only using the first queue */ return 0; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index a49e81bdf8e8..6300fd27f2db 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -33,6 +33,7 @@ config MV643XX_ETH config MVMDIO tristate "Marvell MDIO interface support" + depends on HAS_IOMEM select PHYLIB ---help--- This driver supports the MDIO interface found in the network diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 3b66f26ba049..890922c1c8ee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -724,7 +724,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * - not an IP fragment * - no LLS 
polling in progress */ - if (!mlx4_en_cq_ll_polling(cq) && + if (!mlx4_en_cq_busy_polling(cq) && (dev->features & NETIF_F_GRO)) { struct sk_buff *gro_skb = napi_get_frags(&cq->napi); if (!gro_skb) @@ -816,8 +816,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud skb_mark_napi_id(skb, &cq->napi); - /* Push it up the stack */ - netif_receive_skb(skb); + if (!mlx4_en_cq_busy_polling(cq)) + napi_gro_receive(&cq->napi, skb); + else + netif_receive_skb(skb); next: for (nr = 0; nr < priv->num_frags; nr++) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 160e86d21607..8e8a7eb43a2c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -628,7 +628,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk } } -u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2f1e200f2e4c..3af04c3f42ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -661,7 +661,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) } /* true if a socket is polling, even if it did not get the lock */ -static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) { WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); return cq->state & CQ_USER_PEND; @@ -691,7 +691,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) return false; } -static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) { return false; } @@ -722,7 +722,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); -u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 8e9dad770900..ce84dc289c8f 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -5853,15 +5853,12 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct dev_info *hw_priv = priv->adapter; struct ksz_hw *hw = &hw_priv->hw; struct ksz_port *port = &priv->port; - int rc; int result = 0; struct mii_ioctl_data *data = if_mii(ifr); if (down_interruptible(&priv->proc_sem)) return -ERESTARTSYS; - /* assume success */ - rc = 0; switch (cmd) { /* Get address of MII PHY in use. 
*/ case SIOCGMIIPHY: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 35d48766d842..1accd9531224 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 53 -#define QLCNIC_LINUX_VERSIONID "5.3.53" +#define _QLCNIC_LINUX_SUBVERSION 54 +#define QLCNIC_LINUX_VERSIONID "5.3.54" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -970,6 +970,9 @@ struct qlcnic_ipaddr { #define QLCNIC_BEACON_EANBLE 0xC #define QLCNIC_BEACON_DISABLE 0xD +#define QLCNIC_BEACON_ON 2 +#define QLCNIC_BEACON_OFF 0 + #define QLCNIC_MSIX_TBL_SPACE 8192 #define QLCNIC_PCI_REG_MSIX_TBL 0x44 #define QLCNIC_MSIX_TBL_PGSIZE 4096 @@ -1079,6 +1082,7 @@ struct qlcnic_adapter { u64 dev_rst_time; bool drv_mac_learn; bool fdb_mac_learn; + u8 rx_mac_learn; unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; u8 flash_mfg_id; struct qlcnic_npar_info *npars; @@ -1267,7 +1271,7 @@ struct qlcnic_pci_func_cfg { u16 port_num; u8 pci_func; u8 func_state; - u8 def_mac_addr[6]; + u8 def_mac_addr[ETH_ALEN]; }; struct qlcnic_npar_func_cfg { @@ -1640,7 +1644,6 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *); int qlcnic_reset_npar_config(struct qlcnic_adapter *); int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16); -int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); @@ -1720,6 +1723,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); +void qlcnic_update_stats(struct qlcnic_adapter *); /* Adapter hardware abstraction */ struct qlcnic_hardware_ops { @@ -1767,6 +1771,7 @@ struct qlcnic_hardware_ops { pci_channel_state_t); pci_ers_result_t (*io_slot_reset) (struct pci_dev *); void (*io_resume) (struct pci_dev *); + void (*get_beacon_state)(struct qlcnic_adapter *); }; extern struct qlcnic_nic_template qlcnic_vf_ops; @@ -1993,6 +1998,11 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) adapter->ahw->hw_ops->set_mac_filter_count(adapter); } +static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->get_beacon_state(adapter); +} + static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter) { if (adapter->ahw->hw_ops->read_phys_port_id) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 03eb2ad9611a..a684d28e37af 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -181,7 +181,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .io_error_detected = qlcnic_83xx_io_error_detected, .io_slot_reset = qlcnic_83xx_io_slot_reset, .io_resume = qlcnic_83xx_io_resume, - + .get_beacon_state = qlcnic_83xx_get_beacon_state, }; static struct qlcnic_nic_template qlcnic_83xx_ops = { @@ -1388,6 +1388,33 @@ out: 
netif_device_attach(netdev); } +void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_cmd_args cmd; + u8 beacon_state; + int err = 0; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG); + if (!err) { + err = qlcnic_issue_cmd(adapter, &cmd); + if (!err) { + beacon_state = cmd.rsp.arg[4]; + if (beacon_state == QLCNIC_BEACON_DISABLE) + ahw->beacon_state = QLC_83XX_BEACON_OFF; + else if (beacon_state == QLC_83XX_ENABLE_BEACON) + ahw->beacon_state = QLC_83XX_BEACON_ON; + } + } else { + netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n", + err); + } + + qlcnic_free_mbx_args(&cmd); + + return; +} + int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 beacon) { @@ -1591,7 +1618,9 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *interface_id) { if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_alloc_lb_filters_mem(adapter); qlcnic_pf_set_interface_id_promisc(adapter, interface_id); + adapter->rx_mac_learn = 1; } else { if (!qlcnic_sriov_vf_check(adapter)) *interface_id = adapter->recv_ctx->context_id << 16; @@ -1618,6 +1647,10 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; qlcnic_83xx_set_interface_id_promisc(adapter, &temp); + + if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter)) + mode = VPORT_MISS_MODE_ACCEPT_ALL; + cmd->req.arg[1] = mode | temp; err = qlcnic_issue_cmd(adapter, cmd); if (!err) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 34d291168b79..4643b159df86 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -381,6 +381,8 @@ enum qlcnic_83xx_states { /* LED configuration settings */ #define QLC_83XX_ENABLE_BEACON 0xe +#define QLC_83XX_BEACON_ON 1 +#define QLC_83XX_BEACON_OFF 0 #define QLC_83XX_LED_RATE 0xff #define QLC_83XX_LED_ACT (1 << 10) #define QLC_83XX_LED_MOD (0 << 13) @@ -559,6 +561,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *); void qlcnic_83xx_napi_enable(struct qlcnic_adapter *); void qlcnic_83xx_napi_disable(struct qlcnic_adapter *); int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32); +void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *); void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32); int qlcnic_ind_rd(struct qlcnic_adapter *, u32); int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 22ae884728b8..abe3924c61c5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2214,6 +2214,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) struct qlcnic_hardware_context *ahw = adapter->ahw; int err = 0; + adapter->rx_mac_learn = 0; ahw->msix_supported = !!qlcnic_use_msi_x; qlcnic_83xx_init_rings(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index 474320a5f0c1..23c4fd10e505 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -224,10 +224,14 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) return -EIO; } - if (ahw->capabilities & 
QLC_83XX_ESWITCH_CAPABILITY) + if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) { adapter->flags |= QLCNIC_ESWITCH_ENABLED; - else + if (adapter->drv_mac_learn) + adapter->rx_mac_learn = 1; + } else { adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + adapter->rx_mac_learn = 0; + } ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 45fa6eff56c9..18ced0fb6cf0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) -static inline int qlcnic_82xx_statistics(void) +static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) { - return ARRAY_SIZE(qlcnic_device_gstrings_stats) + - ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } -static inline int qlcnic_83xx_statistics(void) +static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) { - return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + - ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); + ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) { - if (qlcnic_82xx_check(adapter)) - return qlcnic_82xx_statistics(); - else if (qlcnic_83xx_check(adapter)) - return qlcnic_83xx_statistics(); - else - return -1; + int len = -1; + + if (qlcnic_82xx_check(adapter)) { + len = qlcnic_82xx_statistics(adapter); + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + len += ARRAY_SIZE(qlcnic_device_gstrings_stats); + } else if (qlcnic_83xx_check(adapter)) { + len = qlcnic_83xx_statistics(adapter); + } + + return len; } #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 @@ -923,18 +931,13 @@ static int qlcnic_eeprom_test(struct net_device *dev) static int qlcnic_get_sset_count(struct net_device *dev, int sset) { - int len; struct qlcnic_adapter *adapter = netdev_priv(dev); switch (sset) { case ETH_SS_TEST: return QLCNIC_TEST_LEN; case ETH_SS_STATS: - len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; - if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || - qlcnic_83xx_check(adapter)) - return len; - return qlcnic_82xx_statistics(); + return qlcnic_dev_statistics_len(adapter); default: return -EOPNOTSUPP; } @@ -1270,7 +1273,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) return data; } -static void qlcnic_update_stats(struct qlcnic_adapter *adapter) +void qlcnic_update_stats(struct qlcnic_adapter *adapter) { struct qlcnic_host_tx_ring *tx_ring; int ring; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index a9a149b82375..6ca5e57da3da 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -546,8 +546,11 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) !adapter->fdb_mac_learn) { qlcnic_alloc_lb_filters_mem(adapter); adapter->drv_mac_learn = 1; + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + adapter->rx_mac_learn = 1; } else { adapter->drv_mac_learn 
= 0; + adapter->rx_mac_learn = 0; } qlcnic_nic_set_promisc(adapter, mode); @@ -779,8 +782,8 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter) "Could not send interrupt coalescing parameters\n"); } -#define QLCNIC_ENABLE_IPV4_LRO 1 -#define QLCNIC_ENABLE_IPV6_LRO 2 +#define QLCNIC_ENABLE_IPV4_LRO BIT_0 +#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9) int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) { @@ -1530,19 +1533,34 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) return rv; } -int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state) +void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; - int err; + u8 beacon_state; + int err = 0; - err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS); - if (!err) { - err = qlcnic_issue_cmd(adapter, &cmd); - if (!err) - *h_state = cmd.rsp.arg[1]; + if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_LED_STATUS); + if (!err) { + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + netdev_err(adapter->netdev, + "Failed to get current beacon state, err=%d\n", + err); + } else { + beacon_state = cmd.rsp.arg[1]; + if (beacon_state == QLCNIC_BEACON_DISABLE) + ahw->beacon_state = QLCNIC_BEACON_OFF; + else if (beacon_state == QLCNIC_BEACON_EANBLE) + ahw->beacon_state = QLCNIC_BEACON_ON; + } + } + qlcnic_free_mbx_args(&cmd); } - qlcnic_free_mbx_args(&cmd); - return err; + + return; } void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 13303e7d1ed7..0e739aed1bc6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -169,6 +169,7 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int); int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32); int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev); +void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *); void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, u16 vlan_id); void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 6373f6022486..2f967441e1a8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -156,9 +156,9 @@ static inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter, writel(1, tx_ring->crb_intr_mask); } -static inline u8 qlcnic_mac_hash(u64 mac) +static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan) { - return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff)); + return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff)); } static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, @@ -221,8 +221,11 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, u8 hindex, op; int ret; + if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff)) + vlan_id = 0; + memcpy(&src_addr, phdr->h_source, ETH_ALEN); - hindex = qlcnic_mac_hash(src_addr) & + hindex = qlcnic_mac_hash(src_addr, vlan_id) & (adapter->fhash.fbucket_size - 1); if (loopback_pkt) { @@ -322,31 +325,47 @@ static void qlcnic_send_filter(struct qlcnic_adapter 
*adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb) { + struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); + struct ethhdr *phdr = (struct ethhdr *)(skb->data); + struct net_device *netdev = adapter->netdev; + u16 protocol = ntohs(skb->protocol); struct qlcnic_filter *fil, *tmp_fil; - struct hlist_node *n; struct hlist_head *head; - struct net_device *netdev = adapter->netdev; - struct ethhdr *phdr = (struct ethhdr *)(skb->data); + struct hlist_node *n; u64 src_addr = 0; u16 vlan_id = 0; - u8 hindex; + u8 hindex, hval; - if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) - return; + if (!qlcnic_sriov_pf_check(adapter)) { + if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) + return; + } else { + if (protocol == ETH_P_8021Q) { + vh = (struct vlan_ethhdr *)skb->data; + vlan_id = ntohs(vh->h_vlan_TCI); + } else if (vlan_tx_tag_present(skb)) { + vlan_id = vlan_tx_tag_get(skb); + } + + if (ether_addr_equal(phdr->h_source, adapter->mac_addr) && + !vlan_id) + return; + } if (adapter->fhash.fnum >= adapter->fhash.fmax) { adapter->stats.mac_filter_limit_overrun++; - netdev_info(netdev, "Can not add more than %d mac addresses\n", - adapter->fhash.fmax); + netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n", + adapter->fhash.fmax, adapter->fhash.fnum); return; } memcpy(&src_addr, phdr->h_source, ETH_ALEN); - hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1); + hval = qlcnic_mac_hash(src_addr, vlan_id); + hindex = hval & (adapter->fhash.fbucket_size - 1); head = &(adapter->fhash.fhead[hindex]); hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { - if (ether_addr_equal(tmp_fil->faddr, &src_addr) && + if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) && tmp_fil->vlan_id == vlan_id) { if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) qlcnic_change_filter(adapter, &src_addr, @@ -1599,7 +1618,8 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; int index, length, cksum, is_lb_pkt; - u16 vid = 0xffff, t_vid; + u16 vid = 0xffff; + int err; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; @@ -1617,19 +1637,19 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, if (!skb) return buffer; - if (adapter->drv_mac_learn && - (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { - t_vid = 0; - is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0); - qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); - } - if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); - if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + err = qlcnic_check_rx_tagging(adapter, skb, &vid); + + if (adapter->rx_mac_learn) { + is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid); + } + + if (unlikely(err)) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; @@ -1664,7 +1684,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, int l2_hdr_offset, l4_hdr_offset; int index, is_lb_pkt; u16 lro_length, length, data_offset, gso_size; - u16 vid = 0xffff, t_vid; + u16 vid = 0xffff; + int err; if (unlikely(ring > adapter->max_rds_rings)) return NULL; @@ -1686,12 +1707,6 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, if (!skb) return buffer; - if (adapter->drv_mac_learn && - (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { - t_vid = 0; - is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1); - qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); - } if 
(qlcnic_83xx_is_tstamp(sts_data[1])) data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE; else @@ -1700,7 +1715,14 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, skb_put(skb, lro_length + data_offset); skb_pull(skb, l2_hdr_offset); - if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + err = qlcnic_check_rx_tagging(adapter, skb, &vid); + + if (adapter->rx_mac_learn) { + is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid); + } + + if (unlikely(err)) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index eeec83a0e664..eec7b412477c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -546,6 +546,7 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .io_error_detected = qlcnic_82xx_io_error_detected, .io_slot_reset = qlcnic_82xx_io_slot_reset, .io_resume = qlcnic_82xx_io_resume, + .get_beacon_state = qlcnic_82xx_get_beacon_state, }; static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) @@ -2779,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_update_stats(adapter); + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; stats->tx_packets = adapter->stats.xmitfinished; stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index bf8fca7d874f..f998fdcd7551 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -277,9 +277,7 @@ static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter) void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) { - struct qlcnic_sriov *sriov = adapter->ahw->sriov; - - if (!sriov) + if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state)) return; qlcnic_sriov_free_vlans(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index d14d9a139eef..09acf15c3a56 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -9,9 +9,14 @@ #include "qlcnic.h" #include <linux/types.h> -#define QLCNIC_SRIOV_VF_MAX_MAC 8 +#define QLCNIC_SRIOV_VF_MAX_MAC 7 #define QLC_VF_MIN_TX_RATE 100 #define QLC_VF_MAX_TX_RATE 9999 +#define QLC_MAC_OPCODE_MASK 0x7 +#define QLC_MAC_STAR_ADD 6 +#define QLC_MAC_STAR_DEL 7 +#define QLC_VF_FLOOD_BIT BIT_16 +#define QLC_FLOOD_MODE 0x5 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); @@ -344,6 +349,28 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter, return err; } +/* On configuring VF flood bit, PFD will receive traffic from all VFs */ +static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + return err; + + cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to configure VF Flood bit on PF, 
err=%d\n", + err); + + qlcnic_free_mbx_args(&cmd); + return err; +} + static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter, u8 func, u8 enable) { @@ -471,6 +498,12 @@ static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter) if (err) return err; + if (qlcnic_84xx_check(adapter)) { + err = qlcnic_sriov_pf_cfg_flood(adapter); + if (err) + goto disable_vlan_filtering; + } + err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1); if (err) goto disable_vlan_filtering; @@ -1173,6 +1206,13 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, struct qlcnic_vport *vp = vf->vp; u8 op, new_op; + if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) || + ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) { + netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n", + vf->pci_func); + return -EINVAL; + } + if (!(cmd->req.arg[1] & BIT_8)) return -EINVAL; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index b5296672c447..1c8552f682d6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -6,7 +6,6 @@ */ #include <linux/slab.h> -#include <linux/vmalloc.h> #include <linux/interrupt.h> #include "qlcnic.h" @@ -127,6 +126,8 @@ static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter, if (kstrtoul(buf, 2, &h_beacon)) return -EINVAL; + qlcnic_get_beacon_state(adapter); + if (ahw->beacon_state == h_beacon) return len; @@ -158,7 +159,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, struct qlcnic_hardware_context *ahw = adapter->ahw; int err, drv_sds_rings = adapter->drv_sds_rings; u16 beacon; - u8 h_beacon_state, b_state, b_rate; + u8 b_state, b_rate; if (len != sizeof(u16)) return QL_STATUS_INVALID_PARAM; @@ -168,18 +169,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, if (err) return err; - if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { - err = qlcnic_get_beacon_state(adapter, &h_beacon_state); - if (err) { - netdev_err(adapter->netdev, - "Failed to get current beacon state\n"); - } else { - if (h_beacon_state == QLCNIC_BEACON_DISABLE) - ahw->beacon_state = 0; - else if (h_beacon_state == QLCNIC_BEACON_EANBLE) - ahw->beacon_state = 2; - } - } + qlcnic_get_beacon_state(adapter); if (ahw->beacon_state == b_state) return len; @@ -927,38 +917,35 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, u32 pci_func_count = qlcnic_get_pci_func_count(adapter); struct qlcnic_pci_func_cfg *pci_cfg; struct qlcnic_pci_info *pci_info; - size_t pci_info_sz, pci_cfg_sz; + size_t pci_cfg_sz; int i, ret; pci_cfg_sz = pci_func_count * sizeof(*pci_cfg); if (size != pci_cfg_sz) return QL_STATUS_INVALID_PARAM; - pci_info_sz = pci_func_count * sizeof(*pci_info); - pci_info = vmalloc(pci_info_sz); + pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL); if (!pci_info) return -ENOMEM; - memset(pci_info, 0, pci_info_sz); - memset(buf, 0, pci_cfg_sz); - pci_cfg = (struct qlcnic_pci_func_cfg *)buf; - ret = qlcnic_get_pci_info(adapter, pci_info); if (ret) { - vfree(pci_info); + kfree(pci_info); return ret; } + pci_cfg = (struct qlcnic_pci_func_cfg *)buf; for (i = 0; i < pci_func_count; i++) { pci_cfg[i].pci_func = pci_info[i].id; pci_cfg[i].func_type = pci_info[i].type; + pci_cfg[i].func_state = 0; pci_cfg[i].port_num = pci_info[i].default_port; pci_cfg[i].min_bw = pci_info[i].tx_min_bw; pci_cfg[i].max_bw = 
pci_info[i].tx_max_bw; memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); } - vfree(pci_info); + kfree(pci_info); return size; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 888410737dbd..ba1f6c928b91 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1513,11 +1513,11 @@ ignore_link: if (intr_status & mask) { /* Tx error */ u32 edtrr = sh_eth_read(ndev, EDTRR); + /* dmesg */ - dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", - intr_status, mdp->cur_tx); - dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", - mdp->dirty_tx, (u32) ndev->state, edtrr); + dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", + intr_status, mdp->cur_tx, mdp->dirty_tx, + (u32)ndev->state, edtrr); /* dirty buffer free */ sh_eth_txfree(ndev); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 96f79f7c4395..a6a9e1ebe30c 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1894,7 +1894,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, SMC_SELECT_BANK(lp, 1); val = SMC_GET_BASE(lp); val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; - if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { + if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n", CARDNAME, ioaddr, val); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b8e3a4ce24b0..ecdc8ab50425 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1951,6 +1951,23 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ + struct ethhdr *ehdr; + u16 vlanid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == + NETIF_F_HW_VLAN_CTAG_RX && + !__vlan_get_tag(skb, &vlanid)) { + /* pop the vlan tag */ + ehdr = (struct ethhdr *)skb->data; + memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); + } +} + + /** * stmmac_rx_refill: refill used skb preallocated buffers * @priv: driver private structure @@ -2102,6 +2119,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) print_pkt(skb->data, frame_len); } + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); if (unlikely(!coe)) diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 4f1d2549130e..2ead87759ab4 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1764,7 +1764,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); /* We reclaimed resources, so in case the Q is stopped by xmit callback, - * we resume the transmition and use tx_lock to synchronize with xmit.*/ + * we resume the transmission and use tx_lock to synchronize with xmit.*/ spin_lock(&priv->tx_lock); priv->tx_level += tx_level; BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 570495be77f3..023237a65720 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ 
b/drivers/net/ethernet/tile/tilegx.c @@ -2070,7 +2070,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) } /* Return subqueue id on this core (one per core). */ -static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { return smp_processor_id(); }
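
For context on the ixgbe hunks above (ixgbe_remove_adapter(), ixgbe_check_remove() and the ixgbe_removed() guards): MMIO reads from a surprise-removed PCIe function return all ones, so a read that comes back as IXGBE_FAILED_READ_REG is confirmed against the status register before the mapping is treated as gone. The sketch below only illustrates that pattern; fake_hw, read_reg(), REG_STATUS and FAILED_READ_REG are stand-ins for this example, not the driver's real helpers.

/* Illustrative sketch (not the driver's actual code): detecting a
 * surprise-removed device from the value a register read returns.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define FAILED_READ_REG 0xFFFFFFFFU	/* all ones: what a dead MMIO read returns */
#define REG_STATUS	0x00008		/* status register offset (example value) */

struct fake_hw {
	volatile uint32_t *hw_addr;	/* NULL once the adapter is marked removed */
};

static int hw_removed(const struct fake_hw *hw)
{
	return hw->hw_addr == NULL;
}

static void remove_adapter(struct fake_hw *hw)
{
	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;		/* later register access is skipped */
	fprintf(stderr, "adapter removed\n");
}

static uint32_t read_reg(struct fake_hw *hw, uint32_t reg)
{
	volatile uint32_t *base = hw->hw_addr;
	uint32_t value;

	if (hw_removed(hw))
		return FAILED_READ_REG;

	value = base[reg / 4];
	if (value != FAILED_READ_REG)
		return value;

	/* All ones can be a legal register value, so confirm against the
	 * status register; a failed read of the status register itself is
	 * taken as proof of removal, which also avoids recursing. */
	if (reg == REG_STATUS || base[REG_STATUS / 4] == FAILED_READ_REG)
		remove_adapter(hw);

	return value;
}

int main(void)
{
	uint32_t bar[16] = { 0 };
	struct fake_hw hw = { .hw_addr = bar };

	bar[0x10 / 4] = FAILED_READ_REG;	/* simulate a pulled device: */
	bar[REG_STATUS / 4] = FAILED_READ_REG;	/* every read returns all ones */

	printf("read -> 0x%08x, removed=%d\n",
	       (unsigned int)read_reg(&hw, 0x10), hw_removed(&hw));
	return 0;
}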
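
The new IXGBE_PVFTDWBALn()/IXGBE_PVFTDWBAHn() macros in the ixgbe_type.h hunk translate a (VF number, per-pool queue index) pair into the absolute offset of the per-VF Tx descriptor write-back address registers. A small worked example of that arithmetic, using arbitrary illustrative values for q_per_pool and the VF number:

/* Worked example of the translated-register offset math added in
 * ixgbe_type.h; the macro bodies are copied from the hunk above,
 * the q_per_pool/vf values are only example inputs.
 */
#include <stdio.h>

#define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P)	(0x0603C + (0x40 * (P)))

#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
	(IXGBE_PVFTDWBAL((q_per_pool) * (vf_number) + (vf_q_index)))
#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
	(IXGBE_PVFTDWBAH((q_per_pool) * (vf_number) + (vf_q_index)))

int main(void)
{
	unsigned int q_per_pool = 4;	/* queues per VF pool (example) */
	unsigned int vf = 2;		/* VF number (example) */
	unsigned int q;

	/* VF 2 owns pool-relative queues 0..3, i.e. absolute indices 8..11,
	 * so TDWBAL sits at 0x06038 + 0x40 * 8 through 0x06038 + 0x40 * 11. */
	for (q = 0; q < q_per_pool; q++)
		printf("vf %u queue %u: TDWBAL 0x%05x TDWBAH 0x%05x\n",
		       vf, q,
		       (unsigned int)IXGBE_PVFTDWBALn(q_per_pool, vf, q),
		       (unsigned int)IXGBE_PVFTDWBAHn(q_per_pool, vf, q));
	return 0;
}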
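
Similarly, the qlcnic_io.c change folds the VLAN ID into qlcnic_mac_hash() so the same source MAC seen on different VLANs lands in different filter buckets. A standalone reproduction of that hash follows; the MAC value and bucket size are example inputs only, the driver takes the bucket count from adapter->fhash.fbucket_size.

/* Standalone reproduction of the MAC+VLAN bucket hash from the
 * qlcnic_io.c hunk above, with example inputs.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t qlcnic_mac_hash(uint64_t mac, uint16_t vlan)
{
	return (uint8_t)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}

int main(void)
{
	/* 00:0e:1e:aa:bb:cc stored LSB-first in a u64, as memcpy() from
	 * h_source produces on a little-endian host. */
	uint64_t mac = 0xccbbaa1e0e00ULL;
	unsigned int bucket_size = 256;	/* example power-of-two bucket count */
	unsigned int vlan;

	for (vlan = 0; vlan <= 2; vlan++)
		printf("vlan %u -> bucket %u\n", vlan,
		       qlcnic_mac_hash(mac, (uint16_t)vlan) & (bucket_size - 1));
	return 0;
}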