author | David S. Miller <davem@davemloft.net> | 2016-01-13 00:21:27 -0500
committer | David S. Miller <davem@davemloft.net> | 2016-01-13 00:21:27 -0500
commit | ddb5388ffd0ad75d07e7b78181a0b579824ba6f0 (patch)
tree | be1e2bd103c69d7bbace3fffd97bc3d714bbc3d7 /drivers/net/ethernet/mellanox/mlx4
parent | ccdf6ce6a8dba374668ae9b4d763e19903611c38 (diff)
parent | 67990608c8b95d2b8ccc29932376ae73d5818727 (diff)
download | linux-ddb5388ffd0ad75d07e7b78181a0b579824ba6f0.tar.gz linux-ddb5388ffd0ad75d07e7b78181a0b579824ba6f0.tar.bz2 linux-ddb5388ffd0ad75d07e7b78181a0b579824ba6f0.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 17
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 40
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/en_rx.c | 18
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/eq.c | 24
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/fw.c | 12
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/main.c | 107
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/mlx4.h | 8
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 126
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/port.c | 598
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 193
12 files changed, 902 insertions, 252 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index eb8a4988de63..af975a2b74c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -155,13 +155,11 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
     cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
     cq->mcq.event = mlx4_en_cq_event;
 
-    if (cq->is_tx) {
-        netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
-                       NAPI_POLL_WEIGHT);
-    } else {
+    if (cq->is_tx)
+        netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+                          NAPI_POLL_WEIGHT);
+    else
         netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-        napi_hash_add(&cq->napi);
-    }
 
     napi_enable(&cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ddb5541882f5..dd84cabb2a51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,11 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
     case ETH_SS_STATS:
         return bitmap_iterator_count(&it) +
             (priv->tx_ring_num * 2) +
-#ifdef CONFIG_NET_RX_BUSY_POLL
-            (priv->rx_ring_num * 5);
-#else
             (priv->rx_ring_num * 2);
-#endif
     case ETH_SS_TEST:
         return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
                     & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -408,11 +404,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
     for (i = 0; i < priv->rx_ring_num; i++) {
         data[index++] = priv->rx_ring[i]->packets;
         data[index++] = priv->rx_ring[i]->bytes;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-        data[index++] = priv->rx_ring[i]->yields;
-        data[index++] = priv->rx_ring[i]->misses;
-        data[index++] = priv->rx_ring[i]->cleaned;
-#endif
     }
     spin_unlock_bh(&priv->stats_lock);
 
@@ -486,14 +477,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
                 "rx%d_packets", i);
             sprintf(data + (index++) * ETH_GSTRING_LEN,
                 "rx%d_bytes", i);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-            sprintf(data + (index++) * ETH_GSTRING_LEN,
-                "rx%d_napi_yield", i);
-            sprintf(data + (index++) * ETH_GSTRING_LEN,
-                "rx%d_misses", i);
-            sprintf(data + (index++) * ETH_GSTRING_LEN,
-                "rx%d_cleaned", i);
-#endif
         }
         break;
     case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7869f97de5da..0c7e3f69a73b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -69,34 +69,6 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
     return 0;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int mlx4_en_low_latency_recv(struct napi_struct *napi)
-{
-    struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
-    struct net_device *dev = cq->dev;
-    struct mlx4_en_priv *priv = netdev_priv(dev);
-    struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
-    int done;
-
-    if (!priv->port_up)
-        return LL_FLUSH_FAILED;
-
-    if (!mlx4_en_cq_lock_poll(cq))
-        return LL_FLUSH_BUSY;
-
-    done = mlx4_en_process_rx_cq(dev, cq, 4);
-    if (likely(done))
-        rx_ring->cleaned += done;
-    else
-        rx_ring->misses++;
-
-    mlx4_en_cq_unlock_poll(cq);
-
-    return done;
-}
-#endif    /* CONFIG_NET_RX_BUSY_POLL */
-
 #ifdef CONFIG_RFS_ACCEL
 
 struct mlx4_en_filter {
@@ -1561,8 +1533,6 @@ int mlx4_en_start_port(struct net_device *dev)
     for (i = 0; i < priv->rx_ring_num; i++) {
         cq = priv->rx_cq[i];
 
-        mlx4_en_cq_init_lock(cq);
-
         err = mlx4_en_init_affinity_hint(priv, i);
         if (err) {
             en_err(priv, "Failed preparing IRQ affinity hint\n");
@@ -1859,13 +1829,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
     for (i = 0; i < priv->rx_ring_num; i++) {
         struct mlx4_en_cq *cq = priv->rx_cq[i];
 
-        local_bh_disable();
-        while (!mlx4_en_cq_lock_napi(cq)) {
-            pr_info("CQ %d locked\n", i);
-            mdelay(1);
-        }
-        local_bh_enable();
-
         napi_synchronize(&cq->napi);
         mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
         mlx4_en_deactivate_cq(priv, cq);
@@ -2507,9 +2470,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
     .ndo_rx_flow_steer    = mlx4_en_filter_rfs,
 #endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-    .ndo_busy_poll        = mlx4_en_low_latency_recv,
-#endif
     .ndo_get_phys_port_id    = mlx4_en_get_phys_port_id,
 #ifdef CONFIG_MLX4_EN_VXLAN
     .ndo_add_vxlan_port    = mlx4_en_add_vxlan_port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e7a5000aa12c..41440b2b20a3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -873,10 +873,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
          * - TCP/IP (v4)
          * - without IP options
          * - not an IP fragment
-         * - no LLS polling in progress
          */
-        if (!mlx4_en_cq_busy_polling(cq) &&
-            (dev->features & NETIF_F_GRO)) {
+        if (dev->features & NETIF_F_GRO) {
             struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
             if (!gro_skb)
                 goto next;
@@ -927,7 +925,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                             PKT_HASH_TYPE_L3);
 
             skb_record_rx_queue(gro_skb, cq->ring);
-            skb_mark_napi_id(gro_skb, &cq->napi);
 
             if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                 timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -990,13 +987,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        timestamp);
         }
 
-        skb_mark_napi_id(skb, &cq->napi);
-
-        if (!mlx4_en_cq_busy_polling(cq))
-            napi_gro_receive(&cq->napi, skb);
-        else
-            netif_receive_skb(skb);
-
+        napi_gro_receive(&cq->napi, skb);
 next:
         for (nr = 0; nr < priv->num_frags; nr++)
             mlx4_en_free_frag(priv, frags, nr);
@@ -1038,13 +1029,8 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
     struct mlx4_en_priv *priv = netdev_priv(dev);
     int done;
 
-    if (!mlx4_en_cq_lock_napi(cq))
-        return budget;
-
     done = mlx4_en_process_rx_cq(dev, cq, budget);
 
-    mlx4_en_cq_unlock_napi(cq);
-
     /* If we used up all the quota - we're probably not done yet... */
     if (done == budget) {
         const struct cpumask *aff;
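With the busy-poll locking gone, mlx4_en_poll_rx_cq() above is left with the plain NAPI contract: process at most `budget` completions and report how many were handled; only a result below budget lets NAPI complete and re-arm interrupts. A toy standalone model of that contract (no real networking; all names here are illustrative, not the driver's):

#include <stdio.h>

static int pending = 10;    /* pretend completions sitting in the CQ */

static int poll_rx_cq(int budget)
{
    int done = pending < budget ? pending : budget;

    pending -= done;
    return done;    /* done == budget means "probably more work" */
}

int main(void)
{
    int budget = 8, done;

    do {
        done = poll_rx_cq(budget);
        printf("polled %d (pending now %d)\n", done, pending);
    } while (done == budget);    /* reschedule while quota exhausted */
    printf("below budget -> complete and re-arm IRQ\n");
    return 0;
}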
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 603d1c3d3b2e..4696053165f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -151,6 +151,17 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
          eqe = next_slave_event_eqe(slave_eq)) {
         slave = eqe->slave_id;
 
+        if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
+            eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
+            mlx4_is_bonded(dev)) {
+            struct mlx4_port_cap port_cap;
+
+            if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
+                goto consume;
+
+            if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
+                goto consume;
+        }
         /* All active slaves need to receive the event */
         if (slave == ALL_SLAVES) {
             for (i = 0; i <= dev->persist->num_vfs; i++) {
@@ -174,6 +185,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
                 mlx4_warn(dev, "Failed to generate event for slave %d\n",
                       slave);
         }
+consume:
         ++slave_eq->cons;
     }
 }
@@ -594,7 +606,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                     break;
                 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
-                    if (!test_bit(i, slaves_port.slaves))
+                    int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);
+
+                    if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
                         continue;
                     if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
                         if (i == mlx4_master_func_num(dev))
@@ -606,7 +620,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                             eqe->event.port_change.port =
                                 cpu_to_be32(
                                 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
-                                | (mlx4_phys_to_slave_port(dev, i, port) << 28));
+                                | (reported_port << 28));
                             mlx4_slave_event(dev, i, eqe);
                         }
                     } else {  /* IB port */
@@ -636,7 +650,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                     for (i = 0; i < dev->persist->num_vfs + 1; i++) {
-                        if (!test_bit(i, slaves_port.slaves))
+                        int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);
+
+                        if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
                             continue;
                         if (i == mlx4_master_func_num(dev))
                             continue;
@@ -645,7 +661,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                         eqe->event.port_change.port =
                             cpu_to_be32(
                         (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
-                        | (mlx4_phys_to_slave_port(dev, i, port) << 28));
+                        | (reported_port << 28));
                         mlx4_slave_event(dev, i, eqe);
                     }
                 }
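The first eq.c hunk consumes a PORT_DOWN event instead of forwarding it to slaves when the device is bonded and either physical port still reports link. A minimal standalone model of that decision (function and parameter names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* A DOWN event is consumed (hidden from slaves) when HA bonding is active
 * and at least one physical port still has link; otherwise it is forwarded.
 */
static bool consume_port_down_event(bool bonded, bool port1_up, bool port2_up)
{
    if (!bonded)
        return false;           /* not in HA mode: always forward */
    return port1_up || port2_up;    /* some link left: hide the failure */
}

int main(void)
{
    printf("bonded, port2 still up -> consume: %d\n",
           consume_port_down_event(true, false, true));
    printf("bonded, both down -> forward: %d\n",
           !consume_port_down_event(true, false, false));
    printf("not bonded -> forward: %d\n",
           !consume_port_down_event(false, false, true));
    return 0;
}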
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 90db94e83fde..2c2baab9d880 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1104,6 +1104,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c
         goto out;
 
     MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+    port_cap->link_state = (field & 0x80) >> 7;
     port_cap->supported_port_types = field & 3;
     port_cap->suggested_type = (field >> 3) & 1;
     port_cap->default_sense = (field >> 4) & 1;
@@ -1310,6 +1311,15 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
             port_type |= MLX4_PORT_LINK_UP_MASK;
         else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
             port_type &= ~MLX4_PORT_LINK_UP_MASK;
+        else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
+            int other_port = (port == 1) ? 2 : 1;
+            struct mlx4_port_cap port_cap;
+
+            err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
+            if (err)
+                goto out;
+            port_type |= (port_cap.link_state << 7);
+        }
 
         MLX4_PUT(outbox->buf, port_type,
              QUERY_PORT_SUPPORTED_TYPE_OFFSET);
@@ -1325,7 +1335,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
         MLX4_PUT(outbox->buf, short_field,
              QUERY_PORT_CUR_MAX_PKEY_OFFSET);
     }
-
+out:
     return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 08de5555c2f4..7ea258af636a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -44,6 +44,7 @@ struct mlx4_mod_stat_cfg {
 };
 
 struct mlx4_port_cap {
+    u8 link_state;
     u8 supported_port_types;
    u8 suggested_type;
    u8 default_sense;
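The fw.c hunks pack the port's link state into bit 7 of the QUERY_PORT "supported port types" byte: mlx4_QUERY_PORT() extracts it with (field & 0x80) >> 7, and the wrapper ORs it back in with port_type |= (link_state << 7). A standalone round trip of the same bit manipulation (values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t field = 0x03;       /* supported types live in bits 0-1 */
    uint8_t link_state = 1;     /* assumed encoding: 1 = link up */

    field |= (uint8_t)(link_state << 7);        /* pack into bit 7 */
    uint8_t unpacked = (field & 0x80) >> 7;     /* unpack */

    printf("field=0x%02x link_state=%u types=%u\n",
           field, unpacked, field & 3);
    return 0;
}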
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 31c491e02e69..f1b6d219e445 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1221,6 +1221,76 @@ err_set_port:
     return err ? err : count;
 }
 
+/* bond for multi-function device */
+#define MAX_MF_BOND_ALLOWED_SLAVES 63
+static int mlx4_mf_bond(struct mlx4_dev *dev)
+{
+    int err = 0;
+    struct mlx4_slaves_pport slaves_port1;
+    struct mlx4_slaves_pport slaves_port2;
+    DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
+
+    slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
+    slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
+    bitmap_and(slaves_port_1_2,
+           slaves_port1.slaves, slaves_port2.slaves,
+           dev->persist->num_vfs + 1);
+
+    /* only single port vfs are allowed */
+    if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
+        mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
+        return -EINVAL;
+    }
+
+    /* limit on maximum allowed VFs */
+    if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
+        bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) >
+        MAX_MF_BOND_ALLOWED_SLAVES)
+        return -EINVAL;
+
+    if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
+        mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
+        return -EINVAL;
+    }
+
+    err = mlx4_bond_mac_table(dev);
+    if (err)
+        return err;
+    err = mlx4_bond_vlan_table(dev);
+    if (err)
+        goto err1;
+    err = mlx4_bond_fs_rules(dev);
+    if (err)
+        goto err2;
+
+    return 0;
+err2:
+    (void)mlx4_unbond_vlan_table(dev);
+err1:
+    (void)mlx4_unbond_mac_table(dev);
+    return err;
+}
+
+static int mlx4_mf_unbond(struct mlx4_dev *dev)
+{
+    int ret, ret1;
+
+    ret = mlx4_unbond_fs_rules(dev);
+    if (ret)
+        mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
+    ret1 = mlx4_unbond_mac_table(dev);
+    if (ret1) {
+        mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
+        ret = ret1;
+    }
+    ret1 = mlx4_unbond_vlan_table(dev);
+    if (ret1) {
+        mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
+        ret = ret1;
+    }
+    return ret;
+}
+
 int mlx4_bond(struct mlx4_dev *dev)
 {
     int ret = 0;
@@ -1228,16 +1298,23 @@ int mlx4_bond(struct mlx4_dev *dev)
 
     mutex_lock(&priv->bond_mutex);
 
-    if (!mlx4_is_bonded(dev))
+    if (!mlx4_is_bonded(dev)) {
         ret = mlx4_do_bond(dev, true);
-    else
-        ret = 0;
+        if (ret)
+            mlx4_err(dev, "Failed to bond device: %d\n", ret);
+        if (!ret && mlx4_is_master(dev)) {
+            ret = mlx4_mf_bond(dev);
+            if (ret) {
+                mlx4_err(dev, "bond for multifunction failed\n");
+                mlx4_do_bond(dev, false);
+            }
+        }
+    }
 
     mutex_unlock(&priv->bond_mutex);
-    if (ret)
-        mlx4_err(dev, "Failed to bond device: %d\n", ret);
-    else
+    if (!ret)
         mlx4_dbg(dev, "Device is bonded\n");
+
     return ret;
 }
 EXPORT_SYMBOL_GPL(mlx4_bond);
@@ -1249,14 +1326,24 @@ int mlx4_unbond(struct mlx4_dev *dev)
 
     mutex_lock(&priv->bond_mutex);
 
-    if (mlx4_is_bonded(dev))
+    if (mlx4_is_bonded(dev)) {
+        int ret2 = 0;
+
         ret = mlx4_do_bond(dev, false);
+        if (ret)
+            mlx4_err(dev, "Failed to unbond device: %d\n", ret);
+        if (mlx4_is_master(dev))
+            ret2 = mlx4_mf_unbond(dev);
+        if (ret2) {
+            mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
+            ret = ret2;
+        }
+    }
 
     mutex_unlock(&priv->bond_mutex);
-    if (ret)
-        mlx4_err(dev, "Failed to unbond device: %d\n", ret);
-    else
+    if (!ret)
         mlx4_dbg(dev, "Device is unbonded\n");
+
     return ret;
 }
 EXPORT_SYMBOL_GPL(mlx4_unbond);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e1cf9036af22..2404c22ad2b2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -736,6 +736,7 @@ struct mlx4_catas_err {
 struct mlx4_mac_table {
     __be64            entries[MLX4_MAX_MAC_NUM];
     int            refs[MLX4_MAX_MAC_NUM];
+    bool            is_dup[MLX4_MAX_MAC_NUM];
     struct mutex        mutex;
     int            total;
     int            max;
@@ -758,6 +759,7 @@ struct mlx4_roce_gid_table {
 struct mlx4_vlan_table {
     __be32            entries[MLX4_MAX_VLAN_NUM];
     int            refs[MLX4_MAX_VLAN_NUM];
+    int            is_dup[MLX4_MAX_VLAN_NUM];
     struct mutex        mutex;
     int            total;
     int            max;
@@ -1225,6 +1227,10 @@ void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
                   struct mlx4_roce_gid_table *table);
 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
+int mlx4_bond_vlan_table(struct mlx4_dev *dev);
+int mlx4_unbond_vlan_table(struct mlx4_dev *dev);
+int mlx4_bond_mac_table(struct mlx4_dev *dev);
+int mlx4_unbond_mac_table(struct mlx4_dev *dev);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
 /* resource tracker functions*/
@@ -1385,6 +1391,8 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
 int mlx4_config_mad_demux(struct mlx4_dev *dev);
 int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
+int mlx4_bond_fs_rules(struct mlx4_dev *dev);
+int mlx4_unbond_fs_rules(struct mlx4_dev *dev);
 
 enum mlx4_zone_flags {
     MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO    = 1UL << 0,
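mlx4_mf_bond() above gates HA mode on two bitmap checks: the intersection of the per-port function bitmaps may contain at most one member (only the PF may be dual-ported), and the combined population is capped at 63. A userspace model of those checks, using a plain uint64_t in place of the kernel bitmap API and GCC/Clang's __builtin_popcountll in place of bitmap_weight() (an assumption, not the driver's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_MF_BOND_ALLOWED_SLAVES 63

static bool mf_bond_allowed(uint64_t slaves_port1, uint64_t slaves_port2)
{
    uint64_t both = slaves_port1 & slaves_port2;    /* bitmap_and() */

    if (__builtin_popcountll(both) > 1)
        return false;    /* dual-ported VFs: HA unsupported */
    if (__builtin_popcountll(slaves_port1) +
        __builtin_popcountll(slaves_port2) > MAX_MF_BOND_ALLOWED_SLAVES)
        return false;    /* too many functions to mirror */
    return true;
}

int main(void)
{
    /* bit 0 = PF, bits 1.. = VFs */
    printf("PF on both, one VF per port -> %d\n",
           mf_bond_allowed(0x1ULL | 0x2ULL, 0x1ULL | 0x4ULL));    /* 1 */
    printf("VF1 active on both ports -> %d\n",
           mf_bond_allowed(0x3ULL, 0x3ULL));                      /* 0 */
    return 0;
}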
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c41f15102ae0..35de7d2e6b34 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -320,11 +320,6 @@ struct mlx4_en_rx_ring {
     void *rx_info;
     unsigned long bytes;
     unsigned long packets;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-    unsigned long yields;
-    unsigned long misses;
-    unsigned long cleaned;
-#endif
     unsigned long csum_ok;
     unsigned long csum_none;
     unsigned long csum_complete;
@@ -347,18 +342,6 @@ struct mlx4_en_cq {
     struct mlx4_cqe *buf;
 #define MLX4_EN_OPCODE_ERROR    0x1e
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-    unsigned int state;
-#define MLX4_EN_CQ_STATE_IDLE        0
-#define MLX4_EN_CQ_STATE_NAPI        1    /* NAPI owns this CQ */
-#define MLX4_EN_CQ_STATE_POLL        2    /* poll owns this CQ */
-#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
-#define MLX4_EN_CQ_STATE_NAPI_YIELD  4    /* NAPI yielded this CQ */
-#define MLX4_EN_CQ_STATE_POLL_YIELD  8    /* poll yielded this CQ */
-#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
-#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
-    spinlock_t poll_lock; /* protects from LLS/napi conflicts */
-#endif  /* CONFIG_NET_RX_BUSY_POLL */
     struct irq_desc *irq_desc;
 };
 
@@ -622,115 +605,6 @@ static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
     return buf + idx * cqe_sz;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
-{
-    spin_lock_init(&cq->poll_lock);
-    cq->state = MLX4_EN_CQ_STATE_IDLE;
-}
-
-/* called from the device poll rutine to get ownership of a cq */
-static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
-{
-    int rc = true;
-    spin_lock(&cq->poll_lock);
-    if (cq->state & MLX4_CQ_LOCKED) {
-        WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
-        cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
-        rc = false;
-    } else
-        /* we don't care if someone yielded */
-        cq->state = MLX4_EN_CQ_STATE_NAPI;
-    spin_unlock(&cq->poll_lock);
-    return rc;
-}
-
-/* returns true is someone tried to get the cq while napi had it */
-static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
-{
-    int rc = false;
-    spin_lock(&cq->poll_lock);
-    WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
-                 MLX4_EN_CQ_STATE_NAPI_YIELD));
-
-    if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
-        rc = true;
-    cq->state = MLX4_EN_CQ_STATE_IDLE;
-    spin_unlock(&cq->poll_lock);
-    return rc;
-}
-
-/* called from mlx4_en_low_latency_poll() */
-static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
-{
-    int rc = true;
-    spin_lock_bh(&cq->poll_lock);
-    if ((cq->state & MLX4_CQ_LOCKED)) {
-        struct net_device *dev = cq->dev;
-        struct mlx4_en_priv *priv = netdev_priv(dev);
-        struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
-
-        cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
-        rc = false;
-        rx_ring->yields++;
-    } else
-        /* preserve yield marks */
-        cq->state |= MLX4_EN_CQ_STATE_POLL;
-    spin_unlock_bh(&cq->poll_lock);
-    return rc;
-}
-
-/* returns true if someone tried to get the cq while it was locked */
-static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
-{
-    int rc = false;
-    spin_lock_bh(&cq->poll_lock);
-    WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));
-
-    if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
-        rc = true;
-    cq->state = MLX4_EN_CQ_STATE_IDLE;
-    spin_unlock_bh(&cq->poll_lock);
-    return rc;
-}
-
-/* true if a socket is polling, even if it did not get the lock */
-static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
-{
-    WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
-    return cq->state & CQ_USER_PEND;
-}
-#else
-static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
-{
-}
-
-static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
-{
-    return true;
-}
-
-static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
-{
-    return false;
-}
-
-static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
-{
-    return false;
-}
-
-static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
-{
-    return false;
-}
-
-static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
-{
-    return false;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
 
 void mlx4_en_update_loopback_state(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c2b21313dba7..f2550425c251 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -61,6 +61,7 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
     for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
         table->entries[i] = 0;
         table->refs[i] = 0;
+        table->is_dup[i] = false;
     }
     table->max   = 1 << dev->caps.log_num_macs;
     table->total = 0;
@@ -74,6 +75,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
     for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
         table->entries[i] = 0;
         table->refs[i] = 0;
+        table->is_dup[i] = false;
     }
     table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
     table->total = 0;
@@ -159,21 +161,94 @@ int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
 }
 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
 
+static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
+{
+    int i, num_eth_ports = 0;
+
+    if (!mlx4_is_mfunc(dev))
+        return false;
+    mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+        ++num_eth_ports;
+
+    return (num_eth_ports == 2) ? true : false;
+}
+
 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
     struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
     struct mlx4_mac_table *table = &info->mac_table;
     int i, err = 0;
     int free = -1;
+    int free_for_dup = -1;
+    bool dup = mlx4_is_mf_bonded(dev);
+    u8 dup_port = (port == 1) ? 2 : 1;
+    struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
+    bool need_mf_bond = mlx4_need_mf_bond(dev);
+    bool can_mf_bond = true;
+
+    mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
+         (unsigned long long)mac, port,
+         dup ? "with" : "without");
+
+    if (need_mf_bond) {
+        if (port == 1) {
+            mutex_lock(&table->mutex);
+            mutex_lock(&dup_table->mutex);
+        } else {
+            mutex_lock(&dup_table->mutex);
+            mutex_lock(&table->mutex);
+        }
+    } else {
+        mutex_lock(&table->mutex);
+    }
+
+    if (need_mf_bond) {
+        int index_at_port = -1;
+        int index_at_dup_port = -1;
 
-    mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
-         (unsigned long long) mac, port);
+        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+            if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
+                index_at_port = i;
+            if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
+                index_at_dup_port = i;
+        }
+
+        /* check that same mac is not in the tables at different indices */
+        if ((index_at_port != index_at_dup_port) &&
+            (index_at_port >= 0) &&
+            (index_at_dup_port >= 0))
+            can_mf_bond = false;
+
+        /* If the mac is already in the primary table, the slot must be
+         * available in the duplicate table as well.
+         */
+        if (index_at_port >= 0 && index_at_dup_port < 0 &&
+            dup_table->refs[index_at_port]) {
+            can_mf_bond = false;
+        }
+        /* If the mac is already in the duplicate table, check that the
+         * corresponding index is not occupied in the primary table, or
+         * the primary table already contains the mac at the same index.
+         * Otherwise, you cannot bond (primary contains a different mac
+         * at that index).
+         */
+        if (index_at_dup_port >= 0) {
+            if (!table->refs[index_at_dup_port] ||
+                ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
+                free_for_dup = index_at_dup_port;
+            else
+                can_mf_bond = false;
+        }
+    }
 
-    mutex_lock(&table->mutex);
     for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
         if (!table->refs[i]) {
             if (free < 0)
                 free = i;
+            if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
+                if (!dup_table->refs[i])
+                    free_for_dup = i;
+            }
             continue;
         }
 
@@ -182,10 +257,30 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
             /* MAC already registered, increment ref count */
             err = i;
             ++table->refs[i];
+            if (dup) {
+                u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);
+
+                if (dup_mac != mac || !dup_table->is_dup[i]) {
+                    mlx4_warn(dev, "register mac: expected duplicate mac 0x%llx on port %d index %d\n",
+                          mac, dup_port, i);
+                }
+            }
             goto out;
         }
     }
 
+    if (need_mf_bond && (free_for_dup < 0)) {
+        if (dup) {
+            mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
+            mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
+            dup = false;
+        }
+        can_mf_bond = false;
+    }
+
+    if (need_mf_bond && can_mf_bond)
+        free = free_for_dup;
+
     mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
     if (table->total == table->max) {
@@ -205,10 +300,35 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
         goto out;
     }
     table->refs[free] = 1;
-    err = free;
+    table->is_dup[free] = false;
     ++table->total;
+    if (dup) {
+        dup_table->refs[free] = 0;
+        dup_table->is_dup[free] = true;
+        dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
+
+        err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
+        if (unlikely(err)) {
+            mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
+            dup_table->is_dup[free] = false;
+            dup_table->entries[free] = 0;
+            goto out;
+        }
+        ++dup_table->total;
+    }
+    err = free;
 out:
-    mutex_unlock(&table->mutex);
+    if (need_mf_bond) {
+        if (port == 2) {
+            mutex_unlock(&table->mutex);
+            mutex_unlock(&dup_table->mutex);
+        } else {
+            mutex_unlock(&dup_table->mutex);
+            mutex_unlock(&table->mutex);
+        }
+    } else {
+        mutex_unlock(&table->mutex);
+    }
     return err;
 }
 EXPORT_SYMBOL_GPL(__mlx4_register_mac);
@@ -255,6 +375,9 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
     struct mlx4_port_info *info;
     struct mlx4_mac_table *table;
     int index;
+    bool dup = mlx4_is_mf_bonded(dev);
+    u8 dup_port = (port == 1) ? 2 : 1;
+    struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
 
     if (port < 1 || port > dev->caps.num_ports) {
         mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
@@ -262,22 +385,59 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
     }
     info = &mlx4_priv(dev)->port[port];
     table = &info->mac_table;
-    mutex_lock(&table->mutex);
+
+    if (dup) {
+        if (port == 1) {
+            mutex_lock(&table->mutex);
+            mutex_lock(&dup_table->mutex);
+        } else {
+            mutex_lock(&dup_table->mutex);
+            mutex_lock(&table->mutex);
+        }
+    } else {
+        mutex_lock(&table->mutex);
+    }
+
     index = find_index(dev, table, mac);
 
     if (validate_index(dev, table, index))
         goto out;
-    if (--table->refs[index]) {
+
+    if (--table->refs[index] || table->is_dup[index]) {
         mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
              index);
+        if (!table->refs[index])
+            dup_table->is_dup[index] = false;
         goto out;
     }
 
     table->entries[index] = 0;
-    mlx4_set_port_mac_table(dev, port, table->entries);
+    if (mlx4_set_port_mac_table(dev, port, table->entries))
+        mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
     --table->total;
+
+    if (dup) {
+        dup_table->is_dup[index] = false;
+        if (dup_table->refs[index])
+            goto out;
+        dup_table->entries[index] = 0;
+        if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
+            mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
+
+        --table->total;
+    }
 out:
-    mutex_unlock(&table->mutex);
+    if (dup) {
+        if (port == 2) {
+            mutex_unlock(&table->mutex);
+            mutex_unlock(&dup_table->mutex);
+        } else {
+            mutex_unlock(&dup_table->mutex);
+            mutex_unlock(&table->mutex);
+        }
+    } else {
+        mutex_unlock(&table->mutex);
+    }
 }
 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
 
@@ -311,9 +471,22 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
     struct mlx4_mac_table *table = &info->mac_table;
     int index = qpn - info->base_qpn;
     int err = 0;
+    bool dup = mlx4_is_mf_bonded(dev);
+    u8 dup_port = (port == 1) ? 2 : 1;
+    struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
 
     /* CX1 doesn't support multi-functions */
-    mutex_lock(&table->mutex);
+    if (dup) {
+        if (port == 1) {
+            mutex_lock(&table->mutex);
+            mutex_lock(&dup_table->mutex);
+        } else {
+            mutex_lock(&dup_table->mutex);
+            mutex_lock(&table->mutex);
+        }
+    } else {
+        mutex_lock(&table->mutex);
+    }
 
     err = validate_index(dev, table, index);
     if (err)
@@ -326,9 +499,30 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
         mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
              (unsigned long long) new_mac);
         table->entries[index] = 0;
+    } else {
+        if (dup) {
+            dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
+
+            err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
+            if (unlikely(err)) {
+                mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
+                     (unsigned long long)new_mac);
+                dup_table->entries[index] = 0;
+            }
+        }
     }
 out:
-    mutex_unlock(&table->mutex);
+    if (dup) {
+        if (port == 2) {
+            mutex_unlock(&table->mutex);
+            mutex_unlock(&dup_table->mutex);
+        } else {
+            mutex_unlock(&dup_table->mutex);
+            mutex_unlock(&table->mutex);
+        }
+    } else {
+        mutex_unlock(&table->mutex);
+    }
     return err;
 }
 EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
@@ -380,8 +574,28 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
     struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
     int i, err = 0;
     int free = -1;
-
-    mutex_lock(&table->mutex);
+    int free_for_dup = -1;
+    bool dup = mlx4_is_mf_bonded(dev);
+    u8 dup_port = (port == 1) ? 2 : 1;
+    struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
+    bool need_mf_bond = mlx4_need_mf_bond(dev);
+    bool can_mf_bond = true;
+
+    mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
+         vlan, port,
+         dup ? "with" : "without");
+
+    if (need_mf_bond) {
+        if (port == 1) {
+            mutex_lock(&table->mutex);
+            mutex_lock(&dup_table->mutex);
+        } else {
+            mutex_lock(&dup_table->mutex);
+            mutex_lock(&table->mutex);
+        }
+    } else {
+        mutex_lock(&table->mutex);
+    }
 
     if (table->total == table->max) {
         /* No free vlan entries */
@@ -389,22 +603,85 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
         goto out;
     }
 
+    if (need_mf_bond) {
+        int index_at_port = -1;
+        int index_at_dup_port = -1;
+
+        for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
+            if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))))
+                index_at_port = i;
+            if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))))
+                index_at_dup_port = i;
+        }
+        /* check that same vlan is not in the tables at different indices */
+        if ((index_at_port != index_at_dup_port) &&
+            (index_at_port >= 0) &&
+            (index_at_dup_port >= 0))
+            can_mf_bond = false;
+
+        /* If the vlan is already in the primary table, the slot must be
+         * available in the duplicate table as well.
+         */
+        if (index_at_port >= 0 && index_at_dup_port < 0 &&
+            dup_table->refs[index_at_port]) {
+            can_mf_bond = false;
+        }
+        /* If the vlan is already in the duplicate table, check that the
+         * corresponding index is not occupied in the primary table, or
+         * the primary table already contains the vlan at the same index.
+         * Otherwise, you cannot bond (primary contains a different vlan
+         * at that index).
+         */
+        if (index_at_dup_port >= 0) {
+            if (!table->refs[index_at_dup_port] ||
+                (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
+                free_for_dup = index_at_dup_port;
+            else
+                can_mf_bond = false;
+        }
+    }
+
     for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
-        if (free < 0 && (table->refs[i] == 0)) {
-            free = i;
-            continue;
+        if (!table->refs[i]) {
+            if (free < 0)
+                free = i;
+            if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
+                if (!dup_table->refs[i])
+                    free_for_dup = i;
+            }
         }
 
-        if (table->refs[i] &&
+        if ((table->refs[i] || table->is_dup[i]) &&
             (vlan == (MLX4_VLAN_MASK &
                   be32_to_cpu(table->entries[i])))) {
             /* Vlan already registered, increase references count */
+            mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
             *index = i;
             ++table->refs[i];
+            if (dup) {
+                u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
+
+                if (dup_vlan != vlan || !dup_table->is_dup[i]) {
+                    mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
+                          vlan, dup_port, i);
+                }
+            }
             goto out;
         }
     }
 
+    if (need_mf_bond && (free_for_dup < 0)) {
+        if (dup) {
+            mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
+            mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
+            dup = false;
+        }
+        can_mf_bond = false;
+    }
+
+    if (need_mf_bond && can_mf_bond)
+        free = free_for_dup;
+
     if (free < 0) {
         err = -ENOMEM;
         goto out;
@@ -412,6 +689,7 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
 
     /* Register new VLAN */
     table->refs[free] = 1;
+    table->is_dup[free] = false;
     table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
 
     err = mlx4_set_port_vlan_table(dev, port, table->entries);
@@ -421,11 +699,35 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
         table->entries[free] = 0;
         goto out;
     }
+    ++table->total;
+    if (dup) {
+        dup_table->refs[free] = 0;
+        dup_table->is_dup[free] = true;
+        dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
+
+        err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
+        if (unlikely(err)) {
+            mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
+            dup_table->is_dup[free] = false;
+            dup_table->entries[free] = 0;
+            goto out;
+        }
+        ++dup_table->total;
+    }
 
     *index = free;
-    ++table->total;
 out:
-    mutex_unlock(&table->mutex);
+    if (need_mf_bond) {
+        if (port == 2) {
+            mutex_unlock(&table->mutex);
+            mutex_unlock(&dup_table->mutex);
+        } else {
+            mutex_unlock(&dup_table->mutex);
+            mutex_unlock(&table->mutex);
+        }
+    } else {
+        mutex_unlock(&table->mutex);
+    }
     return err;
 }
 
@@ -455,8 +757,22 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
 {
     struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
     int index;
+    bool dup = mlx4_is_mf_bonded(dev);
+    u8 dup_port = (port == 1) ? 2 : 1;
+    struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
+
+    if (dup) {
+        if (port == 1) {
+            mutex_lock(&table->mutex);
+            mutex_lock(&dup_table->mutex);
+        } else {
+            mutex_lock(&dup_table->mutex);
+            mutex_lock(&table->mutex);
+        }
+    } else {
+        mutex_lock(&table->mutex);
+    }
 
-    mutex_lock(&table->mutex);
     if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
         mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
         goto out;
@@ -467,16 +783,38 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
         goto out;
     }
 
-    if (--table->refs[index]) {
+    if (--table->refs[index] || table->is_dup[index]) {
         mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
              table->refs[index], index);
+        if (!table->refs[index])
+            dup_table->is_dup[index] = false;
         goto out;
     }
     table->entries[index] = 0;
-    mlx4_set_port_vlan_table(dev, port, table->entries);
+    if (mlx4_set_port_vlan_table(dev, port, table->entries))
+        mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
     --table->total;
+    if (dup) {
+        dup_table->is_dup[index] = false;
+        if (dup_table->refs[index])
+            goto out;
+        dup_table->entries[index] = 0;
+        if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
+            mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
+        --dup_table->total;
+    }
 out:
-    mutex_unlock(&table->mutex);
+    if (dup) {
+        if (port == 2) {
+            mutex_unlock(&table->mutex);
+            mutex_unlock(&dup_table->mutex);
+        } else {
+            mutex_unlock(&dup_table->mutex);
+            mutex_unlock(&table->mutex);
+        }
+    } else {
+        mutex_unlock(&table->mutex);
+    }
 }
 
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
@@ -495,6 +833,220 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
 
+int mlx4_bond_mac_table(struct mlx4_dev *dev)
+{
+    struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
+    struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
+    int ret = 0;
+    int i;
+    bool update1 = false;
+    bool update2 = false;
+
+    mutex_lock(&t1->mutex);
+    mutex_lock(&t2->mutex);
+    for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+        if ((t1->entries[i] != t2->entries[i]) &&
+            t1->entries[i] && t2->entries[i]) {
+            mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
+            ret = -EINVAL;
+            goto unlock;
+        }
+    }
+
+    for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+        if (t1->entries[i] && !t2->entries[i]) {
+            t2->entries[i] = t1->entries[i];
+            t2->is_dup[i] = true;
+            update2 = true;
+        } else if (!t1->entries[i] && t2->entries[i]) {
+            t1->entries[i] = t2->entries[i];
+            t1->is_dup[i] = true;
+            update1 = true;
+        } else if (t1->entries[i] && t2->entries[i]) {
+            t1->is_dup[i] = true;
+            t2->is_dup[i] = true;
+        }
+    }
+
+    if (update1) {
+        ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
+        if (ret)
+            mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
+    }
+    if (!ret && update2) {
+        ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
+        if (ret)
+            mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
+    }
+
+    if (ret)
+        mlx4_warn(dev, "failed to create mirror MAC tables\n");
+unlock:
+    mutex_unlock(&t2->mutex);
+    mutex_unlock(&t1->mutex);
+    return ret;
+}
+
+int mlx4_unbond_mac_table(struct mlx4_dev *dev)
+{
+    struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
+    struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
+    int ret = 0;
+    int ret1;
+    int i;
+    bool update1 = false;
+    bool update2 = false;
+
+    mutex_lock(&t1->mutex);
+    mutex_lock(&t2->mutex);
+    for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+        if (t1->entries[i] != t2->entries[i]) {
+            mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
+            ret = -EINVAL;
+            goto unlock;
+        }
+    }
+
+    for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+        if (!t1->entries[i])
+            continue;
+        t1->is_dup[i] = false;
+        if (!t1->refs[i]) {
+            t1->entries[i] = 0;
+            update1 = true;
+        }
+        t2->is_dup[i] = false;
+        if (!t2->refs[i]) {
+            t2->entries[i] = 0;
+            update2 = true;
+        }
+    }
+
+    if (update1) {
+        ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
+        if (ret)
+            mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
+    }
+    if (update2) {
+        ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
+        if (ret1) {
+            mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
+            ret = ret1;
+        }
+    }
+unlock:
+    mutex_unlock(&t2->mutex);
+    mutex_unlock(&t1->mutex);
+    return ret;
+}
+
+int mlx4_bond_vlan_table(struct mlx4_dev *dev)
+{
+    struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
+    struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
+    int ret = 0;
+    int i;
+    bool update1 = false;
+    bool update2 = false;
+
+    mutex_lock(&t1->mutex);
+    mutex_lock(&t2->mutex);
+    for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+        if ((t1->entries[i] != t2->entries[i]) &&
+            t1->entries[i] && t2->entries[i]) {
+            mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
+            ret = -EINVAL;
+            goto unlock;
+        }
+    }
+
+    for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+        if (t1->entries[i] && !t2->entries[i]) {
+            t2->entries[i] = t1->entries[i];
+            t2->is_dup[i] = true;
+            update2 = true;
+        } else if (!t1->entries[i] && t2->entries[i]) {
+            t1->entries[i] = t2->entries[i];
+            t1->is_dup[i] = true;
+            update1 = true;
+        } else if (t1->entries[i] && t2->entries[i]) {
+            t1->is_dup[i] = true;
+            t2->is_dup[i] = true;
+        }
+    }
+
+    if (update1) {
+        ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
+        if (ret)
+            mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
+    }
+    if (!ret && update2) {
+        ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
+        if (ret)
+            mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
+    }
+
+    if (ret)
+        mlx4_warn(dev, "failed to create mirror VLAN tables\n");
+unlock:
+    mutex_unlock(&t2->mutex);
+    mutex_unlock(&t1->mutex);
+    return ret;
+}
+
+int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
+{
+    struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
+    struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
+    int ret = 0;
+    int ret1;
+    int i;
+    bool update1 = false;
+    bool update2 = false;
+
+    mutex_lock(&t1->mutex);
+    mutex_lock(&t2->mutex);
+    for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+        if (t1->entries[i] != t2->entries[i]) {
+            mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
+            ret = -EINVAL;
+            goto unlock;
+        }
+    }
+
+    for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+        if (!t1->entries[i])
+            continue;
+        t1->is_dup[i] = false;
+        if (!t1->refs[i]) {
+            t1->entries[i] = 0;
+            update1 = true;
+        }
+        t2->is_dup[i] = false;
+        if (!t2->refs[i]) {
+            t2->entries[i] = 0;
+            update2 = true;
+        }
+    }
+
+    if (update1) {
+        ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
+        if (ret)
+            mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
+    }
+    if (update2) {
+        ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
+        if (ret1) {
+            mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
+            ret = ret1;
+        }
+    }
+unlock:
+    mutex_unlock(&t2->mutex);
+    mutex_unlock(&t1->mutex);
+    return ret;
+}
+
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
 {
     struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
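Every dual-port path in port.c above takes both per-port table mutexes, and always acquires port 1's mutex before port 2's regardless of which port the caller operates on; that fixed global order is what rules out an AB-BA deadlock between concurrent port-1 and port-2 callers. A pthread sketch of the same idiom (a minimal model, not the driver's code; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port1_table = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t port2_table = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors __mlx4_register_mac() and friends: whichever table is "own" and
 * whichever is "dup", port 1's mutex is always the first one taken.
 */
static void lock_pair(pthread_mutex_t *own, pthread_mutex_t *dup, int port)
{
    if (port == 1) {        /* own is port 1's table: take it first */
        pthread_mutex_lock(own);
        pthread_mutex_lock(dup);
    } else {                /* dup is port 1's table: still take it first */
        pthread_mutex_lock(dup);
        pthread_mutex_lock(own);
    }
}

static void unlock_pair(pthread_mutex_t *own, pthread_mutex_t *dup, int port)
{
    if (port == 1) {        /* release in reverse acquisition order */
        pthread_mutex_unlock(dup);
        pthread_mutex_unlock(own);
    } else {
        pthread_mutex_unlock(own);
        pthread_mutex_unlock(dup);
    }
}

int main(void)
{
    /* a port-2 caller: its own table is port 2's, the duplicate is port 1's */
    lock_pair(&port2_table, &port1_table, 2);
    printf("port-2 caller holds both tables; port 1's was taken first\n");
    unlock_pair(&port2_table, &port1_table, 2);
    return 0;
}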
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cad6c44df91c..b46dbe29ef6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -222,6 +222,13 @@ enum res_fs_rule_states {
 struct res_fs_rule {
     struct res_common    com;
     int            qpn;
+    /* VF DMFS mbox with port flipped */
+    void            *mirr_mbox;
+    /* > 0 --> apply mirror when getting into HA mode      */
+    /* = 0 --> un-apply mirror when getting out of HA mode */
+    u32            mirr_mbox_size;
+    struct list_head    mirr_list;
+    u64            mirr_rule_id;
 };
 
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -4284,6 +4291,22 @@ err_mac:
     return err;
 }
 
+static u32 qp_attach_mbox_size(void *mbox)
+{
+    u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+    struct _rule_hw *rule_header;
+
+    rule_header = (struct _rule_hw *)(mbox + size);
+
+    while (rule_header->size) {
+        size += rule_header->size * sizeof(u32);
+        rule_header += 1;
+    }
+    return size;
+}
+
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                      struct mlx4_vhcr *vhcr,
                      struct mlx4_cmd_mailbox *inbox,
@@ -4300,6 +4323,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
     struct mlx4_net_trans_rule_hw_ctrl *ctrl;
     struct _rule_hw *rule_header;
     int header_id;
+    struct res_fs_rule *rrule;
+    u32 mbox_size;
 
     if (dev->caps.steering_mode !=
         MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4329,7 +4354,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
     case MLX4_NET_TRANS_RULE_ID_ETH:
         if (validate_eth_header_mac(slave, rule_header, rlist)) {
             err = -EINVAL;
-            goto err_put;
+            goto err_put_qp;
         }
         break;
     case MLX4_NET_TRANS_RULE_ID_IB:
@@ -4340,7 +4365,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
         pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
         if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
             err = -EINVAL;
-            goto err_put;
+            goto err_put_qp;
         }
         vhcr->in_modifier +=
             sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
@@ -4348,7 +4373,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
     default:
         pr_err("Corrupted mailbox\n");
         err = -EINVAL;
-        goto err_put;
+        goto err_put_qp;
     }
 
 execute:
@@ -4357,23 +4382,69 @@ execute:
                MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                MLX4_CMD_NATIVE);
     if (err)
-        goto err_put;
+        goto err_put_qp;
+
     err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
     if (err) {
         mlx4_err(dev, "Fail to add flow steering resources\n");
-        /* detach rule*/
+        goto err_detach;
+    }
+
+    err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
+    if (err)
+        goto err_detach;
+
+    mbox_size = qp_attach_mbox_size(inbox->buf);
+    rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
+    if (!rrule->mirr_mbox) {
+        err = -ENOMEM;
+        goto err_put_rule;
+    }
+    rrule->mirr_mbox_size = mbox_size;
+    rrule->mirr_rule_id = 0;
+    memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
+
+    /* set different port */
+    ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
+    if (ctrl->port == 1)
+        ctrl->port = 2;
+    else
+        ctrl->port = 1;
+
+    if (mlx4_is_bonded(dev))
+        mlx4_do_mirror_rule(dev, rrule);
+
+    atomic_inc(&rqp->ref_count);
+
+err_put_rule:
+    put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
+err_detach:
+    /* detach rule on error */
+    if (err)
         mlx4_cmd(dev, vhcr->out_param, 0, 0,
              MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
              MLX4_CMD_NATIVE);
-        goto err_put;
-    }
-    atomic_inc(&rqp->ref_count);
-err_put:
+err_put_qp:
     put_res(dev, slave, qpn, RES_QP);
     return err;
 }
 
+static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+    int err;
+
+    err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
+    if (err) {
+        mlx4_err(dev, "Fail to remove flow steering resources\n");
+        return err;
+    }
+
+    mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+    return 0;
+}
+
 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
                      struct mlx4_vhcr *vhcr,
                      struct mlx4_cmd_mailbox *inbox,
@@ -4383,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
     int err;
     struct res_qp *rqp;
     struct res_fs_rule *rrule;
+    u64 mirr_reg_id;
 
     if (dev->caps.steering_mode !=
         MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4391,12 +4463,30 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
     err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
     if (err)
         return err;
+
+    if (!rrule->mirr_mbox) {
+        mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
+        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+        return -EINVAL;
+    }
+    mirr_reg_id = rrule->mirr_rule_id;
+    kfree(rrule->mirr_mbox);
+
     /* Release the rule form busy state before removal */
     put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
     err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
     if (err)
         return err;
 
+    if (mirr_reg_id && mlx4_is_bonded(dev)) {
+        err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
+        if (err) {
+            mlx4_err(dev, "Fail to get resource of mirror rule\n");
+        } else {
+            put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
+            mlx4_undo_mirror_rule(dev, rrule);
+        }
+    }
     err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
     if (err) {
         mlx4_err(dev, "Fail to remove flow steering resources\n");
@@ -4834,6 +4924,91 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
     spin_unlock_irq(mlx4_tlock(dev));
 }
 
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+    struct mlx4_cmd_mailbox *mailbox;
+    int err;
+    struct res_fs_rule *mirr_rule;
+    u64 reg_id;
+
+    mailbox = mlx4_alloc_cmd_mailbox(dev);
+    if (IS_ERR(mailbox))
+        return PTR_ERR(mailbox);
+
+    if (!fs_rule->mirr_mbox) {
+        mlx4_err(dev, "rule mirroring mailbox is null\n");
+        return -EINVAL;
+    }
+    memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
+    err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
+               MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+               MLX4_CMD_NATIVE);
+    mlx4_free_cmd_mailbox(dev, mailbox);
+
+    if (err)
+        goto err;
+
+    err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
+    if (err)
+        goto err_detach;
+
+    err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
+    if (err)
+        goto err_rem;
+
+    fs_rule->mirr_rule_id = reg_id;
+    mirr_rule->mirr_rule_id = 0;
+    mirr_rule->mirr_mbox_size = 0;
+    mirr_rule->mirr_mbox = NULL;
+    put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
+
+    return 0;
+err_rem:
+    rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
+err_detach:
+    mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+err:
+    return err;
+}
+
+static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
+{
+    struct mlx4_priv *priv = mlx4_priv(dev);
+    struct mlx4_resource_tracker *tracker =
+        &priv->mfunc.master.res_tracker;
+    struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
+    struct rb_node *p;
+    struct res_fs_rule *fs_rule;
+    int err = 0;
+    LIST_HEAD(mirr_list);
+
+    for (p = rb_first(root); p; p = rb_next(p)) {
+        fs_rule = rb_entry(p, struct res_fs_rule, com.node);
+        if ((bond && fs_rule->mirr_mbox_size) ||
+            (!bond && !fs_rule->mirr_mbox_size))
+            list_add_tail(&fs_rule->mirr_list, &mirr_list);
+    }
+
+    list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
+        if (bond)
+            err += mlx4_do_mirror_rule(dev, fs_rule);
+        else
+            err += mlx4_undo_mirror_rule(dev, fs_rule);
+    }
+    return err;
+}
+
+int mlx4_bond_fs_rules(struct mlx4_dev *dev)
+{
+    return mlx4_mirror_fs_rules(dev, true);
+}
+
+int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
+{
+    return mlx4_mirror_fs_rules(dev, false);
+}
+
 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
 {
     struct mlx4_priv *priv = mlx4_priv(dev);
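qp_attach_mbox_size() above sizes a flow-steering attach mailbox by walking a fixed control header followed by a list of spec headers, each declaring its own size in 32-bit words, terminated by a zero size. A userspace model of that walk; the struct layouts and sizes are illustrative stand-ins, not the real hardware layout:

#include <stdint.h>
#include <stdio.h>

#define CTRL_SIZE 16u    /* stand-in for sizeof(struct mlx4_net_trans_rule_hw_ctrl) */

struct rule_hw {         /* stand-in for struct _rule_hw */
    uint8_t size;        /* spec size in 32-bit words; 0 ends the list */
    uint8_t pad[15];     /* sized so one header == one 4-word spec */
};

static uint32_t attach_mbox_size(const void *mbox)
{
    uint32_t size = CTRL_SIZE;
    const struct rule_hw *hdr =
        (const struct rule_hw *)((const char *)mbox + CTRL_SIZE);

    while (hdr->size) {              /* same walk as the driver's loop */
        size += hdr->size * sizeof(uint32_t);
        hdr += 1;
    }
    return size;
}

int main(void)
{
    unsigned char mbox[256] = {0};
    struct rule_hw *h = (struct rule_hw *)(mbox + CTRL_SIZE);

    h[0].size = 4;    /* e.g. an L2 spec: 4 words */
    h[1].size = 4;    /* e.g. an L3 spec: 4 words */
    /* h[2].size == 0 terminates */

    printf("mailbox size = %u bytes\n", attach_mbox_size(mbox));
    /* CTRL_SIZE + (4 + 4) * 4 = 48 */
    return 0;
}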