author     Jakub Kicinski <kuba@kernel.org>   2022-11-10 16:29:56 -0800
committer  Jakub Kicinski <kuba@kernel.org>   2022-11-10 16:29:57 -0800
commit     abd5ac18ae661681fbacd8c9d0a577943da4c89e
tree       03bfbf324f81c1e177fe90ffbd321e2716306a83
parent     b3bbeba09453f87d37bd5f211887537c7bbc5a0b
parent     7f1a6d4b9e820b08479a07f6e14c36ccfa641751
Merge tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
mlx5 fixes 2022-11-09
This series provides bug fixes to the mlx5 driver.
* tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
net/mlx5e: TC, Fix slab-out-of-bounds in parse_tc_actions
net/mlx5e: E-Switch, Fix comparing termination table instance
net/mlx5e: TC, Fix wrong rejection of packet-per-second policing
net/mlx5e: Fix tc acts array not to be dependent on enum order
net/mlx5e: Fix usage of DMA sync API
net/mlx5e: Add missing sanity checks for max TX WQE size
net/mlx5: fw_reset: Don't try to load device in case PCI isn't working
net/mlx5: E-switch, Set to legacy mode if failed to change switchdev mode
net/mlx5: Allow async trigger completion execution on single CPU systems
net/mlx5: Bridge, verify LAG state when adding bond to bridge
====================
Link: https://lore.kernel.org/r/20221109184050.108379-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
13 files changed, 149 insertions, 115 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 46ba4c2faad2..2e0d59ca62b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1770,12 +1770,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
     struct mlx5_cmd *cmd = &dev->cmd;
     int i;
 
-    for (i = 0; i < cmd->max_reg_cmds; i++)
-        while (down_trylock(&cmd->sem))
+    for (i = 0; i < cmd->max_reg_cmds; i++) {
+        while (down_trylock(&cmd->sem)) {
             mlx5_cmd_trigger_completions(dev);
+            cond_resched();
+        }
+    }
 
-    while (down_trylock(&cmd->pages_sem))
+    while (down_trylock(&cmd->pages_sem)) {
         mlx5_cmd_trigger_completions(dev);
+        cond_resched();
+    }
 
     /* Unlock cmdif */
     up(&cmd->pages_sem);
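The cmd.c hunk above wraps the trigger-completions retry loops in braces and adds cond_resched(), so that on a single-CPU (or otherwise saturated) system the flushing thread yields and the completion work it is waiting for can actually run. A minimal sketch of the same pattern, where my_slots_sem and my_force_completions() are hypothetical stand-ins rather than mlx5 symbols:

```c
/* Sketch only: mimics the mlx5_cmd_flush() pattern of draining command-slot
 * semaphores. my_slots_sem is assumed to be initialized elsewhere with
 * sema_init(&my_slots_sem, max_slots).
 */
#include <linux/semaphore.h>
#include <linux/sched.h>

static struct semaphore my_slots_sem;

static void my_force_completions(void)
{
	/* In the real driver this would complete all outstanding commands
	 * with an error status (mlx5_cmd_trigger_completions()).
	 */
}

static void my_flush_slots(int max_slots)
{
	int i;

	for (i = 0; i < max_slots; i++) {
		while (down_trylock(&my_slots_sem)) {
			/* Someone still holds a slot: force their completion... */
			my_force_completions();
			/* ...and yield so the completion handler can run,
			 * which matters on a single-CPU system.
			 */
			cond_resched();
		}
	}

	/* Release everything that was just grabbed. */
	for (i = 0; i < max_slots; i++)
		up(&my_slots_sem);
}
```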
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 39ef2a2561a3..8099a21e674c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
     return err;
 }
 
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+    struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+    struct netdev_notifier_changeupper_info *info = ptr;
+    struct net_device *upper = info->upper_dev;
+    struct net_device *lower;
+    struct list_head *iter;
+
+    if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+        return 0;
+
+    netdev_for_each_lower_dev(dev, lower, iter) {
+        struct mlx5_core_dev *mdev;
+        struct mlx5e_priv *priv;
+
+        if (!mlx5e_eswitch_rep(lower))
+            continue;
+
+        priv = netdev_priv(lower);
+        mdev = priv->mdev;
+        if (!mlx5_lag_is_active(mdev))
+            return -EAGAIN;
+        if (!mlx5_lag_is_shared_fdb(mdev))
+            return -EOPNOTSUPP;
+    }
+
+    return 0;
+}
+
 static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
                                                 unsigned long event, void *ptr)
 {
@@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 
     switch (event) {
     case NETDEV_PRECHANGEUPPER:
+        err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
         break;
 
     case NETDEV_CHANGEUPPER:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 305fde62a78d..3337241cfd84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -6,70 +6,42 @@
 #include "en/tc_priv.h"
 #include "mlx5_core.h"
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
-    &mlx5e_tc_act_accept,
-    &mlx5e_tc_act_drop,
-    &mlx5e_tc_act_trap,
-    &mlx5e_tc_act_goto,
-    &mlx5e_tc_act_mirred,
-    &mlx5e_tc_act_mirred,
-    &mlx5e_tc_act_redirect_ingress,
-    NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-    &mlx5e_tc_act_vlan,
-    &mlx5e_tc_act_vlan,
-    &mlx5e_tc_act_vlan_mangle,
-    &mlx5e_tc_act_tun_encap,
-    &mlx5e_tc_act_tun_decap,
-    &mlx5e_tc_act_pedit,
-    &mlx5e_tc_act_pedit,
-    &mlx5e_tc_act_csum,
-    NULL, /* FLOW_ACTION_MARK, */
-    &mlx5e_tc_act_ptype,
-    NULL, /* FLOW_ACTION_PRIORITY, */
-    NULL, /* FLOW_ACTION_WAKE, */
-    NULL, /* FLOW_ACTION_QUEUE, */
-    &mlx5e_tc_act_sample,
-    &mlx5e_tc_act_police,
-    &mlx5e_tc_act_ct,
-    NULL, /* FLOW_ACTION_CT_METADATA, */
-    &mlx5e_tc_act_mpls_push,
-    &mlx5e_tc_act_mpls_pop,
-    NULL, /* FLOW_ACTION_MPLS_MANGLE, */
-    NULL, /* FLOW_ACTION_GATE, */
-    NULL, /* FLOW_ACTION_PPPOE_PUSH, */
-    NULL, /* FLOW_ACTION_JUMP, */
-    NULL, /* FLOW_ACTION_PIPE, */
-    &mlx5e_tc_act_vlan,
-    &mlx5e_tc_act_vlan,
+    [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+    [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+    [FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
+    [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+    [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+    [FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
+    [FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
+    [FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
+    [FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan,
+    [FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle,
+    [FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap,
+    [FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap,
+    [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+    [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+    [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+    [FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype,
+    [FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample,
+    [FLOW_ACTION_POLICE] = &mlx5e_tc_act_police,
+    [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
+    [FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push,
+    [FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop,
+    [FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan,
+    [FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,
 };
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
-    &mlx5e_tc_act_accept,
-    &mlx5e_tc_act_drop,
-    NULL, /* FLOW_ACTION_TRAP, */
-    &mlx5e_tc_act_goto,
-    &mlx5e_tc_act_mirred_nic,
-    NULL, /* FLOW_ACTION_MIRRED, */
-    NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
-    NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-    NULL, /* FLOW_ACTION_VLAN_PUSH, */
-    NULL, /* FLOW_ACTION_VLAN_POP, */
-    NULL, /* FLOW_ACTION_VLAN_MANGLE, */
-    NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
-    NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
-    &mlx5e_tc_act_pedit,
-    &mlx5e_tc_act_pedit,
-    &mlx5e_tc_act_csum,
-    &mlx5e_tc_act_mark,
-    NULL, /* FLOW_ACTION_PTYPE, */
-    NULL, /* FLOW_ACTION_PRIORITY, */
-    NULL, /* FLOW_ACTION_WAKE, */
-    NULL, /* FLOW_ACTION_QUEUE, */
-    NULL, /* FLOW_ACTION_SAMPLE, */
-    NULL, /* FLOW_ACTION_POLICE, */
-    &mlx5e_tc_act_ct,
+    [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+    [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+    [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+    [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic,
+    [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+    [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+    [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+    [FLOW_ACTION_MARK] = &mlx5e_tc_act_mark,
+    [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
 };
 
 /**
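The act.c change above switches both action tables from positional initializers, which silently break whenever enum flow_action_id gains or reorders values, to designated initializers keyed by the enum itself; any action not listed simply stays NULL. A standalone illustration of the same idea, using a made-up enum and handler table rather than the kernel's flow_action_id:

```c
/* Standalone demo of designated array initializers; the enum and handlers
 * here are invented for illustration and are not kernel definitions.
 */
#include <stdio.h>

enum demo_action {
	DEMO_ACCEPT,
	DEMO_DROP,
	DEMO_MIRROR,	/* adding or reordering entries here... */
	DEMO_COUNTER,
	NUM_DEMO_ACTIONS,
};

static void handle_accept(void) { puts("accept"); }
static void handle_drop(void)   { puts("drop"); }

/* ...does not break this table: each slot is tied to its enum value and
 * everything not listed defaults to NULL, exactly like tc_acts_fdb/_nic.
 */
static void (*demo_handlers[NUM_DEMO_ACTIONS])(void) = {
	[DEMO_ACCEPT] = handle_accept,
	[DEMO_DROP]   = handle_drop,
};

int main(void)
{
	for (int i = 0; i < NUM_DEMO_ACTIONS; i++) {
		if (demo_handlers[i])
			demo_handlers[i]();
		else
			printf("action %d not offloaded\n", i);
	}
	return 0;
}
```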
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cb164b62f543..853f312cd757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -11,6 +11,27 @@
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ *    next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+                                           255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+                                            MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+                                         MLX5E_MAX_TX_INLINE_DS + \
+                                         MLX5E_MAX_TX_IPSEC_DS + \
+                                         MAX_SKB_FRAGS + 1, \
+                                         MLX5_SEND_WQEBB_NUM_DS)
+
 #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
 
 static inline
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
+    WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
     /* A WQE must not cross the page boundary, hence two conditions:
      * 1. Its size must not exceed the page size.
      * 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si
              "wqe_size %u is greater than max SQ WQEBBs %u",
              wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
 
-
     return MLX5E_STOP_ROOM(wqe_size);
 }
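The new txrx.h macros above bound the worst-case WQE size: inline headers, IPsec trailer/ICV data segments, and one gather entry per fragment, rounded up to whole WQEBBs. Plugging in typical values gives roughly the following; the constants below are assumptions made for illustration (16-byte data segments, 4 DS per 64-byte WQEBB, a 4-byte inline segment header, a 2-byte inline-header start, VLAN_HLEN of 4, 3 empty DS, and the default MAX_SKB_FRAGS of 17), while the real values come from the mlx5 headers and the kernel configuration:

```c
/* Worked example of the MLX5E_MAX_TX_WQEBBS arithmetic with assumed constants. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define SEND_WQE_DS		16	/* bytes per data segment (assumed) */
#define SEND_WQEBB_NUM_DS	4	/* 64-byte WQEBB / 16-byte DS (assumed) */
#define INLINE_SEG_SZ		4	/* sizeof(struct mlx5_wqe_inline_seg), assumed */
#define INL_HDR_START		2	/* assumed */
#define VLAN_HLEN		4
#define EMPTY_DS_COUNT		3	/* MLX5E_TX_WQE_EMPTY_DS_COUNT, assumed */
#define MAX_SKB_FRAGS		17	/* default kernel configuration */

int main(void)
{
	int ipsec_ds  = DIV_ROUND_UP(INLINE_SEG_SZ + 255 + 1 + 1 + 16, SEND_WQE_DS);
	int inline_ds = DIV_ROUND_UP(366 - INL_HDR_START + VLAN_HLEN, SEND_WQE_DS);
	int wqebbs    = DIV_ROUND_UP(EMPTY_DS_COUNT + inline_ds + ipsec_ds +
				     MAX_SKB_FRAGS + 1, SEND_WQEBB_NUM_DS);

	printf("MAX_TX_IPSEC_DS  = %d\n", ipsec_ds);	/* 18 */
	printf("MAX_TX_INLINE_DS = %d\n", inline_ds);	/* 23 */
	printf("MAX_TX_WQEBBS    = %d\n", wqebbs);	/* 16 */
	return 0;
}
```

Under these assumptions the bound comes out to 16 WQEBBs (a 1 KB WQE), which is what the new check in mlx5e_attach_netdev() below compares against the firmware's max SQ WQEBBs capability before attaching the netdev.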
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 4685c652c97e..20507ef2f956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
     xdpi.page.rq = rq;
 
     dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
-    dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+    dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
 
     if (unlikely(xdp_frame_has_frags(xdpf))) {
         sinfo = xdp_get_shared_info_from_frame(xdpf);
@@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
                    skb_frag_off(frag);
             len = skb_frag_size(frag);
             dma_sync_single_for_device(sq->pdev, addr, len,
-                                       DMA_TO_DEVICE);
+                                       DMA_BIDIRECTIONAL);
         }
     }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 364f04309149..e3a4f01bcceb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5694,6 +5694,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
     mlx5e_fs_set_state_destroy(priv->fs,
                                !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
 
+    /* Validate the max_wqe_size_sq capability. */
+    if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+        mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+                       mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+        return -EIO;
+    }
+
     /* max number of channels may have changed */
     max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
     if (priv->channels.params.num_channels > max_nch) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 58084650151f..a61a43fc8d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -266,7 +266,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_uni
     addr = page_pool_get_dma_addr(au->page);
     /* Non-XSK always uses PAGE_SIZE. */
-    dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
+    dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
     return true;
 }
 
@@ -282,8 +282,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u
         return -ENOMEM;
 
     /* Non-XSK always uses PAGE_SIZE. */
-    addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
-                              rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
+    addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);
     if (unlikely(dma_mapping_error(rq->pdev, addr))) {
         page_pool_recycle_direct(rq->page_pool, au->page);
         au->page = NULL;
@@ -427,14 +426,15 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
 {
     dma_addr_t addr = page_pool_get_dma_addr(au->page);
 
-    dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
+    dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
+                            rq->buff.map_dir);
     page_ref_inc(au->page);
     skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                     au->page, frag_offset, len, truesize);
 }
 
 static inline void
-mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
+mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
                       struct page *page, dma_addr_t addr,
                       int offset_from, int dma_offset, u32 headlen)
 {
@@ -442,7 +442,8 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
     /* Aligning len to sizeof(long) optimizes memcpy performance */
     unsigned int len = ALIGN(headlen, sizeof(long));
 
-    dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
+    dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
+                            rq->buff.map_dir);
     skb_copy_to_linear_data(skb, from, len);
 }
 
@@ -1538,7 +1539,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 
     addr = page_pool_get_dma_addr(au->page);
     dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
-                                  frag_size, DMA_FROM_DEVICE);
+                                  frag_size, rq->buff.map_dir);
     net_prefetch(data);
 
     prog = rcu_dereference(rq->xdp_prog);
@@ -1587,7 +1588,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
     addr = page_pool_get_dma_addr(au->page);
     dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
-                                  rq->buff.frame0_sz, DMA_FROM_DEVICE);
+                                  rq->buff.frame0_sz, rq->buff.map_dir);
     net_prefetchw(va); /* xdp_frame data area */
     net_prefetch(va + rx_headroom);
@@ -1608,7 +1609,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
         addr = page_pool_get_dma_addr(au->page);
         dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
-                                frag_consumed_bytes, DMA_FROM_DEVICE);
+                                frag_consumed_bytes, rq->buff.map_dir);
 
         if (!xdp_buff_has_frags(&xdp)) {
             /* Init on the first fragment to avoid cold cache access
@@ -1905,7 +1906,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
         mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
         /* copy header */
         addr = page_pool_get_dma_addr(head_au->page);
-        mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
+        mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
                               head_offset, head_offset, headlen);
         /* skb linear part was allocated with headlen and aligned to long */
         skb->tail += headlen;
@@ -1939,7 +1940,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
     addr = page_pool_get_dma_addr(au->page);
     dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
-                                  frag_size, DMA_FROM_DEVICE);
+                                  frag_size, rq->buff.map_dir);
     net_prefetch(data);
 
     prog = rcu_dereference(rq->xdp_prog);
@@ -1987,7 +1988,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
     if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
         /* build SKB around header */
-        dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+        dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
         prefetchw(hdr);
         prefetch(data);
         skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
@@ -2009,7 +2010,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
     }
 
     prefetchw(skb->data);
-    mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
+    mlx5e_copy_skb_header(rq, skb, head->page, head->addr,
                           head_offset + rx_headroom, rx_headroom, head_size);
     /* skb linear part was allocated with headlen and aligned to long */
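The en_rx.c and xdp.c changes above make every dma_sync_single_* call use the direction the page was actually mapped with (rq->buff.map_dir, which is bidirectional when XDP may write to and retransmit the buffer) instead of hard-coded DMA_FROM_DEVICE/DMA_TO_DEVICE, and drop DMA_ATTR_SKIP_CPU_SYNC from the mapping. On non-coherent systems a sync whose direction disagrees with the mapping can leave stale data visible. A sketch of that convention, where struct my_rx_buf and the helpers are hypothetical rather than mlx5 code:

```c
/* Sketch of "sync with the same direction you mapped with". */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct my_rx_buf {
	struct page			*page;
	dma_addr_t			addr;
	enum dma_data_direction	map_dir;	/* recorded at map time */
};

static int my_rx_map(struct device *dev, struct my_rx_buf *buf, bool xdp)
{
	/* Bidirectional when XDP may rewrite the buffer and transmit it back. */
	buf->map_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	buf->page = alloc_page(GFP_KERNEL);
	if (!buf->page)
		return -ENOMEM;
	buf->addr = dma_map_page(dev, buf->page, 0, PAGE_SIZE, buf->map_dir);
	if (dma_mapping_error(dev, buf->addr)) {
		__free_page(buf->page);
		return -ENOMEM;
	}
	return 0;
}

static void my_rx_process(struct device *dev, struct my_rx_buf *buf, unsigned int len)
{
	/* Always reuse the recorded direction, never a hard-coded one. */
	dma_sync_single_for_cpu(dev, buf->addr, len, buf->map_dir);
	/* ... inspect or modify the packet ... */
	dma_sync_single_for_device(dev, buf->addr, len, buf->map_dir);
}
```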
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dd6fea9e9a5b..5a6aa61ec82a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3633,10 +3633,14 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
     attr2->action = 0;
     attr2->flags = 0;
     attr2->parse_attr = parse_attr;
-    attr2->esw_attr->out_count = 0;
-    attr2->esw_attr->split_count = 0;
     attr2->dest_chain = 0;
     attr2->dest_ft = NULL;
+
+    if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+        attr2->esw_attr->out_count = 0;
+        attr2->esw_attr->split_count = 0;
+    }
+
     return attr2;
 }
 
@@ -4758,12 +4762,6 @@ int mlx5e_policer_validate(const struct flow_action *action,
         return -EOPNOTSUPP;
     }
 
-    if (act->police.rate_pkt_ps) {
-        NL_SET_ERR_MSG_MOD(extack,
-                           "QoS offload not support packets per second");
-        return -EOPNOTSUPP;
-    }
-
     return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6adca01fbdc9..f7897ddb29c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -305,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
     u16 ds_cnt_inl = 0;
     u16 ds_cnt_ids = 0;
 
+    /* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */
+
     if (attr->insz)
         ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
                                   MLX5_SEND_WQE_DS);
@@ -317,6 +319,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
             inl += VLAN_HLEN;
 
         ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+        if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
+            netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
+                        (u16)MLX5E_MAX_TX_INLINE_DS);
         ds_cnt += ds_cnt_inl;
     }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c59107fa9e6d..2169486c4bfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1387,12 +1387,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
              esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
              esw->esw_funcs.num_vfs, esw->enabled_vports);
 
-    esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
-    if (esw->mode == MLX5_ESWITCH_OFFLOADS)
-        esw_offloads_disable(esw);
-    else if (esw->mode == MLX5_ESWITCH_LEGACY)
-        esw_legacy_disable(esw);
-    mlx5_esw_acls_ns_cleanup(esw);
+    if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
+        esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+        if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+            esw_offloads_disable(esw);
+        else if (esw->mode == MLX5_ESWITCH_LEGACY)
+            esw_legacy_disable(esw);
+        mlx5_esw_acls_ns_cleanup(esw);
+    }
 
     if (esw->mode == MLX5_ESWITCH_OFFLOADS)
         devl_rate_nodes_destroy(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4e50df3139c6..728ca9f2bb9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2310,7 +2310,7 @@ out_free:
 static int esw_offloads_start(struct mlx5_eswitch *esw,
                               struct netlink_ext_ack *extack)
 {
-    int err, err1;
+    int err;
 
     esw->mode = MLX5_ESWITCH_OFFLOADS;
     err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
@@ -2318,11 +2318,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
         NL_SET_ERR_MSG_MOD(extack,
                            "Failed setting eswitch to offloads");
         esw->mode = MLX5_ESWITCH_LEGACY;
-        err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
-        if (err1) {
-            NL_SET_ERR_MSG_MOD(extack,
-                               "Failed setting eswitch back to legacy");
-        }
         mlx5_rescan_drivers(esw->dev);
     }
     if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
@@ -3389,19 +3384,12 @@ err_metadata:
 static int esw_offloads_stop(struct mlx5_eswitch *esw,
                              struct netlink_ext_ack *extack)
 {
-    int err, err1;
+    int err;
 
     esw->mode = MLX5_ESWITCH_LEGACY;
     err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
-    if (err) {
+    if (err)
         NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
-        esw->mode = MLX5_ESWITCH_OFFLOADS;
-        err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
-        if (err1) {
-            NL_SET_ERR_MSG_MOD(extack,
-                               "Failed setting eswitch back to offloads");
-        }
-    }
 
     return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index ee568bf34ae2..108a3503f413 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
                  sizeof(dest->vport.num), hash);
     hash = jhash((const void *)&dest->vport.vhca_id,
                  sizeof(dest->vport.num), hash);
-    if (dest->vport.pkt_reformat)
-        hash = jhash(dest->vport.pkt_reformat,
-                     sizeof(*dest->vport.pkt_reformat),
+    if (flow_act->pkt_reformat)
+        hash = jhash(flow_act->pkt_reformat,
+                     sizeof(*flow_act->pkt_reformat),
                      hash);
     return hash;
 }
@@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
     if (ret)
         return ret;
 
-    return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
-           memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
-                  sizeof(*dest1->vport.pkt_reformat)) : 0;
+    if (flow_act1->pkt_reformat && flow_act2->pkt_reformat)
+        return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat,
+                      sizeof(*flow_act1->pkt_reformat));
+
+    return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);
 }
 
 static int
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 07c583996c29..9d908a0ccfef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -152,7 +152,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
     mlx5_unload_one(dev);
     if (mlx5_health_wait_pci_up(dev))
         mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
-    mlx5_load_one(dev, false);
+    else
+        mlx5_load_one(dev, false);
     devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
                                             BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
                                             BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
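The termination-table change above (eswitch_offloads_termtbl.c) hashes and compares flow_act->pkt_reformat instead of the never-populated dest->vport.pkt_reformat, and the comparison now distinguishes "both NULL" (equal) from "only one set" (different) before falling back to memcmp. The same comparator shape in isolation, with a toy struct standing in for the mlx5 reformat object:

```c
/* Toy version of the "compare two optional objects" logic used by
 * mlx5_eswitch_termtbl_cmp(); struct toy_reformat is invented here.
 */
#include <stdio.h>
#include <string.h>

struct toy_reformat { int type; int size; };

/* Returns 0 when the two entries should be considered the same instance. */
static int toy_reformat_cmp(const struct toy_reformat *a, const struct toy_reformat *b)
{
	if (a && b)
		return memcmp(a, b, sizeof(*a));

	/* 0 when both are NULL, non-zero when exactly one is set. */
	return !(a == b);
}

int main(void)
{
	struct toy_reformat x = { 1, 64 }, y = { 1, 64 }, z = { 2, 32 };

	printf("%d %d %d %d\n",
	       toy_reformat_cmp(&x, &y),	/* 0: identical contents */
	       toy_reformat_cmp(&x, &z),	/* non-zero: different contents */
	       toy_reformat_cmp(NULL, NULL),	/* 0: both unset */
	       toy_reformat_cmp(&x, NULL));	/* 1: only one set */
	return 0;
}
```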