Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c          15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c         39
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c           15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c    57
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c      90
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c        6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h         1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c   42
8 files changed, 195 insertions, 70 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d3fef7fefea8..acf1e8b52b8e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3527,15 +3527,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
}
break;
case e1000_pch_spt:
- if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
- /* Stable 24MHz frequency */
- incperiod = INCPERIOD_24MHZ;
- incvalue = INCVALUE_24MHZ;
- shift = INCVALUE_SHIFT_24MHZ;
- adapter->cc.shift = shift;
- break;
- }
- return -EINVAL;
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHZ;
+ incvalue = INCVALUE_24MHZ;
+ shift = INCVALUE_SHIFT_24MHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_pch_cnp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
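In the e1000e hunk, pch_spt no longer gates on TSYNCRXCTL.SYSCFI: SPT (I219) presumably always clocks SYSTIM from the stable 24MHz source, so a part reading the bit as clear made e1000e_get_base_timinca() return -EINVAL and broke PTP clock setup. The pch_cnp case right below keeps the check, apparently because CNP parts can also run SYSTIM from a different (38.4 MHz) reference.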
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9b698c5acd05..105a26f447c0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2032,6 +2032,21 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
#if L1_CACHE_BYTES < 128
prefetch(xdp->data + L1_CACHE_BYTES);
#endif
+ /* Note, we get here by enabling legacy-rx via:
+ *
+ * ethtool --set-priv-flags <dev> legacy-rx on
+ *
+ * In this mode, we currently get 0 bytes of extra XDP headroom,
+ * as opposed to having legacy-rx off, where XDP packets going
+ * to the stack are processed via i40e_build_skb(), which
+ * currently provides 192 bytes of headroom.
+ *
+ * For i40e_construct_skb() mode this means that
+ * xdp->data_meta will always point to xdp->data, since
+ * the helper cannot expand the head. Should this ever
+ * change in the future for legacy-rx mode, let's also
+ * add xdp->data_meta handling here.
+ */
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -2083,19 +2098,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
struct xdp_buff *xdp)
{
- unsigned int size = xdp->data_end - xdp->data;
+ unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(I40E_SKB_PAD + size);
+ SKB_DATA_ALIGN(I40E_SKB_PAD +
+ (xdp->data_end -
+ xdp->data_hard_start));
#endif
struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(xdp->data);
+ /* Prefetch the first cache line of the first page. If
+ * xdp->data_meta is unused, this points exactly to xdp->data;
+ * otherwise a consumer is likely to access the first few
+ * bytes of metadata before the actual packet data.
+ */
+ prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
+ prefetch(xdp->data_meta + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -2103,8 +2124,10 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, I40E_SKB_PAD);
- __skb_put(skb, size);
+ skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
@@ -2341,7 +2364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (!skb) {
xdp.data = page_address(rx_buffer->page) +
rx_buffer->page_offset;
- xdp_set_data_meta_invalid(&xdp);
+ xdp.data_meta = xdp.data;
xdp.data_hard_start = xdp.data -
i40e_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
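With xdp.data_meta initialized per packet (instead of being marked invalid) and carried into the skb via skb_metadata_set(), i40e now lets XDP programs hand per-packet metadata to the stack. A minimal sketch of such a program, assuming libbpf-style annotations; the program name and the 0x2a marker value are illustrative, not part of this patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_prepend_meta(struct xdp_md *ctx)
{
	void *data, *data_meta;
	__u32 *mark;

	/* grow the metadata area by four bytes in front of the frame */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*mark)))
		return XDP_PASS;

	/* pointers must be reloaded after an adjust helper call */
	data      = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;

	mark = data_meta;
	if ((void *)(mark + 1) > data)	/* bounds check for the verifier */
		return XDP_PASS;

	*mark = 0x2a;	/* picked up later, e.g. by a tc/BPF classifier */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

A tc/BPF classifier can then read the same bytes back between __sk_buff->data_meta and __sk_buff->data.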
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 78574c06635b..c33821d2afb3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2058,6 +2058,7 @@ int igb_up(struct igb_adapter *adapter)
igb_assign_vector(adapter->q_vector[0], 0);
/* Clear any pending interrupts. */
+ rd32(E1000_TSICR);
rd32(E1000_ICR);
igb_irq_enable(adapter);
@@ -3865,6 +3866,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
napi_enable(&(adapter->q_vector[i]->napi));
/* Clear any pending interrupts. */
+ rd32(E1000_TSICR);
rd32(E1000_ICR);
igb_irq_enable(adapter);
@@ -4053,11 +4055,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
- /* disable the queue */
- wr32(E1000_TXDCTL(reg_idx), 0);
- wrfl();
- mdelay(10);
-
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4088,8 +4085,16 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
**/
static void igb_configure_tx(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
int i;
+ /* disable the queues */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
+
+ wrfl();
+ usleep_range(10000, 20000);
+
for (i = 0; i < adapter->num_tx_queues; i++)
igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
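Two independent igb fixes here: the TSICR read next to the existing E1000_ICR read clears any timestamp interrupt events left pending across an up/open, and queue disabling is batched. Previously igb_configure_tx_ring() did wr32/wrfl/mdelay(10) per ring; now igb_configure_tx() disables every queue first, flushes once, and waits once, so worst-case setup latency drops from 10 ms times num_tx_queues to a single 10-20 ms wait, in a sleep (usleep_range()) rather than the busy-wait mdelay() imposes.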
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 55fe8114fe99..50dfb02fa34c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -10,15 +10,9 @@ static struct dentry *ixgbe_dbg_root;
static char ixgbe_dbg_reg_ops_buf[256] = "";
-/**
- * ixgbe_dbg_reg_ops_read - read for reg_ops datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
+static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos,
+ char *dbg_buf)
{
struct ixgbe_adapter *adapter = filp->private_data;
char *buf;
@@ -29,8 +23,7 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
return 0;
buf = kasprintf(GFP_KERNEL, "%s: %s\n",
- adapter->netdev->name,
- ixgbe_dbg_reg_ops_buf);
+ adapter->netdev->name, dbg_buf);
if (!buf)
return -ENOMEM;
@@ -46,6 +39,20 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
}
/**
+ * ixgbe_dbg_reg_ops_read - read for reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos,
+ ixgbe_dbg_reg_ops_buf);
+}
+
+/**
* ixgbe_dbg_reg_ops_write - write into reg_ops datum
* @filp: the opened file
* @buffer: where to find the user's data
@@ -121,33 +128,11 @@ static char ixgbe_dbg_netdev_ops_buf[256] = "";
* @count: the size of the user's buffer
* @ppos: file position offset
**/
-static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
- char __user *buffer,
+static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- struct ixgbe_adapter *adapter = filp->private_data;
- char *buf;
- int len;
-
- /* don't allow partial reads */
- if (*ppos != 0)
- return 0;
-
- buf = kasprintf(GFP_KERNEL, "%s: %s\n",
- adapter->netdev->name,
- ixgbe_dbg_netdev_ops_buf);
- if (!buf)
- return -ENOMEM;
-
- if (count < strlen(buf)) {
- kfree(buf);
- return -ENOSPC;
- }
-
- len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
-
- kfree(buf);
- return len;
+ return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos,
+ ixgbe_dbg_netdev_ops_buf);
}
/**
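Both debugfs read handlers only differed in which static command buffer they echoed, so they now share ixgbe_dbg_common_ops_read(); userspace-visible behavior is unchanged. As before, writing a command such as `write <reg> <value>` to reg_ops (or `tx_timeout` to netdev_ops) and then reading the file back returns a single `<netdev>: <last command result>` line; partial reads still return 0 and an undersized user buffer still gets -ENOSPC.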
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 99b170f1efd1..344a1f213a5f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -445,6 +445,89 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
}
/**
+ * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
+ * @xs: pointer to transformer state struct
+ **/
+static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mfval, manc, reg;
+ int num_filters = 4;
+ bool manc_ipv4;
+ u32 bmcipval;
+ int i, j;
+
+#define MANC_EN_IPV4_FILTER BIT(24)
+#define MFVAL_IPV4_FILTER_SHIFT 16
+#define MFVAL_IPV6_FILTER_SHIFT 24
+#define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))
+
+#define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4))
+#define IXGBE_BMCIPVAL 0x5060
+#define BMCIP_V4 0x2
+#define BMCIP_V6 0x3
+#define BMCIP_MASK 0x3
+
+ manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+ manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
+ mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
+ bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);
+
+ if (xs->props.family == AF_INET) {
+ /* are there any IPv4 filters to check? */
+ if (manc_ipv4) {
+ /* the 4 ipv4 filters are all in MIPAF(3, i) */
+ for (i = 0; i < num_filters; i++) {
+ if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
+ continue;
+
+ reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
+ if (reg == xs->id.daddr.a4)
+ return 1;
+ }
+ }
+
+ if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
+ reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
+ if (reg == xs->id.daddr.a4)
+ return 1;
+ }
+
+ } else {
+ /* if there are ipv4 filters, they are in the last ipv6 slot */
+ if (manc_ipv4)
+ num_filters = 3;
+
+ for (i = 0; i < num_filters; i++) {
+ if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
+ continue;
+
+ for (j = 0; j < 4; j++) {
+ reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
+ if (reg != xs->id.daddr.a6[j])
+ break;
+ }
+ if (j == 4) /* did we match all 4 words? */
+ return 1;
+ }
+
+ if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
+ for (j = 0; j < 4; j++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
+ if (reg != xs->id.daddr.a6[j])
+ break;
+ }
+ if (j == 4) /* did we match all 4 words? */
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
**/
@@ -465,6 +548,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
+ if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
+ return -EINVAL;
+ }
+
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
struct rx_sa rsa;
@@ -575,7 +663,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* hash the new entry for faster search in Rx path */
hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
- (__force u64)rsa.xs->id.spi);
+ (__force u32)rsa.xs->id.spi);
} else {
struct tx_sa tsa;
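The new ixgbe_ipsec_check_mgmt_ip() guards the management/BMC address filters: the MANC/MFVAL/MIPAF and BMCIP registers let the manageability engine claim a handful of IPv4/IPv6 addresses, and offloading an SA whose destination matches one of them would divert that traffic away from the BMC. ixgbe_ipsec_add_sa() therefore now fails such requests with -EINVAL; e.g. an `ip xfrm state add ... offload dev <dev> dir in` targeting such an address is rejected rather than silently breaking management connectivity. The final hunk also narrows the hash key cast from u64 to u32, presumably to match the key width used when rx_sa_list is searched; the SPI itself is a 32-bit (__be32) value either way.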
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ba3035c08572..dd8a3a037c2f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7621,17 +7621,19 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
return;
+ rtnl_lock();
/* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_REMOVING, &adapter->state) ||
- test_bit(__IXGBE_RESETTING, &adapter->state))
+ test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ rtnl_unlock();
return;
+ }
ixgbe_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
adapter->tx_timeout_count++;
- rtnl_lock();
ixgbe_reinit_locked(adapter);
rtnl_unlock();
}
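Moving rtnl_lock() above the state checks closes a race in the reset subtask: the adapter could otherwise go down, start removing, or begin another reset (for example via a concurrent ixgbe_close()) between the test_bit() checks and ixgbe_reinit_locked(). Checking and reinitializing now happen under one rtnl hold; the same fix is mirrored in ixgbevf below.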
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 70c75681495f..56a1031dcc07 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -76,6 +76,7 @@ enum ixgbevf_ring_state_t {
__IXGBEVF_TX_DETECT_HANG,
__IXGBEVF_HANG_CHECK_ARMED,
__IXGBEVF_TX_XDP_RING,
+ __IXGBEVF_TX_XDP_RING_PRIMED,
};
#define ring_is_xdp(ring) \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 083041129539..59416eddd840 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -991,24 +991,45 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_CONSUMED;
/* record the location of the first descriptor for this packet */
- tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
-
i = ring->next_to_use;
- tx_desc = IXGBEVF_TX_DESC(ring, i);
+ tx_buffer = &ring->tx_buffer_info[i];
dma_unmap_len_set(tx_buffer, len, len);
dma_unmap_addr_set(tx_buffer, dma, dma);
tx_buffer->data = xdp->data;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+ /* Populate a minimal context descriptor that tells the
+ * hardware to expect plain Ethernet frames.
+ */
+ if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
+ struct ixgbe_adv_tx_context_desc *context_desc;
+
+ set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
+
+ context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
+ context_desc->vlan_macip_lens =
+ cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl =
+ cpu_to_le32(IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+ context_desc->mss_l4len_idx = 0;
+
+ i = 1;
+ }
/* put descriptor type bits */
cmd_type = IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_DEXT |
IXGBE_ADVTXD_DCMD_IFCS;
cmd_type |= len | IXGBE_TXD_CMD;
+
+ tx_desc = IXGBEVF_TX_DESC(ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
tx_desc->read.olinfo_status =
cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
@@ -1688,6 +1709,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
sizeof(struct ixgbevf_tx_buffer) * ring->count);
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
+ clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -3119,15 +3141,17 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
return;
+ rtnl_lock();
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
- test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
+ rtnl_unlock();
return;
+ }
adapter->tx_timeout_count++;
- rtnl_lock();
ixgbevf_reinit_locked(adapter);
rtnl_unlock();
}
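The new __IXGBEVF_TX_XDP_RING_PRIMED flag makes each XDP TX ring emit exactly one context descriptor before its first data descriptor. The hardware apparently needs the MAC header length from a context descriptor to handle the frames, but XDP_TX frames never request checksum or TSO offload, so one descriptor per ring lifetime is enough. Priming happens on the first transmit after ring bring-up, while next_to_use is still 0, so the context descriptor lands in slot 0 and the first data descriptor in slot 1; ixgbevf_configure_tx_ring() clears the flag so the ring is re-primed after every reset.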