author    David S. Miller <davem@davemloft.net>    2014-07-16 14:09:34 -0700
committer David S. Miller <davem@davemloft.net>    2014-07-16 14:09:34 -0700
commit    1a98c69af1ecd97bfd1f4e4539924a9192434e36 (patch)
tree      a243defcf921ea174f8e43fce11d06830a6a9c36 /drivers/net
parent    7a575f6b907ea5d207d2b5010293c189616eae34 (diff)
parent    b6603fe574af289dbe9eb9fb4c540bca04f5a053 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c                      |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c           |  43
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |   3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c     |   2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c       |  16
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h       |   2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c          |   2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c            |   4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c         |   7
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h       |  18
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h            |   3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c          |  66
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h          |  12
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h          |   1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c            |  16
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c                |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c              |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c           |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c      |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c       |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c           |  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c           |  34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c              |  69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h         |   4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c                 |  25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c |   5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c       |   2
-rw-r--r--  drivers/net/fddi/defxx.c                             |  17
-rw-r--r--  drivers/net/phy/dp83640.c                            |   6
-rw-r--r--  drivers/net/phy/mdio_bus.c                           |  44
-rw-r--r--  drivers/net/ppp/ppp_generic.c                        |   8
-rw-r--r--  drivers/net/ppp/pppoe.c                              |   2
-rw-r--r--  drivers/net/usb/hso.c                                |  50
-rw-r--r--  drivers/net/usb/qmi_wwan.c                           |   1
-rw-r--r--  drivers/net/usb/r8152.c                              |   7
-rw-r--r--  drivers/net/usb/smsc95xx.c                           |  14
-rw-r--r--  drivers/net/wan/farsync.c                            | 112
-rw-r--r--  drivers/net/xen-netfront.c                           |  27
38 files changed, 389 insertions, 275 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 041036e31aa3..1ff676caa9cd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4037,7 +4037,7 @@ static int bond_check_params(struct bond_params *params)
}
if (ad_select) {
- bond_opt_initstr(&newval, lacp_rate);
+ bond_opt_initstr(&newval, ad_select);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
&newval);
if (!valptr) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 31b5340c685e..6f4e18644bd4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -710,13 +710,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
- if (work_done < budget) {
+ if (work_done == 0) {
napi_complete(napi);
/* re-enable TX interrupt */
intrl2_1_mask_clear(ring->priv, BIT(ring->index));
}
- return work_done;
+ return 0;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1339,28 +1339,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
usleep_range(1000, 2000);
}
-static inline int umac_reset(struct bcm_sysport_priv *priv)
+static inline void umac_reset(struct bcm_sysport_priv *priv)
{
- unsigned int timeout = 0;
u32 reg;
- int ret = 0;
-
- umac_writel(priv, 0, UMAC_CMD);
- while (timeout++ < 1000) {
- reg = umac_readl(priv, UMAC_CMD);
- if (!(reg & CMD_SW_RESET))
- break;
-
- udelay(1);
- }
-
- if (timeout == 1000) {
- dev_err(&priv->pdev->dev,
- "timeout waiting for MAC to come out of reset\n");
- ret = -ETIMEDOUT;
- }
- return ret;
+ reg = umac_readl(priv, UMAC_CMD);
+ reg |= CMD_SW_RESET;
+ umac_writel(priv, reg, UMAC_CMD);
+ udelay(10);
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_SW_RESET;
+ umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1412,11 +1401,7 @@ static int bcm_sysport_open(struct net_device *dev)
int ret;
/* Reset UniMAC */
- ret = umac_reset(priv);
- if (ret) {
- netdev_err(dev, "UniMAC reset failed\n");
- return ret;
- }
+ umac_reset(priv);
/* Flush TX and RX FIFOs at TOPCTRL level */
topctrl_flush(priv);
@@ -1699,12 +1684,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb);
- /* We are interfaced to a switch which handles the multicast
- * filtering for us, so we do not support programming any
- * multicast hash table in this Ethernet MAC.
- */
- dev->flags &= ~IFF_MULTICAST;
-
/* libphy will adjust the link state accordingly */
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index cb15e3ac03c4..dca1236dd1cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return;
}
- bnx2x_frag_free(fp, new_data);
+ if (new_data)
+ bnx2x_frag_free(fp, new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6af9e3c046a0..3871ec49cc4d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12946,7 +12946,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
- pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5ba1cfbd60da..16281ad2da12 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
if (cb->skb)
continue;
- /* set the DMA descriptor length once and for all
- * it will only change if we support dynamically sizing
- * priv->rx_buf_len, but we do not
- */
- dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
- priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
-
ret = bcmgenet_rx_refill(priv, cb);
if (ret)
break;
@@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
- err = register_netdev(dev);
- if (err)
- goto err_clk_disable;
+ /* libphy will determine the link state */
+ netif_carrier_off(dev);
/* Turn off the main clock, WOL clock is handled separately */
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
+ err = register_netdev(dev);
+ if (err)
+ goto err;
+
return err;
err_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f117105fed1..e23c993b1362 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
#define EXT_ENERGY_DET_MASK (1 << 12)
#define EXT_RGMII_OOB_CTRL 0x0C
-#define RGMII_MODE_EN (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
+#define RGMII_MODE_EN (1 << 6)
#define ID_MODE_DIS (1 << 16)
#define EXT_GPHY_CTRL 0x1C
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6297e72b77e2..9bced68527a9 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2897,7 +2897,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_enable_busy_poll(eqo);
- be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index fab39e295441..36fc429298e3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (ug_info->rxExtendedFiltering) {
size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ug_info->largestexternallookupkeysize ==
- QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
if (ug_info->largestexternallookupkeysize ==
- QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 168a5ee5e0ba..72b454ce05ac 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1480,6 +1480,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
+ if ((hw->mac.type >= e1000_i210) &&
+ !(igb_get_flash_presence_i210(hw))) {
+ ret_val = igb_pll_workaround_i210(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Initialize identification LED */
ret_val = igb_id_led_init(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2a8bb35c2df2..217f8138851b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -46,14 +46,15 @@
#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
/* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD 0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_EIAME 0x01000000
-#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */
/* Driver loaded bit for FW */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
/* packet buffer parity error detection enabled */
/* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
#define E1000_I2CCMD_OPCODE_READ 0x08000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 89925e405849..ce55ea5d750c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 337161f440dd..65d931669f81 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
}
return ret_val;
}
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an errata in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+ u16 nvm_word, phy_word, pci_word, tmp_nvm;
+ int i;
+
+ /* Get and set needed register values */
+ wuc = rd32(E1000_WUC);
+ mdicnfg = rd32(E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+ wr32(E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+ ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+ &nvm_word);
+ if (ret_val)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state directly from internal PHY */
+ igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+ ret_val = 0;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ /* directly reset the internal PHY */
+ ctrl = rd32(E1000_CTRL);
+ wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+
+ wr32(E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ usleep_range(1000, 2000);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ /* restore WUC register */
+ wr32(E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+ wr32(E1000_MDICNFG, mdicnfg);
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 9f34976687ba..3442b6357d01 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+/* PLL Defines */
+#define E1000_PCI_PMCSR 0x44
+#define E1000_PCI_PMCSR_D3 0x03
+#define E1000_MAX_PLL_TRIES 5
+#define E1000_PHY_PLL_UNCONF 0xFF
+#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
+#define E1000_PHY_PLL_FREQ_REG 0x000E
+#define E1000_INVM_DEFAULT_AL 0x202F
+#define E1000_INVM_AUTOLOAD 0x0A
+#define E1000_INVM_PLL_WO_VAL 0x0010
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 1cc4b1a7e597..f5ba4e4eafb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5759a56aab00..4d2dc17fd31b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7217,6 +7217,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
@@ -7580,6 +7594,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
if (netif_running(netdev))
igb_close(netdev);
+ else
+ igb_reset(adapter);
igb_clear_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 45beca17fa50..dadd9a5f6323 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
- if (l3_proto == swab16(ETH_P_IP))
+ if (l3_proto == htons(ETH_P_IP))
command |= MVNETA_TXD_IP_CSUM;
else
command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
if (phydev->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else
+ else if (phydev->speed == SPEED_100)
val |= MVNETA_GMAC_CONFIG_MII_SPEED;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 80f725228f5b..56022d647837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->irq = priv->eq_table.eq[cq->vector].irq;
- cq->irq_affinity_change = false;
-
return 0;
err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 4b2130760eed..14c00048bbec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -128,6 +128,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
+
+ cq->irq_desc =
+ irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+ cq->vector));
}
} else {
cq->vector = (cq->ring + 1 + priv->port) %
@@ -187,8 +191,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector) {
- if (!cq->is_tx)
- irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector);
}
cq->vector = 0;
@@ -204,6 +206,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
+ irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fa1a069e14e6..68d763d2d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs = priv->tx_usecs;
coal->tx_max_coalesced_frames = priv->tx_frames;
+ coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
+
coal->rx_coalesce_usecs = priv->rx_usecs;
coal->rx_max_coalesced_frames = priv->rx_frames;
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
coal->rate_sample_interval = priv->sample_interval;
coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+
return 0;
}
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ if (!coal->tx_max_coalesced_frames_irq)
+ return -EINVAL;
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+ priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
return mlx4_en_moderation_update(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f384b354c88d..887cf01d831d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2331,7 +2331,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
__be16 current_port;
- if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
if (sa_family == AF_INET6)
@@ -2468,6 +2468,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
+ priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b8ec9208e12a..7765a08f9e84 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -40,6 +40,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/irq.h>
#include "mlx4_en.h"
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
+ skb_mark_napi_id(gro_skb, &cq->napi);
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
+ int cpu_curr;
+ const struct cpumask *aff;
+
INC_PERF_COUNTER(priv->pstats.napi_quota);
- if (unlikely(cq->mcq.irq_affinity_change)) {
- cq->mcq.irq_affinity_change = false;
+
+ cpu_curr = smp_processor_id();
+ aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+ if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+ /* Current cpu is not according to smp_irq_affinity -
+ * probably affinity changed. need to stop this NAPI
+ * poll, and restart it on the right CPU
+ */
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return 0;
}
} else {
/* Done for now */
- cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8be7483f8236..5045bab59633 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-static int mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq,
- int budget)
+static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
int factor = priv->cqe_factor;
u64 timestamp = 0;
int done = 0;
+ int budget = priv->tx_work_limit;
if (!priv->port_up)
- return 0;
+ return true;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
- return done;
+ return done < budget;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- int done;
+ int clean_complete;
- done = mlx4_en_process_tx_cq(dev, cq, budget);
+ clean_complete = mlx4_en_process_tx_cq(dev, cq);
+ if (!clean_complete)
+ return budget;
- /* If we used up all the quota - we're probably not done yet... */
- if (done < budget) {
- /* Done for now */
- cq->mcq.irq_affinity_change = false;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return done;
- } else if (unlikely(cq->mcq.irq_affinity_change)) {
- cq->mcq.irq_affinity_change = false;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return 0;
- }
- return budget;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+
+ return 0;
}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d954ec1eac17..2a004b347e1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,11 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
-struct mlx4_irq_notify {
- void *arg;
- struct irq_affinity_notify notify;
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct mlx4_irq_notify *n = container_of(notify,
- struct mlx4_irq_notify,
- notify);
- struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
- struct radix_tree_iter iter;
- void **slot;
-
- radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
- struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
- if (cq->irq == notify->irq)
- cq->irq_affinity_change = true;
- }
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
- struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
- notify.kref);
- kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
- struct mlx4_dev *dev, int irq)
-{
- struct mlx4_irq_notify *irq_notifier = NULL;
- int err = 0;
-
- irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
- if (!irq_notifier) {
- mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
- irq);
- return;
- }
-
- irq_notifier->notify.irq = irq;
- irq_notifier->notify.notify = mlx4_irq_notifier_notify;
- irq_notifier->notify.release = mlx4_release_irq_notifier;
- irq_notifier->arg = priv;
- err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
- if (err) {
- kfree(irq_notifier);
- irq_notifier = NULL;
- mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
- }
-}
-
-
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
continue;
/*we dont want to break here*/
}
- mlx4_assign_irq_notifier(priv, dev,
- priv->eq_table.eq[vec].irq);
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
}
EXPORT_SYMBOL(mlx4_assign_eq);
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
Belonging to a legacy EQ*/
mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
- irq_set_affinity_notifier(
- priv->eq_table.eq[vec].irq,
- NULL);
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7c1b5ec5378f..2b19dd1f2c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,6 +126,8 @@ enum {
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP)
+#define MLX4_EN_DEFAULT_TX_WORK 256
+
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
@@ -341,6 +343,7 @@ struct mlx4_en_cq {
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct irq_desc *irq_desc;
};
struct mlx4_en_port_profile {
@@ -540,6 +543,7 @@ struct mlx4_en_priv {
__be32 ctrl_flags;
u32 flags;
u8 num_tx_rings_p_up;
+ u32 tx_work_limit;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 51c78ce27b37..6175bd59190a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -540,6 +540,7 @@ enum rtl_register_content {
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
+ Rdy_to_L23 = (1 << 1), /* L23 Enable */
Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
/* Config4 register */
@@ -4883,6 +4884,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
PCI_EXP_LNKCTL_CLKREQ_EN);
}
+static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 data;
+
+ data = RTL_R8(Config3);
+
+ if (enable)
+ data |= Rdy_to_L23;
+ else
+ data &= ~Rdy_to_L23;
+
+ RTL_W8(Config3, data);
+}
+
#define R8168_CPCMD_QUIRK_MASK (\
EnableBist | \
Mac_dbgo_oe | \
@@ -5232,6 +5248,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
};
rtl_hw_start_8168f(tp);
+ rtl_pcie_state_l2l3_enable(tp, false);
rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
@@ -5270,6 +5287,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5522,6 +5541,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5557,6 +5578,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5569,6 +5592,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8101(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b3e148ef5683..9d3748361a1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
{
- u32 value;
-
- value = readl(ioaddr + GMAC_AN_CTRL);
/* auto negotiation enable and External Loopback enable */
- value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+ u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
if (restart)
value |= GMAC_AN_CTRL_RAN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7e6628a91514..1e2bcf5f89e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
x->rx_msg_type_delay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
x->rx_msg_type_delay_resp++;
- else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
x->rx_msg_type_pdelay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
x->rx_msg_type_pdelay_resp++;
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 6ea59ece7e0b..6eb849a56da5 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -292,7 +292,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
+#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp);
+#else
+static inline void dfx_rcv_flush(DFX_board_t *bp) {}
+#endif
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
* Align an sk_buff to a boundary power of 2
*
*/
-
+#ifdef DYNAMIC_BUFFERS
static void my_skb_align(struct sk_buff *skb, int n)
{
unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
skb_reserve(skb, v - x);
}
-
+#endif
/*
* ================
@@ -3108,10 +3112,7 @@ static void dfx_rcv_queue_process(
break;
}
else {
-#ifndef DYNAMIC_BUFFERS
- if (! rx_in_place)
-#endif
- {
+ if (!rx_in_place) {
/* Receive buffer allocated, pass receive packet up */
dma_sync_single_for_cpu(
bp->bus_dev,
@@ -3505,10 +3506,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
}
}
-#else
-static inline void dfx_rcv_flush( DFX_board_t *bp )
-{
-}
#endif /* DYNAMIC_BUFFERS */
/*
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 76fbd3948736..255c21ff274c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1341,15 +1341,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
{
struct dp83640_private *dp83640 = phydev->priv;
- if (!dp83640->hwts_rx_en)
- return false;
-
if (is_status_frame(skb, type)) {
decode_status_frame(dp83640, skb);
kfree_skb(skb);
return true;
}
+ if (!dp83640->hwts_rx_en)
+ return false;
+
SKB_PTP_TYPE(skb) = type;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_work(&dp83640->ts_work);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e58aa54484c..4eaadcfcb0fe 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
+
+/* Walk the list of subnodes of a mdio bus and look for a node that matches the
+ * phy's address with its 'reg' property. If found, set the of_node pointer for
+ * the phy. This allows auto-probed pyh devices to be supplied with information
+ * passed in via DT.
+ */
+static void of_mdiobus_link_phydev(struct mii_bus *mdio,
+ struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *child;
+
+ if (dev->of_node || !mdio->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(mdio->dev.of_node, child) {
+ int addr;
+ int ret;
+
+ ret = of_property_read_u32(child, "reg", &addr);
+ if (ret < 0) {
+ dev_err(dev, "%s has invalid PHY address\n",
+ child->full_name);
+ continue;
+ }
+
+ /* A PHY must have a reg property in the range [0-31] */
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "%s PHY address %i is too large\n",
+ child->full_name, addr);
+ continue;
+ }
+
+ if (addr == phydev->addr) {
+ dev->of_node = child;
+ return;
+ }
+ }
+}
+#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+ struct phy_device *phydev)
+{
+}
#endif
/**
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index c38ee903bd59..3ed16a89b5d8 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
- int len, err;
+ int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
if (IS_ERR(code))
return PTR_ERR(code);
- err = sk_chk_filter(code, uprog.len);
- if (err) {
- kfree(code);
- return err;
- }
-
*p = code;
return uprog.len;
}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea7efd11857..6c9c16d76935 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
dev->hard_header_len);
- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 50b36b299946..a36401802cec 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -258,10 +258,8 @@ struct hso_serial {
* so as not to drop characters on the floor.
*/
int curr_rx_urb_idx;
- u16 curr_rx_urb_offset;
u8 rx_urb_filled[MAX_RX_URBS];
struct tasklet_struct unthrottle_tasklet;
- struct work_struct retry_unthrottle_workqueue;
};
struct hso_device {
@@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty)
tasklet_hi_schedule(&serial->unthrottle_tasklet);
}
-static void hso_unthrottle_workfunc(struct work_struct *work)
-{
- struct hso_serial *serial =
- container_of(work, struct hso_serial,
- retry_unthrottle_workqueue);
- hso_unthrottle_tasklet(serial);
-}
-
/* open the requested serial port */
static int hso_serial_open(struct tty_struct *tty, struct file *filp)
{
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
tasklet_init(&serial->unthrottle_tasklet,
(void (*)(unsigned long))hso_unthrottle_tasklet,
(unsigned long)serial);
- INIT_WORK(&serial->retry_unthrottle_workqueue,
- hso_unthrottle_workfunc);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
if (!usb_gone)
hso_stop_serial_device(serial->parent);
tasklet_kill(&serial->unthrottle_tasklet);
- cancel_work_sync(&serial->retry_unthrottle_workqueue);
}
if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
{
struct tty_struct *tty;
- int write_length_remaining = 0;
- int curr_write_len;
+ int count;
/* Sanity check */
if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ tty_kref_put(tty);
+ return -1;
+ }
+
/* Push data to tty */
- write_length_remaining = urb->actual_length -
- serial->curr_rx_urb_offset;
D1("data to push to tty");
- while (write_length_remaining) {
- if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
- tty_kref_put(tty);
- return -1;
- }
- curr_write_len = tty_insert_flip_string(&serial->port,
- urb->transfer_buffer + serial->curr_rx_urb_offset,
- write_length_remaining);
- serial->curr_rx_urb_offset += curr_write_len;
- write_length_remaining -= curr_write_len;
+ count = tty_buffer_request_room(&serial->port, urb->actual_length);
+ if (count >= urb->actual_length) {
+ tty_insert_flip_string(&serial->port, urb->transfer_buffer,
+ urb->actual_length);
tty_flip_buffer_push(&serial->port);
+ } else {
+ dev_warn(&serial->parent->usb->dev,
+ "dropping data, %d bytes lost\n", urb->actual_length);
}
+
tty_kref_put(tty);
- if (write_length_remaining == 0) {
- serial->curr_rx_urb_offset = 0;
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- }
- return write_length_remaining;
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+
+ return 0;
}
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
}
}
serial->curr_rx_urb_idx = 0;
- serial->curr_rx_urb_offset = 0;
if (serial->tx_urb)
usb_kill_urb(serial->tx_urb);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf62d7e8329f..c4638c67f6b9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -741,6 +741,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
+ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7d2dd80c4a7a..e1e430587868 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1367,7 +1367,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
struct sk_buff_head seg_list;
struct sk_buff *segs, *nskb;
- features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+ features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs) || !segs)
goto drop;
@@ -3213,8 +3213,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
struct r8152 *tp = netdev_priv(dev);
struct tally_counter tally;
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
+ usb_autopm_put_interface(tp->intf);
+
data[0] = le64_to_cpu(tally.tx_packets);
data[1] = le64_to_cpu(tally.rx_packets);
data[2] = le64_to_cpu(tally.tx_errors);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 424db65e4396..d07bf4cb893f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
return ret;
}
+static int smsc95xx_reset_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ int ret;
+
+ ret = smsc95xx_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ return smsc95xx_resume(intf);
+}
+
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
.probe = usbnet_probe,
.suspend = smsc95xx_suspend,
.resume = smsc95xx_resume,
- .reset_resume = smsc95xx_resume,
+ .reset_resume = smsc95xx_reset_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
.supports_autosuspend = 1,
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 93ace042d0aa..1f041271f7fe 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
"FarSync TE1"
};
-static void
+static int
fst_init_card(struct fst_card_info *card)
{
int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
* we'll have to revise it in some way then.
*/
for (i = 0; i < card->nports; i++) {
- err = register_hdlc_device(card->ports[i].dev);
- if (err < 0) {
- int j;
+ err = register_hdlc_device(card->ports[i].dev);
+ if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
- i, -err);
- for (j = i; j < card->nports; j++) {
- free_netdev(card->ports[j].dev);
- card->ports[j].dev = NULL;
- }
- card->nports = i;
- break;
- }
+ i, -err);
+ while (i--)
+ unregister_hdlc_device(card->ports[i].dev);
+ return err;
+ }
}
pr_info("%s-%s: %s IRQ%d, %d ports\n",
port_to_dev(&card->ports[0])->name,
port_to_dev(&card->ports[card->nports - 1])->name,
type_strings[card->type], card->irq, card->nports);
+ return 0;
}
static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
pr_err("Failed to enable card. Err %d\n", -err);
- kfree(card);
- return err;
+ goto enable_fail;
}
if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
pr_err("Failed to allocate regions. Err %d\n", -err);
- pci_disable_device(pdev);
- kfree(card);
- return err;
+ goto regions_fail;
}
/* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->phys_ctlmem = pci_resource_start(pdev, 3);
if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
pr_err("Physical memory remap failed\n");
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto ioremap_physmem_fail;
}
if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
pr_err("Control memory remap failed\n");
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto ioremap_ctlmem_fail;
}
dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
/* Register the interrupt handler */
if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
pr_err("Unable to register interrupt %d\n", card->irq);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto irq_fail;
}
/* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
while (i--)
free_netdev(card->ports[i].dev);
pr_err("FarSync: out of memory\n");
- free_irq(card->irq, card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENOMEM;
+ goto hdlcdev_fail;
}
card->ports[i].dev = dev;
card->ports[i].card = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, card);
/* Remainder of card setup */
+ if (no_of_cards_added >= FST_MAX_CARDS) {
+ pr_err("FarSync: too many cards\n");
+ err = -ENOMEM;
+ goto card_array_fail;
+ }
fst_card_array[no_of_cards_added] = card;
card->card_no = no_of_cards_added++; /* Record instance and bump it */
- fst_init_card(card);
+ err = fst_init_card(card);
+ if (err)
+ goto init_card_fail;
if (card->family == FST_FAMILY_TXU) {
/*
* Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&card->rx_dma_handle_card);
if (card->rx_dma_handle_host == NULL) {
pr_err("Could not allocate rx dma buffer\n");
- fst_disable_intr(card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto rx_dma_fail;
}
card->tx_dma_handle_host =
pci_alloc_consistent(card->device, FST_MAX_MTU,
&card->tx_dma_handle_card);
if (card->tx_dma_handle_host == NULL) {
pr_err("Could not allocate tx dma buffer\n");
- fst_disable_intr(card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto tx_dma_fail;
}
}
return 0; /* Success */
+
+tx_dma_fail:
+ pci_free_consistent(card->device, FST_MAX_MTU,
+ card->rx_dma_handle_host,
+ card->rx_dma_handle_card);
+rx_dma_fail:
+ fst_disable_intr(card);
+ for (i = 0 ; i < card->nports ; i++)
+ unregister_hdlc_device(card->ports[i].dev);
+init_card_fail:
+ fst_card_array[card->card_no] = NULL;
+card_array_fail:
+ for (i = 0 ; i < card->nports ; i++)
+ free_netdev(card->ports[i].dev);
+hdlcdev_fail:
+ free_irq(card->irq, card);
+irq_fail:
+ iounmap(card->ctlmem);
+ioremap_ctlmem_fail:
+ iounmap(card->mem);
+ioremap_physmem_fail:
+ pci_release_regions(pdev);
+regions_fail:
+ pci_disable_device(pdev);
+enable_fail:
+ kfree(card);
+ return err;
}
/*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a02368b..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
unsigned int i = 0;
unsigned int num_queues = info->netdev->real_num_tx_queues;
+ netif_carrier_off(info->netdev);
+
for (i = 0; i < num_queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
- /* Stop old i/f to prevent errors whilst we rebuild the state. */
- spin_lock_bh(&queue->rx_lock);
- spin_lock_irq(&queue->tx_lock);
- netif_carrier_off(queue->info->netdev);
- spin_unlock_irq(&queue->tx_lock);
- spin_unlock_bh(&queue->rx_lock);
-
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
queue->tx_evtchn = queue->rx_evtchn = 0;
queue->tx_irq = queue->rx_irq = 0;
+ napi_synchronize(&queue->napi);
+
/* End access and free the pages */
xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
/* By now, the queue structures have been set up */
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
- spin_lock_bh(&queue->rx_lock);
- spin_lock_irq(&queue->tx_lock);
/* Step 1: Discard all pending TX packet fragments. */
+ spin_lock_irq(&queue->tx_lock);
xennet_release_tx_bufs(queue);
+ spin_unlock_irq(&queue->tx_lock);
/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ spin_lock_bh(&queue->rx_lock);
+
for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
skb_frag_t *frag;
const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
}
queue->rx.req_prod_pvt = requeue_idx;
+
+ spin_unlock_bh(&queue->rx_lock);
}
/*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
+
notify_remote_via_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq);
- xennet_tx_buf_gc(queue);
- xennet_alloc_rx_buffers(queue);
+ spin_lock_irq(&queue->tx_lock);
+ xennet_tx_buf_gc(queue);
spin_unlock_irq(&queue->tx_lock);
+
+ spin_lock_bh(&queue->rx_lock);
+ xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock);
}