Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c | 2389
1 file changed, 1556 insertions(+), 833 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 46a3f86125be..f09bc5dfe8b2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.99"
-#define DRV_MODULE_RELDATE "April 20, 2009"
+#define DRV_MODULE_VERSION "3.102"
+#define DRV_MODULE_RELDATE "September 1, 2009"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -92,7 +92,7 @@
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU 60
#define TG3_MAX_MTU(tp) \
- ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
+ ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
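(For illustration only: a minimal sketch of how these bounds are typically enforced, under the assumption that the driver's MTU-change path compares against TG3_MIN_MTU and TG3_MAX_MTU. The helper name and calling convention here are hypothetical, not the driver's actual tg3_change_mtu().)

/* Hypothetical bounds check modeled on TG3_MIN_MTU/TG3_MAX_MTU above:
 * jumbo-capable parts accept up to 9000 bytes of payload, others 1500. */
static int tg3_mtu_in_range(int new_mtu, int jumbo_capable)
{
	int max_mtu = jumbo_capable ? 9000 : 1500;

	return new_mtu >= 60 && new_mtu <= max_mtu;
}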
/* These numbers seem to be hard coded in the NIC firmware somehow.
* You can't change the ring sizes, but you can change where you place
@@ -102,6 +102,7 @@
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
+#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -110,26 +111,34 @@
* replace things like '% foo' with '& (foo - 1)'.
*/
#define TG3_RX_RCB_RING_SIZE(tp) \
- ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
+ (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
#define TG3_TX_RING_SIZE 512
#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
TG3_RX_RING_SIZE)
-#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
- TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
+ TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
- TG3_RX_RCB_RING_SIZE(tp))
+ TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
-#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
-#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
+#define TG3_DMA_BYTE_ENAB 64
+
+#define TG3_RX_STD_DMA_SZ 1536
+#define TG3_RX_JMB_DMA_SZ 9046
+
+#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
+
+#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
+#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
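(Worked values, for reference: the standard and jumbo map sizes come out to 1536 + 64 = 1600 and 9046 + 64 = 9110 bytes. A self-checking restatement follows; note that reading the 64-byte pad as slack for the chip's DMA transfer granularity is an inference from the TG3_DMA_BYTE_ENAB name, not something the patch states.)

/* Mirrors the macros above; _Static_assert requires C11. */
#define DMA_BYTE_ENAB	64
#define STD_DMA_SZ	1536
#define JMB_DMA_SZ	9046
#define DMA_TO_MAP_SZ(x) ((x) + DMA_BYTE_ENAB)

_Static_assert(DMA_TO_MAP_SZ(STD_DMA_SZ) == 1600, "std map size");
_Static_assert(DMA_TO_MAP_SZ(JMB_DMA_SZ) == 9110, "jmb map size");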
/* minimum number of free TX descriptors required to wake up TX process */
-#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
+#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
#define TG3_RAW_IP_ALIGN 2
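(The comment above about exposing the ring-size constants to GCC can be made concrete: because the sizes are powers of two, '% TG3_TX_RING_SIZE' and '& (TG3_TX_RING_SIZE - 1)' are equivalent, and the latter avoids a division. A standalone sketch of the NEXT_TX() wraparound:)

#include <stdio.h>

#define TX_RING_SIZE	512	/* power of two, as required */
#define NEXT_TX(n)	(((n) + 1) & (TX_RING_SIZE - 1))

int main(void)
{
	/* The mask makes the index wrap: (511 + 1) & 511 == 0. */
	printf("NEXT_TX(5)   = %d\n", NEXT_TX(5));	/* 6 */
	printf("NEXT_TX(511) = %d\n", NEXT_TX(511));	/* 0 */
	return 0;
}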
@@ -153,6 +162,7 @@ MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
+#define TG3_RSS_MIN_NUM_MSIX_VECS 2
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
@@ -219,11 +229,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -605,39 +616,47 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
static void tg3_disable_ints(struct tg3 *tp)
{
+ int i;
+
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
-}
-
-static inline void tg3_cond_int(struct tg3 *tp)
-{
- if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
- (tp->hw_status->status & SD_STATUS_UPDATED))
- tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
- else
- tw32(HOSTCC_MODE, tp->coalesce_mode |
- (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+ for (i = 0; i < tp->irq_max; i++)
+ tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
+ int i;
+ u32 coal_now = 0;
+
tp->irq_sync = 0;
wmb();
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- (tp->last_tag << 24));
- if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- (tp->last_tag << 24));
- tg3_cond_int(tp);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+ if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+ coal_now |= tnapi->coal_now;
+ }
+
+ /* Force an initial interrupt */
+ if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+ (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
+ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+ else
+ tw32(HOSTCC_MODE, tp->coalesce_mode |
+ HOSTCC_MODE_ENABLE | coal_now);
}
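(One detail worth calling out in the pair of functions above: writing 0x00000001 to a vector's interrupt mailbox masks it, while writing last_tag << 24 unmasks it and acknowledges status-block work up to that tag. A small sketch of that convention as inferred from the code; the exact register encoding is an assumption:)

#include <stdint.h>

/* Assumed mailbox encoding, inferred from tg3_disable_ints() and
 * tg3_enable_ints(): bit 0 masks the vector; bits 31:24 echo the
 * last processed status tag when unmasking. */
static inline uint32_t mbox_mask(void)		{ return 0x00000001; }
static inline uint32_t mbox_unmask(uint8_t tag)	{ return (uint32_t)tag << 24; }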
-static inline unsigned int tg3_has_work(struct tg3 *tp)
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int work_exists = 0;
/* check for phy events */
@@ -648,22 +667,23 @@ static inline unsigned int tg3_has_work(struct tg3 *tp)
work_exists = 1;
}
/* check for RX/TX work to do */
- if (sblk->idx[0].tx_consumer != tp->tx_cons ||
- sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+ if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+ *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
work_exists = 1;
return work_exists;
}
-/* tg3_restart_ints
+/* tg3_int_reenable
* similar to tg3_enable_ints, but it accurately determines whether there
* is new work pending and can return without flushing the PIO write
* which reenables interrupts
*/
-static void tg3_restart_ints(struct tg3 *tp)
+static void tg3_int_reenable(struct tg3_napi *tnapi)
{
- tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- tp->last_tag << 24);
+ struct tg3 *tp = tnapi->tp;
+
+ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
mmiowb();
/* When doing tagged status, this work check is unnecessary.
@@ -671,39 +691,58 @@ static void tg3_restart_ints(struct tg3 *tp)
* work we've completed.
*/
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
- tg3_has_work(tp))
+ tg3_has_work(tnapi))
tw32(HOSTCC_MODE, tp->coalesce_mode |
- (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+ HOSTCC_MODE_ENABLE | tnapi->coal_now);
+}
+
+static void tg3_napi_disable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--)
+ napi_disable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_enable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ napi_enable(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
- napi_disable(&tp->napi);
+ tg3_napi_disable(tp);
netif_tx_disable(tp->dev);
}
static inline void tg3_netif_start(struct tg3 *tp)
{
- netif_wake_queue(tp->dev);
- /* NOTE: unconditional netif_wake_queue is only appropriate
- * so long as all callers are assured to have free tx slots
- * (such as after tg3_init_hw)
+ /* NOTE: unconditional netif_tx_wake_all_queues is only
+ * appropriate so long as all callers are assured to
+ * have free tx slots (such as after tg3_init_hw)
*/
- napi_enable(&tp->napi);
- tp->hw_status->status |= SD_STATUS_UPDATED;
+ netif_tx_wake_all_queues(tp->dev);
+
+ tg3_napi_enable(tp);
+ tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
- u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
+ u32 clock_ctrl;
u32 orig_clock_ctrl;
if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
return;
+ clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
+
orig_clock_ctrl = clock_ctrl;
clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
CLOCK_CTRL_CLKRUN_OENABLE |
@@ -743,7 +782,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
*val = 0x0;
- frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -784,7 +823,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
unsigned int loops;
int ret;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
(reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
return 0;
@@ -794,7 +833,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
udelay(80);
}
- frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -917,7 +956,9 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
tw32(MAC_PHYCFG2, val);
val = tr32(MAC_PHYCFG1);
- val &= ~MAC_PHYCFG1_RGMII_INT;
+ val &= ~(MAC_PHYCFG1_RGMII_INT |
+ MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
+ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
tw32(MAC_PHYCFG1, val);
return;
@@ -933,15 +974,18 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
tw32(MAC_PHYCFG2, val);
- val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
- MAC_PHYCFG1_RGMII_SND_STAT_EN);
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
+ val = tr32(MAC_PHYCFG1);
+ val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
+ MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
+ if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
}
- tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
+ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
+ MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
+ tw32(MAC_PHYCFG1, val);
val = tr32(MAC_EXT_RGMII_MODE);
val &= ~(MAC_RGMII_MODE_RX_INT_B |
@@ -977,6 +1021,21 @@ static void tg3_mdio_start(struct tg3 *tp)
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ u32 funcnum, is_serdes;
+
+ funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
+ if (funcnum)
+ tp->phy_addr = 2;
+ else
+ tp->phy_addr = 1;
+
+ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ if (is_serdes)
+ tp->phy_addr += 7;
+ } else
+ tp->phy_addr = PHY_ADDR;
+
if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
@@ -1064,6 +1123,7 @@ static int tg3_mdio_init(struct tg3 *tp)
case TG3_PHY_ID_RTL8201E:
case TG3_PHY_ID_BCMAC131:
phydev->interface = PHY_INTERFACE_MODE_MII;
+ tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
break;
}
@@ -1469,14 +1529,38 @@ static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
+static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
+{
+ u32 phytest;
+
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+ u32 phy;
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ phytest | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
+ if (enable)
+ phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
+ else
+ phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
+ tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
+ }
+ tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+ }
+}
+
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
u32 reg;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
return;
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+ tg3_phy_fet_toggle_apd(tp, enable);
+ return;
+ }
+
reg = MII_TG3_MISC_SHDW_WREN |
MII_TG3_MISC_SHDW_SCR5_SEL |
MII_TG3_MISC_SHDW_SCR5_LPED |
@@ -1506,20 +1590,22 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
u32 ephy;
- if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
- tg3_writephy(tp, MII_TG3_EPHY_TEST,
- ephy | MII_TG3_EPHY_SHADOW_EN);
- if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
+ u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ ephy | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, reg, &phy)) {
if (enable)
- phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
+ phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
else
- phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
- tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
+ phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+ tg3_writephy(tp, reg, phy);
}
- tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
+ tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
}
} else {
phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
@@ -1888,7 +1974,7 @@ out:
if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
/* Cannot do read-modify-write on 5401 */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
- } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
+ } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
u32 phy_reg;
/* Set bit 14 with read-modify-write to preserve other bits */
@@ -1900,7 +1986,7 @@ out:
/* Set phy register 0x10 bit 0 to high fifo elasticity to support
* jumbo frames transmission.
*/
- if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
u32 phy_reg;
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
@@ -1910,7 +1996,7 @@ out:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
/* adjust output voltage */
- tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
+ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
}
tg3_phy_toggle_automdix(tp, 1);
@@ -1925,8 +2011,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
return;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
struct net_device *dev_peer;
dev_peer = pci_get_drvdata(tp->pdev_peer);
@@ -2655,7 +2742,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
break;
default:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
SPEED_10;
*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
@@ -2990,7 +3077,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+ else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
tg3_writephy(tp, MII_TG3_IMASK, ~0);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -3100,7 +3187,9 @@ relink:
tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
else
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
- } else
+ } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
@@ -3167,6 +3256,15 @@ relink:
pci_write_config_word(tp->pdev,
tp->pcie_cap + PCI_EXP_LNKCTL,
newlnkctl);
+ } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
+ u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+ else
+ newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+ if (newreg != oldreg)
+ tw32(TG3_PCIE_LNKCTL, newreg);
}
if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -3848,9 +3946,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
else
current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
- tp->hw_status->status =
+ tp->napi[0].hw_status->status =
(SD_STATUS_UPDATED |
- (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
+ (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
for (i = 0; i < 100; i++) {
tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
@@ -4216,24 +4314,32 @@ static void tg3_tx_recover(struct tg3 *tp)
spin_unlock(&tp->lock);
}
-static inline u32 tg3_tx_avail(struct tg3 *tp)
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
smp_mb();
- return (tp->tx_pending -
- ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+ return tnapi->tx_pending -
+ ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
/* Tigon3 never reports partial packet sends. So we do not
* need special logic to handle SKBs that have not had all
* of their frags sent yet, like SunGEM does.
*/
-static void tg3_tx(struct tg3 *tp)
+static void tg3_tx(struct tg3_napi *tnapi)
{
- u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
- u32 sw_idx = tp->tx_cons;
+ struct tg3 *tp = tnapi->tp;
+ u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+ u32 sw_idx = tnapi->tx_cons;
+ struct netdev_queue *txq;
+ int index = tnapi - tp->napi;
+
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ index--;
+
+ txq = netdev_get_tx_queue(tp->dev, index);
while (sw_idx != hw_idx) {
- struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
+ struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb;
int i, tx_bug = 0;
@@ -4249,7 +4355,7 @@ static void tg3_tx(struct tg3 *tp)
sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- ri = &tp->tx_buffers[sw_idx];
+ ri = &tnapi->tx_buffers[sw_idx];
if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
tx_bug = 1;
sw_idx = NEXT_TX(sw_idx);
@@ -4263,7 +4369,7 @@ static void tg3_tx(struct tg3 *tp)
}
}
- tp->tx_cons = sw_idx;
+ tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
* before checking for netif_queue_stopped(). Without the
@@ -4272,13 +4378,13 @@ static void tg3_tx(struct tg3 *tp)
*/
smp_mb();
- if (unlikely(netif_queue_stopped(tp->dev) &&
- (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
- netif_tx_lock(tp->dev);
- if (netif_queue_stopped(tp->dev) &&
- (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
- netif_wake_queue(tp->dev);
- netif_tx_unlock(tp->dev);
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
}
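(The descriptor accounting in tg3_tx_avail() and TG3_TX_WAKEUP_THRESH() is easy to sanity-check on its own. A minimal standalone model, with the ring size and quarter-of-pending threshold taken from the macros earlier in the file:)

#include <stdio.h>

#define TX_RING_SIZE	512

/* Same arithmetic as tg3_tx_avail(): unsigned subtraction plus the
 * power-of-two mask handles prod wrapping below cons. */
static unsigned int tx_avail(unsigned int pending,
			     unsigned int prod, unsigned int cons)
{
	return pending - ((prod - cons) & (TX_RING_SIZE - 1));
}

int main(void)
{
	unsigned int pending = TX_RING_SIZE - 1;	/* default: 511 */

	printf("avail  = %u\n", tx_avail(pending, 100, 0));	/* 411 */
	printf("wakeup > %u free\n", pending / 4);		/* 127 */
	return 0;
}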
@@ -4293,33 +4399,35 @@ static void tg3_tx(struct tg3 *tp)
* buffers the cpu only reads the last cacheline of the RX descriptor
* (to fetch the error flags, vlan tag, checksum, and opaque cookie).
*/
-static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
+static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
int src_idx, u32 dest_idx_unmasked)
{
+ struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *desc;
struct ring_info *map, *src_map;
struct sk_buff *skb;
dma_addr_t mapping;
int skb_size, dest_idx;
+ struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
src_map = NULL;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
- desc = &tp->rx_std[dest_idx];
- map = &tp->rx_std_buffers[dest_idx];
+ desc = &tpr->rx_std[dest_idx];
+ map = &tpr->rx_std_buffers[dest_idx];
if (src_idx >= 0)
- src_map = &tp->rx_std_buffers[src_idx];
- skb_size = tp->rx_pkt_buf_sz;
+ src_map = &tpr->rx_std_buffers[src_idx];
+ skb_size = tp->rx_pkt_map_sz;
break;
case RXD_OPAQUE_RING_JUMBO:
dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
- desc = &tp->rx_jumbo[dest_idx];
- map = &tp->rx_jumbo_buffers[dest_idx];
+ desc = &tpr->rx_jmb[dest_idx].std;
+ map = &tpr->rx_jmb_buffers[dest_idx];
if (src_idx >= 0)
- src_map = &tp->rx_jumbo_buffers[src_idx];
- skb_size = RX_JUMBO_PKT_BUF_SZ;
+ src_map = &tpr->rx_jmb_buffers[src_idx];
+ skb_size = TG3_RX_JMB_MAP_SZ;
break;
default:
@@ -4332,14 +4440,13 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size);
+ skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
if (skb == NULL)
return -ENOMEM;
skb_reserve(skb, tp->rx_offset);
- mapping = pci_map_single(tp->pdev, skb->data,
- skb_size - tp->rx_offset,
+ mapping = pci_map_single(tp->pdev, skb->data, skb_size,
PCI_DMA_FROMDEVICE);
map->skb = skb;
@@ -4358,28 +4465,30 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
* members of the RX descriptor are invariant. See notes above
* tg3_alloc_rx_skb for full details.
*/
-static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
+static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
int src_idx, u32 dest_idx_unmasked)
{
+ struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
int dest_idx;
+ struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
- dest_desc = &tp->rx_std[dest_idx];
- dest_map = &tp->rx_std_buffers[dest_idx];
- src_desc = &tp->rx_std[src_idx];
- src_map = &tp->rx_std_buffers[src_idx];
+ dest_desc = &tpr->rx_std[dest_idx];
+ dest_map = &tpr->rx_std_buffers[dest_idx];
+ src_desc = &tpr->rx_std[src_idx];
+ src_map = &tpr->rx_std_buffers[src_idx];
break;
case RXD_OPAQUE_RING_JUMBO:
dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
- dest_desc = &tp->rx_jumbo[dest_idx];
- dest_map = &tp->rx_jumbo_buffers[dest_idx];
- src_desc = &tp->rx_jumbo[src_idx];
- src_map = &tp->rx_jumbo_buffers[src_idx];
+ dest_desc = &tpr->rx_jmb[dest_idx].std;
+ dest_map = &tpr->rx_jmb_buffers[dest_idx];
+ src_desc = &tpr->rx_jmb[src_idx].std;
+ src_map = &tpr->rx_jmb_buffers[src_idx];
break;
default:
@@ -4395,13 +4504,6 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
src_map->skb = NULL;
}
-#if TG3_VLAN_TAG_USED
-static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
-{
- return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb);
-}
-#endif
-
/* The RX ring scheme is composed of multiple rings which post fresh
* buffers to the chip, and one special ring the chip uses to report
* status back to the host.
@@ -4426,14 +4528,16 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
* If both the host and chip were to write into the same ring, cache line
* eviction could occur since both entities want it in an exclusive state.
*/
-static int tg3_rx(struct tg3 *tp, int budget)
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
+ struct tg3 *tp = tnapi->tp;
u32 work_mask, rx_std_posted = 0;
- u32 sw_idx = tp->rx_rcb_ptr;
+ u32 sw_idx = tnapi->rx_rcb_ptr;
u16 hw_idx;
int received;
+ struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
- hw_idx = tp->hw_status->idx[0].rx_producer;
+ hw_idx = *(tnapi->rx_rcb_prod_idx);
/*
* We need to order the read of hw_idx and the read of
* the opaque cookie.
@@ -4442,7 +4546,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
work_mask = 0;
received = 0;
while (sw_idx != hw_idx && budget > 0) {
- struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
+ struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
unsigned int len;
struct sk_buff *skb;
dma_addr_t dma_addr;
@@ -4451,27 +4555,25 @@ static int tg3_rx(struct tg3 *tp, int budget)
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
- dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
- mapping);
- skb = tp->rx_std_buffers[desc_idx].skb;
- post_ptr = &tp->rx_std_ptr;
+ struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
+ dma_addr = pci_unmap_addr(ri, mapping);
+ skb = ri->skb;
+ post_ptr = &tpr->rx_std_ptr;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
- mapping);
- skb = tp->rx_jumbo_buffers[desc_idx].skb;
- post_ptr = &tp->rx_jumbo_ptr;
- }
- else {
+ struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
+ dma_addr = pci_unmap_addr(ri, mapping);
+ skb = ri->skb;
+ post_ptr = &tpr->rx_jmb_ptr;
+ } else
goto next_pkt_nopost;
- }
work_mask |= opaque_key;
if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
(desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
drop_it:
- tg3_recycle_rx(tp, opaque_key,
+ tg3_recycle_rx(tnapi, opaque_key,
desc_idx, *post_ptr);
drop_it_no_recycle:
/* Other statistics kept track of by card. */
@@ -4491,20 +4593,19 @@ static int tg3_rx(struct tg3 *tp, int budget)
) {
int skb_size;
- skb_size = tg3_alloc_rx_skb(tp, opaque_key,
+ skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
desc_idx, *post_ptr);
if (skb_size < 0)
goto drop_it;
- pci_unmap_single(tp->pdev, dma_addr,
- skb_size - tp->rx_offset,
+ pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
skb_put(skb, len);
} else {
struct sk_buff *copy_skb;
- tg3_recycle_rx(tp, opaque_key,
+ tg3_recycle_rx(tnapi, opaque_key,
desc_idx, *post_ptr);
copy_skb = netdev_alloc_skb(tp->dev,
@@ -4541,11 +4642,11 @@ static int tg3_rx(struct tg3 *tp, int budget)
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL &&
desc->type_flags & RXD_FLAG_VLAN) {
- tg3_vlan_rx(tp, skb,
- desc->err_vlan & RXD_VLAN_MASK);
+ vlan_gro_receive(&tnapi->napi, tp->vlgrp,
+ desc->err_vlan & RXD_VLAN_MASK, skb);
} else
#endif
- napi_gro_receive(&tp->napi, skb);
+ napi_gro_receive(&tnapi->napi, skb);
received++;
budget--;
@@ -4567,23 +4668,23 @@ next_pkt_nopost:
/* Refresh hw_idx to see if there is new work */
if (sw_idx == hw_idx) {
- hw_idx = tp->hw_status->idx[0].rx_producer;
+ hw_idx = *(tnapi->rx_rcb_prod_idx);
rmb();
}
}
/* ACK the status ring. */
- tp->rx_rcb_ptr = sw_idx;
- tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
+ tnapi->rx_rcb_ptr = sw_idx;
+ tw32_rx_mbox(tnapi->consmbox, sw_idx);
/* Refill RX ring(s). */
if (work_mask & RXD_OPAQUE_RING_STD) {
- sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
+ sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
sw_idx);
}
if (work_mask & RXD_OPAQUE_RING_JUMBO) {
- sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
+ sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
sw_idx);
}
@@ -4592,9 +4693,10 @@ next_pkt_nopost:
return received;
}
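(Stripped of the buffer management, the loop above is the standard single-consumer drain of a ring the chip produces into. A toy model for orientation; the ring size here is assumed, the real return-ring size being TG3_RX_RCB_RING_SIZE(tp):)

#include <stdio.h>

#define RET_RING_SIZE 512	/* assumed; see TG3_RX_RCB_RING_SIZE(tp) */

/* Toy mirror of the tg3_rx() drain: the chip advances hw_idx, the
 * host advances sw_idx, and neither side writes the other's index. */
static int drain(unsigned int *sw_idx, unsigned int hw_idx, int budget)
{
	int received = 0;

	while (*sw_idx != hw_idx && budget > 0) {
		/* ...process the descriptor at *sw_idx here... */
		*sw_idx = (*sw_idx + 1) % RET_RING_SIZE;
		received++;
		budget--;
	}
	return received;
}

int main(void)
{
	unsigned int sw = 500;

	/* hw_idx has wrapped to 10, so 22 descriptors are pending. */
	printf("received %d\n", drain(&sw, 10, 64));
	return 0;
}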
-static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
/* handle link change and other phy events */
if (!(tp->tg3_flags &
@@ -4618,8 +4720,8 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
}
/* run TX completion thread */
- if (sblk->idx[0].tx_consumer != tp->tx_cons) {
- tg3_tx(tp);
+ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
+ tg3_tx(tnapi);
if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
return work_done;
}
@@ -4628,20 +4730,21 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
* All RX "locking" is done by ensuring outside
* code synchronizes with tg3->napi.poll()
*/
- if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
- work_done += tg3_rx(tp, budget - work_done);
+ if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ work_done += tg3_rx(tnapi, budget - work_done);
return work_done;
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
- struct tg3 *tp = container_of(napi, struct tg3, napi);
+ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+ struct tg3 *tp = tnapi->tp;
int work_done = 0;
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
while (1) {
- work_done = tg3_poll_work(tp, work_done, budget);
+ work_done = tg3_poll_work(tnapi, work_done, budget);
if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
goto tx_recovery;
@@ -4650,19 +4753,19 @@ static int tg3_poll(struct napi_struct *napi, int budget)
break;
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
- /* tp->last_tag is used in tg3_restart_ints() below
+ /* tp->last_tag is used in tg3_int_reenable() below
* to tell the hw how much work has been processed,
* so we must read it before checking for more work.
*/
- tp->last_tag = sblk->status_tag;
- tp->last_irq_tag = tp->last_tag;
+ tnapi->last_tag = sblk->status_tag;
+ tnapi->last_irq_tag = tnapi->last_tag;
rmb();
} else
sblk->status &= ~SD_STATUS_UPDATED;
- if (likely(!tg3_has_work(tp))) {
+ if (likely(!tg3_has_work(tnapi))) {
napi_complete(napi);
- tg3_restart_ints(tp);
+ tg3_int_reenable(tnapi);
break;
}
}
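(The ordering constraint in the comment above deserves emphasis: last_tag must be captured, with an rmb() after it, before the final tg3_has_work() check, so a status update landing between the two is either seen now or re-raises the interrupt when the tag is echoed back. A compilable restatement of just the bookkeeping:)

#include <stdint.h>

struct tag_state {
	uint8_t last_tag;	/* echoed to the chip via int_mbox << 24 */
	uint8_t last_irq_tag;	/* lets the ISR detect stale interrupts */
};

/* Mirrors the tagged-status branch of tg3_poll(); the driver follows
 * this with rmb() so the tag read cannot be reordered after the
 * has-work check. */
static void note_progress(struct tag_state *ts, uint8_t status_tag)
{
	ts->last_tag = status_tag;
	ts->last_irq_tag = status_tag;
}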
@@ -4678,12 +4781,15 @@ tx_recovery:
static void tg3_irq_quiesce(struct tg3 *tp)
{
+ int i;
+
BUG_ON(tp->irq_sync);
tp->irq_sync = 1;
smp_mb();
- synchronize_irq(tp->pdev->irq);
+ for (i = 0; i < tp->irq_cnt; i++)
+ synchronize_irq(tp->napi[i].irq_vec);
}
static inline int tg3_irq_sync(struct tg3 *tp)
@@ -4713,14 +4819,15 @@ static inline void tg3_full_unlock(struct tg3 *tp)
*/
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct tg3 *tp = netdev_priv(dev);
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
- prefetch(tp->hw_status);
- prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+ prefetch(tnapi->hw_status);
+ if (tnapi->rx_rcb)
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
if (likely(!tg3_irq_sync(tp)))
- napi_schedule(&tp->napi);
+ napi_schedule(&tnapi->napi);
return IRQ_HANDLED;
}
@@ -4731,11 +4838,12 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
*/
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct tg3 *tp = netdev_priv(dev);
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
- prefetch(tp->hw_status);
- prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+ prefetch(tnapi->hw_status);
+ if (tnapi->rx_rcb)
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/*
* Writing any value to intr-mbox-0 clears PCI INTA# and
* chip-internal interrupt pending events.
@@ -4745,16 +4853,16 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (likely(!tg3_irq_sync(tp)))
- napi_schedule(&tp->napi);
+ napi_schedule(&tnapi->napi);
return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct tg3 *tp = netdev_priv(dev);
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
@@ -4785,9 +4893,9 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
if (tg3_irq_sync(tp))
goto out;
sblk->status &= ~SD_STATUS_UPDATED;
- if (likely(tg3_has_work(tp))) {
- prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
- napi_schedule(&tp->napi);
+ if (likely(tg3_has_work(tnapi))) {
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+ napi_schedule(&tnapi->napi);
} else {
/* No work, shared interrupt perhaps? re-enable
* interrupts, and flush that PCI write
@@ -4801,9 +4909,9 @@ out:
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct tg3 *tp = netdev_priv(dev);
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
@@ -4811,7 +4919,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
* Reading the PCI State register will confirm whether the
* interrupt is ours and will flush the status block.
*/
- if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
+ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
handled = 0;
@@ -4838,14 +4946,14 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
* so that the above check can report that the screaming interrupts
* are unhandled. Eventually they will be silenced.
*/
- tp->last_irq_tag = sblk->status_tag;
+ tnapi->last_irq_tag = sblk->status_tag;
if (tg3_irq_sync(tp))
goto out;
- prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
- napi_schedule(&tp->napi);
+ napi_schedule(&tnapi->napi);
out:
return IRQ_RETVAL(handled);
@@ -4854,9 +4962,9 @@ out:
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct tg3 *tp = netdev_priv(dev);
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
if ((sblk->status & SD_STATUS_UPDATED) ||
!(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
@@ -4886,7 +4994,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
tp->irq_sync = 0;
- napi_enable(&tp->napi);
+ tg3_napi_enable(tp);
dev_close(tp->dev);
tg3_full_lock(tp, 0);
}
@@ -4896,9 +5004,11 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
+ int i;
struct tg3 *tp = netdev_priv(dev);
- tg3_interrupt(tp->pdev->irq, dev);
+ for (i = 0; i < tp->irq_cnt; i++)
+ tg3_interrupt(tp->napi[i].irq_vec, dev);
}
#endif
@@ -4993,13 +5103,14 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
#endif
}
-static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
+static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
u32 last_plus_one, u32 *start,
u32 base_flags, u32 mss)
{
+ struct tg3_napi *tnapi = &tp->napi[0];
struct sk_buff *new_skb;
dma_addr_t new_addr = 0;
u32 entry = *start;
@@ -5034,7 +5145,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
dev_kfree_skb(new_skb);
new_skb = NULL;
} else {
- tg3_set_txd(tp, entry, new_addr, new_skb->len,
+ tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
base_flags, 1 | (mss << 1));
*start = NEXT_TX(entry);
}
@@ -5043,11 +5154,10 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
/* Now clean up the sw ring entries. */
i = 0;
while (entry != last_plus_one) {
- if (i == 0) {
- tp->tx_buffers[entry].skb = new_skb;
- } else {
- tp->tx_buffers[entry].skb = NULL;
- }
+ if (i == 0)
+ tnapi->tx_buffers[entry].skb = new_skb;
+ else
+ tnapi->tx_buffers[entry].skb = NULL;
entry = NEXT_TX(entry);
i++;
}
@@ -5058,11 +5168,11 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
return ret;
}
-static void tg3_set_txd(struct tg3 *tp, int entry,
+static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
dma_addr_t mapping, int len, u32 flags,
u32 mss_and_is_end)
{
- struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
+ struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
int is_end = (mss_and_is_end & 0x1);
u32 mss = (mss_and_is_end >> 1);
u32 vlan_tag = 0;
@@ -5084,23 +5194,29 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
/* hard_start_xmit for devices that don't have any bugs and
* support TG3_FLG2_HW_TSO_2 only.
*/
-static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
u32 len, entry, base_flags, mss;
struct skb_shared_info *sp;
dma_addr_t mapping;
+ struct tg3_napi *tnapi;
+ struct netdev_queue *txq;
- len = skb_headlen(skb);
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ tnapi++;
/* We are running in BH disabled context with netif_tx_lock
* and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5109,11 +5225,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- entry = tp->tx_prod;
+ entry = tnapi->tx_prod;
base_flags = 0;
mss = 0;
if ((mss = skb_shinfo(skb)->gso_size) != 0) {
int tcp_opt_len, ip_tcp_len;
+ u32 hdrlen;
if (skb_header_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5122,7 +5239,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
+ hdrlen = skb_headlen(skb) - ETH_HLEN;
else {
struct iphdr *iph = ip_hdr(skb);
@@ -5131,9 +5248,17 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph->check = 0;
iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
- mss |= (ip_tcp_len + tcp_opt_len) << 9;
+ hdrlen = ip_tcp_len + tcp_opt_len;
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ mss |= (hdrlen & 0xc) << 12;
+ if (hdrlen & 0x10)
+ base_flags |= 0x00000010;
+ base_flags |= (hdrlen & 0x3e0) << 5;
+ } else
+ mss |= hdrlen << 9;
+
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
@@ -5157,9 +5282,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = sp->dma_head;
- tp->tx_buffers[entry].skb = skb;
+ tnapi->tx_buffers[entry].skb = skb;
+
+ len = skb_headlen(skb);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ !mss && skb->len > ETH_DATA_LEN)
+ base_flags |= TXD_FLAG_JMB_PKT;
- tg3_set_txd(tp, entry, mapping, len, base_flags,
+ tg3_set_txd(tnapi, entry, mapping, len, base_flags,
(skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
entry = NEXT_TX(entry);
@@ -5174,9 +5305,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = frag->size;
mapping = sp->dma_maps[i];
- tp->tx_buffers[entry].skb = NULL;
+ tnapi->tx_buffers[entry].skb = NULL;
- tg3_set_txd(tp, entry, mapping, len,
+ tg3_set_txd(tnapi, entry, mapping, len,
base_flags, (i == last) | (mss << 1));
entry = NEXT_TX(entry);
@@ -5184,13 +5315,13 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Packets are ready, update Tx producer idx local and on card. */
- tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+ tw32_tx_mbox(tnapi->prodmbox, entry);
- tp->tx_prod = entry;
- if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
- netif_stop_queue(dev);
- if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
- netif_wake_queue(tp->dev);
+ tnapi->tx_prod = entry;
+ if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(txq);
+ if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
+ netif_tx_wake_queue(txq);
}
out_unlock:
@@ -5199,7 +5330,8 @@ out_unlock:
return NETDEV_TX_OK;
}
-static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
+static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
+ struct net_device *);
/* Use GSO to workaround a rare TSO bug that may be triggered when the
* TSO header is greater than 80 bytes.
@@ -5207,11 +5339,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
struct sk_buff *segs, *nskb;
+ u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
/* Estimate the number of fragments in the worst case */
- if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+ if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
netif_stop_queue(tp->dev);
- if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
+ if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
return NETDEV_TX_BUSY;
netif_wake_queue(tp->dev);
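(The gso_segs * 3 figure above is a worst-case reservation: each segment produced by GSO may need up to three descriptors, roughly a header descriptor plus a couple of payload fragments; that rationale is an inference, not stated in the patch. A worked example with assumed numbers:)

#include <stdio.h>

int main(void)
{
	unsigned int payload = 65536, mss = 1448;
	unsigned int gso_segs = (payload + mss - 1) / mss;	/* 46 */

	/* tg3_tso_bug() refuses to proceed unless the queue has more
	 * than gso_segs * 3 free descriptors. */
	printf("need > %u free descriptors\n", gso_segs * 3);	/* 138 */
	return 0;
}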
@@ -5237,13 +5370,15 @@ tg3_tso_bug_end:
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
* support TG3_FLG2_HW_TSO_1 or firmware TSO only.
*/
-static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
+ struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
u32 len, entry, base_flags, mss;
struct skb_shared_info *sp;
int would_hit_hwbug;
dma_addr_t mapping;
+ struct tg3_napi *tnapi = &tp->napi[0];
len = skb_headlen(skb);
@@ -5252,7 +5387,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -5263,7 +5398,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- entry = tp->tx_prod;
+ entry = tnapi->tx_prod;
base_flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
@@ -5333,7 +5468,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
mapping = sp->dma_head;
- tp->tx_buffers[entry].skb = skb;
+ tnapi->tx_buffers[entry].skb = skb;
would_hit_hwbug = 0;
@@ -5342,7 +5477,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
else if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
- tg3_set_txd(tp, entry, mapping, len, base_flags,
+ tg3_set_txd(tnapi, entry, mapping, len, base_flags,
(skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
entry = NEXT_TX(entry);
@@ -5358,7 +5493,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
len = frag->size;
mapping = sp->dma_maps[i];
- tp->tx_buffers[entry].skb = NULL;
+ tnapi->tx_buffers[entry].skb = NULL;
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
@@ -5367,10 +5502,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
would_hit_hwbug = 1;
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
- tg3_set_txd(tp, entry, mapping, len,
+ tg3_set_txd(tnapi, entry, mapping, len,
base_flags, (i == last)|(mss << 1));
else
- tg3_set_txd(tp, entry, mapping, len,
+ tg3_set_txd(tnapi, entry, mapping, len,
base_flags, (i == last));
entry = NEXT_TX(entry);
@@ -5395,12 +5530,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
}
/* Packets are ready, update Tx producer idx local and on card. */
- tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+ tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
- tp->tx_prod = entry;
- if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+ tnapi->tx_prod = entry;
+ if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+ if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
netif_wake_queue(tp->dev);
}
@@ -5468,63 +5603,40 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
return err;
}
-/* Free up pending packets in all rx/tx rings.
- *
- * The chip has been shut down and the driver detached from
- * the networking, so no interrupts or new tx packets will
- * end up in the driver. tp->{tx,}lock is not held and we are not
- * in an interrupt context and thus may sleep.
- */
-static void tg3_free_rings(struct tg3 *tp)
+static void tg3_rx_prodring_free(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
{
- struct ring_info *rxp;
int i;
+ struct ring_info *rxp;
for (i = 0; i < TG3_RX_RING_SIZE; i++) {
- rxp = &tp->rx_std_buffers[i];
+ rxp = &tpr->rx_std_buffers[i];
if (rxp->skb == NULL)
continue;
- pci_unmap_single(tp->pdev,
- pci_unmap_addr(rxp, mapping),
- tp->rx_pkt_buf_sz - tp->rx_offset,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(rxp->skb);
- rxp->skb = NULL;
- }
-
- for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
- rxp = &tp->rx_jumbo_buffers[i];
- if (rxp->skb == NULL)
- continue;
pci_unmap_single(tp->pdev,
pci_unmap_addr(rxp, mapping),
- RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
+ tp->rx_pkt_map_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(rxp->skb);
rxp->skb = NULL;
}
- for (i = 0; i < TG3_TX_RING_SIZE; ) {
- struct tx_ring_info *txp;
- struct sk_buff *skb;
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+ for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+ rxp = &tpr->rx_jmb_buffers[i];
- txp = &tp->tx_buffers[i];
- skb = txp->skb;
+ if (rxp->skb == NULL)
+ continue;
- if (skb == NULL) {
- i++;
- continue;
+ pci_unmap_single(tp->pdev,
+ pci_unmap_addr(rxp, mapping),
+ TG3_RX_JMB_MAP_SZ,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(rxp->skb);
+ rxp->skb = NULL;
}
-
- skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
-
- txp->skb = NULL;
-
- i += skb_shinfo(skb)->nr_frags + 1;
-
- dev_kfree_skb_any(skb);
}
}
@@ -5535,23 +5647,20 @@ static void tg3_free_rings(struct tg3 *tp)
* end up in the driver. tp->{tx,}lock are held and thus
* we may not sleep.
*/
-static int tg3_init_rings(struct tg3 *tp)
+static int tg3_rx_prodring_alloc(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
{
- u32 i;
-
- /* Free up all the SKBs. */
- tg3_free_rings(tp);
+ u32 i, rx_pkt_dma_sz;
+ struct tg3_napi *tnapi = &tp->napi[0];
/* Zero out all descriptors. */
- memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
- memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
- memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
+ memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
- tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
+ rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
- (tp->dev->mtu > ETH_DATA_LEN))
- tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
+ tp->dev->mtu > ETH_DATA_LEN)
+ rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
+ tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
/* Initialize invariants of the rings, we only set this
* stuff once. This works because the card does not
@@ -5560,102 +5669,242 @@ static int tg3_init_rings(struct tg3 *tp)
for (i = 0; i < TG3_RX_RING_SIZE; i++) {
struct tg3_rx_buffer_desc *rxd;
- rxd = &tp->rx_std[i];
- rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
- << RXD_LEN_SHIFT;
+ rxd = &tpr->rx_std[i];
+ rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
rxd->opaque = (RXD_OPAQUE_RING_STD |
(i << RXD_OPAQUE_INDEX_SHIFT));
}
- if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
- for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
- struct tg3_rx_buffer_desc *rxd;
-
- rxd = &tp->rx_jumbo[i];
- rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
- << RXD_LEN_SHIFT;
- rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
- RXD_FLAG_JUMBO;
- rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
- (i << RXD_OPAQUE_INDEX_SHIFT));
- }
- }
-
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+ if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
printk(KERN_WARNING PFX
"%s: Using a smaller RX standard ring, "
"only %d out of %d buffers were allocated "
"successfully.\n",
tp->dev->name, i, tp->rx_pending);
if (i == 0)
- return -ENOMEM;
+ goto initfail;
tp->rx_pending = i;
break;
}
}
+ if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
+ goto done;
+
+ memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
+
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
+ for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+ struct tg3_rx_buffer_desc *rxd;
+
+ rxd = &tpr->rx_jmb[i].std;
+ rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+ RXD_FLAG_JUMBO;
+ rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
+ if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
-1, i) < 0) {
printk(KERN_WARNING PFX
"%s: Using a smaller RX jumbo ring, "
"only %d out of %d buffers were "
"allocated successfully.\n",
tp->dev->name, i, tp->rx_jumbo_pending);
- if (i == 0) {
- tg3_free_rings(tp);
- return -ENOMEM;
- }
+ if (i == 0)
+ goto initfail;
tp->rx_jumbo_pending = i;
break;
}
}
}
+
+done:
return 0;
+
+initfail:
+ tg3_rx_prodring_free(tp, tpr);
+ return -ENOMEM;
}
-/*
- * Must not be invoked with interrupt sources disabled and
- * the hardware shutdown down.
- */
-static void tg3_free_consistent(struct tg3 *tp)
+static void tg3_rx_prodring_fini(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
{
- kfree(tp->rx_std_buffers);
- tp->rx_std_buffers = NULL;
- if (tp->rx_std) {
+ kfree(tpr->rx_std_buffers);
+ tpr->rx_std_buffers = NULL;
+ kfree(tpr->rx_jmb_buffers);
+ tpr->rx_jmb_buffers = NULL;
+ if (tpr->rx_std) {
pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
- tp->rx_std, tp->rx_std_mapping);
- tp->rx_std = NULL;
+ tpr->rx_std, tpr->rx_std_mapping);
+ tpr->rx_std = NULL;
}
- if (tp->rx_jumbo) {
+ if (tpr->rx_jmb) {
pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
- tp->rx_jumbo, tp->rx_jumbo_mapping);
- tp->rx_jumbo = NULL;
+ tpr->rx_jmb, tpr->rx_jmb_mapping);
+ tpr->rx_jmb = NULL;
+ }
+}
+
+static int tg3_rx_prodring_init(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
+ TG3_RX_RING_SIZE, GFP_KERNEL);
+ if (!tpr->rx_std_buffers)
+ return -ENOMEM;
+
+ tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
+ &tpr->rx_std_mapping);
+ if (!tpr->rx_std)
+ goto err_out;
+
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+ tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
+ TG3_RX_JUMBO_RING_SIZE,
+ GFP_KERNEL);
+ if (!tpr->rx_jmb_buffers)
+ goto err_out;
+
+ tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
+ TG3_RX_JUMBO_RING_BYTES,
+ &tpr->rx_jmb_mapping);
+ if (!tpr->rx_jmb)
+ goto err_out;
}
- if (tp->rx_rcb) {
- pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
- tp->rx_rcb, tp->rx_rcb_mapping);
- tp->rx_rcb = NULL;
+
+ return 0;
+
+err_out:
+ tg3_rx_prodring_fini(tp, tpr);
+ return -ENOMEM;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void tg3_free_rings(struct tg3 *tp)
+{
+ int i, j;
+
+ for (j = 0; j < tp->irq_cnt; j++) {
+ struct tg3_napi *tnapi = &tp->napi[j];
+
+ if (!tnapi->tx_buffers)
+ continue;
+
+ for (i = 0; i < TG3_TX_RING_SIZE; ) {
+ struct tx_ring_info *txp;
+ struct sk_buff *skb;
+
+ txp = &tnapi->tx_buffers[i];
+ skb = txp->skb;
+
+ if (skb == NULL) {
+ i++;
+ continue;
+ }
+
+ skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+
+ txp->skb = NULL;
+
+ i += skb_shinfo(skb)->nr_frags + 1;
+
+ dev_kfree_skb_any(skb);
+ }
}
- if (tp->tx_ring) {
- pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
- tp->tx_ring, tp->tx_desc_mapping);
- tp->tx_ring = NULL;
+
+ tg3_rx_prodring_free(tp, &tp->prodring[0]);
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_init_rings(struct tg3 *tp)
+{
+ int i;
+
+ /* Free up all the SKBs. */
+ tg3_free_rings(tp);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ tnapi->tx_prod = 0;
+ tnapi->tx_cons = 0;
+ if (tnapi->tx_ring)
+ memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+ tnapi->rx_rcb_ptr = 0;
+ if (tnapi->rx_rcb)
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
- if (tp->hw_status) {
- pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
- tp->hw_status, tp->status_mapping);
- tp->hw_status = NULL;
+
+ return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shutdown down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tnapi->tx_ring) {
+ pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
+ tnapi->tx_ring, tnapi->tx_desc_mapping);
+ tnapi->tx_ring = NULL;
+ }
+
+ kfree(tnapi->tx_buffers);
+ tnapi->tx_buffers = NULL;
+
+ if (tnapi->rx_rcb) {
+ pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = NULL;
+ }
+
+ if (tnapi->hw_status) {
+ pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
+ tnapi->hw_status,
+ tnapi->status_mapping);
+ tnapi->hw_status = NULL;
+ }
}
+
if (tp->hw_stats) {
pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
+
+ tg3_rx_prodring_fini(tp, &tp->prodring[0]);
}
/*
@@ -5664,54 +5913,79 @@ static void tg3_free_consistent(struct tg3 *tp)
*/
static int tg3_alloc_consistent(struct tg3 *tp)
{
- tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
- (TG3_RX_RING_SIZE +
- TG3_RX_JUMBO_RING_SIZE)) +
- (sizeof(struct tx_ring_info) *
- TG3_TX_RING_SIZE),
- GFP_KERNEL);
- if (!tp->rx_std_buffers)
- return -ENOMEM;
+ int i;
- tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
- tp->tx_buffers = (struct tx_ring_info *)
- &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
+ if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
+ return -ENOMEM;
- tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
- &tp->rx_std_mapping);
- if (!tp->rx_std)
+ tp->hw_stats = pci_alloc_consistent(tp->pdev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping);
+ if (!tp->hw_stats)
goto err_out;
- tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
- &tp->rx_jumbo_mapping);
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
- if (!tp->rx_jumbo)
- goto err_out;
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ struct tg3_hw_status *sblk;
- tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
- &tp->rx_rcb_mapping);
- if (!tp->rx_rcb)
- goto err_out;
+ tnapi->hw_status = pci_alloc_consistent(tp->pdev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping);
+ if (!tnapi->hw_status)
+ goto err_out;
- tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
- &tp->tx_desc_mapping);
- if (!tp->tx_ring)
- goto err_out;
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ sblk = tnapi->hw_status;
- tp->hw_status = pci_alloc_consistent(tp->pdev,
- TG3_HW_STATUS_SIZE,
- &tp->status_mapping);
- if (!tp->hw_status)
- goto err_out;
+ /*
+ * When RSS is enabled, the status block format changes
+ * slightly. The "rx_jumbo_consumer", "reserved",
+ * and "rx_mini_consumer" members get mapped to the
+ * other three rx return ring producer indexes.
+ */
+ switch (i) {
+ default:
+ tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+ break;
+ case 2:
+ tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
+ break;
+ case 3:
+ tnapi->rx_rcb_prod_idx = &sblk->reserved;
+ break;
+ case 4:
+ tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
+ break;
+ }
- tp->hw_stats = pci_alloc_consistent(tp->pdev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping);
- if (!tp->hw_stats)
- goto err_out;
+ /*
+ * If multivector RSS is enabled, vector 0 does not handle
+ * rx or tx interrupts. Don't allocate any resources for it.
+ */
+ if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
+ continue;
- memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+ tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping);
+ if (!tnapi->rx_rcb)
+ goto err_out;
+
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+ tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
+ if (!tnapi->tx_buffers)
+ goto err_out;
+
+ tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping);
+ if (!tnapi->tx_ring)
+ goto err_out;
+ }
return 0;
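The switch above is the whole trick behind multivector RSS status reporting: return rings 2-4 borrow status-block fields that a non-RSS chip uses for the jumbo and mini consumer indexes. A stand-alone sketch of the mapping; the struct below is illustrative and trimmed, not the driver's exact tg3_hw_status layout:

    #include <stdint.h>

    struct idx_pair {
    	uint16_t rx_producer;
    	uint16_t tx_consumer;
    };

    struct hw_status_sketch {		/* illustrative field set only */
    	uint32_t status;
    	uint32_t status_tag;
    	uint16_t rx_jumbo_consumer;
    	uint16_t rx_consumer;
    	uint16_t rx_mini_consumer;
    	uint16_t reserved;
    	struct idx_pair idx[16];
    };

    /* Which field carries the rx return ring producer index for a
     * given MSI-X vector once RSS repurposes the status block.
     */
    static uint16_t *rss_rx_prod_idx(struct hw_status_sketch *sblk, int vec)
    {
    	switch (vec) {
    	case 2:
    		return &sblk->rx_jumbo_consumer;
    	case 3:
    		return &sblk->reserved;
    	case 4:
    		return &sblk->rx_mini_consumer;
    	default:
    		return &sblk->idx[0].rx_producer;
    	}
    }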
@@ -5823,8 +6097,11 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
- if (tp->hw_status)
- memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status)
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ }
if (tp->hw_stats)
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
@@ -6111,7 +6388,7 @@ static int tg3_chip_reset(struct tg3 *tp)
{
u32 val;
void (*write_op)(struct tg3 *, u32, u32);
- int err;
+ int i, err;
tg3_nvram_lock(tp);
@@ -6151,14 +6428,24 @@ static int tg3_chip_reset(struct tg3 *tp)
* sharing or irqpoll.
*/
tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
- if (tp->hw_status) {
- tp->hw_status->status = 0;
- tp->hw_status->status_tag = 0;
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status) {
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ }
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
}
- tp->last_tag = 0;
- tp->last_irq_tag = 0;
smp_mb();
- synchronize_irq(tp->pdev->irq);
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ synchronize_irq(tp->napi[i].irq_vec);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+ }
/* do the reset */
val = GRC_MISC_CFG_CORECLK_RESET;
@@ -6212,6 +6499,8 @@ static int tg3_chip_reset(struct tg3 *tp)
udelay(120);
if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
+ u16 val16;
+
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
int i;
u32 cfg_val;
@@ -6225,12 +6514,22 @@ static int tg3_chip_reset(struct tg3 *tp)
cfg_val | (1 << 15));
}
- /* Set PCIE max payload size to 128 bytes and
- * clear the "no snoop" and "relaxed ordering" bits.
+ /* Clear the "no snoop" and "relaxed ordering" bits. */
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_DEVCTL,
+ &val16);
+ val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ /*
+ * Older PCIe devices only support the 128-byte
+ * MPS setting. Enforce the restriction.
*/
+ if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
+ val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
pci_write_config_word(tp->pdev,
tp->pcie_cap + PCI_EXP_DEVCTL,
- 0);
+ val16);
pcie_set_readrq(tp->pdev, 4096);
@@ -6288,16 +6587,18 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32_f(MAC_MODE, 0);
udelay(40);
- tg3_mdio_start(tp);
-
tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
err = tg3_poll_fw(tp);
if (err)
return err;
+ tg3_mdio_start(tp);
+
if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
val = tr32(0x7c00);
tw32(0x7c00, val | (1 << 25));
@@ -6647,24 +6948,175 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
- tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
- tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
- tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
- tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
- tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+ int i;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+ tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+ tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+
+ tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+ tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+ } else {
+ tw32(HOSTCC_TXCOL_TICKS, 0);
+ tw32(HOSTCC_TXMAX_FRAMES, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
+ tw32(HOSTCC_RXCOL_TICKS, 0);
+ tw32(HOSTCC_RXMAX_FRAMES, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
}
- tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
- tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
u32 val = ec->stats_block_coalesce_usecs;
+ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
if (!netif_carrier_ok(tp->dev))
val = 0;
tw32(HOSTCC_STAT_COAL_TICKS, val);
}
+
+ for (i = 0; i < tp->irq_cnt - 1; i++) {
+ u32 reg;
+
+ reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_coalesce_usecs);
+ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_coalesce_usecs);
+ reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames);
+ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames);
+ reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames_irq);
+ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames_irq);
+ }
+
+ for (; i < tp->irq_max - 1; i++) {
+ tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
+}
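Every per-vector write above computes its register as a _VEC1 base plus i * 0x18: each additional vector owns a bank of six u32 host-coalescing registers. A tiny sketch of that address arithmetic (hc_vec_reg is a hypothetical helper, not a driver function):

    /* Six u32 registers per vector => 0x18-byte bank stride. */
    #define HC_VEC_BANK_STRIDE 0x18

    static unsigned int hc_vec_reg(unsigned int vec1_base, int vec)
    {
    	/* vec counts from 1; vector 0 uses the legacy registers */
    	return vec1_base + (vec - 1) * HC_VEC_BANK_STRIDE;
    }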
+
+/* tp->lock is held. */
+static void tg3_rings_reset(struct tg3 *tp)
+{
+ int i;
+ u32 stblk, txrcb, rxrcb, limit;
+ struct tg3_napi *tnapi = &tp->napi[0];
+
+ /* Disable all transmit rings but the first. */
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+ else
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+
+ for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+ txrcb < limit; txrcb += TG3_BDINFO_SIZE)
+ tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Disable all receive return rings but the first. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
+ else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
+ else
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+
+ for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+ rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
+ tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Disable interrupts */
+ tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+
+ /* Zero mailbox registers. */
+ if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
+ for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
+ tp->napi[i].tx_prod = 0;
+ tp->napi[i].tx_cons = 0;
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[i].consmbox, 0);
+ tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+ }
+ } else {
+ tp->napi[0].tx_prod = 0;
+ tp->napi[0].tx_cons = 0;
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[0].consmbox, 0);
+ }
+
+ /* Make sure the NIC-based send BD rings are disabled. */
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+ for (i = 0; i < 16; i++)
+ tw32_tx_mbox(mbox + i * 8, 0);
+ }
+
+ txrcb = NIC_SRAM_SEND_RCB;
+ rxrcb = NIC_SRAM_RCV_RET_RCB;
+
+ /* Clear status block in ram. */
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ /* Set status block DMA address */
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tnapi->status_mapping >> 32));
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tnapi->status_mapping & 0xffffffff));
+
+ if (tnapi->tx_ring) {
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+ txrcb += TG3_BDINFO_SIZE;
+ }
+
+ if (tnapi->rx_rcb) {
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ (TG3_RX_RCB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+ rxrcb += TG3_BDINFO_SIZE;
+ }
+
+ stblk = HOSTCC_STATBLCK_RING1;
+
+ for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
+ u64 mapping = (u64)tnapi->status_mapping;
+ tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
+ tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+
+ /* Clear status block in ram. */
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ (TG3_RX_RCB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+
+ stblk += 8;
+ txrcb += TG3_BDINFO_SIZE;
+ rxrcb += TG3_BDINFO_SIZE;
+ }
}
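All of the status block and BD addresses above are programmed as two 32-bit writes, high half first. A sketch of the split; write32 stands in for tw32 and is passed in rather than being a real driver call:

    #include <stdint.h>

    static void write_dma_addr64(void (*write32)(uint32_t reg, uint32_t val),
    			     uint32_t reg_hi, uint32_t reg_lo,
    			     uint64_t mapping)
    {
    	write32(reg_hi, (uint32_t)(mapping >> 32));
    	write32(reg_lo, (uint32_t)(mapping & 0xffffffff));
    }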
/* tp->lock is held. */
@@ -6672,6 +7124,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
u32 val, rdmac_mode;
int i, err, limit;
+ struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
tg3_disable_ints(tp);
@@ -6719,6 +7172,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
PCIE_PWR_MGMT_L1_THRESH_4MS;
tw32(PCIE_PWR_MGMT_THRESH, val);
+
+ val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
+ tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
+
+ tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
+ }
+
+ if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
+ val = tr32(TG3_PCIE_LNKCTL);
+ if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
+ val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
+ else
+ val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
+ tw32(TG3_PCIE_LNKCTL, val);
}
/* This works around an issue with Athlon chipsets on
@@ -6766,7 +7233,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
*/
@@ -6886,35 +7354,33 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
* configurable.
*/
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
- ((u64) tp->rx_std_mapping >> 32));
+ ((u64) tpr->rx_std_mapping >> 32));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
- ((u64) tp->rx_std_mapping & 0xffffffff));
+ ((u64) tpr->rx_std_mapping & 0xffffffff));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_BUFFER_DESC);
- /* Don't even try to program the JUMBO/MINI buffer descriptor
- * configs on 5705.
- */
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
- tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
- RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
- } else {
- tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
- RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
-
+ /* Disable the mini ring */
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
BDINFO_FLAGS_DISABLED);
+ /* Program the jumbo buffer descriptor ring control
+ * blocks on those devices that have them.
+ */
+ if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
/* Setup replenish threshold. */
tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
- ((u64) tp->rx_jumbo_mapping >> 32));
+ ((u64) tpr->rx_jmb_mapping >> 32));
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
- ((u64) tp->rx_jumbo_mapping & 0xffffffff));
+ ((u64) tpr->rx_jmb_mapping & 0xffffffff));
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
- RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
+ (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
+ BDINFO_FLAGS_USE_EXT_RECV);
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -6922,57 +7388,31 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
BDINFO_FLAGS_DISABLED);
}
- }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
+ (RX_STD_MAX_SIZE << 2);
+ else
+ val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
+ } else
+ val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
- /* There is only one send ring on 5705/5750, no need to explicitly
- * disable the others.
- */
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- /* Clear out send RCB ring in SRAM. */
- for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
- tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
- BDINFO_FLAGS_DISABLED);
- }
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
- tp->tx_prod = 0;
- tp->tx_cons = 0;
- tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
- tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
+ tpr->rx_std_ptr = tp->rx_pending;
+ tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
+ tpr->rx_std_ptr);
- tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
- tp->tx_desc_mapping,
- (TG3_TX_RING_SIZE <<
- BDINFO_FLAGS_MAXLEN_SHIFT),
- NIC_SRAM_TX_BUFFER_DESC);
+ tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
+ tp->rx_jumbo_pending : 0;
+ tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
+ tpr->rx_jmb_ptr);
- /* There is only one receive return ring on 5705/5750, no need
- * to explicitly disable the others.
- */
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
- i += TG3_BDINFO_SIZE) {
- tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
- BDINFO_FLAGS_DISABLED);
- }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ tw32(STD_REPLENISH_LWM, 32);
+ tw32(JMB_REPLENISH_LWM, 16);
}
- tp->rx_rcb_ptr = 0;
- tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
-
- tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
- tp->rx_rcb_mapping,
- (TG3_RX_RCB_RING_SIZE(tp) <<
- BDINFO_FLAGS_MAXLEN_SHIFT),
- 0);
-
- tp->rx_std_ptr = tp->rx_pending;
- tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
- tp->rx_std_ptr);
-
- tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
- tp->rx_jumbo_pending : 0;
- tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
- tp->rx_jumbo_ptr);
+ tg3_rings_reset(tp);
/* Initialize MAC address and backoff seed. */
__tg3_set_mac_addr(tp, 0);
@@ -7061,12 +7501,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
__tg3_set_coalesce(tp, &tp->coal);
- /* set status block DMA address */
- tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
- ((u64) tp->status_mapping >> 32));
- tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
- ((u64) tp->status_mapping & 0xffffffff));
-
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
/* Status/statistics block address. See tg3_timer,
* the tg3_periodic_fetch_stats call there, and
@@ -7077,7 +7511,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tp->stats_mapping & 0xffffffff));
tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
+
tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
+
+ /* Clear statistics and status block memory areas */
+ for (i = NIC_SRAM_STATS_BLK;
+ i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
+ i += sizeof(u32)) {
+ tg3_write_mem(tp, i, 0);
+ udelay(40);
+ }
}
tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
@@ -7087,15 +7530,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
- /* Clear statistics/status block in chip, and status block in ram. */
- for (i = NIC_SRAM_STATS_BLK;
- i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
- i += sizeof(u32)) {
- tg3_write_mem(tp, i, 0);
- udelay(40);
- }
- memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
-
if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/* reset to prevent losing 1st rx packet intermittently */
@@ -7147,7 +7581,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+ val = tr32(MSGINT_MODE);
+ val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ tw32(MSGINT_MODE, val);
+ }
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -7164,7 +7602,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
- if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
+ if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
(tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
/* nothing */
@@ -7217,7 +7655,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
- tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
+ val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ val |= SNDBDI_MODE_MULTI_TXQ_EN;
+ tw32(SNDBDI_MODE, val);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
@@ -7236,10 +7677,46 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(MAC_TX_MODE, tp->tx_mode);
udelay(100);
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+ u8 *ent = (u8 *)&val;
+
+ /* Set up the indirection table */
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+ int idx = i % sizeof(val);
+
+ ent[idx] = i % (tp->irq_cnt - 1);
+ if (idx == sizeof(val) - 1) {
+ tw32(reg, val);
+ reg += 4;
+ }
+ }
+
+ /* Setup the "secret" hash key. */
+ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
+ tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
+ tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
+ tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
+ tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
+ tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
+ tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
+ tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
+ tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
+ tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
+ }
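The indirection-table loop above packs four one-byte ring indexes into each 32-bit register, cycling entries over the rx-capable vectors (irq_cnt - 1). A stand-alone sketch of the packing; regs[] and fill_indir_tbl() are hypothetical stand-ins for the 32 registers starting at MAC_RSS_INDIR_TBL_0:

    #include <stdint.h>

    #define INDIR_TBL_SIZE 128		/* TG3_RSS_INDIR_TBL_SIZE */

    static void fill_indir_tbl(uint32_t regs[INDIR_TBL_SIZE / 4], int rx_rings)
    {
    	uint32_t val = 0;
    	uint8_t *ent = (uint8_t *)&val;	/* same byte-pointer trick */
    	int i;

    	for (i = 0; i < INDIR_TBL_SIZE; i++) {
    		int idx = i % sizeof(val);

    		ent[idx] = i % rx_rings;	/* rx_rings == irq_cnt - 1 */
    		if (idx == sizeof(val) - 1)
    			regs[i / sizeof(val)] = val;	/* one tw32() */
    	}
    }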
+
tp->rx_mode = RX_MODE_ENABLE;
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
+ tp->rx_mode |= RX_MODE_RSS_ENABLE |
+ RX_MODE_RSS_ITBL_HASH_BITS_7 |
+ RX_MODE_RSS_IPV6_HASH_EN |
+ RX_MODE_RSS_TCP_IPV6_HASH_EN |
+ RX_MODE_RSS_IPV4_HASH_EN |
+ RX_MODE_RSS_TCP_IPV4_HASH_EN;
+
tw32_f(MAC_RX_MODE, tp->rx_mode);
udelay(10);
@@ -7302,7 +7779,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
+ !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
u32 tmp;
/* Clear CRC stats. */
@@ -7449,12 +7926,12 @@ static void tg3_timer(unsigned long __opaque)
* IRQ status the mailbox/status_block protocol the chip
* uses with the cpu is race prone.
*/
- if (tp->hw_status->status & SD_STATUS_UPDATED) {
+ if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
tw32(GRC_LOCAL_CTRL,
tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
} else {
tw32(HOSTCC_MODE, tp->coalesce_mode |
- (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+ HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
}
if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
@@ -7555,13 +8032,22 @@ restart_timer:
add_timer(&tp->timer);
}
-static int tg3_request_irq(struct tg3 *tp)
+static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
irq_handler_t fn;
unsigned long flags;
- struct net_device *dev = tp->dev;
+ char *name;
+ struct tg3_napi *tnapi = &tp->napi[irq_num];
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ if (tp->irq_cnt == 1)
+ name = tp->dev->name;
+ else {
+ name = &tnapi->irq_lbl[0];
+ snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+ name[IFNAMSIZ-1] = 0;
+ }
+
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
fn = tg3_msi;
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
fn = tg3_msi_1shot;
@@ -7572,37 +8058,49 @@ static int tg3_request_irq(struct tg3 *tp)
fn = tg3_interrupt_tagged;
flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
}
- return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
+
+ return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
+ struct tg3_napi *tnapi = &tp->napi[0];
struct net_device *dev = tp->dev;
int err, i, intr_ok = 0;
+ u32 val;
if (!netif_running(dev))
return -ENODEV;
tg3_disable_ints(tp);
- free_irq(tp->pdev->irq, dev);
+ free_irq(tnapi->irq_vec, tnapi);
- err = request_irq(tp->pdev->irq, tg3_test_isr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
+ /*
+ * Turn off MSI one-shot mode. Otherwise this test has no
+ * observable way to tell whether the interrupt was delivered.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
+ val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
+
+ err = request_irq(tnapi->irq_vec, tg3_test_isr,
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
if (err)
return err;
- tp->hw_status->status &= ~SD_STATUS_UPDATED;
+ tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
tg3_enable_ints(tp);
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
- HOSTCC_MODE_NOW);
+ tnapi->coal_now);
for (i = 0; i < 5; i++) {
u32 int_mbox, misc_host_ctrl;
- int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
- TG3_64BIT_REG_LOW);
+ int_mbox = tr32_mailbox(tnapi->int_mbox);
misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
if ((int_mbox != 0) ||
@@ -7616,15 +8114,22 @@ static int tg3_test_interrupt(struct tg3 *tp)
tg3_disable_ints(tp);
- free_irq(tp->pdev->irq, dev);
+ free_irq(tnapi->irq_vec, tnapi);
- err = tg3_request_irq(tp);
+ err = tg3_request_irq(tp, 0);
if (err)
return err;
- if (intr_ok)
+ if (intr_ok) {
+ /* Re-enable MSI one-shot mode. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
+ val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
return 0;
+ }
return -EIO;
}
@@ -7634,7 +8139,6 @@ static int tg3_test_interrupt(struct tg3 *tp)
*/
static int tg3_test_msi(struct tg3 *tp)
{
- struct net_device *dev = tp->dev;
int err;
u16 pci_cmd;
@@ -7665,12 +8169,13 @@ static int tg3_test_msi(struct tg3 *tp)
"the PCI maintainer and include system chipset information.\n",
tp->dev->name);
- free_irq(tp->pdev->irq, dev);
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
pci_disable_msi(tp->pdev);
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
- err = tg3_request_irq(tp);
+ err = tg3_request_irq(tp, 0);
if (err)
return err;
@@ -7685,7 +8190,7 @@ static int tg3_test_msi(struct tg3 *tp)
tg3_full_unlock(tp);
if (err)
- free_irq(tp->pdev->irq, dev);
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
return err;
}
@@ -7721,10 +8226,95 @@ static int tg3_request_firmware(struct tg3 *tp)
return 0;
}
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+ int i, rc, cpus = num_online_cpus();
+ struct msix_entry msix_ent[tp->irq_max];
+
+ if (cpus == 1)
+ /* Just fall back to the simpler MSI mode. */
+ return false;
+
+ /*
+ * We want as many rx rings enabled as there are CPUs.
+ * The first MSI-X vector only deals with link interrupts, etc.,
+ * so we add one to the number of vectors we are requesting.
+ */
+ tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+
+ for (i = 0; i < tp->irq_max; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ if (rc != 0) {
+ if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
+ return false;
+ if (pci_enable_msix(tp->pdev, msix_ent, rc))
+ return false;
+ printk(KERN_NOTICE
+ "%s: Requested %d MSI-X vectors, received %d\n",
+ tp->dev->name, tp->irq_cnt, rc);
+ tp->irq_cnt = rc;
+ }
+
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].irq_vec = msix_ent[i].vector;
+
+ tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+
+ return true;
+}
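tg3_enable_msix() leans on the old pci_enable_msix() contract: zero means success, a positive return is the number of vectors the platform can actually provide (so the call is retried with that count), and a negative return is a hard failure. A sketch of just that retry under those assumed semantics; msix_best_effort is hypothetical and omits tg3's TG3_RSS_MIN_NUM_MSIX_VECS floor:

    #include <linux/errno.h>
    #include <linux/pci.h>

    static int msix_best_effort(struct pci_dev *pdev,
    			    struct msix_entry *ent, int want)
    {
    	int rc = pci_enable_msix(pdev, ent, want);

    	if (rc > 0 && pci_enable_msix(pdev, ent, rc) == 0)
    		return rc;		/* reduced vector count granted */

    	return rc ? -ENOSPC : want;	/* all requested, or failure */
    }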
+
+static void tg3_ints_init(struct tg3 *tp)
+{
+ if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
+ !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+ /* All MSI supporting chips should support tagged
+ * status. Assert that this is the case.
+ */
+ printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+ "Not using MSI.\n", tp->dev->name);
+ goto defcfg;
+ }
+
+ if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
+ tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
+ else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
+ pci_enable_msi(tp->pdev) == 0)
+ tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
+
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
+ u32 msi_mode = tr32(MSGINT_MODE);
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ msi_mode |= MSGINT_MODE_MULTIVEC_EN;
+ tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+ }
+defcfg:
+ if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+ tp->irq_cnt = 1;
+ tp->napi[0].irq_vec = tp->pdev->irq;
+ tp->dev->real_num_tx_queues = 1;
+ }
+}
+
+static void tg3_ints_fini(struct tg3 *tp)
+{
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ pci_disable_msix(tp->pdev);
+ else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
+ pci_disable_msi(tp->pdev);
+ tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
+ tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
+}
+
static int tg3_open(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
- int err;
+ int i, err;
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
@@ -7755,40 +8345,33 @@ static int tg3_open(struct net_device *dev)
tg3_full_unlock(tp);
+ /*
+ * Set up interrupts first so we know how
+ * many NAPI resources to allocate.
+ */
+ tg3_ints_init(tp);
+
/* The placement of this call is tied
* to the setup and use of Host TX descriptors.
*/
err = tg3_alloc_consistent(tp);
if (err)
- return err;
-
- if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
- /* All MSI supporting chips should support tagged
- * status. Assert that this is the case.
- */
- if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
- printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
- "Not using MSI.\n", tp->dev->name);
- } else if (pci_enable_msi(tp->pdev) == 0) {
- u32 msi_mode;
+ goto err_out1;
- msi_mode = tr32(MSGINT_MODE);
- tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
- tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
- }
- }
- err = tg3_request_irq(tp);
+ tg3_napi_enable(tp);
- if (err) {
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- pci_disable_msi(tp->pdev);
- tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ err = tg3_request_irq(tp, i);
+ if (err) {
+ /* Unwind only the vectors already requested,
+ * using each one's own tnapi, not the failed
+ * vector's.
+ */
+ for (i--; i >= 0; i--) {
+ tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+ break;
}
- tg3_free_consistent(tp);
- return err;
}
- napi_enable(&tp->napi);
+ if (err)
+ goto err_out2;
tg3_full_lock(tp, 0);
@@ -7816,45 +8399,28 @@ static int tg3_open(struct net_device *dev)
tg3_full_unlock(tp);
- if (err) {
- napi_disable(&tp->napi);
- free_irq(tp->pdev->irq, dev);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- pci_disable_msi(tp->pdev);
- tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
- }
- tg3_free_consistent(tp);
- return err;
- }
+ if (err)
+ goto err_out3;
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
err = tg3_test_msi(tp);
if (err) {
tg3_full_lock(tp, 0);
-
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- pci_disable_msi(tp->pdev);
- tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
- }
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
- tg3_free_consistent(tp);
-
tg3_full_unlock(tp);
- napi_disable(&tp->napi);
-
- return err;
+ goto err_out2;
}
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
- u32 val = tr32(PCIE_TRANSACTION_CFG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
+ (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
+ u32 val = tr32(PCIE_TRANSACTION_CFG);
- tw32(PCIE_TRANSACTION_CFG,
- val | PCIE_TRANS_CFG_1SHOT_MSI);
- }
+ tw32(PCIE_TRANSACTION_CFG,
+ val | PCIE_TRANS_CFG_1SHOT_MSI);
}
}
@@ -7868,9 +8434,23 @@ static int tg3_open(struct net_device *dev)
tg3_full_unlock(tp);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
+
+err_out3:
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+
+err_out2:
+ tg3_napi_disable(tp);
+ tg3_free_consistent(tp);
+
+err_out1:
+ tg3_ints_fini(tp);
+ return err;
}
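The reworked tg3_open() unwinds through numbered labels: each err_outN releases exactly what was acquired since the previous label, in reverse order of acquisition. The idiom in isolation, with trivial stand-in steps (step_a/undo_a and friends are hypothetical; the comments only suggest which patch functions they loosely correspond to):

    static int step_a(void) { return 0; }	/* e.g. tg3_ints_init()        */
    static int step_b(void) { return 0; }	/* e.g. tg3_alloc_consistent() */
    static int step_c(void) { return 0; }	/* e.g. requesting the IRQs    */
    static void undo_a(void) { }		/* e.g. tg3_ints_fini()        */
    static void undo_b(void) { }		/* e.g. tg3_free_consistent()  */

    static int open_sketch(void)
    {
    	int err;

    	err = step_a();
    	if (err)
    		return err;

    	err = step_b();
    	if (err)
    		goto err_out1;

    	err = step_c();
    	if (err)
    		goto err_out2;

    	return 0;

    err_out2:
    	undo_b();
    err_out1:
    	undo_a();
    	return err;
    }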
#if 0
@@ -7879,6 +8459,7 @@ static int tg3_open(struct net_device *dev)
u32 val32, val32_2, val32_3, val32_4, val32_5;
u16 val16;
int i;
+ struct tg3_hw_status *sblk = tp->napi[0].hw_status;
pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
@@ -8031,14 +8612,15 @@ static int tg3_open(struct net_device *dev)
val32, val32_2, val32_3, val32_4, val32_5);
/* SW status block */
- printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
- tp->hw_status->status,
- tp->hw_status->status_tag,
- tp->hw_status->rx_jumbo_consumer,
- tp->hw_status->rx_consumer,
- tp->hw_status->rx_mini_consumer,
- tp->hw_status->idx[0].rx_producer,
- tp->hw_status->idx[0].tx_consumer);
+ printk(KERN_DEBUG
+ "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+ sblk->status,
+ sblk->status_tag,
+ sblk->rx_jumbo_consumer,
+ sblk->rx_consumer,
+ sblk->rx_mini_consumer,
+ sblk->idx[0].rx_producer,
+ sblk->idx[0].tx_consumer);
/* SW statistics block */
printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
@@ -8106,12 +8688,13 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
static int tg3_close(struct net_device *dev)
{
+ int i;
struct tg3 *tp = netdev_priv(dev);
- napi_disable(&tp->napi);
+ tg3_napi_disable(tp);
cancel_work_sync(&tp->reset_task);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
del_timer_sync(&tp->timer);
@@ -8128,12 +8711,13 @@ static int tg3_close(struct net_device *dev)
tg3_full_unlock(tp);
- free_irq(tp->pdev->irq, dev);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- pci_disable_msi(tp->pdev);
- tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
}
+ tg3_ints_fini(tp);
+
memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
sizeof(tp->net_stats_prev));
memcpy(&tp->estats_prev, tg3_get_estats(tp),
@@ -8697,7 +9281,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = tp->link_config.active_speed;
cmd->duplex = tp->link_config.active_duplex;
}
- cmd->phy_address = PHY_ADDR;
+ cmd->phy_address = tp->phy_addr;
cmd->transceiver = XCVR_INTERNAL;
cmd->autoneg = tp->link_config.autoneg;
cmd->maxtxpkt = 0;
@@ -8872,7 +9456,8 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
dev->features |= NETIF_F_TSO_ECN;
} else
dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -8934,13 +9519,13 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
else
ering->rx_jumbo_pending = 0;
- ering->tx_pending = tp->tx_pending;
+ ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0, err = 0;
+ int i, irq_sync = 0, err = 0;
if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -8964,7 +9549,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
tp->rx_pending > 63)
tp->rx_pending = 63;
tp->rx_jumbo_pending = ering->rx_jumbo_pending;
- tp->tx_pending = ering->tx_pending;
+
+ for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
+ tp->napi[i].tx_pending = ering->tx_pending;
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -9672,12 +10259,23 @@ static int tg3_test_memory(struct tg3 *tp)
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
- u32 desc_idx;
+ u32 desc_idx, coal_now;
struct sk_buff *skb, *rx_skb;
u8 *tx_data;
dma_addr_t map;
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
+ struct tg3_napi *tnapi, *rnapi;
+ struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+
+ if (tp->irq_cnt > 1) {
+ tnapi = &tp->napi[1];
+ rnapi = &tp->napi[1];
+ } else {
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
+ }
+ coal_now = tnapi->coal_now | rnapi->coal_now;
if (loopback_mode == TG3_MAC_LOOPBACK) {
/* HW errata - mac loopback fails in some cases on 5780.
@@ -9699,18 +10297,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
} else if (loopback_mode == TG3_PHY_LOOPBACK) {
u32 val;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 phytest;
-
- if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
- u32 phy;
-
- tg3_writephy(tp, MII_TG3_EPHY_TEST,
- phytest | MII_TG3_EPHY_SHADOW_EN);
- if (!tg3_readphy(tp, 0x1b, &phy))
- tg3_writephy(tp, 0x1b, phy & ~0x20);
- tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
- }
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+ tg3_phy_fet_toggle_apd(tp, false);
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
} else
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
@@ -9721,8 +10309,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
udelay(40);
mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
mac_mode |= MAC_MODE_PORT_MODE_MII;
} else
mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -9765,35 +10354,34 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
- HOSTCC_MODE_NOW);
+ rnapi->coal_now);
udelay(10);
- rx_start_idx = tp->hw_status->idx[0].rx_producer;
+ rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
num_pkts = 0;
- tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
+ tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
- tp->tx_prod++;
+ tnapi->tx_prod++;
num_pkts++;
- tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
- tp->tx_prod);
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+ tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+ tr32_mailbox(tnapi->prodmbox);
udelay(10);
/* 250 usec to allow enough time on some 10/100 Mbps devices. */
for (i = 0; i < 25; i++) {
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
- HOSTCC_MODE_NOW);
+ coal_now);
udelay(10);
- tx_idx = tp->hw_status->idx[0].tx_consumer;
- rx_idx = tp->hw_status->idx[0].rx_producer;
- if ((tx_idx == tp->tx_prod) &&
+ tx_idx = tnapi->hw_status->idx[0].tx_consumer;
+ rx_idx = rnapi->hw_status->idx[0].rx_producer;
+ if ((tx_idx == tnapi->tx_prod) &&
(rx_idx == (rx_start_idx + num_pkts)))
break;
}
@@ -9801,13 +10389,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- if (tx_idx != tp->tx_prod)
+ if (tx_idx != tnapi->tx_prod)
goto out;
if (rx_idx != rx_start_idx + num_pkts)
goto out;
- desc = &tp->rx_rcb[rx_start_idx];
+ desc = &rnapi->rx_rcb[rx_start_idx];
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key != RXD_OPAQUE_RING_STD)
@@ -9821,9 +10409,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
if (rx_len != tx_len)
goto out;
- rx_skb = tp->rx_std_buffers[desc_idx].skb;
+ rx_skb = tpr->rx_std_buffers[desc_idx].skb;
- map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+ map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
for (i = 14; i < tx_len; i++) {
@@ -9997,7 +10585,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch(cmd) {
case SIOCGMIIPHY:
- data->phy_id = PHY_ADDR;
+ data->phy_id = tp->phy_addr;
/* fallthru */
case SIOCGMIIREG: {
@@ -10022,9 +10610,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
break; /* We have no PHY */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
if (tp->link_config.phy_is_low_power)
return -EAGAIN;
@@ -10236,8 +10821,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
nvcfg1 = tr32(NVRAM_CFG1);
if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
tp->tg3_flags2 |= TG3_FLG2_FLASH;
- }
- else {
+ } else {
nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
tw32(NVRAM_CFG1, nvcfg1);
}
@@ -10245,43 +10829,69 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
- case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- break;
- case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
- break;
- case FLASH_VENDOR_ATMEL_EEPROM:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- break;
- case FLASH_VENDOR_ST:
- tp->nvram_jedecnum = JEDEC_ST;
- tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- break;
- case FLASH_VENDOR_SAIFUN:
- tp->nvram_jedecnum = JEDEC_SAIFUN;
- tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
- break;
- case FLASH_VENDOR_SST_SMALL:
- case FLASH_VENDOR_SST_LARGE:
- tp->nvram_jedecnum = JEDEC_SST;
- tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
- break;
+ case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ break;
+ case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_ATMEL_EEPROM:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ break;
+ case FLASH_VENDOR_ST:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ break;
+ case FLASH_VENDOR_SAIFUN:
+ tp->nvram_jedecnum = JEDEC_SAIFUN;
+ tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_SST_SMALL:
+ case FLASH_VENDOR_SST_LARGE:
+ tp->nvram_jedecnum = JEDEC_SST;
+ tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+ break;
}
- }
- else {
+ } else {
tp->nvram_jedecnum = JEDEC_ATMEL;
tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
}
}
+static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
+{
+ switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
+ case FLASH_5752PAGE_SIZE_256:
+ tp->nvram_pagesize = 256;
+ break;
+ case FLASH_5752PAGE_SIZE_512:
+ tp->nvram_pagesize = 512;
+ break;
+ case FLASH_5752PAGE_SIZE_1K:
+ tp->nvram_pagesize = 1024;
+ break;
+ case FLASH_5752PAGE_SIZE_2K:
+ tp->nvram_pagesize = 2048;
+ break;
+ case FLASH_5752PAGE_SIZE_4K:
+ tp->nvram_pagesize = 4096;
+ break;
+ case FLASH_5752PAGE_SIZE_264:
+ tp->nvram_pagesize = 264;
+ break;
+ case FLASH_5752PAGE_SIZE_528:
+ tp->nvram_pagesize = 528;
+ break;
+ }
+}
+
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
u32 nvcfg1;
@@ -10293,48 +10903,28 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
- case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
- case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- break;
- case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- break;
- case FLASH_5752VENDOR_ST_M45PE10:
- case FLASH_5752VENDOR_ST_M45PE20:
- case FLASH_5752VENDOR_ST_M45PE40:
- tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- break;
+ case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
+ case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ break;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ break;
}
if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
- switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
- case FLASH_5752PAGE_SIZE_256:
- tp->nvram_pagesize = 256;
- break;
- case FLASH_5752PAGE_SIZE_512:
- tp->nvram_pagesize = 512;
- break;
- case FLASH_5752PAGE_SIZE_1K:
- tp->nvram_pagesize = 1024;
- break;
- case FLASH_5752PAGE_SIZE_2K:
- tp->nvram_pagesize = 2048;
- break;
- case FLASH_5752PAGE_SIZE_4K:
- tp->nvram_pagesize = 4096;
- break;
- case FLASH_5752PAGE_SIZE_264:
- tp->nvram_pagesize = 264;
- break;
- }
- }
- else {
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ } else {
/* For eeprom, set pagesize to maximum eeprom size */
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
@@ -10357,45 +10947,45 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
switch (nvcfg1) {
- case FLASH_5755VENDOR_ATMEL_FLASH_1:
- case FLASH_5755VENDOR_ATMEL_FLASH_2:
- case FLASH_5755VENDOR_ATMEL_FLASH_3:
- case FLASH_5755VENDOR_ATMEL_FLASH_5:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- tp->nvram_pagesize = 264;
- if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
- nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
- tp->nvram_size = (protect ? 0x3e200 :
- TG3_NVRAM_SIZE_512KB);
- else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
- tp->nvram_size = (protect ? 0x1f200 :
- TG3_NVRAM_SIZE_256KB);
- else
- tp->nvram_size = (protect ? 0x1f200 :
- TG3_NVRAM_SIZE_128KB);
- break;
- case FLASH_5752VENDOR_ST_M45PE10:
- case FLASH_5752VENDOR_ST_M45PE20:
- case FLASH_5752VENDOR_ST_M45PE40:
- tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- tp->nvram_pagesize = 256;
- if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
- tp->nvram_size = (protect ?
- TG3_NVRAM_SIZE_64KB :
- TG3_NVRAM_SIZE_128KB);
- else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
- tp->nvram_size = (protect ?
- TG3_NVRAM_SIZE_64KB :
- TG3_NVRAM_SIZE_256KB);
- else
- tp->nvram_size = (protect ?
- TG3_NVRAM_SIZE_128KB :
- TG3_NVRAM_SIZE_512KB);
- break;
+ case FLASH_5755VENDOR_ATMEL_FLASH_1:
+ case FLASH_5755VENDOR_ATMEL_FLASH_2:
+ case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ case FLASH_5755VENDOR_ATMEL_FLASH_5:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 264;
+ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
+ tp->nvram_size = (protect ? 0x3e200 :
+ TG3_NVRAM_SIZE_512KB);
+ else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_256KB);
+ else
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_128KB);
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 256;
+ if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_128KB);
+ else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_256KB);
+ else
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_128KB :
+ TG3_NVRAM_SIZE_512KB);
+ break;
}
}
@@ -10406,34 +10996,34 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
nvcfg1 = tr32(NVRAM_CFG1);
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
- case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
- case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
- case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
- case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
- nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
- tw32(NVRAM_CFG1, nvcfg1);
- break;
- case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
- case FLASH_5755VENDOR_ATMEL_FLASH_1:
- case FLASH_5755VENDOR_ATMEL_FLASH_2:
- case FLASH_5755VENDOR_ATMEL_FLASH_3:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- tp->nvram_pagesize = 264;
- break;
- case FLASH_5752VENDOR_ST_M45PE10:
- case FLASH_5752VENDOR_ST_M45PE20:
- case FLASH_5752VENDOR_ST_M45PE40:
- tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- tp->nvram_pagesize = 256;
- break;
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ break;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_5755VENDOR_ATMEL_FLASH_1:
+ case FLASH_5755VENDOR_ATMEL_FLASH_2:
+ case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 264;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 256;
+ break;
}
}
@@ -10451,63 +11041,63 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
switch (nvcfg1) {
- case FLASH_5761VENDOR_ATMEL_ADB021D:
- case FLASH_5761VENDOR_ATMEL_ADB041D:
- case FLASH_5761VENDOR_ATMEL_ADB081D:
- case FLASH_5761VENDOR_ATMEL_ADB161D:
- case FLASH_5761VENDOR_ATMEL_MDB021D:
- case FLASH_5761VENDOR_ATMEL_MDB041D:
- case FLASH_5761VENDOR_ATMEL_MDB081D:
- case FLASH_5761VENDOR_ATMEL_MDB161D:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
- tp->nvram_pagesize = 256;
- break;
- case FLASH_5761VENDOR_ST_A_M45PE20:
- case FLASH_5761VENDOR_ST_A_M45PE40:
- case FLASH_5761VENDOR_ST_A_M45PE80:
- case FLASH_5761VENDOR_ST_A_M45PE16:
- case FLASH_5761VENDOR_ST_M_M45PE20:
- case FLASH_5761VENDOR_ST_M_M45PE40:
- case FLASH_5761VENDOR_ST_M_M45PE80:
- case FLASH_5761VENDOR_ST_M_M45PE16:
- tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- tp->nvram_pagesize = 256;
- break;
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
+ tp->nvram_pagesize = 256;
+ break;
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 256;
+ break;
}
if (protect) {
tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
} else {
switch (nvcfg1) {
- case FLASH_5761VENDOR_ATMEL_ADB161D:
- case FLASH_5761VENDOR_ATMEL_MDB161D:
- case FLASH_5761VENDOR_ST_A_M45PE16:
- case FLASH_5761VENDOR_ST_M_M45PE16:
- tp->nvram_size = TG3_NVRAM_SIZE_2MB;
- break;
- case FLASH_5761VENDOR_ATMEL_ADB081D:
- case FLASH_5761VENDOR_ATMEL_MDB081D:
- case FLASH_5761VENDOR_ST_A_M45PE80:
- case FLASH_5761VENDOR_ST_M_M45PE80:
- tp->nvram_size = TG3_NVRAM_SIZE_1MB;
- break;
- case FLASH_5761VENDOR_ATMEL_ADB041D:
- case FLASH_5761VENDOR_ATMEL_MDB041D:
- case FLASH_5761VENDOR_ST_A_M45PE40:
- case FLASH_5761VENDOR_ST_M_M45PE40:
- tp->nvram_size = TG3_NVRAM_SIZE_512KB;
- break;
- case FLASH_5761VENDOR_ATMEL_ADB021D:
- case FLASH_5761VENDOR_ATMEL_MDB021D:
- case FLASH_5761VENDOR_ST_A_M45PE20:
- case FLASH_5761VENDOR_ST_M_M45PE20:
- tp->nvram_size = TG3_NVRAM_SIZE_256KB;
- break;
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_size = TG3_NVRAM_SIZE_2MB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
}
}
}
@@ -10586,34 +11176,84 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
return;
}
- switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
- case FLASH_5752PAGE_SIZE_256:
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
- tp->nvram_pagesize = 256;
- break;
- case FLASH_5752PAGE_SIZE_512:
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
- tp->nvram_pagesize = 512;
- break;
- case FLASH_5752PAGE_SIZE_1K:
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
- tp->nvram_pagesize = 1024;
- break;
- case FLASH_5752PAGE_SIZE_2K:
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
- tp->nvram_pagesize = 2048;
- break;
- case FLASH_5752PAGE_SIZE_4K:
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
- tp->nvram_pagesize = 4096;
- break;
- case FLASH_5752PAGE_SIZE_264:
- tp->nvram_pagesize = 264;
+}
+
+static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ATMEL_EEPROM:
+ case FLASH_5717VENDOR_MICRO_EEPROM:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ return;
+ case FLASH_5717VENDOR_ATMEL_MDB011D:
+ case FLASH_5717VENDOR_ATMEL_ADB011B:
+ case FLASH_5717VENDOR_ATMEL_ADB011D:
+ case FLASH_5717VENDOR_ATMEL_MDB021D:
+ case FLASH_5717VENDOR_ATMEL_ADB021B:
+ case FLASH_5717VENDOR_ATMEL_ADB021D:
+ case FLASH_5717VENDOR_ATMEL_45USPT:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ATMEL_MDB021D:
+ case FLASH_5717VENDOR_ATMEL_ADB021B:
+ case FLASH_5717VENDOR_ATMEL_ADB021D:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
break;
- case FLASH_5752PAGE_SIZE_528:
- tp->nvram_pagesize = 528;
+ case FLASH_5717VENDOR_ST_M_M25PE10:
+ case FLASH_5717VENDOR_ST_A_M25PE10:
+ case FLASH_5717VENDOR_ST_M_M45PE10:
+ case FLASH_5717VENDOR_ST_A_M45PE10:
+ case FLASH_5717VENDOR_ST_M_M25PE20:
+ case FLASH_5717VENDOR_ST_A_M25PE20:
+ case FLASH_5717VENDOR_ST_M_M45PE20:
+ case FLASH_5717VENDOR_ST_A_M45PE20:
+ case FLASH_5717VENDOR_ST_25USPT:
+ case FLASH_5717VENDOR_ST_45USPT:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ST_M_M25PE20:
+ case FLASH_5717VENDOR_ST_A_M25PE20:
+ case FLASH_5717VENDOR_ST_M_M45PE20:
+ case FLASH_5717VENDOR_ST_A_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
break;
+ default:
+ tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
+ return;
}
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
@@ -10658,6 +11298,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tg3_get_5906_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
tg3_get_57780_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ tg3_get_5717_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -11485,6 +12127,9 @@ out_not_found:
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
strcpy(tp->board_part_number, "BCM57790");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
+ strcpy(tp->board_part_number, "BCM57788");
else
strcpy(tp->board_part_number, "none");
}
@@ -11768,8 +12413,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
u32 prod_id_asic_rev;
- pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
- &prod_id_asic_rev);
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
+ pci_read_config_dword(tp->pdev,
+ TG3PCI_GEN2_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ else
+ pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
+ &prod_id_asic_rev);
+
tp->pci_chip_rev_id = prod_id_asic_rev;
}
@@ -11907,8 +12561,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
tp->pdev_peer = tg3_find_peer(tp);
/* Intentionally exclude ASIC_REV_5906 */
@@ -11917,7 +12572,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
@@ -11965,9 +12621,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
}
+ tp->irq_max = 1;
+
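+ /* Only the 5717 family supports multiple MSI-X vectors. */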
+#ifdef TG3_NAPI
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
+ tp->irq_max = TG3_IRQ_MAX_VECS;
+ }
+#endif
+
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
- tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
@@ -12100,7 +12766,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->write32 = tg3_write_flush_reg32;
}
-
if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
tp->write32_tx_mbox = tg3_write32_tx_mbox;
@@ -12159,7 +12824,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
@@ -12216,12 +12882,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
}
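+ /* The 5906 has an internal 10/100 FET PHY; subsequent PHY
+ * quirks key off this flag rather than the ASIC rev.
+ */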
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
+
/* A few boards don't want the Ethernet@WireSpeed phy feature */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
(tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
(tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
+ (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
@@ -12232,9 +12901,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
+ !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -12269,6 +12939,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
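+ /* 57780 A0 parts, and A1 parts with the ASF fix enabled,
+ * need the 10/100 L1 PLL power-down toggle workaround.
+ */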
+ if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
+ tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
+ tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
+
err = tg3_mdio_init(tp);
if (err)
return err;
@@ -12352,7 +13027,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
err = tg3_phy_probe(tp);
@@ -12471,8 +13146,10 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
else
tg3_nvram_unlock(tp);
- }
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
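+ /* On 5717, the second PCIe function takes its MAC address
+ * from a different NVRAM offset.
+ */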
+ if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
+ mac_offset = 0xcc;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mac_offset = 0x10;
/* First try to get it from the MAC address mailbox. */
@@ -12953,7 +13630,8 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -13158,7 +13836,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
static int tg3_version_printed = 0;
struct net_device *dev;
struct tg3 *tp;
- int err, pm_cap;
+ int i, err, pm_cap;
+ u32 sndmbx, rcvmbx, intmbx;
char str[40];
u64 dma_mask, persist_dma_mask;
@@ -13190,7 +13869,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
- dev = alloc_etherdev(sizeof(*tp));
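+ /* Reserve netdev TX queues for as many MSI-X vectors as we
+ * might enable.
+ */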
+ dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
err = -ENOMEM;
@@ -13252,9 +13931,52 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->rx_pending = TG3_DEF_RX_RING_PENDING;
tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
- tp->tx_pending = TG3_DEF_TX_RING_PENDING;
- netif_napi_add(dev, &tp->napi, tg3_poll, 64);
+ intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+ rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+ sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+ for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->tp = tp;
+ tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+ tnapi->int_mbox = intmbx;
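+ /* The first four interrupt mailboxes are full 64-bit
+ * registers, 8 bytes apart; the extra 5717 vectors use
+ * 32-bit mailboxes, 4 bytes apart.
+ */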
+ if (i < 4)
+ intmbx += 0x8;
+ else
+ intmbx += 0x4;
+
+ tnapi->consmbox = rcvmbx;
+ tnapi->prodmbox = sndmbx;
+
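+ /* Vector 0 uses the global "coalesce now" bit; each extra
+ * MSI-X vector has its own bit in the host coalescing mode
+ * register.
+ */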
+ if (i)
+ tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+ else
+ tnapi->coal_now = HOSTCC_MODE_NOW;
+
+ if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
+ break;
+
+ /*
+ * If we support MSI-X, we'll be using RSS. With RSS, the
+ * first vector only handles link interrupts and the
+ * remaining vectors handle rx and tx interrupts. Reuse the
+ * mailbox values for the next iteration. The values set up
+ * above are still valid for single-vector mode.
+ */
+ if (!i)
+ continue;
+
+ rcvmbx += 0x8;
+
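+ /* Alternate between the low and high 32-bit halves of each
+ * 64-bit producer mailbox before stepping to the next one
+ * (+0x4, +0x0, +0xc, +0x8, ...).
+ */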
+ if (sndmbx & 0x4)
+ sndmbx -= 0x4;
+ else
+ sndmbx += 0xc;
+ }
+
+ netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
dev->ethtool_ops = &tg3_ethtool_ops;
dev->watchdog_timeo = TG3_TX_TIMEOUT;
dev->irq = pdev->irq;
@@ -13348,7 +14070,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
dev->features |= NETIF_F_TSO_ECN;
}