Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c | 2
-rw-r--r--  drivers/net/8139too.c | 138
-rw-r--r--  drivers/net/Kconfig | 25
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/arm/at91_ether.c | 41
-rw-r--r--  drivers/net/arm/at91_ether.h | 1
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/arm/etherh.c | 6
-rw-r--r--  drivers/net/atl1e/Makefile | 2
-rw-r--r--  drivers/net/atl1e/atl1e.h | 503
-rw-r--r--  drivers/net/atl1e/atl1e_ethtool.c | 405
-rw-r--r--  drivers/net/atl1e/atl1e_hw.c | 664
-rw-r--r--  drivers/net/atl1e/atl1e_hw.h | 793
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 2599
-rw-r--r--  drivers/net/atl1e/atl1e_param.c | 263
-rw-r--r--  drivers/net/au1000_eth.c | 5
-rw-r--r--  drivers/net/bfin_mac.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 3
-rw-r--r--  drivers/net/cpmac.c | 2
-rw-r--r--  drivers/net/dm9000.c | 15
-rw-r--r--  drivers/net/e1000/e1000.h | 71
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 558
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1373
-rw-r--r--  drivers/net/e1000/e1000_main.c | 1490
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 14
-rw-r--r--  drivers/net/e1000/e1000_param.c | 16
-rw-r--r--  drivers/net/e1000e/netdev.c | 2
-rw-r--r--  drivers/net/fec.c | 54
-rw-r--r--  drivers/net/fec_mpc52xx.c | 5
-rw-r--r--  drivers/net/fs_enet/Makefile | 5
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 319
-rw-r--r--  drivers/net/fs_enet/fs_enet.h | 4
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 67
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 23
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 37
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 107
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 144
-rw-r--r--  drivers/net/gianfar.c | 122
-rw-r--r--  drivers/net/gianfar.h | 12
-rw-r--r--  drivers/net/gianfar_ethtool.c | 41
-rw-r--r--  drivers/net/hamradio/bpqether.c | 2
-rw-r--r--  drivers/net/hp-plus.c | 2
-rw-r--r--  drivers/net/hp.c | 2
-rw-r--r--  drivers/net/ibmveth.c | 189
-rw-r--r--  drivers/net/ibmveth.h | 5
-rw-r--r--  drivers/net/igb/igb_main.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/macb.c | 4
-rw-r--r--  drivers/net/macvlan.c | 3
-rw-r--r--  drivers/net/meth.c | 2
-rw-r--r--  drivers/net/mlx4/cmd.c | 3
-rw-r--r--  drivers/net/mlx4/eq.c | 1
-rw-r--r--  drivers/net/mlx4/fw.c | 18
-rw-r--r--  drivers/net/mlx4/fw.h | 2
-rw-r--r--  drivers/net/mlx4/main.c | 2
-rw-r--r--  drivers/net/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/mlx4/mr.c | 49
-rw-r--r--  drivers/net/mlx4/pd.c | 7
-rw-r--r--  drivers/net/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 62
-rw-r--r--  drivers/net/ne.c | 2
-rw-r--r--  drivers/net/ne2.c | 2
-rw-r--r--  drivers/net/netxen/Makefile | 2
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 575
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c | 710
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 168
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 251
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 2196
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.h | 60
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 806
-rw-r--r--  drivers/net/netxen/netxen_nic_isr.c | 220
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 1169
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c | 114
-rw-r--r--  drivers/net/netxen/netxen_nic_phan_reg.h | 31
-rw-r--r--  drivers/net/phy/marvell.c | 7
-rw-r--r--  drivers/net/ppp_generic.c | 3
-rw-r--r--  drivers/net/r6040.c | 445
-rw-r--r--  drivers/net/r8169.c | 14
-rw-r--r--  drivers/net/sfc/efx.c | 39
-rw-r--r--  drivers/net/sfc/falcon.c | 12
-rw-r--r--  drivers/net/sfc/net_driver.h | 5
-rw-r--r--  drivers/net/sh_eth.c | 5
-rw-r--r--  drivers/net/sky2.c | 5
-rw-r--r--  drivers/net/smc91x.c | 94
-rw-r--r--  drivers/net/smc91x.h | 76
-rw-r--r--  drivers/net/tc35815.c | 1
-rw-r--r--  drivers/net/tulip/de4x5.c | 16
-rw-r--r--  drivers/net/tulip/de4x5.h | 3
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 11
-rw-r--r--  drivers/net/usb/rndis_host.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 114
-rw-r--r--  drivers/net/wan/cosa.c | 6
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 3
-rw-r--r--  drivers/net/wireless/ipw2200.c | 33
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 18
96 files changed, 12475 insertions, 5042 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index aabad8ce7458..8db4e6b89482 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1010,7 +1010,7 @@ static int __devinit vortex_probe1(struct device *gendev,
static int printed_version;
int retval, print_info;
struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
- char *print_name = "3c59x";
+ const char *print_name = "3c59x";
struct pci_dev *pdev = NULL;
struct eisa_device *edev = NULL;
DECLARE_MAC_BUF(mac);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 75317a14ad1c..8a5b0d293f75 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -98,7 +98,6 @@
#include <linux/compiler.h>
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
@@ -120,11 +119,6 @@
NETIF_MSG_LINK)
-/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
-#ifdef CONFIG_8139TOO_PIO
-#define USE_IO_OPS 1
-#endif
-
/* define to 1, 2 or 3 to enable copious debugging info */
#define RTL8139_DEBUG 0
@@ -156,6 +150,13 @@
static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+/* Whether to use MMIO or PIO. Default to MMIO. */
+#ifdef CONFIG_8139TOO_PIO
+static int use_io = 1;
+#else
+static int use_io = 0;
+#endif
+
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;
@@ -614,6 +615,8 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+module_param(use_io, int, 0);
+MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
module_param(multicast_filter_limit, int, 0);
module_param_array(media, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
@@ -709,13 +712,8 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
assert (tp->pci_dev != NULL);
pdev = tp->pci_dev;
-#ifdef USE_IO_OPS
- if (tp->mmio_addr)
- ioport_unmap (tp->mmio_addr);
-#else
if (tp->mmio_addr)
pci_iounmap (pdev, tp->mmio_addr);
-#endif /* USE_IO_OPS */
/* it's ok to call this even if we have no regions to free */
pci_release_regions (pdev);
@@ -790,32 +788,33 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
DPRINTK("PIO region size == 0x%02X\n", pio_len);
DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
-#ifdef USE_IO_OPS
- /* make sure PCI base addr 0 is PIO */
- if (!(pio_flags & IORESOURCE_IO)) {
- dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
- rc = -ENODEV;
- goto err_out;
- }
- /* check for weird/broken PCI region reporting */
- if (pio_len < RTL_MIN_IO_SIZE) {
- dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out;
- }
-#else
- /* make sure PCI base addr 1 is MMIO */
- if (!(mmio_flags & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
- rc = -ENODEV;
- goto err_out;
- }
- if (mmio_len < RTL_MIN_IO_SIZE) {
- dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out;
+retry:
+ if (use_io) {
+ /* make sure PCI base addr 0 is PIO */
+ if (!(pio_flags & IORESOURCE_IO)) {
+ dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+ /* check for weird/broken PCI region reporting */
+ if (pio_len < RTL_MIN_IO_SIZE) {
+ dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+ } else {
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(mmio_flags & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+ if (mmio_len < RTL_MIN_IO_SIZE) {
+ dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
}
-#endif
rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
@@ -825,28 +824,28 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
/* enable PCI bus-mastering */
pci_set_master (pdev);
-#ifdef USE_IO_OPS
- ioaddr = ioport_map(pio_start, pio_len);
- if (!ioaddr) {
- dev_err(&pdev->dev, "cannot map PIO, aborting\n");
- rc = -EIO;
- goto err_out;
- }
- dev->base_addr = pio_start;
- tp->mmio_addr = ioaddr;
- tp->regs_len = pio_len;
-#else
- /* ioremap MMIO region */
- ioaddr = pci_iomap(pdev, 1, 0);
- if (ioaddr == NULL) {
- dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out;
+ if (use_io) {
+ ioaddr = pci_iomap(pdev, 0, 0);
+ if (!ioaddr) {
+ dev_err(&pdev->dev, "cannot map PIO, aborting\n");
+ rc = -EIO;
+ goto err_out;
+ }
+ dev->base_addr = pio_start;
+ tp->regs_len = pio_len;
+ } else {
+ /* ioremap MMIO region */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (ioaddr == NULL) {
+ dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
+ pci_release_regions(pdev);
+ use_io = 1;
+ goto retry;
+ }
+ dev->base_addr = (long) ioaddr;
+ tp->regs_len = mmio_len;
}
- dev->base_addr = (long) ioaddr;
tp->mmio_addr = ioaddr;
- tp->regs_len = mmio_len;
-#endif /* USE_IO_OPS */
/* Bring old chips out of low-power mode. */
RTL_W8 (HltClk, 'R');
@@ -952,6 +951,14 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
"Use the \"8139cp\" driver for improved performance and stability.\n");
}
+ if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
+ pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
+ printk(KERN_INFO "8139too: OQO Model 2 detected. Forcing PIO\n");
+ use_io = 1;
+ }
+
i = rtl8139_init_board (pdev, &dev);
if (i < 0)
return i;
@@ -2381,20 +2388,24 @@ static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
np->msg_enable = datum;
}
-/* TODO: we are too slack to do reg dumping for pio, for now */
-#ifdef CONFIG_8139TOO_PIO
-#define rtl8139_get_regs_len NULL
-#define rtl8139_get_regs NULL
-#else
static int rtl8139_get_regs_len(struct net_device *dev)
{
- struct rtl8139_private *np = netdev_priv(dev);
+ struct rtl8139_private *np;
+ /* TODO: we are too slack to do reg dumping for pio, for now */
+ if (use_io)
+ return 0;
+ np = netdev_priv(dev);
return np->regs_len;
}
static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
{
- struct rtl8139_private *np = netdev_priv(dev);
+ struct rtl8139_private *np;
+
+ /* TODO: we are too slack to do reg dumping for pio, for now */
+ if (use_io)
+ return;
+ np = netdev_priv(dev);
regs->version = RTL_REGS_VER;
@@ -2402,7 +2413,6 @@ static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs,
memcpy_fromio(regbuf, np->mmio_addr, regs->len);
spin_unlock_irq(&np->lock);
}
-#endif /* CONFIG_8139TOO_MMIO */
static int rtl8139_get_sset_count(struct net_device *dev, int sset)
{
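
The 8139too.c hunks above replace the compile-time USE_IO_OPS switch with a runtime use_io module parameter and fall back from MMIO to port I/O when the MMIO BAR cannot be mapped (see the retry: label added to rtl8139_init_board). A minimal illustrative sketch of that fallback pattern, not part of this patch, with the helper name made up and the region request/release handling simplified:

    #include <linux/pci.h>
    #include <linux/io.h>

    /* Sketch only: try BAR 1 (MMIO) first; on failure switch to port I/O
     * and retry with BAR 0, mirroring what rtl8139_init_board() now does. */
    static void __iomem *example_map_regs(struct pci_dev *pdev, int *use_io)
    {
    	void __iomem *ioaddr;

    retry:
    	ioaddr = pci_iomap(pdev, *use_io ? 0 : 1, 0);
    	if (!ioaddr && !*use_io) {
    		dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
    		*use_io = 1;
    		goto retry;
    	}
    	return ioaddr;
    }
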
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3e5e64c33e18..fa533c27052a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1926,20 +1926,6 @@ config E1000
To compile this driver as a module, choose M here. The module
will be called e1000.
-config E1000_NAPI
- bool "Use Rx Polling (NAPI)"
- depends on E1000
- help
- NAPI is a new driver API designed to reduce CPU and interrupt load
- when the driver is receiving lots of packets from the card. It is
- still somewhat experimental and thus not yet enabled by default.
-
- If your estimated Rx load is 10kpps or more, or if the card will be
- deployed on potentially unfriendly networks (e.g. in a firewall),
- then say Y here.
-
- If in doubt, say N.
-
config E1000_DISABLE_PACKET_SPLIT
bool "Disable Packet Split for PCI express adapters"
depends on E1000
@@ -2304,6 +2290,17 @@ config ATL1
To compile this driver as a module, choose M here. The module
will be called atl1.
+config ATL1E
+ tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ select MII
+ help
+ This driver supports the Atheros L1E gigabit ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1e.
+
endif # NETDEV_1000
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4b17a9ab7861..7629c9017215 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_ATL1) += atlx/
+obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_TEHUTI) += tehuti.o
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 1e39e78f1778..ffae266e2d7f 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -677,7 +677,7 @@ static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
}
static const struct ethtool_ops at91ether_ethtool_ops = {
@@ -820,7 +820,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
lp->skb = skb;
lp->skb_length = skb->len;
lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
- lp->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
/* Set address of the data in the Transmit Address register */
at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
@@ -843,34 +843,33 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
*/
static struct net_device_stats *at91ether_stats(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
int ale, lenerr, seqe, lcol, ecol;
if (netif_running(dev)) {
- lp->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
+ dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
ale = at91_emac_read(AT91_EMAC_ALE);
- lp->stats.rx_frame_errors += ale; /* Alignment errors */
+ dev->stats.rx_frame_errors += ale; /* Alignment errors */
lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
- lp->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
+ dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
seqe = at91_emac_read(AT91_EMAC_SEQE);
- lp->stats.rx_crc_errors += seqe; /* CRC error */
- lp->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
- lp->stats.rx_errors += (ale + lenerr + seqe
+ dev->stats.rx_crc_errors += seqe; /* CRC error */
+ dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
+ dev->stats.rx_errors += (ale + lenerr + seqe
+ at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));
- lp->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
- lp->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
- lp->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
- lp->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
+ dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
+ dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
+ dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
+ dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
lcol = at91_emac_read(AT91_EMAC_LCOL);
ecol = at91_emac_read(AT91_EMAC_ECOL);
- lp->stats.tx_window_errors += lcol; /* Late collisions */
- lp->stats.tx_aborted_errors += ecol; /* 16 collisions */
+ dev->stats.tx_window_errors += lcol; /* Late collisions */
+ dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
- lp->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
+ dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
}
- return &lp->stats;
+ return &dev->stats;
}
/*
@@ -896,16 +895,16 @@ static void at91ether_rx(struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
dev->last_rx = jiffies;
- lp->stats.rx_bytes += pktlen;
+ dev->stats.rx_bytes += pktlen;
netif_rx(skb);
}
else {
- lp->stats.rx_dropped += 1;
+ dev->stats.rx_dropped += 1;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
}
if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
- lp->stats.multicast++;
+ dev->stats.multicast++;
dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */
if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */
@@ -934,7 +933,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
/* The TCOM bit is set even if the transmission failed. */
if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
- lp->stats.tx_errors += 1;
+ dev->stats.tx_errors += 1;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
diff --git a/drivers/net/arm/at91_ether.h b/drivers/net/arm/at91_ether.h
index a38fd2d053a6..353f4dab62be 100644
--- a/drivers/net/arm/at91_ether.h
+++ b/drivers/net/arm/at91_ether.h
@@ -84,7 +84,6 @@ struct recv_desc_bufs
struct at91_private
{
- struct net_device_stats stats;
struct mii_if_info mii; /* ethtool support */
struct at91_eth_data board_data; /* board-specific configuration */
struct clk *ether_clk; /* clock */
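
The at91_ether hunks above drop the driver-private struct net_device_stats and count into the stats member embedded in struct net_device instead, which is what the networking core reports by default. A minimal illustrative sketch of the pattern, not part of this patch (the function is hypothetical):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Count a completed transmission directly into dev->stats; no private
     * copy of net_device_stats and no custom get_stats handler are needed. */
    static void example_tx_done(struct net_device *dev, struct sk_buff *skb, bool ok)
    {
    	if (ok) {
    		dev->stats.tx_packets++;
    		dev->stats.tx_bytes += skb->len;
    	} else {
    		dev->stats.tx_errors++;
    	}
    }
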
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index ecd8fc6146e9..7a14980f3472 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -848,7 +848,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
ep->res = request_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1,
- pdev->dev.bus_id);
+ dev_name(&pdev->dev));
if (ep->res == NULL) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index e9d15eccad08..5c5f1e470d3c 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -535,7 +535,7 @@ static int __init etherh_addr(char *addr, struct expansion_card *ec)
if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
printk(KERN_ERR "%s: unable to read podule description string\n",
- ec->dev.bus_id);
+ dev_name(&ec->dev));
goto no_addr;
}
@@ -554,7 +554,7 @@ static int __init etherh_addr(char *addr, struct expansion_card *ec)
}
printk(KERN_ERR "%s: unable to parse MAC address: %s\n",
- ec->dev.bus_id, cd.d.string);
+ dev_name(&ec->dev), cd.d.string);
no_addr:
return -ENODEV;
@@ -585,7 +585,7 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev->dev.parent->bus_id,
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
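
The at91_ether, ep93xx_eth and etherh hunks above all stop reading the fixed-size dev.bus_id array and use the dev_name() accessor instead. A minimal illustrative sketch of the accessor in an ethtool get_drvinfo handler, not part of this patch (driver and version strings are placeholders):

    #include <linux/device.h>
    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static void example_get_drvinfo(struct net_device *dev,
    				struct ethtool_drvinfo *info)
    {
    	strlcpy(info->driver, "example", sizeof(info->driver));
    	strlcpy(info->version, "1.0", sizeof(info->version));
    	/* dev_name() returns the device's name string, formerly bus_id */
    	strlcpy(info->bus_info, dev_name(dev->dev.parent),
    		sizeof(info->bus_info));
    }
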
diff --git a/drivers/net/atl1e/Makefile b/drivers/net/atl1e/Makefile
new file mode 100644
index 000000000000..bc11be824e76
--- /dev/null
+++ b/drivers/net/atl1e/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ATL1E) += atl1e.o
+atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
new file mode 100644
index 000000000000..b645fa0f3f64
--- /dev/null
+++ b/drivers/net/atl1e/atl1e.h
@@ -0,0 +1,503 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2007 xiong huang <xiong.huang@atheros.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL1E_H_
+#define _ATL1E_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/tcp.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "atl1e_hw.h"
+
+#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
+#define CMD_IO_SPACE 0x0001
+#define CMD_MEMORY_SPACE 0x0002
+#define CMD_BUS_MASTER 0x0004
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+/* Wake Up Filter Control */
+#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
+#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+
+#define SPEED_0 0xffff
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* Error Codes */
+#define AT_ERR_EEPROM 1
+#define AT_ERR_PHY 2
+#define AT_ERR_CONFIG 3
+#define AT_ERR_PARAM 4
+#define AT_ERR_MAC_TYPE 5
+#define AT_ERR_PHY_TYPE 6
+#define AT_ERR_PHY_SPEED 7
+#define AT_ERR_PHY_RES 8
+#define AT_ERR_TIMEOUT 9
+
+#define MAX_JUMBO_FRAME_SIZE 0x2000
+
+#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \
+ _tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\
+ (((_vlan) >> 9) & 8))
+
+#define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \
+ _vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\
+ (((_tdp) & 0x88) << 5))
+
+#define AT_MAX_RECEIVE_QUEUE 4
+#define AT_PAGE_NUM_PER_QUEUE 2
+
+#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
+#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
+
+#define AT_TX_WATCHDOG (5 * HZ)
+#define AT_MAX_INT_WORK 10
+#define AT_TWSI_EEPROM_TIMEOUT 100
+#define AT_HW_MAX_IDLE_DELAY 10
+#define AT_SUSPEND_LINK_TIMEOUT 28
+
+#define AT_REGS_LEN 75
+#define AT_EEPROM_LEN 512
+#define AT_ADV_MASK (ADVERTISE_10_HALF |\
+ ADVERTISE_10_FULL |\
+ ADVERTISE_100_HALF |\
+ ADVERTISE_100_FULL |\
+ ADVERTISE_1000_FULL)
+
+/* tpd word 2 */
+#define TPD_BUFLEN_MASK 0x3FFF
+#define TPD_BUFLEN_SHIFT 0
+#define TPD_DMAINT_MASK 0x0001
+#define TPD_DMAINT_SHIFT 14
+#define TPD_PKTNT_MASK 0x0001
+#define TPD_PKTINT_SHIFT 15
+#define TPD_VLANTAG_MASK 0xFFFF
+#define TPD_VLAN_SHIFT 16
+
+/* tpd word 3 bits 0:4 */
+#define TPD_EOP_MASK 0x0001
+#define TPD_EOP_SHIFT 0
+#define TPD_IP_VERSION_MASK 0x0001
+#define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */
+#define TPD_INS_VL_TAG_MASK 0x0001
+#define TPD_INS_VL_TAG_SHIFT 2
+#define TPD_CC_SEGMENT_EN_MASK 0x0001
+#define TPD_CC_SEGMENT_EN_SHIFT 3
+#define TPD_SEGMENT_EN_MASK 0x0001
+#define TPD_SEGMENT_EN_SHIFT 4
+
+/* tdp word 3 bits 5:7 if ip version is 0 */
+#define TPD_IP_CSUM_MASK 0x0001
+#define TPD_IP_CSUM_SHIFT 5
+#define TPD_TCP_CSUM_MASK 0x0001
+#define TPD_TCP_CSUM_SHIFT 6
+#define TPD_UDP_CSUM_MASK 0x0001
+#define TPD_UDP_CSUM_SHIFT 7
+
+/* tdp word 3 bits 5:7 if ip version is 1 */
+#define TPD_V6_IPHLLO_MASK 0x0007
+#define TPD_V6_IPHLLO_SHIFT 7
+
+/* tpd word 3 bits 8:9 bit */
+#define TPD_VL_TAGGED_MASK 0x0001
+#define TPD_VL_TAGGED_SHIFT 8
+#define TPD_ETHTYPE_MASK 0x0001
+#define TPD_ETHTYPE_SHIFT 9
+
+/* tdp word 3 bits 10:13 if ip version is 0 */
+#define TDP_V4_IPHL_MASK 0x000F
+#define TPD_V4_IPHL_SHIFT 10
+
+/* tdp word 3 bits 10:13 if ip version is 1 */
+#define TPD_V6_IPHLHI_MASK 0x000F
+#define TPD_V6_IPHLHI_SHIFT 10
+
+/* tpd word 3 bit 14:31 if segment enabled */
+#define TPD_TCPHDRLEN_MASK 0x000F
+#define TPD_TCPHDRLEN_SHIFT 14
+#define TPD_HDRFLAG_MASK 0x0001
+#define TPD_HDRFLAG_SHIFT 18
+#define TPD_MSS_MASK 0x1FFF
+#define TPD_MSS_SHIFT 19
+
+/* tdp word 3 bit 16:31 if custom csum enabled */
+#define TPD_PLOADOFFSET_MASK 0x00FF
+#define TPD_PLOADOFFSET_SHIFT 16
+#define TPD_CCSUMOFFSET_MASK 0x00FF
+#define TPD_CCSUMOFFSET_SHIFT 24
+
+struct atl1e_tpd_desc {
+ __le64 buffer_addr;
+ __le32 word2;
+ __le32 word3;
+};
+
+/* how about 0x2000 */
+#define MAX_TX_BUF_LEN 0x2000
+#define MAX_TX_BUF_SHIFT 13
+/*#define MAX_TX_BUF_LEN 0x3000 */
+
+/* rrs word 1 bit 0:31 */
+#define RRS_RX_CSUM_MASK 0xFFFF
+#define RRS_RX_CSUM_SHIFT 0
+#define RRS_PKT_SIZE_MASK 0x3FFF
+#define RRS_PKT_SIZE_SHIFT 16
+#define RRS_CPU_NUM_MASK 0x0003
+#define RRS_CPU_NUM_SHIFT 30
+
+#define RRS_IS_RSS_IPV4 0x0001
+#define RRS_IS_RSS_IPV4_TCP 0x0002
+#define RRS_IS_RSS_IPV6 0x0004
+#define RRS_IS_RSS_IPV6_TCP 0x0008
+#define RRS_IS_IPV6 0x0010
+#define RRS_IS_IP_FRAG 0x0020
+#define RRS_IS_IP_DF 0x0040
+#define RRS_IS_802_3 0x0080
+#define RRS_IS_VLAN_TAG 0x0100
+#define RRS_IS_ERR_FRAME 0x0200
+#define RRS_IS_IPV4 0x0400
+#define RRS_IS_UDP 0x0800
+#define RRS_IS_TCP 0x1000
+#define RRS_IS_BCAST 0x2000
+#define RRS_IS_MCAST 0x4000
+#define RRS_IS_PAUSE 0x8000
+
+#define RRS_ERR_BAD_CRC 0x0001
+#define RRS_ERR_CODE 0x0002
+#define RRS_ERR_DRIBBLE 0x0004
+#define RRS_ERR_RUNT 0x0008
+#define RRS_ERR_RX_OVERFLOW 0x0010
+#define RRS_ERR_TRUNC 0x0020
+#define RRS_ERR_IP_CSUM 0x0040
+#define RRS_ERR_L4_CSUM 0x0080
+#define RRS_ERR_LENGTH 0x0100
+#define RRS_ERR_DES_ADDR 0x0200
+
+struct atl1e_recv_ret_status {
+ u16 seq_num;
+ u16 hash_lo;
+ __le32 word1;
+ u16 pkt_flag;
+ u16 err_flag;
+ u16 hash_hi;
+ u16 vtag;
+};
+
+enum atl1e_dma_req_block {
+ atl1e_dma_req_128 = 0,
+ atl1e_dma_req_256 = 1,
+ atl1e_dma_req_512 = 2,
+ atl1e_dma_req_1024 = 3,
+ atl1e_dma_req_2048 = 4,
+ atl1e_dma_req_4096 = 5
+};
+
+enum atl1e_rrs_type {
+ atl1e_rrs_disable = 0,
+ atl1e_rrs_ipv4 = 1,
+ atl1e_rrs_ipv4_tcp = 2,
+ atl1e_rrs_ipv6 = 4,
+ atl1e_rrs_ipv6_tcp = 8
+};
+
+enum atl1e_nic_type {
+ athr_l1e = 0,
+ athr_l2e_revA = 1,
+ athr_l2e_revB = 2
+};
+
+struct atl1e_hw_stats {
+ /* rx */
+ unsigned long rx_ok; /* The number of good packet received. */
+ unsigned long rx_bcast; /* The number of good broadcast packet received. */
+ unsigned long rx_mcast; /* The number of good multicast packet received. */
+ unsigned long rx_pause; /* The number of Pause packet received. */
+ unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */
+ unsigned long rx_fcs_err; /* The number of packets with bad FCS. */
+ unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */
+ unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */
+ unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */
+ unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */
+ unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */
+ unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */
+ unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */
+ unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */
+ unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */
+ unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */
+ unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */
+ unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */
+ unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */
+ unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */
+ unsigned long rx_align_err; /* Alignment Error */
+ unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */
+ unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */
+ unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */
+
+ /* tx */
+ unsigned long tx_ok; /* The number of good packet transmitted. */
+ unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */
+ unsigned long tx_mcast; /* The number of good multicast packet transmitted. */
+ unsigned long tx_pause; /* The number of Pause packet transmitted. */
+ unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
+ unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */
+ unsigned long tx_defer; /* The number of packets transmitted that is deferred. */
+ unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
+ unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */
+ unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */
+ unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */
+ unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */
+ unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */
+ unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */
+ unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */
+ unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */
+ unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */
+ unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */
+ unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
+ unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
+ unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */
+ unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */
+ unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */
+ unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */
+ unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */
+};
+
+struct atl1e_hw {
+ u8 __iomem *hw_addr; /* inner register address */
+ resource_size_t mem_rang;
+ struct atl1e_adapter *adapter;
+ enum atl1e_nic_type nic_type;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u16 pci_cmd_word;
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_mac_addr[ETH_ALEN];
+ u8 preamble_len;
+ u16 max_frame_size;
+ u16 rx_jumbo_th;
+ u16 tx_jumbo_th;
+
+ u16 media_type;
+#define MEDIA_TYPE_AUTO_SENSOR 0
+#define MEDIA_TYPE_100M_FULL 1
+#define MEDIA_TYPE_100M_HALF 2
+#define MEDIA_TYPE_10M_FULL 3
+#define MEDIA_TYPE_10M_HALF 4
+
+ u16 autoneg_advertised;
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ u16 imt; /* Interrupt Moderator timer ( 2us resolution) */
+ u16 ict; /* Interrupt Clear timer (2us resolution) */
+ u32 smb_timer;
+ u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
+ interrupt request */
+ u16 tpd_thresh;
+ u16 rx_count_down; /* 2us resolution */
+ u16 tx_count_down;
+
+ u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
+ enum atl1e_rrs_type rrs_type;
+ u32 base_cpu;
+ u32 indirect_tab;
+
+ enum atl1e_dma_req_block dmar_block;
+ enum atl1e_dma_req_block dmaw_block;
+ u8 dmaw_dly_cnt;
+ u8 dmar_dly_cnt;
+
+ bool phy_configured;
+ bool re_autoneg;
+ bool emi_ca;
+};
+
+/*
+ * wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct atl1e_tx_buffer {
+ struct sk_buff *skb;
+ u16 length;
+ dma_addr_t dma;
+};
+
+struct atl1e_rx_page {
+ dma_addr_t dma; /* receive page DMA address */
+ u8 *addr; /* receive page virtual address */
+ dma_addr_t write_offset_dma; /* the DMA address which contain the
+ receive data offset in the page */
+ u32 *write_offset_addr; /* the virtual address which contains
+ the receive data offset in the page */
+ u32 read_offset; /* the offset where we have read */
+};
+
+struct atl1e_rx_page_desc {
+ struct atl1e_rx_page rx_page[AT_PAGE_NUM_PER_QUEUE];
+ u8 rx_using;
+ u16 rx_nxseq;
+};
+
+/* transmit packet descriptor (tpd) ring */
+struct atl1e_tx_ring {
+ struct atl1e_tpd_desc *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 count; /* the count of transmit rings */
+ rwlock_t tx_lock;
+ u16 next_to_use;
+ atomic_t next_to_clean;
+ struct atl1e_tx_buffer *tx_buffer;
+ dma_addr_t cmb_dma;
+ u32 *cmb;
+};
+
+/* receive packet descriptor ring */
+struct atl1e_rx_ring {
+ void *desc;
+ dma_addr_t dma;
+ int size;
+ u32 page_size; /* bytes length of rxf page */
+ u32 real_page_size; /* real_page_size = page_size + jumbo + align */
+ struct atl1e_rx_page_desc rx_page_desc[AT_MAX_RECEIVE_QUEUE];
+};
+
+/* board specific private data structure */
+struct atl1e_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct vlan_group *vlgrp;
+ struct napi_struct napi;
+ struct mii_if_info mii; /* MII interface info */
+ struct atl1e_hw hw;
+ struct atl1e_hw_stats hw_stats;
+ struct net_device_stats net_stats;
+
+ bool have_msi;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+
+ spinlock_t mdio_lock;
+ spinlock_t tx_lock;
+ atomic_t irq_sem;
+
+ struct work_struct reset_task;
+ struct work_struct link_chg_task;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_config_timer;
+
+ /* All Descriptor memory */
+ dma_addr_t ring_dma;
+ void *ring_vir_addr;
+ int ring_size;
+
+ struct atl1e_tx_ring tx_ring;
+ struct atl1e_rx_ring rx_ring;
+ int num_rx_queues;
+ unsigned long flags;
+#define __AT_TESTING 0x0001
+#define __AT_RESETTING 0x0002
+#define __AT_DOWN 0x0003
+
+ u32 bd_number; /* board number;*/
+ u32 pci_state[16];
+ u32 *config_space;
+};
+
+#define AT_WRITE_REG(a, reg, value) ( \
+ writel((value), ((a)->hw_addr + reg)))
+
+#define AT_WRITE_FLUSH(a) (\
+ readl((a)->hw_addr))
+
+#define AT_READ_REG(a, reg) ( \
+ readl((a)->hw_addr + reg))
+
+#define AT_WRITE_REGB(a, reg, value) (\
+ writeb((value), ((a)->hw_addr + reg)))
+
+#define AT_READ_REGB(a, reg) (\
+ readb((a)->hw_addr + reg))
+
+#define AT_WRITE_REGW(a, reg, value) (\
+ writew((value), ((a)->hw_addr + reg)))
+
+#define AT_READ_REGW(a, reg) (\
+ readw((a)->hw_addr + reg))
+
+#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
+
+#define AT_READ_REG_ARRAY(a, reg, offset) ( \
+ readl(((a)->hw_addr + reg) + ((offset) << 2)))
+
+extern char atl1e_driver_name[];
+extern char atl1e_driver_version[];
+
+extern void atl1e_check_options(struct atl1e_adapter *adapter);
+extern int atl1e_up(struct atl1e_adapter *adapter);
+extern void atl1e_down(struct atl1e_adapter *adapter);
+extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
+extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+#endif /* _ATL1_E_H_ */
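
atl1e.h above lays out the transmit packet descriptor (TPD) as packed 32-bit words, each field described by a MASK/SHIFT pair, alongside simple readl/writel register wrappers. A small stand-alone sketch of packing and unpacking such a field, not part of this patch (the constants are copied from the header; the driver's __le32/cpu_to_le32 endianness handling is omitted):

    #include <stdio.h>
    #include <stdint.h>

    /* Constants taken from the TPD word 2 layout in atl1e.h */
    #define TPD_BUFLEN_MASK   0x3FFF
    #define TPD_BUFLEN_SHIFT  0
    #define TPD_VLANTAG_MASK  0xFFFF
    #define TPD_VLAN_SHIFT    16

    int main(void)
    {
    	uint32_t word2 = 0;
    	uint16_t buf_len = 1514, vlan = 0x0123;

    	/* pack: mask the value, then shift it into position */
    	word2 |= (uint32_t)(buf_len & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
    	word2 |= (uint32_t)(vlan & TPD_VLANTAG_MASK) << TPD_VLAN_SHIFT;

    	/* unpack: shift back down, then mask */
    	printf("word2=0x%08x buflen=%u vlan=0x%04x\n", word2,
    	       (word2 >> TPD_BUFLEN_SHIFT) & TPD_BUFLEN_MASK,
    	       (word2 >> TPD_VLAN_SHIFT) & TPD_VLANTAG_MASK);
    	return 0;
    }
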
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
new file mode 100644
index 000000000000..cdc3b85b10b9
--- /dev/null
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "atl1e.h"
+
+static int atl1e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->nic_type == athr_l1e)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_TP;
+
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->advertising |= hw->autoneg_advertised;
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (adapter->link_speed != SPEED_0) {
+ ecmd->speed = adapter->link_speed;
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_ENABLE;
+ return 0;
+}
+
+static int atl1e_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u16 adv4, adv9;
+
+ if ((ecmd->advertising&ADVERTISE_1000_FULL)) {
+ if (hw->nic_type == athr_l1e) {
+ hw->autoneg_advertised =
+ ecmd->advertising & AT_ADV_MASK;
+ } else {
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+ } else if (ecmd->advertising&ADVERTISE_1000_HALF) {
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ } else {
+ hw->autoneg_advertised =
+ ecmd->advertising & AT_ADV_MASK;
+ }
+ ecmd->advertising = hw->autoneg_advertised |
+ ADVERTISED_TP | ADVERTISED_Autoneg;
+
+ adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+ adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF)
+ adv4 |= MII_AR_10T_HD_CAPS;
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL)
+ adv4 |= MII_AR_10T_FD_CAPS;
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF)
+ adv4 |= MII_AR_100TX_HD_CAPS;
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL)
+ adv4 |= MII_AR_100TX_FD_CAPS;
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
+ adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+
+ if (adv4 != hw->mii_autoneg_adv_reg ||
+ adv9 != hw->mii_1000t_ctrl_reg) {
+ hw->mii_autoneg_adv_reg = adv4;
+ hw->mii_1000t_ctrl_reg = adv9;
+ hw->re_autoneg = true;
+ }
+
+ } else {
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+
+ /* reset the link */
+
+ if (netif_running(adapter->netdev)) {
+ atl1e_down(adapter);
+ atl1e_up(adapter);
+ } else
+ atl1e_reset_hw(&adapter->hw);
+
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return 0;
+}
+
+static u32 atl1e_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static u32 atl1e_get_msglevel(struct net_device *netdev)
+{
+#ifdef DBG
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static void atl1e_set_msglevel(struct net_device *netdev, u32 data)
+{
+}
+
+static int atl1e_get_regs_len(struct net_device *netdev)
+{
+ return AT_REGS_LEN * sizeof(u32);
+}
+
+static void atl1e_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, AT_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP);
+ regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
+ regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG);
+ regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL);
+ regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
+ regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL);
+ regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT);
+ regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
+ regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL);
+ regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER);
+ regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS);
+ regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL);
+ regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK);
+ regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL);
+ regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG);
+ regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
+ regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4);
+ regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE);
+ regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4);
+ regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
+ regs_buff[20] = AT_READ_REG(hw, REG_MTU);
+ regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL);
+ regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR);
+ regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN);
+ regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR);
+ regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
+ regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR);
+ regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN);
+ regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR);
+ regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR);
+
+ atl1e_read_phy_reg(hw, MII_BMCR, &phy_data);
+ regs_buff[73] = (u32)phy_data;
+ atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
+ regs_buff[74] = (u32)phy_data;
+}
+
+static int atl1e_get_eeprom_len(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ if (!atl1e_check_eeprom_exist(&adapter->hw))
+ return AT_EEPROM_LEN;
+ else
+ return 0;
+}
+
+static int atl1e_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 *eeprom_buff;
+ int first_dword, last_dword;
+ int ret_val = 0;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (atl1e_check_eeprom_exist(hw)) /* not exist */
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_dword = eeprom->offset >> 2;
+ last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+
+ eeprom_buff = kmalloc(sizeof(u32) *
+ (last_dword - first_dword + 1), GFP_KERNEL);
+ if (eeprom_buff == NULL)
+ return -ENOMEM;
+
+ for (i = first_dword; i < last_dword; i++) {
+ if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) {
+ kfree(eeprom_buff);
+ return -EIO;
+ }
+ }
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int atl1e_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 *eeprom_buff;
+ u32 *ptr;
+ int first_dword, last_dword;
+ int ret_val = 0;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EINVAL;
+
+ first_dword = eeprom->offset >> 2;
+ last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+ eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL);
+ if (eeprom_buff == NULL)
+ return -ENOMEM;
+
+ ptr = (u32 *)eeprom_buff;
+
+ if (eeprom->offset & 3) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) {
+ ret_val = -EIO;
+ goto out;
+ }
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 3)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+
+ if (!atl1e_read_eeprom(hw, last_dword * 4,
+ &(eeprom_buff[last_dword - first_dword]))) {
+ ret_val = -EIO;
+ goto out;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_dword - first_dword + 1; i++) {
+ if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4),
+ eeprom_buff[i])) {
+ ret_val = -EIO;
+ goto out;
+ }
+ }
+out:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void atl1e_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ strncpy(drvinfo->driver, atl1e_driver_name, 32);
+ strncpy(drvinfo->version, atl1e_driver_version, 32);
+ strncpy(drvinfo->fw_version, "L1e", 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = atl1e_get_regs_len(netdev);
+ drvinfo->eedump_len = atl1e_get_eeprom_len(netdev);
+}
+
+static void atl1e_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ if (adapter->wol & AT_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & AT_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & AT_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & AT_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & AT_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+
+ return;
+}
+
+static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_MCAST | WAKE_BCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= AT_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= AT_WUFC_LNKC;
+
+ return 0;
+}
+
+static int atl1e_nway_reset(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ atl1e_reinit_locked(adapter);
+ return 0;
+}
+
+static struct ethtool_ops atl1e_ethtool_ops = {
+ .get_settings = atl1e_get_settings,
+ .set_settings = atl1e_set_settings,
+ .get_drvinfo = atl1e_get_drvinfo,
+ .get_regs_len = atl1e_get_regs_len,
+ .get_regs = atl1e_get_regs,
+ .get_wol = atl1e_get_wol,
+ .set_wol = atl1e_set_wol,
+ .get_msglevel = atl1e_get_msglevel,
+ .set_msglevel = atl1e_set_msglevel,
+ .nway_reset = atl1e_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = atl1e_get_eeprom_len,
+ .get_eeprom = atl1e_get_eeprom,
+ .set_eeprom = atl1e_set_eeprom,
+ .get_tx_csum = atl1e_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+#endif
+};
+
+void atl1e_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+}
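
atl1e_get_eeprom()/atl1e_set_eeprom() above access the EEPROM in 32-bit words, so a byte-granular ethtool request is rounded out to whole dwords and any partially covered edge word is read back before being rewritten. A small stand-alone sketch of just that index arithmetic, not part of this patch (it reuses the same expressions the driver does):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int offset = 6, len = 9;   /* byte range from an ethtool request */

    	/* Same rounding the driver performs before touching the EEPROM */
    	unsigned int first_dword = offset >> 2;             /* word holding the first byte */
    	unsigned int last_dword = (offset + len - 1) >> 2;  /* word holding the last byte  */
    	unsigned int words = last_dword - first_dword + 1;

    	printf("bytes [%u, %u) touch dwords %u..%u (%u words)\n",
    	       offset, offset + len, first_dword, last_dword, words);

    	/* A non-zero (offset & 3) or ((offset + len) & 3) means the edge
    	 * words must be read first and merged (read-modify-write). */
    	return 0;
    }
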
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
new file mode 100644
index 000000000000..949e75358bf0
--- /dev/null
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+
+#include "atl1e.h"
+
+/*
+ * check_eeprom_exist
+ * return 0 if eeprom exist
+ */
+int atl1e_check_eeprom_exist(struct atl1e_hw *hw)
+{
+ u32 value;
+
+ value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
+ if (value & SPI_FLASH_CTRL_EN_VPD) {
+ value &= ~SPI_FLASH_CTRL_EN_VPD;
+ AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+ }
+ value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST);
+ return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+void atl1e_hw_set_mac_addr(struct atl1e_hw *hw)
+{
+ u32 value;
+ /*
+ * 00-0B-6A-F6-00-DC
+ * 0: 6AF600DC 1: 000B
+ * low dword
+ */
+ value = (((u32)hw->mac_addr[2]) << 24) |
+ (((u32)hw->mac_addr[3]) << 16) |
+ (((u32)hw->mac_addr[4]) << 8) |
+ (((u32)hw->mac_addr[5])) ;
+ AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
+ /* high dword */
+ value = (((u32)hw->mac_addr[0]) << 8) |
+ (((u32)hw->mac_addr[1])) ;
+ AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
+}
+
+/*
+ * atl1e_get_permanent_address
+ * return 0 if get valid mac address,
+ */
+static int atl1e_get_permanent_address(struct atl1e_hw *hw)
+{
+ u32 addr[2];
+ u32 i;
+ u32 twsi_ctrl_data;
+ u8 eth_addr[ETH_ALEN];
+
+ if (is_valid_ether_addr(hw->perm_mac_addr))
+ return 0;
+
+ /* init */
+ addr[0] = addr[1] = 0;
+
+ if (!atl1e_check_eeprom_exist(hw)) {
+ /* eeprom exist */
+ twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
+ twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
+ AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
+ for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
+ msleep(10);
+ twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
+ if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
+ break;
+ }
+ if (i >= AT_TWSI_EEPROM_TIMEOUT)
+ return AT_ERR_TIMEOUT;
+ }
+
+ /* maybe MAC-address is from BIOS */
+ addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
+ addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4);
+ *(u32 *) &eth_addr[2] = swab32(addr[0]);
+ *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
+
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+
+ return AT_ERR_EEPROM;
+}
+
+bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value)
+{
+ return true;
+}
+
+bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value)
+{
+ int i;
+ u32 control;
+
+ if (offset & 3)
+ return false; /* address do not align */
+
+ AT_WRITE_REG(hw, REG_VPD_DATA, 0);
+ control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+ AT_WRITE_REG(hw, REG_VPD_CAP, control);
+
+ for (i = 0; i < 10; i++) {
+ msleep(2);
+ control = AT_READ_REG(hw, REG_VPD_CAP);
+ if (control & VPD_CAP_VPD_FLAG)
+ break;
+ }
+ if (control & VPD_CAP_VPD_FLAG) {
+ *p_value = AT_READ_REG(hw, REG_VPD_DATA);
+ return true;
+ }
+ return false; /* timeout */
+}
+
+void atl1e_force_ps(struct atl1e_hw *hw)
+{
+ AT_WRITE_REGW(hw, REG_GPHY_CTRL,
+ GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
+}
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+int atl1e_read_mac_addr(struct atl1e_hw *hw)
+{
+ int err = 0;
+
+ err = atl1e_get_permanent_address(hw);
+ if (err)
+ return AT_ERR_EEPROM;
+ memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
+ return 0;
+}
+
+/*
+ * atl1e_hash_mc_addr
+ * purpose
+ * set hash value for a multicast address
+ * hash calcu processing :
+ * 1. calcu 32bit CRC for multicast address
+ * 2. reverse crc with MSB to LSB
+ */
+u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
+{
+ u32 crc32;
+ u32 value = 0;
+ int i;
+
+ crc32 = ether_crc_le(6, mc_addr);
+ crc32 = ~crc32;
+ for (i = 0; i < 32; i++)
+ value |= (((crc32 >> i) & 1) << (31 - i));
+
+ return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+
+ /*
+ * The HASH Table is a register array of 2 32-bit registers.
+ * It is treated like an array of 64 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is determined by the
+ * upper 7 bits of the hash value and the bit within that
+ * register are determined by the lower 5 bits of the value.
+ */
+ hash_reg = (hash_value >> 31) & 0x1;
+ hash_bit = (hash_value >> 26) & 0x1F;
+
+ mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
+}
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+ u32 val;
+ int i;
+
+ val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+ MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+
+ AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+ wmb();
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = AT_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ wmb();
+ }
+ if (!(val & (MDIO_START | MDIO_BUSY))) {
+ *phy_data = (u16)val;
+ return 0;
+ }
+
+ return AT_ERR_PHY;
+}
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ int i;
+ u32 val;
+
+ val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+ (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+ MDIO_SUP_PREAMBLE |
+ MDIO_START |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+
+ AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+ wmb();
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = AT_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ wmb();
+ }
+
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ return 0;
+
+ return AT_ERR_PHY;
+}
+
+/*
+ * atl1e_init_pcie - init PCIE module
+ */
+static void atl1e_init_pcie(struct atl1e_hw *hw)
+{
+ u32 value;
+ /* comment the 2 lines below to save more power when suspended
+ value = LTSSM_TEST_MODE_DEF;
+ AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
+ */
+
+ /* pcie flow control mode change */
+ value = AT_READ_REG(hw, 0x1008);
+ value |= 0x8000;
+ AT_WRITE_REG(hw, 0x1008, value);
+}
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ if (0 != hw->mii_autoneg_adv_reg)
+ return 0;
+ /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */
+ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
+ mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
+
+ /*
+ * Need to parse autoneg_advertised and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /*
+ * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
+
+ /*
+ * Need to parse MediaType and setup the
+ * appropriate PHY registers.
+ */
+ switch (hw->media_type) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
+ MII_AR_10T_FD_CAPS |
+ MII_AR_100TX_HD_CAPS |
+ MII_AR_100TX_FD_CAPS);
+ hw->autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_10_FULL |
+ ADVERTISE_100_HALF |
+ ADVERTISE_100_FULL;
+ if (hw->nic_type == athr_l1e) {
+ mii_1000t_ctrl_reg |=
+ MII_AT001_CR_1000T_FD_CAPS;
+ hw->autoneg_advertised |= ADVERTISE_1000_FULL;
+ }
+ break;
+
+ case MEDIA_TYPE_100M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_100_FULL;
+ break;
+
+ case MEDIA_TYPE_100M_HALF:
+ mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_100_HALF;
+ break;
+
+ case MEDIA_TYPE_10M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_10_FULL;
+ break;
+
+ default:
+ mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_10_HALF;
+ break;
+ }
+
+ /* flow control fixed to enable all */
+ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+
+ hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+ hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
+
+ ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
+ ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Resets the PHY and makes all configuration take effect
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII control register (for F001 bug)
+ */
+int atl1e_phy_commit(struct atl1e_hw *hw)
+{
+ struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ int ret_val;
+ u16 phy_data;
+
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+
+ ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
+ if (ret_val) {
+ u32 val;
+ int i;
+ /**************************************
+ * pcie serdes link may be down !
+ **************************************/
+ for (i = 0; i < 25; i++) {
+ msleep(1);
+ val = AT_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if (0 != (val & (MDIO_START | MDIO_BUSY))) {
+ dev_err(&pdev->dev,
+ "pcie linkdown at least for 25ms\n");
+ return ret_val;
+ }
+
+ dev_err(&pdev->dev, "pcie linkup after %d ms\n", i);
+ }
+ return 0;
+}
+
+int atl1e_phy_init(struct atl1e_hw *hw)
+{
+ struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ s32 ret_val;
+ u16 phy_val;
+
+ if (hw->phy_configured) {
+ if (hw->re_autoneg) {
+ hw->re_autoneg = false;
+ return atl1e_restart_autoneg(hw);
+ }
+ return 0;
+ }
+
+ /* RESET GPHY Core */
+ AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
+ msleep(2);
+ AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
+ GPHY_CTRL_EXT_RESET);
+ msleep(2);
+
+ /* patches */
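+ /*
+ * The vendor debug registers below are accessed indirectly: the
+ * register index is written to MII_DBG_ADDR (0x1D) and the value
+ * to MII_DBG_DATA (0x1E).
+ */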
+ /* p1. enable hibernation mode */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
+ if (ret_val)
+ return ret_val;
+ /* p2. set Class A/B for all modes */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ phy_val = 0x02ef;
+ /* remove Class AB */
+ /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
+ if (ret_val)
+ return ret_val;
+ /* p3. 10B ??? */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
+ if (ret_val)
+ return ret_val;
+ /* p4. 1000T power */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
+ if (ret_val)
+ return ret_val;
+
+ msleep(1);
+
+ /* Enable PHY LinkChange interrupt */
+ ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
+ if (ret_val) {
+ dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n");
+ return ret_val;
+ }
+ /* setup AutoNeg parameters */
+ ret_val = atl1e_phy_setup_autoneg_adv(hw);
+ if (ret_val) {
+ dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ /* SW reset & enable auto-neg to restart auto-neg */
+ dev_dbg(&pdev->dev, "Restarting Auto-Neg");
+ ret_val = atl1e_phy_commit(hw);
+ if (ret_val) {
+ dev_err(&pdev->dev, "Error Resetting the phy");
+ return ret_val;
+ }
+
+ hw->phy_configured = true;
+
+ return 0;
+}
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ * hw - Struct containing variables accessed by shared code
+ * return : 0 on success, AT_ERR_TIMEOUT if the hardware fails to go idle
+ */
+int atl1e_reset_hw(struct atl1e_hw *hw)
+{
+ struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+
+ u32 idle_status_data = 0;
+ u16 pci_cfg_cmd_word = 0;
+ int timeout = 0;
+
+ /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+ pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+ if ((pci_cfg_cmd_word & (CMD_IO_SPACE |
+ CMD_MEMORY_SPACE | CMD_BUS_MASTER))
+ != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) {
+ pci_cfg_cmd_word |= (CMD_IO_SPACE |
+ CMD_MEMORY_SPACE | CMD_BUS_MASTER);
+ pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word);
+ }
+
+ /*
+ * Issue Soft Reset to the MAC. This will reset the chip's
+ * transmit, receive and DMA engines. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ AT_WRITE_REG(hw, REG_MASTER_CTRL,
+ MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST);
+ wmb();
+ msleep(1);
+
+ /* Wait at least 10ms for all modules to be idle */
+ for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
+ idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS);
+ if (idle_status_data == 0)
+ break;
+ msleep(1);
+ cpu_relax();
+ }
+
+ if (timeout >= AT_HW_MAX_IDLE_DELAY) {
+ dev_err(&pdev->dev,
+ "MAC state machine cann't be idle since"
+ " disabled for 10ms second\n");
+ return AT_ERR_TIMEOUT;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the multicast table,
+ * and calls routines to set up the link.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ */
+int atl1e_init_hw(struct atl1e_hw *hw)
+{
+ s32 ret_val = 0;
+
+ atl1e_init_pcie(hw);
+
+ /* Zero out the multicast hash table to clear any old settings */
+ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+ ret_val = atl1e_phy_init(hw);
+
+ return ret_val;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
+{
+ int err;
+ u16 phy_data;
+
+ /* Read PHY Specific Status Register (17) */
+ err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
+ if (err)
+ return err;
+
+ if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
+ return AT_ERR_PHY_RES;
+
+ switch (phy_data & MII_AT001_PSSR_SPEED) {
+ case MII_AT001_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case MII_AT001_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case MII_AT001_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ return AT_ERR_PHY_SPEED;
+ }
+
+ if (phy_data & MII_AT001_PSSR_DPLX)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ return 0;
+}
+
+int atl1e_restart_autoneg(struct atl1e_hw *hw)
+{
+ int err = 0;
+
+ err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+ if (err)
+ return err;
+
+ if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
+ err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ hw->mii_1000t_ctrl_reg);
+ if (err)
+ return err;
+ }
+
+ err = atl1e_write_phy_reg(hw, MII_BMCR,
+ MII_CR_RESET | MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ return err;
+}
+
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
new file mode 100644
index 000000000000..5ea2f4d86cfa
--- /dev/null
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -0,0 +1,793 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATHL1E_HW_H_
+#define _ATHL1E_HW_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+
+struct atl1e_adapter;
+struct atl1e_hw;
+
+/* function prototype */
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+s32 atl1e_read_mac_addr(struct atl1e_hw *hw);
+s32 atl1e_init_hw(struct atl1e_hw *hw);
+s32 atl1e_phy_commit(struct atl1e_hw *hw);
+s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex);
+u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex);
+u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr);
+void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value);
+s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data);
+s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data);
+s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw);
+void atl1e_hw_set_mac_addr(struct atl1e_hw *hw);
+bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value);
+bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value);
+s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw);
+s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw);
+s32 atl1e_phy_init(struct atl1e_hw *hw);
+int atl1e_check_eeprom_exist(struct atl1e_hw *hw);
+void atl1e_force_ps(struct atl1e_hw *hw);
+s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
+
+/* register definition */
+#define REG_PM_CTRLSTAT 0x44
+
+#define REG_PCIE_CAP_LIST 0x58
+
+#define REG_DEVICE_CAP 0x5C
+#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
+#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
+
+#define REG_DEVICE_CTRL 0x60
+#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
+#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
+#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
+#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
+
+#define REG_VPD_CAP 0x6C
+#define VPD_CAP_ID_MASK 0xff
+#define VPD_CAP_ID_SHIFT 0
+#define VPD_CAP_NEXT_PTR_MASK 0xFF
+#define VPD_CAP_NEXT_PTR_SHIFT 8
+#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
+#define VPD_CAP_VPD_ADDR_SHIFT 16
+#define VPD_CAP_VPD_FLAG 0x80000000
+
+#define REG_VPD_DATA 0x70
+
+#define REG_SPI_FLASH_CTRL 0x200
+#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
+#define SPI_FLASH_CTRL_STS_WEN 0x2
+#define SPI_FLASH_CTRL_STS_WPEN 0x80
+#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
+#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
+#define SPI_FLASH_CTRL_INS_MASK 0x7
+#define SPI_FLASH_CTRL_INS_SHIFT 8
+#define SPI_FLASH_CTRL_START 0x800
+#define SPI_FLASH_CTRL_EN_VPD 0x2000
+#define SPI_FLASH_CTRL_LDSTART 0x8000
+#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
+#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
+#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
+#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
+#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
+#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
+#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
+#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
+#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
+
+#define REG_SPI_ADDR 0x204
+
+#define REG_SPI_DATA 0x208
+
+#define REG_SPI_FLASH_CONFIG 0x20C
+#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
+#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
+#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
+#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
+#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
+
+
+#define REG_SPI_FLASH_OP_PROGRAM 0x210
+#define REG_SPI_FLASH_OP_SC_ERASE 0x211
+#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
+#define REG_SPI_FLASH_OP_RDID 0x213
+#define REG_SPI_FLASH_OP_WREN 0x214
+#define REG_SPI_FLASH_OP_RDSR 0x215
+#define REG_SPI_FLASH_OP_WRSR 0x216
+#define REG_SPI_FLASH_OP_READ 0x217
+
+#define REG_TWSI_CTRL 0x218
+#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
+#define TWSI_CTRL_LD_OFFSET_SHIFT 0
+#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
+#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
+#define TWSI_CTRL_SW_LDSTART 0x800
+#define TWSI_CTRL_HW_LDSTART 0x1000
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
+#define TWSI_CTRL_LD_EXIST 0x400000
+#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
+#define TWSI_CTRL_FREQ_SEL_100K 0
+#define TWSI_CTRL_FREQ_SEL_200K 1
+#define TWSI_CTRL_FREQ_SEL_300K 2
+#define TWSI_CTRL_FREQ_SEL_400K 3
+#define TWSI_CTRL_SMB_SLV_ADDR
+#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
+
+
+#define REG_PCIE_DEV_MISC_CTRL 0x21C
+#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
+#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
+#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
+#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
+#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
+
+#define REG_PCIE_PHYMISC 0x1000
+#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
+
+#define REG_LTSSM_TEST_MODE 0x12FC
+#define LTSSM_TEST_MODE_DEF 0xE000
+
+/* Selene Master Control Register */
+#define REG_MASTER_CTRL 0x1400
+#define MASTER_CTRL_SOFT_RST 0x1
+#define MASTER_CTRL_MTIMER_EN 0x2
+#define MASTER_CTRL_ITIMER_EN 0x4
+#define MASTER_CTRL_MANUAL_INT 0x8
+#define MASTER_CTRL_ITIMER2_EN 0x20
+#define MASTER_CTRL_INT_RDCLR 0x40
+#define MASTER_CTRL_LED_MODE 0x200
+#define MASTER_CTRL_REV_NUM_SHIFT 16
+#define MASTER_CTRL_REV_NUM_MASK 0xff
+#define MASTER_CTRL_DEV_ID_SHIFT 24
+#define MASTER_CTRL_DEV_ID_MASK 0xff
+
+/* Timer Initial Value Register */
+#define REG_MANUAL_TIMER_INIT 0x1404
+
+
+/* IRQ Moderator Timer Initial Value Register */
+#define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */
+#define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */
+
+
+#define REG_GPHY_CTRL 0x140C
+#define GPHY_CTRL_EXT_RESET 1
+#define GPHY_CTRL_PIPE_MOD 2
+#define GPHY_CTRL_TEST_MODE_MASK 3
+#define GPHY_CTRL_TEST_MODE_SHIFT 2
+#define GPHY_CTRL_BERT_START 0x10
+#define GPHY_CTRL_GATE_25M_EN 0x20
+#define GPHY_CTRL_LPW_EXIT 0x40
+#define GPHY_CTRL_PHY_IDDQ 0x80
+#define GPHY_CTRL_PHY_IDDQ_DIS 0x100
+#define GPHY_CTRL_PCLK_SEL_DIS 0x200
+#define GPHY_CTRL_HIB_EN 0x400
+#define GPHY_CTRL_HIB_PULSE 0x800
+#define GPHY_CTRL_SEL_ANA_RST 0x1000
+#define GPHY_CTRL_PHY_PLL_ON 0x2000
+#define GPHY_CTRL_PWDOWN_HW 0x4000
+#define GPHY_CTRL_DEFAULT (\
+ GPHY_CTRL_PHY_PLL_ON |\
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_HIB_EN)
+
+#define GPHY_CTRL_PW_WOL_DIS (\
+ GPHY_CTRL_PHY_PLL_ON |\
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_HIB_EN |\
+ GPHY_CTRL_PWDOWN_HW |\
+ GPHY_CTRL_PCLK_SEL_DIS |\
+ GPHY_CTRL_PHY_IDDQ)
+
+/* IRQ Anti-Lost Timer Initial Value Register */
+#define REG_CMBDISDMA_TIMER 0x140E
+
+
+/* Block IDLE Status Register */
+#define REG_IDLE_STATUS 0x1410
+#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 0: RXMAC is idling */
+#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 0: TXMAC is idling */
+#define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */
+#define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */
+#define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */
+#define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state. 0: DMAW is idling */
+#define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */
+#define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */
+
+/* MDIO Control Register */
+#define REG_MDIO_CTRL 0x1414
+#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */
+#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/
+#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */
+#define MDIO_REG_ADDR_SHIFT 16
+#define MDIO_RW 0x200000 /* 1: read, 0: write */
+#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */
+#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO master. And this bit is self cleared after one cycle*/
+#define MDIO_CLK_SEL_SHIFT 24
+#define MDIO_CLK_25_4 0
+#define MDIO_CLK_25_6 2
+#define MDIO_CLK_25_8 3
+#define MDIO_CLK_25_10 4
+#define MDIO_CLK_25_14 5
+#define MDIO_CLK_25_20 6
+#define MDIO_CLK_25_28 7
+#define MDIO_BUSY 0x8000000
+#define MDIO_AP_EN 0x10000000
+#define MDIO_WAIT_TIMES 10
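+/*
+ * An MDIO command word is assembled by OR-ing the register address
+ * (shifted by MDIO_REG_ADDR_SHIFT), MDIO_SUP_PREAMBLE, a clock selection
+ * (shifted by MDIO_CLK_SEL_SHIFT), MDIO_RW for reads, and MDIO_START;
+ * see atl1e_read_phy_reg()/atl1e_write_phy_reg() in atl1e_hw.c.
+ */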
+
+/* MII PHY Status Register */
+#define REG_PHY_STATUS 0x1418
+#define PHY_STATUS_100M 0x20000
+#define PHY_STATUS_EMI_CA 0x40000
+
+/* BIST Control and Status Register0 (for the Packet Memory) */
+#define REG_BIST0_CTRL 0x141c
+#define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */
+/* BIST process and reset to zero when BIST is done */
+#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */
+/* decoder failure or more than 1 cell stuck-to-x failure */
+#define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */
+
+/* BIST Control and Status Register1(for the retry buffer of PCI Express) */
+#define REG_BIST1_CTRL 0x1420
+#define BIST1_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */
+/* BIST process and reset to zero when BIST is done */
+#define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */
+/* decoder failure or more than 1 cell stuck-to-x failure.*/
+#define BIST1_FUSE_FLAG 0x4
+
+/* SerDes Lock Detect Control and Status Register */
+#define REG_SERDES_LOCK 0x1424
+#define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */
+#define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */
+
+/* MAC Control Register */
+#define REG_MAC_CTRL 0x1480
+#define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */
+#define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */
+#define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */
+#define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */
+#define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */
+#define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */
+#define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */
+#define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. This bit has higher priority over CRC_EN */
+#define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */
+#define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */
+#define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */
+#define MAC_CTRL_PRMLEN_MASK 0xf
+#define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */
+#define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */
+#define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */
+#define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */
+#define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */
+#define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */
+#define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */
+#define MAC_CTRL_SPEED_MASK 0x300000
+#define MAC_CTRL_SPEED_1000 2
+#define MAC_CTRL_SPEED_10_100 1
+#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */
+#define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */
+#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */
+#define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */
+#define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */
+#define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */
+
+/* MAC IPG/IFG Control Register */
+#define REG_MAC_IPG_IFG 0x1484
+#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */
+#define MAC_IPG_IFG_IPGT_MASK 0x7f
+#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */
+#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */
+#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */
+#define MAC_IPG_IFG_IPGR1_MASK 0x7f
+#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */
+#define MAC_IPG_IFG_IPGR2_MASK 0x7f
+
+/* MAC STATION ADDRESS */
+#define REG_MAC_STA_ADDR 0x1488
+
+/* Hash table for multicast address */
+#define REG_RX_HASH_TABLE 0x1490
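+/*
+ * REG_RX_HASH_TABLE is a pair of 32-bit registers treated as a single
+ * 64-bit multicast filter; atl1e_hash_set() picks the register and bit
+ * from the hash value computed by atl1e_hash_mc_addr().
+ */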
+
+
+/* MAC Half-Duplex Control Register */
+#define REG_MAC_HALF_DUPLX_CTRL 0x1498
+#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */
+#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
+#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */
+#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */
+#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */
+#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */
+#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. In unit of 8-bit time */
+
+/* Maximum Frame Length Control Register */
+#define REG_MTU 0x149c
+
+/* Wake-On-Lan control register */
+#define REG_WOL_CTRL 0x14a0
+#define WOL_PATTERN_EN 0x00000001
+#define WOL_PATTERN_PME_EN 0x00000002
+#define WOL_MAGIC_EN 0x00000004
+#define WOL_MAGIC_PME_EN 0x00000008
+#define WOL_LINK_CHG_EN 0x00000010
+#define WOL_LINK_CHG_PME_EN 0x00000020
+#define WOL_PATTERN_ST 0x00000100
+#define WOL_MAGIC_ST 0x00000200
+#define WOL_LINKCHG_ST 0x00000400
+#define WOL_CLK_SWITCH_EN 0x00008000
+#define WOL_PT0_EN 0x00010000
+#define WOL_PT1_EN 0x00020000
+#define WOL_PT2_EN 0x00040000
+#define WOL_PT3_EN 0x00080000
+#define WOL_PT4_EN 0x00100000
+#define WOL_PT5_EN 0x00200000
+#define WOL_PT6_EN 0x00400000
+/* WOL Length ( 2 DWORD ) */
+#define REG_WOL_PATTERN_LEN 0x14a4
+#define WOL_PT_LEN_MASK 0x7f
+#define WOL_PT0_LEN_SHIFT 0
+#define WOL_PT1_LEN_SHIFT 8
+#define WOL_PT2_LEN_SHIFT 16
+#define WOL_PT3_LEN_SHIFT 24
+#define WOL_PT4_LEN_SHIFT 0
+#define WOL_PT5_LEN_SHIFT 8
+#define WOL_PT6_LEN_SHIFT 16
+
+/* Internal SRAM Partition Register */
+#define REG_SRAM_TRD_ADDR 0x1518
+#define REG_SRAM_TRD_LEN 0x151C
+#define REG_SRAM_RXF_ADDR 0x1520
+#define REG_SRAM_RXF_LEN 0x1524
+#define REG_SRAM_TXF_ADDR 0x1528
+#define REG_SRAM_TXF_LEN 0x152C
+#define REG_SRAM_TCPH_ADDR 0x1530
+#define REG_SRAM_PKTH_ADDR 0x1532
+
+/* Load Ptr Register */
+/*
+ * Software sets this bit after the initialization of the head and tail
+ * addresses of all descriptors, as well as the following descriptor
+ * control register, which triggers each function block to load the head
+ * pointer to prepare for the operation. This bit is then self-cleared
+ * after one cycle.
+ */
+#define REG_LOAD_PTR 0x1534
+
+/* Descriptor Control register */
+#define REG_RXF3_BASE_ADDR_HI 0x153C
+#define REG_DESC_BASE_ADDR_HI 0x1540
+#define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */
+#define REG_HOST_RXF0_PAGE0_LO 0x1544
+#define REG_HOST_RXF0_PAGE1_LO 0x1548
+#define REG_TPD_BASE_ADDR_LO 0x154C
+#define REG_RXF1_BASE_ADDR_HI 0x1550
+#define REG_RXF2_BASE_ADDR_HI 0x1554
+#define REG_HOST_RXFPAGE_SIZE 0x1558
+#define REG_TPD_RING_SIZE 0x155C
+/* RSS about */
+#define REG_RSS_KEY0 0x14B0
+#define REG_RSS_KEY1 0x14B4
+#define REG_RSS_KEY2 0x14B8
+#define REG_RSS_KEY3 0x14BC
+#define REG_RSS_KEY4 0x14C0
+#define REG_RSS_KEY5 0x14C4
+#define REG_RSS_KEY6 0x14C8
+#define REG_RSS_KEY7 0x14CC
+#define REG_RSS_KEY8 0x14D0
+#define REG_RSS_KEY9 0x14D4
+#define REG_IDT_TABLE4 0x14E0
+#define REG_IDT_TABLE5 0x14E4
+#define REG_IDT_TABLE6 0x14E8
+#define REG_IDT_TABLE7 0x14EC
+#define REG_IDT_TABLE0 0x1560
+#define REG_IDT_TABLE1 0x1564
+#define REG_IDT_TABLE2 0x1568
+#define REG_IDT_TABLE3 0x156C
+#define REG_IDT_TABLE REG_IDT_TABLE0
+#define REG_RSS_HASH_VALUE 0x1570
+#define REG_RSS_HASH_FLAG 0x1574
+#define REG_BASE_CPU_NUMBER 0x157C
+
+
+/* TXQ Control Register */
+#define REG_TXQ_CTRL 0x1580
+#define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF
+#define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0
+#define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */
+#define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */
+#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */
+#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff
+
+/*
+ * Jumbo packet Threshold for task offload
+ * Jumbo frame threshold in QWORD unit. Packets greater than
+ * JUMBO_TASK_OFFLOAD_THRESHOLD will not be task offloaded.
+ */
+#define REG_TX_EARLY_TH 0x1584
+#define TX_TX_EARLY_TH_MASK 0x7ff
+#define TX_TX_EARLY_TH_SHIFT 0
+
+
+/* RXQ Control Register */
+#define REG_RXQ_CTRL 0x15A0
+#define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */
+#define RXQ_CTRL_PBA_ALIGN_64 1
+#define RXQ_CTRL_PBA_ALIGN_128 2
+#define RXQ_CTRL_PBA_ALIGN_256 3
+#define RXQ_CTRL_Q1_EN 0x10
+#define RXQ_CTRL_Q2_EN 0x20
+#define RXQ_CTRL_Q3_EN 0x40
+#define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80
+#define RXQ_CTRL_HASH_TLEN_SHIFT 8
+#define RXQ_CTRL_HASH_TLEN_MASK 0xFF
+#define RXQ_CTRL_HASH_TYPE_IPV4 0x10000
+#define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000
+#define RXQ_CTRL_HASH_TYPE_IPV6 0x40000
+#define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000
+#define RXQ_CTRL_RSS_MODE_DISABLE 0
+#define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000
+#define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000
+#define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000
+#define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000
+#define RXQ_CTRL_HASH_ENABLE 0x20000000
+#define RXQ_CTRL_CUT_THRU_EN 0x40000000
+#define RXQ_CTRL_EN 0x80000000
+
+/* Rx jumbo packet threshold and rrd retirement timer */
+#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4
+/*
+ * Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit.
+ * When the packet length greater than or equal to this value, RXQ
+ * shall start cut-through forwarding of the received packet.
+ */
+#define RXQ_JMBOSZ_TH_MASK 0x7ff
+#define RXQ_JMBOSZ_TH_SHIFT 0 /* RRD retirement timer. Decrement by 1 after every 512ns passes*/
+#define RXQ_JMBO_LKAH_MASK 0xf
+#define RXQ_JMBO_LKAH_SHIFT 11
+
+/* RXF flow control register */
+#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
+#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0
+#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff
+#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16
+#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff
+
+
+/* DMA Engine Control Register */
+#define REG_DMA_CTRL 0x15C0
+#define DMA_CTRL_DMAR_IN_ORDER 0x1
+#define DMA_CTRL_DMAR_ENH_ORDER 0x2
+#define DMA_CTRL_DMAR_OUT_ORDER 0x4
+#define DMA_CTRL_RCB_VALUE 0x8
+#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
+#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
+#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAR_REQ_PRI 0x400
+#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F
+#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11
+#define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF
+#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16
+#define DMA_CTRL_TXCMB_EN 0x100000
+#define DMA_CTRL_RXCMB_EN 0x200000
+
+
+/* CMB/SMB Control Register */
+#define REG_SMB_STAT_TIMER 0x15C4
+#define REG_TRIG_RRD_THRESH 0x15CA
+#define REG_TRIG_TPD_THRESH 0x15C8
+#define REG_TRIG_TXTIMER 0x15CC
+#define REG_TRIG_RXTIMER 0x15CE
+
+/* HOST RXF Page 1,2,3 address */
+#define REG_HOST_RXF1_PAGE0_LO 0x15D0
+#define REG_HOST_RXF1_PAGE1_LO 0x15D4
+#define REG_HOST_RXF2_PAGE0_LO 0x15D8
+#define REG_HOST_RXF2_PAGE1_LO 0x15DC
+#define REG_HOST_RXF3_PAGE0_LO 0x15E0
+#define REG_HOST_RXF3_PAGE1_LO 0x15E4
+
+/* Mail box */
+#define REG_MB_RXF1_RADDR 0x15B4
+#define REG_MB_RXF2_RADDR 0x15B8
+#define REG_MB_RXF3_RADDR 0x15BC
+#define REG_MB_TPD_PROD_IDX 0x15F0
+
+/* RXF-Page 0-3 PageNo & Valid bit */
+#define REG_HOST_RXF0_PAGE0_VLD 0x15F4
+#define HOST_RXF_VALID 1
+#define HOST_RXF_PAGENO_SHIFT 1
+#define HOST_RXF_PAGENO_MASK 0x7F
+#define REG_HOST_RXF0_PAGE1_VLD 0x15F5
+#define REG_HOST_RXF1_PAGE0_VLD 0x15F6
+#define REG_HOST_RXF1_PAGE1_VLD 0x15F7
+#define REG_HOST_RXF2_PAGE0_VLD 0x15F8
+#define REG_HOST_RXF2_PAGE1_VLD 0x15F9
+#define REG_HOST_RXF3_PAGE0_VLD 0x15FA
+#define REG_HOST_RXF3_PAGE1_VLD 0x15FB
+
+/* Interrupt Status Register */
+#define REG_ISR 0x1600
+#define ISR_SMB 1
+#define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */
+/*
+ * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set
+ * in Table 51 Selene Master Control Register (Offset 0x1400).
+ */
+#define ISR_MANUAL 4
+#define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */
+#define ISR_HOST_RXF0_OV 0x10
+#define ISR_HOST_RXF1_OV 0x20
+#define ISR_HOST_RXF2_OV 0x40
+#define ISR_HOST_RXF3_OV 0x80
+#define ISR_TXF_UN 0x100
+#define ISR_RX0_PAGE_FULL 0x200
+#define ISR_DMAR_TO_RST 0x400
+#define ISR_DMAW_TO_RST 0x800
+#define ISR_GPHY 0x1000
+#define ISR_TX_CREDIT 0x2000
+#define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */
+#define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */
+#define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */
+#define ISR_TX_DMA 0x40000
+#define ISR_RX_PKT_1 0x80000
+#define ISR_RX_PKT_2 0x100000
+#define ISR_RX_PKT_3 0x200000
+#define ISR_MAC_RX 0x400000
+#define ISR_MAC_TX 0x800000
+#define ISR_UR_DETECTED 0x1000000
+#define ISR_FERR_DETECTED 0x2000000
+#define ISR_NFERR_DETECTED 0x4000000
+#define ISR_CERR_DETECTED 0x8000000
+#define ISR_PHY_LINKDOWN 0x10000000
+#define ISR_DIS_INT 0x80000000
+
+
+/* Interrupt Mask Register */
+#define REG_IMR 0x1604
+
+
+#define IMR_NORMAL_MASK (\
+ ISR_SMB |\
+ ISR_TXF_UN |\
+ ISR_HW_RXF_OV |\
+ ISR_HOST_RXF0_OV|\
+ ISR_MANUAL |\
+ ISR_GPHY |\
+ ISR_GPHY_LPW |\
+ ISR_DMAR_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_PHY_LINKDOWN|\
+ ISR_RX_PKT |\
+ ISR_TX_PKT)
+
+#define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT)
+#define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT)
+
+#define REG_MAC_RX_STATUS_BIN 0x1700
+#define REG_MAC_RX_STATUS_END 0x175c
+#define REG_MAC_TX_STATUS_BIN 0x1760
+#define REG_MAC_TX_STATUS_END 0x17c0
+
+/* Hardware Offset Register */
+#define REG_HOST_RXF0_PAGEOFF 0x1800
+#define REG_TPD_CONS_IDX 0x1804
+#define REG_HOST_RXF1_PAGEOFF 0x1808
+#define REG_HOST_RXF2_PAGEOFF 0x180C
+#define REG_HOST_RXF3_PAGEOFF 0x1810
+
+/* RXF-Page 0-3 Offset DMA Address */
+#define REG_HOST_RXF0_MB0_LO 0x1820
+#define REG_HOST_RXF0_MB1_LO 0x1824
+#define REG_HOST_RXF1_MB0_LO 0x1828
+#define REG_HOST_RXF1_MB1_LO 0x182C
+#define REG_HOST_RXF2_MB0_LO 0x1830
+#define REG_HOST_RXF2_MB1_LO 0x1834
+#define REG_HOST_RXF3_MB0_LO 0x1838
+#define REG_HOST_RXF3_MB1_LO 0x183C
+
+/* Tpd CMB DMA Address */
+#define REG_HOST_TX_CMB_LO 0x1840
+#define REG_HOST_SMB_ADDR_LO 0x1844
+
+/* DEBUG ADDR */
+#define REG_DEBUG_DATA0 0x1900
+#define REG_DEBUG_DATA1 0x1904
+
+/***************************** MII definition ***************************************/
+/* PHY Common Register */
+#define MII_BMCR 0x00
+#define MII_BMSR 0x01
+#define MII_PHYSID1 0x02
+#define MII_PHYSID2 0x03
+#define MII_ADVERTISE 0x04
+#define MII_LPA 0x05
+#define MII_EXPANSION 0x06
+#define MII_AT001_CR 0x09
+#define MII_AT001_SR 0x0A
+#define MII_AT001_ESR 0x0F
+#define MII_AT001_PSCR 0x10
+#define MII_AT001_PSSR 0x11
+#define MII_INT_CTRL 0x12
+#define MII_INT_STATUS 0x13
+#define MII_SMARTSPEED 0x14
+#define MII_RERRCOUNTER 0x15
+#define MII_SREVISION 0x16
+#define MII_RESV1 0x17
+#define MII_LBRERROR 0x18
+#define MII_PHYADDR 0x19
+#define MII_RESV2 0x1a
+#define MII_TPISTATUS 0x1b
+#define MII_NCONFIG 0x1c
+
+#define MII_DBG_ADDR 0x1D
+#define MII_DBG_DATA 0x1E
+
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_MASK 0x2040
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Link partner ability register. */
+#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
+#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
+#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
+#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
+#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
+#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
+#define MII_LPA_PAUSE 0x0400 /* PAUSE */
+#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
+#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
+#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
+#define MII_LPA_NPAGE 0x8000 /* Next page bit */
+
+/* Autoneg Advertisement Register */
+#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
+#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+#define MII_AR_SPEED_MASK 0x01E0
+#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
+
+/* 1000BASE-T Control Register */
+#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
+/* 0=DTE device */
+#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+/* 0=Configure PHY as Slave */
+#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+/* 0=Automatic Master/Slave config */
+#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
+
+/* 1000BASE-T Status Register */
+#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
+#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+
+/* Extended Status Register */
+#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+/* AT001 PHY Specific Control Register */
+#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008
+#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
+ * 0=CLK125 toggling
+ */
+#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+/* Manual MDI configuration */
+#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
+ * 100BASE-TX/10BASE-T:
+ * MDI Mode
+ */
+#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
+#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+/* 1=Enable Extended 10BASE-T distance
+ * (Lower 10BASE-T RX Threshold)
+ * 0=Normal 10BASE-T RX Threshold */
+#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100
+/* 1=5-Bit interface in 100BASE-TX
+ * 0=MII interface in 100BASE-TX */
+#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5
+#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+/* AT001 PHY Specific Status Register */
+#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#endif /*_ATHL1E_HW_H_*/
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
new file mode 100644
index 000000000000..35264c244cfd
--- /dev/null
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -0,0 +1,2599 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "atl1e.h"
+
+#define DRV_VERSION "1.0.0.7-NAPI"
+
+char atl1e_driver_name[] = "ATL1E";
+char atl1e_driver_version[] = DRV_VERSION;
+#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
+/*
+ * atl1e_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id atl1e_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
+ /* required last entry */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);
+
+MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
+MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
+
+static const u16
+atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
+{
+ {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD},
+ {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD},
+ {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD},
+ {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD}
+};
+
+static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] =
+{
+ REG_RXF0_BASE_ADDR_HI,
+ REG_RXF1_BASE_ADDR_HI,
+ REG_RXF2_BASE_ADDR_HI,
+ REG_RXF3_BASE_ADDR_HI
+};
+
+static const u16
+atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
+{
+ {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO},
+ {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO},
+ {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO},
+ {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO}
+};
+
+static const u16
+atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
+{
+ {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO},
+ {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO},
+ {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO},
+ {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO}
+};
+
+static const u16 atl1e_pay_load_size[] = {
+ 128, 256, 512, 1024, 2048, 4096,
+};
+
+/*
+ * atl1e_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
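+ *
+ * irq_sem counts nested disable requests; the interrupt mask is only
+ * restored once the count drops back to zero.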
+ */
+static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
+{
+ if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
+ AT_WRITE_FLUSH(&adapter->hw);
+ }
+}
+
+/*
+ * atl1e_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
+{
+ atomic_inc(&adapter->irq_sem);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ AT_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/*
+ * atl1e_irq_reset - reset interrupt configuration on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
+{
+ atomic_set(&adapter->irq_sem, 0);
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ AT_WRITE_FLUSH(&adapter->hw);
+}
+
+/*
+ * atl1e_phy_config - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl1e_phy_config(unsigned long data)
+{
+ struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+ struct atl1e_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ atl1e_restart_autoneg(hw);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+}
+
+void atl1e_reinit_locked(struct atl1e_adapter *adapter)
+{
+
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+ atl1e_down(adapter);
+ atl1e_up(adapter);
+ clear_bit(__AT_RESETTING, &adapter->flags);
+}
+
+static void atl1e_reset_task(struct work_struct *work)
+{
+ struct atl1e_adapter *adapter;
+ adapter = container_of(work, struct atl1e_adapter, reset_task);
+
+ atl1e_reinit_locked(adapter);
+}
+
+static int atl1e_check_link(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+ u16 speed, duplex, phy_data;
+
+ /* MII_BMSR must be read twice; the link status bit is latched */
+ atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if ((phy_data & BMSR_LSTATUS) == 0) {
+ /* link down */
+ if (netif_carrier_ok(netdev)) { /* old link state: Up */
+ u32 value;
+ /* disable rx */
+ value = AT_READ_REG(hw, REG_MAC_CTRL);
+ value &= ~MAC_CTRL_RX_EN;
+ AT_WRITE_REG(hw, REG_MAC_CTRL, value);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ /* Link Up */
+ err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
+ if (unlikely(err))
+ return err;
+
+ /* adopt the negotiated link result as our current setting */
+ if (adapter->link_speed != speed ||
+ adapter->link_duplex != duplex) {
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+ atl1e_setup_mac_ctrl(adapter);
+ dev_info(&pdev->dev,
+ "%s: %s NIC Link is Up<%d Mbps %s>\n",
+ atl1e_driver_name, netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex");
+ }
+
+ if (!netif_carrier_ok(netdev)) {
+ /* Link down -> Up */
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+ return 0;
+}
+
+/*
+ * atl1e_link_chg_task - deal with link change events outside of interrupt context
+ * @netdev: network interface device structure
+ */
+static void atl1e_link_chg_task(struct work_struct *work)
+{
+ struct atl1e_adapter *adapter;
+ unsigned long flags;
+
+ adapter = container_of(work, struct atl1e_adapter, link_chg_task);
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ atl1e_check_link(adapter);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+}
+
+static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data = 0;
+ u16 link_up = 0;
+
+ spin_lock(&adapter->mdio_lock);
+ atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->mdio_lock);
+ link_up = phy_data & BMSR_LSTATUS;
+ /* notify upper layer link down ASAP */
+ if (!link_up) {
+ if (netif_carrier_ok(netdev)) {
+ /* old link state: Up */
+ dev_info(&pdev->dev, "%s: %s NIC Link is Down\n",
+ atl1e_driver_name, netdev->name);
+ adapter->link_speed = SPEED_0;
+ netif_stop_queue(netdev);
+ }
+ }
+ schedule_work(&adapter->link_chg_task);
+}
+
+static void atl1e_del_timer(struct atl1e_adapter *adapter)
+{
+ del_timer_sync(&adapter->phy_config_timer);
+}
+
+static void atl1e_cancel_work(struct atl1e_adapter *adapter)
+{
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->link_chg_task);
+}
+
+/*
+ * atl1e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl1e_tx_timeout(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+/*
+ * atl1e_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl1e_set_multi(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ struct dev_mc_list *mc_ptr;
+ u32 mac_ctrl_data = 0;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
+ mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
+ } else {
+ mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+ }
+
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+
+ /* clear the old settings from the multicast hash table */
+ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+ /* compute each mc address's hash value and put it into the hash table */
+ for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ atl1e_hash_set(hw, hash_value);
+ }
+}
+
+static void atl1e_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ u32 mac_ctrl_data = 0;
+
+ dev_dbg(&pdev->dev, "atl1e_vlan_rx_register\n");
+
+ atl1e_irq_disable(adapter);
+
+ adapter->vlgrp = grp;
+ mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+
+ if (grp) {
+ /* enable VLAN tag insert/strip */
+ mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
+ }
+
+ AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ atl1e_irq_enable(adapter);
+}
+
+static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_dbg(&pdev->dev, "atl1e_restore_vlan !");
+ atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+}
+/*
+ * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atl1e_hw_set_mac_addr(&adapter->hw);
+
+ return 0;
+}
+
+/*
+ * atl1e_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ int old_mtu = netdev->mtu;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ return -EINVAL;
+ }
+ /* set MTU */
+ if (old_mtu != new_mtu && netif_running(netdev)) {
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+ netdev->mtu = new_mtu;
+ adapter->hw.max_frame_size = new_mtu;
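+ /* rx_jumbo_th is in 8-byte (QWORD) units; round the frame size up */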
+ adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
+ atl1e_down(adapter);
+ atl1e_up(adapter);
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ }
+ return 0;
+}
+
+/*
+ * caller should hold mdio_lock
+ */
+static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ u16 result;
+
+ atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
+ return result;
+}
+
+static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
+ int reg_num, int val)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
+}
+
+/*
+ * atl1e_mii_ioctl - handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
+ * @netdev: network interface device structure
+ * @ifr: pointer to the MII ioctl request
+ * @cmd: ioctl command
+ */
+static int atl1e_mii_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+ int retval = 0;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = 0;
+ break;
+
+ case SIOCGMIIREG:
+ if (!capable(CAP_NET_ADMIN)) {
+ retval = -EPERM;
+ goto out;
+ }
+ if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out)) {
+ retval = -EIO;
+ goto out;
+ }
+ break;
+
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN)) {
+ retval = -EPERM;
+ goto out;
+ }
+ if (data->reg_num & ~(0x1F)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ dev_dbg(&pdev->dev, "<atl1e_mii_ioctl> write %x %x",
+ data->reg_num, data->val_in);
+ if (atl1e_write_phy_reg(&adapter->hw,
+ data->reg_num, data->val_in)) {
+ retval = -EIO;
+ goto out;
+ }
+ break;
+
+ default:
+ retval = -EOPNOTSUPP;
+ break;
+ }
+out:
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+ return retval;
+
+}
+
+/*
+ * atl1e_ioctl - ioctl entry point
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ioctl request
+ * @cmd: ioctl command
+ */
+static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return atl1e_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void atl1e_setup_pcicmd(struct pci_dev *pdev)
+{
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
+ cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+ /*
+ * some motherboard BIOS (PXE/EFI) drivers may set PME
+ * while transferring control to the OS (Windows/Linux),
+ * so we should clear this bit before the NIC can work normally
+ */
+ pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
+ msleep(1);
+}
+
+/*
+ * atl1e_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ */
+static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
+{
+ return 0;
+}
+
+/*
+ * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl1e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 phy_status_data = 0;
+
+ adapter->wol = 0;
+ adapter->link_speed = SPEED_0; /* hardware init */
+ adapter->link_duplex = FULL_DUPLEX;
+ adapter->num_rx_queues = 1;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
+ /* nic type */
+ if (hw->revision_id >= 0xF0) {
+ hw->nic_type = athr_l2e_revB;
+ } else {
+ if (phy_status_data & PHY_STATUS_100M)
+ hw->nic_type = athr_l1e;
+ else
+ hw->nic_type = athr_l2e_revA;
+ }
+
+ phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
+
+ if (phy_status_data & PHY_STATUS_EMI_CA)
+ hw->emi_ca = true;
+ else
+ hw->emi_ca = false;
+
+ hw->phy_configured = false;
+ hw->preamble_len = 7;
+ hw->max_frame_size = adapter->netdev->mtu;
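+ /* rx_jumbo_th is programmed in 8-byte units, hence the "+ 7" and ">> 3" below */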
+ hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
+ VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;
+
+ hw->rrs_type = atl1e_rrs_disable;
+ hw->indirect_tab = 0;
+ hw->base_cpu = 0;
+
+ /* the timer/threshold defaults below still need to be confirmed */
+
+ hw->ict = 50000; /* 100ms */
+ hw->smb_timer = 200000; /* 200ms */
+ hw->tpd_burst = 5;
+ hw->rrd_thresh = 1;
+ hw->tpd_thresh = adapter->tx_ring.count / 2;
+ hw->rx_count_down = 4; /* 2us resolution */
+ hw->tx_count_down = hw->imt * 4 / 3;
+ hw->dmar_block = atl1e_dma_req_1024;
+ hw->dmaw_block = atl1e_dma_req_1024;
+ hw->dmar_dly_cnt = 15;
+ hw->dmaw_dly_cnt = 4;
+
+ if (atl1e_alloc_queues(adapter)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&adapter->irq_sem, 1);
+ spin_lock_init(&adapter->mdio_lock);
+ spin_lock_init(&adapter->tx_lock);
+
+ set_bit(__AT_DOWN, &adapter->flags);
+
+ return 0;
+}
+
+/*
+ * atl1e_clean_tx_ring - Free Tx-skb
+ * @adapter: board private structure
+ */
+static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
+ &adapter->tx_ring;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 index, ring_count;
+
+ if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
+ return;
+
+ ring_count = tx_ring->count;
+ /* first unmap any DMA mappings */
+ for (index = 0; index < ring_count; index++) {
+ tx_buffer = &tx_ring->tx_buffer[index];
+ if (tx_buffer->dma) {
+ pci_unmap_page(pdev, tx_buffer->dma,
+ tx_buffer->length, PCI_DMA_TODEVICE);
+ tx_buffer->dma = 0;
+ }
+ }
+ /* then free any remaining skbs */
+ for (index = 0; index < ring_count; index++) {
+ tx_buffer = &tx_ring->tx_buffer[index];
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ }
+ }
+ /* Zero out Tx-buffers */
+ memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
+ ring_count);
+ memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
+ ring_count);
+}
+
+/*
+ * atl1e_clean_rx_ring - Zero out the receive page buffers
+ * @adapter: board private structure
+ */
+static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
+{
+ struct atl1e_rx_ring *rx_ring =
+ (struct atl1e_rx_ring *)&adapter->rx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
+ u16 i, j;
+
+
+ if (adapter->ring_vir_addr == NULL)
+ return;
+ /* Zero out the RX pages */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ if (rx_page_desc[i].rx_page[j].addr != NULL) {
+ memset(rx_page_desc[i].rx_page[j].addr, 0,
+ rx_ring->real_page_size);
+ }
+ }
+ }
+}
+
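+/*
+ * All rings share one contiguous DMA allocation: the TPD ring
+ * (8-byte aligned), the RXF pages of every queue (32-byte aligned),
+ * then one TX CMB dword plus one write-offset dword per RXF page.
+ * atl1e_cal_ring_size() adds the worst-case alignment padding between
+ * those regions.
+ */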
+static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
+{
+ *ring_size = ((u32)(adapter->tx_ring.count *
+ sizeof(struct atl1e_tpd_desc) + 7
+ /* tx ring, qword align */
+ + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
+ adapter->num_rx_queues + 31
+ /* rx ring, 32 bytes align */
+ + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) *
+ sizeof(u32) + 3));
+ /* tx, rx cmd, dword align */
+}
+
+static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = NULL;
+ struct atl1e_rx_ring *rx_ring = NULL;
+
+ tx_ring = &adapter->tx_ring;
+ rx_ring = &adapter->rx_ring;
+
+ rx_ring->real_page_size = adapter->rx_ring.page_size
+ + adapter->hw.max_frame_size
+ + ETH_HLEN + VLAN_HLEN
+ + ETH_FCS_LEN;
+ rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
+ atl1e_cal_ring_size(adapter, &adapter->ring_size);
+
+ adapter->ring_vir_addr = NULL;
+ adapter->rx_ring.desc = NULL;
+ rwlock_init(&adapter->tx_ring.tx_lock);
+
+ return;
+}
+
+/*
+ * atl1e_init_ring_ptrs - initialize the ring read/write pointers
+ */
+static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = NULL;
+ struct atl1e_rx_ring *rx_ring = NULL;
+ struct atl1e_rx_page_desc *rx_page_desc = NULL;
+ int i, j;
+
+ tx_ring = &adapter->tx_ring;
+ rx_ring = &adapter->rx_ring;
+ rx_page_desc = rx_ring->rx_page_desc;
+
+ tx_ring->next_to_use = 0;
+ atomic_set(&tx_ring->next_to_clean, 0);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_page_desc[i].rx_using = 0;
+ rx_page_desc[i].rx_nxseq = 0;
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ *rx_page_desc[i].rx_page[j].write_offset_addr = 0;
+ rx_page_desc[i].rx_page[j].read_offset = 0;
+ }
+ }
+}
+
+/*
+ * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit and receive software resources
+ */
+static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ atl1e_clean_tx_ring(adapter);
+ atl1e_clean_rx_ring(adapter);
+
+ if (adapter->ring_vir_addr) {
+ pci_free_consistent(pdev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
+ adapter->ring_vir_addr = NULL;
+ }
+
+ if (adapter->tx_ring.tx_buffer) {
+ kfree(adapter->tx_ring.tx_buffer);
+ adapter->tx_ring.tx_buffer = NULL;
+ }
+}
+
+/*
+ * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1e_tx_ring *tx_ring;
+ struct atl1e_rx_ring *rx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc;
+ int size, i, j;
+ u32 offset = 0;
+ int err = 0;
+
+ if (adapter->ring_vir_addr != NULL)
+ return 0; /* already allocated */
+
+ tx_ring = &adapter->tx_ring;
+ rx_ring = &adapter->rx_ring;
+
+ /* real ring DMA buffer */
+
+ size = adapter->ring_size;
+ adapter->ring_vir_addr = pci_alloc_consistent(pdev,
+ adapter->ring_size, &adapter->ring_dma);
+
+ if (adapter->ring_vir_addr == NULL) {
+ dev_err(&pdev->dev, "pci_alloc_consistent failed, "
+ "size = D%d", size);
+ return -ENOMEM;
+ }
+
+ memset(adapter->ring_vir_addr, 0, adapter->ring_size);
+
+ rx_page_desc = rx_ring->rx_page_desc;
+
+ /* Init TPD Ring */
+ tx_ring->dma = roundup(adapter->ring_dma, 8);
+ offset = tx_ring->dma - adapter->ring_dma;
+ tx_ring->desc = (struct atl1e_tpd_desc *)
+ (adapter->ring_vir_addr + offset);
+ size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
+ tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
+ if (tx_ring->tx_buffer == NULL) {
+ dev_err(&pdev->dev, "kzalloc failed , size = D%d", size);
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Init RXF-Pages */
+ offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
+ offset = roundup(offset, 32);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ rx_page_desc[i].rx_page[j].dma =
+ adapter->ring_dma + offset;
+ rx_page_desc[i].rx_page[j].addr =
+ adapter->ring_vir_addr + offset;
+ offset += rx_ring->real_page_size;
+ }
+ }
+
+ /* Init CMB dma address */
+ tx_ring->cmb_dma = adapter->ring_dma + offset;
+ tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
+ offset += sizeof(u32);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ rx_page_desc[i].rx_page[j].write_offset_dma =
+ adapter->ring_dma + offset;
+ rx_page_desc[i].rx_page[j].write_offset_addr =
+ adapter->ring_vir_addr + offset;
+ offset += sizeof(u32);
+ }
+ }
+
+ if (unlikely(offset > adapter->ring_size)) {
+ dev_err(&pdev->dev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
+ err = -1;
+ goto failed;
+ }
+
+ return 0;
+failed:
+ if (adapter->ring_vir_addr != NULL) {
+ pci_free_consistent(pdev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
+ adapter->ring_vir_addr = NULL;
+ }
+ return err;
+}
+
+static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
+{
+
+ struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_rx_ring *rx_ring =
+ (struct atl1e_rx_ring *)&adapter->rx_ring;
+ struct atl1e_tx_ring *tx_ring =
+ (struct atl1e_tx_ring *)&adapter->tx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc = NULL;
+ int i, j;
+
+ AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
+ (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
+ AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
+ (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
+ AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
+ AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
+ (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));
+
+ rx_page_desc = rx_ring->rx_page_desc;
+ /* RXF Page Physical address / Page Length */
+ for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
+ AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
+ (u32)((adapter->ring_dma &
+ AT_DMA_HI_ADDR_MASK) >> 32));
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ u32 page_phy_addr;
+ u32 offset_phy_addr;
+
+ page_phy_addr = rx_page_desc[i].rx_page[j].dma;
+ offset_phy_addr =
+ rx_page_desc[i].rx_page[j].write_offset_dma;
+
+ AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
+ page_phy_addr & AT_DMA_LO_ADDR_MASK);
+ AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
+ offset_phy_addr & AT_DMA_LO_ADDR_MASK);
+ AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
+ }
+ }
+ /* Page Length */
+ AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
+ /* Load all of base address above */
+ AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
+
+ return;
+}
+
+static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ u32 dev_ctrl_data = 0;
+ u32 max_pay_load = 0;
+ u32 jumbo_thresh = 0;
+ u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
+
+ /* configure TXQ param */
+ if (hw->nic_type != athr_l2e_revB) {
+ extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+ if (hw->max_frame_size <= 1500) {
+ jumbo_thresh = hw->max_frame_size + extra_size;
+ } else if (hw->max_frame_size < 6*1024) {
+ jumbo_thresh =
+ (hw->max_frame_size + extra_size) * 2 / 3;
+ } else {
+ jumbo_thresh = (hw->max_frame_size + extra_size) / 2;
+ }
+ AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
+ }
+
+ dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);
+
+ max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
+ DEVICE_CTRL_MAX_PAYLOAD_MASK;
+
+ hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+
+ max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
+ DEVICE_CTRL_MAX_RREQ_SZ_MASK;
+ hw->dmar_block = min(max_pay_load, hw->dmar_block);
+
+ if (hw->nic_type != athr_l2e_revB)
+ AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
+ atl1e_pay_load_size[hw->dmar_block]);
+ /* enable TXQ */
+ AT_WRITE_REGW(hw, REG_TXQ_CTRL,
+ (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
+ << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
+ | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
+ return;
+}
+
+static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ u32 rxf_len = 0;
+ u32 rxf_low = 0;
+ u32 rxf_high = 0;
+ u32 rxf_thresh_data = 0;
+ u32 rxq_ctrl_data = 0;
+
+ if (hw->nic_type != athr_l2e_revB) {
+ AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
+ (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) <<
+ RXQ_JMBOSZ_TH_SHIFT |
+ (1 & RXQ_JMBO_LKAH_MASK) <<
+ RXQ_JMBO_LKAH_SHIFT));
+
+ rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
+ rxf_high = rxf_len * 4 / 5;
+ rxf_low = rxf_len / 5;
+ rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
+ << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+ ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
+ << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+
+ AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
+ }
+
+ /* RRS */
+ AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
+ AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
+
+ if (hw->rrs_type & atl1e_rrs_ipv4)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4;
+
+ if (hw->rrs_type & atl1e_rrs_ipv4_tcp)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP;
+
+ if (hw->rrs_type & atl1e_rrs_ipv6)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6;
+
+ if (hw->rrs_type & atl1e_rrs_ipv6_tcp)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP;
+
+ if (hw->rrs_type != atl1e_rrs_disable)
+ rxq_ctrl_data |=
+ (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT);
+
+ rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 |
+ RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
+
+ AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
+ return;
+}
+
+static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 dma_ctrl_data = 0;
+
+ dma_ctrl_data = DMA_CTRL_RXCMB_EN;
+ dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
+ dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
+ << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
+ dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
+ dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
+ << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
+ dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
+ << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
+
+ AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
+ return;
+}
+
+static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
+{
+ u32 value;
+ struct atl1e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ /* Config MAC CTRL Register */
+ value = MAC_CTRL_TX_EN |
+ MAC_CTRL_RX_EN ;
+
+ if (FULL_DUPLEX == adapter->link_duplex)
+ value |= MAC_CTRL_DUPLX;
+
+ value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
+ MAC_CTRL_SPEED_SHIFT);
+ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+
+ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+ value |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+
+ if (adapter->vlgrp)
+ value |= MAC_CTRL_RMV_VLAN;
+
+ value |= MAC_CTRL_BC_EN;
+ if (netdev->flags & IFF_PROMISC)
+ value |= MAC_CTRL_PROMIS_EN;
+ if (netdev->flags & IFF_ALLMULTI)
+ value |= MAC_CTRL_MC_ALL_EN;
+
+ AT_WRITE_REG(hw, REG_MAC_CTRL, value);
+}
+
+/*
+ * atl1e_configure - Configure Transmit&Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx /Rx unit of the MAC after a reset.
+ */
+static int atl1e_configure(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ u32 intr_status_data = 0;
+
+ /* clear interrupt status */
+ AT_WRITE_REG(hw, REG_ISR, ~0);
+
+ /* 1. set MAC Address */
+ atl1e_hw_set_mac_addr(hw);
+
+ /* 2. Init the Multicast HASH table done by set_muti */
+
+ /* 3. Clear any WOL status */
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+ /* 4. Descriptor ring base/length and read/write pointers:
+ * the TPD ring, SMB and RXF0 page CMBs all share the same
+ * high 32 bits of the DMA address */
+ atl1e_configure_des_ring(adapter);
+
+ /* 5. set Interrupt Moderator Timer */
+ AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
+ AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
+ AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
+ MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);
+
+ /* 6. rx/tx threshold to trig interrupt */
+ AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
+ AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
+ AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
+ AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);
+
+ /* 7. set Interrupt Clear Timer */
+ AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);
+
+ /* 8. set MTU */
+ AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
+ VLAN_HLEN + ETH_FCS_LEN);
+
+ /* 9. config TXQ early tx threshold */
+ atl1e_configure_tx(adapter);
+
+ /* 10. config RXQ */
+ atl1e_configure_rx(adapter);
+
+ /* 11. config DMA Engine */
+ atl1e_configure_dma(adapter);
+
+ /* 12. smb timer to trig interrupt */
+ AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer);
+
+ intr_status_data = AT_READ_REG(hw, REG_ISR);
+ if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
+ dev_err(&pdev->dev, "atl1e_configure failed,"
+ "PCIE phy link down\n");
+ return -1;
+ }
+
+ AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
+ return 0;
+}
+
+/*
+ * atl1e_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
+ struct net_device_stats *net_stats = &adapter->net_stats;
+
+ net_stats->rx_packets = hw_stats->rx_ok;
+ net_stats->tx_packets = hw_stats->tx_ok;
+ net_stats->rx_bytes = hw_stats->rx_byte_cnt;
+ net_stats->tx_bytes = hw_stats->tx_byte_cnt;
+ net_stats->multicast = hw_stats->rx_mcast;
+ net_stats->collisions = hw_stats->tx_1_col +
+ hw_stats->tx_2_col * 2 +
+ hw_stats->tx_late_col + hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err + hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
+ net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
+ net_stats->rx_length_errors = hw_stats->rx_len_err;
+ net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
+ net_stats->rx_frame_errors = hw_stats->rx_align_err;
+ net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+
+ net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+
+ net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
+ hw_stats->tx_underrun + hw_stats->tx_trunc;
+ net_stats->tx_fifo_errors = hw_stats->tx_underrun;
+ net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
+ net_stats->tx_window_errors = hw_stats->tx_late_col;
+
+ return &adapter->net_stats;
+}
+
+static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
+{
+ u16 hw_reg_addr = 0;
+ unsigned long *stats_item = NULL;
+
+ /* update rx status */
+ hw_reg_addr = REG_MAC_RX_STATUS_BIN;
+ stats_item = &adapter->hw_stats.rx_ok;
+ while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
+ *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
+ stats_item++;
+ hw_reg_addr += 4;
+ }
+ /* update tx status */
+ hw_reg_addr = REG_MAC_TX_STATUS_BIN;
+ stats_item = &adapter->hw_stats.tx_ok;
+ while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
+ *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
+ stats_item++;
+ hw_reg_addr += 4;
+ }
+}
+
+static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
+{
+ u16 phy_data;
+
+ spin_lock(&adapter->mdio_lock);
+ atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
+ spin_unlock(&adapter->mdio_lock);
+}
+
+static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
+ &adapter->tx_ring;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+ u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
+ u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
+
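+ /* reclaim buffers from our last clean position up to the hardware consumer index */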
+ while (next_to_clean != hw_next_to_clean) {
+ tx_buffer = &tx_ring->tx_buffer[next_to_clean];
+ if (tx_buffer->dma) {
+ pci_unmap_page(adapter->pdev, tx_buffer->dma,
+ tx_buffer->length, PCI_DMA_TODEVICE);
+ tx_buffer->dma = 0;
+ }
+
+ if (tx_buffer->skb) {
+ dev_kfree_skb_irq(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ }
+
+ if (++next_to_clean == tx_ring->count)
+ next_to_clean = 0;
+ }
+
+ atomic_set(&tx_ring->next_to_clean, next_to_clean);
+
+ if (netif_queue_stopped(adapter->netdev) &&
+ netif_carrier_ok(adapter->netdev)) {
+ netif_wake_queue(adapter->netdev);
+ }
+
+ return true;
+}
+
+/*
+ * atl1e_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t atl1e_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1e_hw *hw = &adapter->hw;
+ int max_ints = AT_MAX_INT_WORK;
+ int handled = IRQ_NONE;
+ u32 status;
+
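+ /* service interrupt causes until none remain or the work budget runs out */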
+ do {
+ status = AT_READ_REG(hw, REG_ISR);
+ if ((status & IMR_NORMAL_MASK) == 0 ||
+ (status & ISR_DIS_INT) != 0) {
+ if (max_ints != AT_MAX_INT_WORK)
+ handled = IRQ_HANDLED;
+ break;
+ }
+ /* link event */
+ if (status & ISR_GPHY)
+ atl1e_clear_phy_int(adapter);
+ /* Ack ISR */
+ AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
+
+ handled = IRQ_HANDLED;
+ /* check if PCIE PHY Link down */
+ if (status & ISR_PHY_LINKDOWN) {
+ dev_err(&pdev->dev,
+ "pcie phy linkdown %x\n", status);
+ if (netif_running(adapter->netdev)) {
+ /* reset MAC */
+ atl1e_irq_reset(adapter);
+ schedule_work(&adapter->reset_task);
+ break;
+ }
+ }
+
+ /* check if DMA read/write error */
+ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+ dev_err(&pdev->dev,
+ "PCIE DMA RW error (status = 0x%x)\n",
+ status);
+ atl1e_irq_reset(adapter);
+ schedule_work(&adapter->reset_task);
+ break;
+ }
+
+ if (status & ISR_SMB)
+ atl1e_update_hw_stats(adapter);
+
+ /* link event */
+ if (status & (ISR_GPHY | ISR_MANUAL)) {
+ adapter->net_stats.tx_carrier_errors++;
+ atl1e_link_chg_event(adapter);
+ break;
+ }
+
+ /* transmit event */
+ if (status & ISR_TX_EVENT)
+ atl1e_clean_tx_irq(adapter);
+
+ if (status & ISR_RX_EVENT) {
+ /*
+ * disable rx interrupts, without
+ * the synchronize_irq bit
+ */
+ AT_WRITE_REG(hw, REG_IMR,
+ IMR_NORMAL_MASK & ~ISR_RX_EVENT);
+ AT_WRITE_FLUSH(hw);
+ if (likely(netif_rx_schedule_prep(netdev,
+ &adapter->napi)))
+ __netif_rx_schedule(netdev, &adapter->napi);
+ }
+ } while (--max_ints > 0);
+ /* re-enable Interrupt*/
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+
+ return handled;
+}
+
+static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
+{
+ u8 *packet = (u8 *)(prrs + 1);
+ struct iphdr *iph;
+ u16 head_len = ETH_HLEN;
+ u16 pkt_flags;
+ u16 err_flags;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ pkt_flags = prrs->pkt_flag;
+ err_flags = prrs->err_flag;
+ if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
+ ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
+ if (pkt_flags & RRS_IS_IPV4) {
+ if (pkt_flags & RRS_IS_802_3)
+ head_len += 8;
+ iph = (struct iphdr *) (packet + head_len);
+ if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
+ goto hw_xsum;
+ }
+ if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+ }
+
+hw_xsum:
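+ /* leave ip_summed as CHECKSUM_NONE so the stack verifies the checksum in software */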
+ return;
+}
+
+static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
+ u8 que)
+{
+ struct atl1e_rx_page_desc *rx_page_desc =
+ (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
+ u8 rx_using = rx_page_desc[que].rx_using;
+
+ return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
+}
+
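+/*
+ * Receive path: the hardware copies each packet, preceded by an
+ * atl1e_recv_ret_status descriptor, into one of the two RXF pages of a
+ * queue and advances that page's write offset. The driver consumes
+ * packets until read_offset catches up with the write offset; when a
+ * page is exhausted it is handed back to the hardware (valid bit set)
+ * and the driver switches to the other page.
+ */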
+static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
+ int *work_done, int work_to_do)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
+ &adapter->rx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc =
+ (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
+ struct sk_buff *skb = NULL;
+ struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
+ u32 packet_size, write_offset;
+ struct atl1e_recv_ret_status *prrs;
+
+ write_offset = *(rx_page->write_offset_addr);
+ if (likely(rx_page->read_offset < write_offset)) {
+ do {
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ /* get new packet's rrs */
+ prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
+ rx_page->read_offset);
+ /* check sequence number */
+ if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
+ dev_err(&pdev->dev,
+ "rx sequence number"
+ " error (rx=%d) (expect=%d)\n",
+ prrs->seq_num,
+ rx_page_desc[que].rx_nxseq);
+ rx_page_desc[que].rx_nxseq++;
+ /* just for debug use */
+ AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
+ (((u32)prrs->seq_num) << 16) |
+ rx_page_desc[que].rx_nxseq);
+ goto fatal_err;
+ }
+ rx_page_desc[que].rx_nxseq++;
+
+ /* error packet */
+ if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+ if (prrs->err_flag & (RRS_ERR_BAD_CRC |
+ RRS_ERR_DRIBBLE | RRS_ERR_CODE |
+ RRS_ERR_TRUNC)) {
+ /* hardware error, discard this packet*/
+ dev_err(&pdev->dev,
+ "rx packet desc error %x\n",
+ *((u32 *)prrs + 1));
+ goto skip_pkt;
+ }
+ }
+
+ packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
+ RRS_PKT_SIZE_MASK) - 4; /* CRC */
+ skb = netdev_alloc_skb(netdev,
+ packet_size + NET_IP_ALIGN);
+ if (skb == NULL) {
+ dev_warn(&pdev->dev, "%s: Memory squeeze, "
+ "deferring packet.\n", netdev->name);
+ goto skip_pkt;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = netdev;
+ memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
+ skb_put(skb, packet_size);
+ skb->protocol = eth_type_trans(skb, netdev);
+ atl1e_rx_checksum(adapter, skb, prrs);
+
+ if (unlikely(adapter->vlgrp &&
+ (prrs->pkt_flag & RRS_IS_VLAN_TAG))) {
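+ /* the hardware tag keeps the VID in its upper 12 bits and the
+ * CFI/priority bits in the low nibble; rebuild the standard TCI */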
+ u16 vlan_tag = (prrs->vtag >> 4) |
+ ((prrs->vtag & 7) << 13) |
+ ((prrs->vtag & 8) << 9);
+ dev_dbg(&pdev->dev,
+ "RXD VLAN TAG<RRD>=0x%04x\n",
+ prrs->vtag);
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+ vlan_tag);
+ } else {
+ netif_receive_skb(skb);
+ }
+
+ netdev->last_rx = jiffies;
+skip_pkt:
+ /* skip current packet whether it's ok or not. */
+ rx_page->read_offset +=
+ (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
+ RRS_PKT_SIZE_MASK) +
+ sizeof(struct atl1e_recv_ret_status) + 31) &
+ 0xFFFFFFE0);
+
+ if (rx_page->read_offset >= rx_ring->page_size) {
+ /* mark this page clean */
+ u16 reg_addr;
+ u8 rx_using;
+
+ rx_page->read_offset =
+ *(rx_page->write_offset_addr) = 0;
+ rx_using = rx_page_desc[que].rx_using;
+ reg_addr =
+ atl1e_rx_page_vld_regs[que][rx_using];
+ AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
+ rx_page_desc[que].rx_using ^= 1;
+ rx_page = atl1e_get_rx_page(adapter, que);
+ }
+ write_offset = *(rx_page->write_offset_addr);
+ } while (rx_page->read_offset < write_offset);
+ }
+
+ return;
+
+fatal_err:
+ if (!test_bit(__AT_DOWN, &adapter->flags))
+ schedule_work(&adapter->reset_task);
+}
+
+/*
+ * atl1e_clean - NAPI Rx polling callback
+ * @napi: napi context embedded in the adapter private structure
+ * @budget: maximum number of packets to process in this poll
+ */
+static int atl1e_clean(struct napi_struct *napi, int budget)
+{
+ struct atl1e_adapter *adapter =
+ container_of(napi, struct atl1e_adapter, napi);
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 imr_data;
+ int work_done = 0;
+
+ /* Keep link state information with original netdev */
+ if (!netif_carrier_ok(adapter->netdev))
+ goto quit_polling;
+
+ atl1e_clean_rx_irq(adapter, 0, &work_done, budget);
+
+ /* If no Tx and not enough Rx work done, exit the polling mode */
+ if (work_done < budget) {
+quit_polling:
+ netif_rx_complete(netdev, napi);
+ imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
+ /* debug check: NAPI poll should not run while the interface is down */
+ if (test_bit(__AT_DOWN, &adapter->flags)) {
+ atomic_dec(&adapter->irq_sem);
+ dev_err(&pdev->dev,
+ "atl1e_clean is called when AT_DOWN\n");
+ }
+ /* reenable RX intr */
+ /*atl1e_irq_enable(adapter); */
+
+ }
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void atl1e_netpoll(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ atl1e_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+ u16 next_to_use = 0;
+ u16 next_to_clean = 0;
+
+ next_to_clean = atomic_read(&tx_ring->next_to_clean);
+ next_to_use = tx_ring->next_to_use;
+
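+ /* one descriptor is always left unused so a full ring can be told from an empty one */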
+ return (u16)(next_to_clean > next_to_use) ?
+ (next_to_clean - next_to_use - 1) :
+ (tx_ring->count + next_to_clean - next_to_use - 1);
+}
+
+/*
+ * get the next usable tpd
+ * Note: the caller should use atl1e_tpd_avail() first to make sure
+ * enough TPDs are available
+ */
+static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+ u16 next_to_use = 0;
+
+ next_to_use = tx_ring->next_to_use;
+ if (++tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
+ return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
+}
+
+static struct atl1e_tx_buffer *
+atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+
+ return &tx_ring->tx_buffer[tpd - tx_ring->desc];
+}
+
+/* Calculate the number of transmit descriptors (TPDs) needed for this skb */
+static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
+{
+ int i = 0;
+ u16 tpd_req = 1;
+ u16 fg_size = 0;
+ u16 proto_hdr_len = 0;
+
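+ /* each fragment may need several TPDs of at most MAX_TX_BUF_LEN bytes each */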
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ fg_size = skb_shinfo(skb)->frags[i].size;
+ tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
+ }
+
+ if (skb_is_gso(skb)) {
+ if (skb->protocol == ntohs(ETH_P_IP) ||
+ (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
+ proto_hdr_len = skb_transport_offset(skb) +
+ tcp_hdrlen(skb);
+ if (proto_hdr_len < skb_headlen(skb)) {
+ tpd_req += ((skb_headlen(skb) - proto_hdr_len +
+ MAX_TX_BUF_LEN - 1) >>
+ MAX_TX_BUF_SHIFT);
+ }
+ }
+
+ }
+ return tpd_req;
+}
+
+static int atl1e_tso_csum(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u8 hdr_len;
+ u32 real_len;
+ unsigned short offload_type;
+ int err;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (unlikely(err))
+ return -1;
+ }
+ offload_type = skb_shinfo(skb)->gso_type;
+
+ if (offload_type & SKB_GSO_TCPV4) {
+ real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ + ntohs(ip_hdr(skb)->tot_len));
+
+ if (real_len < skb->len)
+ pskb_trim(skb, real_len);
+
+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (unlikely(skb->len == hdr_len)) {
+ /* only a checksum is needed */
+ dev_warn(&pdev->dev,
+ "IPV4 tso with zero data??\n");
+ goto check_sum;
+ } else {
+ ip_hdr(skb)->check = 0;
+ ip_hdr(skb)->tot_len = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(
+ ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ tpd->word3 |= (ip_hdr(skb)->ihl &
+ TDP_V4_IPHL_MASK) <<
+ TPD_V4_IPHL_SHIFT;
+ tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+ TPD_TCPHDRLEN_MASK) <<
+ TPD_TCPHDRLEN_SHIFT;
+ tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+ }
+ return 0;
+ }
+
+ if (offload_type & SKB_GSO_TCPV6) {
+ real_len = (((unsigned char *)ipv6_hdr(skb) - skb->data)
+ + ntohs(ipv6_hdr(skb)->payload_len));
+ if (real_len < skb->len)
+ pskb_trim(skb, real_len);
+
+ /* check payload == 0 byte ? */
+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (unlikely(skb->len == hdr_len)) {
+ /* only a checksum is needed */
+ dev_warn(&pdev->dev,
+ "IPV6 tso with zero data??\n");
+ goto check_sum;
+ } else {
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ tpd->word3 |= 1 << TPD_IP_VERSION_SHIFT;
+ hdr_len >>= 1;
+ tpd->word3 |= (hdr_len & TPD_V6_IPHLLO_MASK) <<
+ TPD_V6_IPHLLO_SHIFT;
+ tpd->word3 |= ((hdr_len >> 3) &
+ TPD_V6_IPHLHI_MASK) <<
+ TPD_V6_IPHLHI_SHIFT;
+ tpd->word3 |= (tcp_hdrlen(skb) >> 2 &
+ TPD_TCPHDRLEN_MASK) <<
+ TPD_TCPHDRLEN_SHIFT;
+ tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+ }
+ }
+ return 0;
+ }
+
+check_sum:
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ u8 css, cso;
+
+ cso = skb_transport_offset(skb);
+ if (unlikely(cso & 0x1)) {
+ dev_err(&adapter->pdev->dev,
+ "pay load offset should not ant event number\n");
+ return -1;
+ } else {
+ css = cso + skb->csum_offset;
+ tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
+ TPD_PLOADOFFSET_SHIFT;
+ tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
+ TPD_CCSUMOFFSET_SHIFT;
+ tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT;
+ }
+ }
+
+ return 0;
+}
+
+static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+{
+ struct atl1e_tpd_desc *use_tpd = NULL;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+ u16 buf_len = skb->len - skb->data_len;
+ u16 map_len = 0;
+ u16 mapped_len = 0;
+ u16 hdr_len = 0;
+ u16 nr_frags;
+ u16 f;
+ int segment;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
+ if (segment) {
+ /* TSO */
+ map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ use_tpd = tpd;
+
+ tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
+ tx_buffer->length = map_len;
+ tx_buffer->dma = pci_map_single(adapter->pdev,
+ skb->data, hdr_len, PCI_DMA_TODEVICE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+ ((cpu_to_le32(tx_buffer->length) &
+ TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+ }
+
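+ /* map the linear part of the skb in chunks of at most MAX_TX_BUF_LEN bytes */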
+ while (mapped_len < buf_len) {
+ /* when mapped_len == 0 we use the first TPD, which is
+ supplied by the caller */
+ if (mapped_len == 0) {
+ use_tpd = tpd;
+ } else {
+ use_tpd = atl1e_get_tpd(adapter);
+ memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
+ }
+ tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
+ tx_buffer->skb = NULL;
+
+ tx_buffer->length = map_len =
+ ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
+ MAX_TX_BUF_LEN : (buf_len - mapped_len);
+ tx_buffer->dma =
+ pci_map_single(adapter->pdev, skb->data + mapped_len,
+ map_len, PCI_DMA_TODEVICE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+ ((cpu_to_le32(tx_buffer->length) &
+ TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+ u16 i;
+ u16 seg_num;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ buf_len = frag->size;
+
+ seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ for (i = 0; i < seg_num; i++) {
+ use_tpd = atl1e_get_tpd(adapter);
+ memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
+
+ tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
+ BUG_ON(tx_buffer->skb);
+
+ tx_buffer->skb = NULL;
+ tx_buffer->length =
+ (buf_len > MAX_TX_BUF_LEN) ?
+ MAX_TX_BUF_LEN : buf_len;
+ buf_len -= tx_buffer->length;
+
+ tx_buffer->dma =
+ pci_map_page(adapter->pdev, frag->page,
+ frag->page_offset +
+ (i * MAX_TX_BUF_LEN),
+ tx_buffer->length,
+ PCI_DMA_TODEVICE);
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+ ((cpu_to_le32(tx_buffer->length) &
+ TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+ }
+ }
+
+ if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK)
+ /* mark the first TPD as carrying the packet header (TSO) */
+ tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
+ /* The last tpd */
+
+ use_tpd->word3 |= 1 << TPD_EOP_SHIFT;
+ /* The last buffer info holds the skb pointer, so the skb
+ is freed after it is unmapped */
+ tx_buffer->skb = skb;
+}
+
+static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
+ struct atl1e_tpd_desc *tpd)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
+}
+
+static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ u16 tpd_req = 1;
+ struct atl1e_tpd_desc *tpd;
+
+ if (test_bit(__AT_DOWN, &adapter->flags)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ tpd_req = atl1e_cal_tdp_req(skb);
+ if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
+ return NETDEV_TX_LOCKED;
+
+ if (atl1e_tpd_avail(adapter) < tpd_req) {
+ /* not enough descriptors, stop the queue */
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ tpd = atl1e_get_tpd(adapter);
+
+ if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+ u16 vlan_tag = vlan_tx_tag_get(skb);
+ u16 atl1e_vlan_tag;
+
+ tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
+ AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag);
+ tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) <<
+ TPD_VLAN_SHIFT;
+ }
+
+ if (skb->protocol == ntohs(ETH_P_8021Q))
+ tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;
+
+ if (skb_network_offset(skb) != ETH_HLEN)
+ tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */
+
+ /* do TSO and check sum */
+ if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ atl1e_tx_map(adapter, skb, tpd);
+ atl1e_tx_queue(adapter, tpd_req, tpd);
+
+ netdev->trans_start = jiffies;
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static void atl1e_free_irq(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+}
+
+static int atl1e_request_irq(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ int flags = 0;
+ int err = 0;
+
+ adapter->have_msi = true;
+ err = pci_enable_msi(adapter->pdev);
+ if (err) {
+ dev_dbg(&pdev->dev,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = false;
+ } else
+ netdev->irq = pdev->irq;
+
+
+ if (!adapter->have_msi)
+ flags |= IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, &atl1e_intr, flags,
+ netdev->name, netdev);
+ if (err) {
+ dev_dbg(&pdev->dev,
+ "Unable to allocate interrupt Error: %d\n", err);
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+ return err;
+ }
+ dev_dbg(&pdev->dev, "atl1e_request_irq OK\n");
+ return err;
+}
+
+int atl1e_up(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+ u32 val;
+
+ /* hardware has been reset, we need to reload some things */
+ err = atl1e_init_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ return err;
+ }
+ atl1e_init_ring_ptrs(adapter);
+ atl1e_set_multi(netdev);
+ atl1e_restore_vlan(adapter);
+
+ if (atl1e_configure(adapter)) {
+ err = -EIO;
+ goto err_up;
+ }
+
+ clear_bit(__AT_DOWN, &adapter->flags);
+ napi_enable(&adapter->napi);
+ atl1e_irq_enable(adapter);
+ val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+ AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
+ val | MASTER_CTRL_MANUAL_INT);
+
+err_up:
+ return err;
+}
+
+void atl1e_down(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__AT_DOWN, &adapter->flags);
+
+#ifdef NETIF_F_LLTX
+ netif_stop_queue(netdev);
+#else
+ netif_tx_disable(netdev);
+#endif
+
+ /* reset MAC to disable all RX/TX */
+ atl1e_reset_hw(&adapter->hw);
+ msleep(1);
+
+ napi_disable(&adapter->napi);
+ atl1e_del_timer(adapter);
+ atl1e_irq_disable(adapter);
+
+ netif_carrier_off(netdev);
+ adapter->link_speed = SPEED_0;
+ adapter->link_duplex = -1;
+ atl1e_clean_tx_ring(adapter);
+ atl1e_clean_rx_ring(adapter);
+}
+
+/*
+ * atl1e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl1e_open(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__AT_TESTING, &adapter->flags))
+ return -EBUSY;
+
+ /* allocate rx/tx dma buffer & descriptors */
+ atl1e_init_ring_resources(adapter);
+ err = atl1e_setup_ring_resources(adapter);
+ if (unlikely(err))
+ return err;
+
+ err = atl1e_request_irq(adapter);
+ if (unlikely(err))
+ goto err_req_irq;
+
+ err = atl1e_up(adapter);
+ if (unlikely(err))
+ goto err_up;
+
+ return 0;
+
+err_up:
+ atl1e_free_irq(adapter);
+err_req_irq:
+ atl1e_free_ring_resources(adapter);
+ atl1e_reset_hw(&adapter->hw);
+
+ return err;
+}
+
+/*
+ * atl1e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl1e_close(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+ atl1e_down(adapter);
+ atl1e_free_irq(adapter);
+ atl1e_free_ring_resources(adapter);
+
+ return 0;
+}
+
+static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 ctrl = 0;
+ u32 mac_ctrl_data = 0;
+ u32 wol_ctrl_data = 0;
+ u16 mii_advertise_data = 0;
+ u16 mii_bmsr_data = 0;
+ u16 mii_intr_status_data = 0;
+ u32 wufc = adapter->wol;
+ u32 i;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+ atl1e_down(adapter);
+ }
+ netif_device_detach(netdev);
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
+ if (wufc) {
+ /* get link status */
+ atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+
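+ /* restrict autoneg advertisement to 10M half duplex while suspended */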
+ mii_advertise_data = MII_AR_10T_HD_CAPS;
+
+ if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+ (atl1e_write_phy_reg(hw,
+ MII_ADVERTISE, mii_advertise_data) != 0) ||
+ (atl1e_phy_commit(hw)) != 0) {
+ dev_dbg(&pdev->dev, "set phy register failed\n");
+ goto wol_dis;
+ }
+
+ hw->phy_configured = false; /* re-init PHY when resume */
+
+ /* turn on magic packet wol */
+ if (wufc & AT_WUFC_MAG)
+ wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+
+ if (wufc & AT_WUFC_LNKC) {
+ /* if the link was originally up, wait for it to come back up */
+ if (mii_bmsr_data & BMSR_LSTATUS) {
+ for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
+ msleep(100);
+ atl1e_read_phy_reg(hw, MII_BMSR,
+ (u16 *)&mii_bmsr_data);
+ if (mii_bmsr_data & BMSR_LSTATUS)
+ break;
+ }
+
+ if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
+ dev_dbg(&pdev->dev,
+ "%s: Link may change"
+ "when suspend\n",
+ atl1e_driver_name);
+ }
+ wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
+ /* only a link-up event can wake the system */
+ if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
+ dev_dbg(&pdev->dev, "%s: read write phy "
+ "register failed.\n",
+ atl1e_driver_name);
+ goto wol_dis;
+ }
+ }
+ /* clear phy interrupt */
+ atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data);
+ /* Config MAC Ctrl register */
+ mac_ctrl_data = MAC_CTRL_RX_EN;
+ /* set to 10/100M half duplex */
+ mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT;
+ mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) <<
+ MAC_CTRL_PRMLEN_SHIFT);
+
+ if (adapter->vlgrp)
+ mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+
+ /* the magic packet may arrive as a broadcast, multicast or unicast frame */
+ if (wufc & AT_WUFC_MAG)
+ mac_ctrl_data |= MAC_CTRL_BC_EN;
+
+ dev_dbg(&pdev->dev,
+ "%s: suspend MAC=0x%x\n",
+ atl1e_driver_name, mac_ctrl_data);
+
+ AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+ /* pcie patch */
+ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ goto suspend_exit;
+ }
+wol_dis:
+
+ /* WOL disabled */
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+ /* pcie patch */
+ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+
+ atl1e_force_ps(hw);
+ hw->phy_configured = false; /* re-init PHY when resume */
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
+suspend_exit:
+
+ if (netif_running(netdev))
+ atl1e_free_irq(adapter);
+
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atl1e_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "ATL1e: Cannot enable PCI"
+ " device from suspend\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+
+ if (netif_running(netdev))
+ err = atl1e_request_irq(adapter);
+ if (err)
+ return err;
+
+ atl1e_reset_hw(&adapter->hw);
+
+ if (netif_running(netdev))
+ atl1e_up(adapter);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static void atl1e_shutdown(struct pci_dev *pdev)
+{
+ atl1e_suspend(pdev, PMSG_SUSPEND);
+}
+
+static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
+{
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ netdev->irq = pdev->irq;
+ netdev->open = &atl1e_open;
+ netdev->stop = &atl1e_close;
+ netdev->hard_start_xmit = &atl1e_xmit_frame;
+ netdev->get_stats = &atl1e_get_stats;
+ netdev->set_multicast_list = &atl1e_set_multi;
+ netdev->set_mac_address = &atl1e_set_mac_addr;
+ netdev->change_mtu = &atl1e_change_mtu;
+ netdev->do_ioctl = &atl1e_ioctl;
+ netdev->tx_timeout = &atl1e_tx_timeout;
+ netdev->watchdog_timeo = AT_TX_WATCHDOG;
+ netdev->vlan_rx_register = atl1e_vlan_rx_register;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = atl1e_netpoll;
+#endif
+ atl1e_set_ethtool_ops(netdev);
+
+ netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->features |= NETIF_F_LLTX;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+
+ return 0;
+}
+
+/*
+ * atl1e_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl1e_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl1e_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl1e_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct atl1e_adapter *adapter = NULL;
+ static int cards_found;
+
+ int err = 0;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ return err;
+ }
+
+ /*
+ * The atl1e chip can DMA to 64-bit addresses, but it uses a single
+ * shared register for the high 32 bits, so only a single, aligned,
+ * 4 GB physical address range can be used at a time.
+ *
+ * Supporting 64-bit DMA on this hardware is more trouble than it's
+ * worth. It is far easier to limit to 32-bit DMA than update
+ * various kernel subsystems to support the mechanics required by a
+ * fixed-high-32-bit system.
+ */
+ if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) {
+ dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
+ goto err_dma;
+ }
+
+ err = pci_request_regions(pdev, atl1e_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
+ if (netdev == NULL) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "etherdev alloc failed\n");
+ goto err_alloc_etherdev;
+ }
+
+ err = atl1e_init_netdev(netdev, pdev);
+ if (err) {
+ dev_err(&pdev->dev, "init netdevice failed\n");
+ goto err_init_netdev;
+ }
+ adapter = netdev_priv(netdev);
+ adapter->bd_number = cards_found;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.adapter = adapter;
+ adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
+ if (!adapter->hw.hw_addr) {
+ err = -EIO;
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_ioremap;
+ }
+ netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
+
+ /* init mii data */
+ adapter->mii.dev = netdev;
+ adapter->mii.mdio_read = atl1e_mdio_read;
+ adapter->mii.mdio_write = atl1e_mdio_write;
+ adapter->mii.phy_id_mask = 0x1f;
+ adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
+
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+
+ init_timer(&adapter->phy_config_timer);
+ adapter->phy_config_timer.function = &atl1e_phy_config;
+ adapter->phy_config_timer.data = (unsigned long) adapter;
+
+ /* get user settings */
+ atl1e_check_options(adapter);
+ /*
+ * Set up the PCI command register (memory space and bus
+ * mastering enabled) and clear any PME status left behind
+ * by the BIOS before the NIC is used.
+ */
+ atl1e_setup_pcicmd(pdev);
+ /* setup the private structure */
+ err = atl1e_sw_init(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "net device private data init failed\n");
+ goto err_sw_init;
+ }
+
+ /* Init GPHY as early as possible due to power saving issue */
+ spin_lock(&adapter->mdio_lock);
+ atl1e_phy_init(&adapter->hw);
+ spin_unlock(&adapter->mdio_lock);
+ /* reset the controller to
+ * put the device in a known good starting state */
+ err = atl1e_reset_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ goto err_reset;
+ }
+
+ if (atl1e_read_mac_addr(&adapter->hw) != 0) {
+ err = -EIO;
+ dev_err(&pdev->dev, "get mac address failed\n");
+ goto err_eeprom;
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+ dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
+ adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
+ adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
+ adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+
+ INIT_WORK(&adapter->reset_task, atl1e_reset_task);
+ INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "register netdevice failed\n");
+ goto err_register;
+ }
+
+ /* assume we have no link for now */
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+
+ cards_found++;
+
+ return 0;
+
+err_reset:
+err_register:
+err_sw_init:
+err_eeprom:
+ iounmap(adapter->hw.hw_addr);
+err_init_netdev:
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/*
+ * atl1e_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl1e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void __devexit atl1e_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * flush_scheduled_work() may reschedule our watchdog task, so
+ * explicitly disable watchdog tasks from being rescheduled
+ */
+ set_bit(__AT_DOWN, &adapter->flags);
+
+ atl1e_del_timer(adapter);
+ atl1e_cancel_work(adapter);
+
+ unregister_netdev(netdev);
+ atl1e_free_ring_resources(adapter);
+ atl1e_force_ps(&adapter->hw);
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+/*
+ * atl1e_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev->priv;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ atl1e_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * atl1e_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the atl1e_resume routine.
+ */
+static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev->priv;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "ATL1e: Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ atl1e_reset_hw(&adapter->hw);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/*
+ * atl1e_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the atl1e_resume routine.
+ */
+static void atl1e_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev->priv;
+
+ if (netif_running(netdev)) {
+ if (atl1e_up(adapter)) {
+ dev_err(&pdev->dev,
+ "ATL1e: can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+}
+
+static struct pci_error_handlers atl1e_err_handler = {
+ .error_detected = atl1e_io_error_detected,
+ .slot_reset = atl1e_io_slot_reset,
+ .resume = atl1e_io_resume,
+};
+
+static struct pci_driver atl1e_driver = {
+ .name = atl1e_driver_name,
+ .id_table = atl1e_pci_tbl,
+ .probe = atl1e_probe,
+ .remove = __devexit_p(atl1e_remove),
+ /* Power Management Hooks */
+#ifdef CONFIG_PM
+ .suspend = atl1e_suspend,
+ .resume = atl1e_resume,
+#endif
+ .shutdown = atl1e_shutdown,
+ .err_handler = &atl1e_err_handler
+};
+
+/*
+ * atl1e_init_module - Driver Registration Routine
+ *
+ * atl1e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl1e_init_module(void)
+{
+ return pci_register_driver(&atl1e_driver);
+}
+
+/*
+ * atl1e_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl1e_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl1e_exit_module(void)
+{
+ pci_unregister_driver(&atl1e_driver);
+}
+
+module_init(atl1e_init_module);
+module_exit(atl1e_exit_module);
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c
new file mode 100644
index 000000000000..f72abb34b0cd
--- /dev/null
+++ b/drivers/net/atl1e/atl1e_param.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/netdevice.h>
+
+#include "atl1e.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define ATL1E_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+#define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET }
+
+#define ATL1E_PARAM(x, desc) \
+ static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
+ static int num_##x; \
+ module_param_array_named(x, x, int, &num_##x, 0); \
+ MODULE_PARM_DESC(x, desc);
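For reference, instantiating this macro for the tx_desc_cnt parameter declared below expands roughly to the following (illustrative expansion, not an additional hunk):

    static int __devinitdata tx_desc_cnt[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT;
    static int num_tx_desc_cnt;
    module_param_array_named(tx_desc_cnt, tx_desc_cnt, int, &num_tx_desc_cnt, 0);
    MODULE_PARM_DESC(tx_desc_cnt, "Transmit descriptor count");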
+
+/* Transmit descriptor count
+ *
+ * Valid Range: 32-1020
+ *
+ * Default Value: 128
+ */
+#define ATL1E_MIN_TX_DESC_CNT 32
+#define ATL1E_MAX_TX_DESC_CNT 1020
+#define ATL1E_DEFAULT_TX_DESC_CNT 128
+ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor count");
+
+/* Receive Memory Block Size (KB)
+ *
+ * Valid Range: 8-1024
+ *
+ * Default Value: 256
+ */
+#define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */
+#define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */
+#define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 256KB */
+ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)");
+
+/* User Specified MediaType Override
+ *
+ * Valid Range: 0-4
+ * - 0 - auto-negotiate at all supported speeds
+ * - 1 - only link at 100Mbps Full Duplex
+ * - 2 - only link at 100Mbps Half Duplex
+ * - 3 - only link at 10Mbps Full Duplex
+ * - 4 - only link at 10Mbps Half Duplex
+ * Default Value: 0
+ */
+
+ATL1E_PARAM(media_type, "MediaType Select");
+
+/* Interrupt Moderate Timer in units of 2 us
+ *
+ * Valid Range: 50-65000
+ *
+ * Default Value: 100 (200us)
+ */
+#define INT_MOD_DEFAULT_CNT 100 /* 200us */
+#define INT_MOD_MAX_CNT 65000
+#define INT_MOD_MIN_CNT 50
+ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer");
+
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+#define FLASH_VENDOR_DEFAULT 0
+#define FLASH_VENDOR_MIN 0
+#define FLASH_VENDOR_MAX 2
+
+struct atl1e_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct atl1e_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ dev_info(&pdev->dev, "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ dev_info(&pdev->dev, "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option:{
+ int i;
+ struct atl1e_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ dev_info(&pdev->dev, "%s\n",
+ ent->str);
+ return 0;
+ }
+ }
+ break;
+ }
+ default:
+ BUG();
+ }
+
+ dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/*
+ * atl1e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ */
+void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int bd = adapter->bd_number;
+ if (bd >= ATL1E_MAX_NIC) {
+ dev_notice(&pdev->dev, "no configuration for board #%i\n", bd);
+ dev_notice(&pdev->dev, "Using defaults for all values\n");
+ }
+
+ { /* Transmit Ring Size */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Transmit Ddescription Count",
+ .err = "using default of "
+ __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT),
+ .def = ATL1E_DEFAULT_TX_DESC_CNT,
+ .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT,
+ .max = ATL1E_MAX_TX_DESC_CNT} }
+ };
+ int val;
+ if (num_tx_desc_cnt > bd) {
+ val = tx_desc_cnt[bd];
+ atl1e_validate_option(&val, &opt, pdev);
+ adapter->tx_ring.count = (u16) val & 0xFFFC; /* round down to a multiple of 4 */
+ } else
+ adapter->tx_ring.count = (u16)opt.def;
+ }
+
+ { /* Receive Memory Block Count */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Memory size of rx buffer(KB)",
+ .err = "using default of "
+ __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE),
+ .def = ATL1E_DEFAULT_RX_MEM_SIZE,
+ .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE,
+ .max = ATL1E_MAX_RX_MEM_SIZE} }
+ };
+ int val;
+ if (num_rx_mem_size > bd) {
+ val = rx_mem_size[bd];
+ atl1e_validate_option(&val, &opt, pdev);
+ adapter->rx_ring.page_size = (u32)val * 1024;
+ } else {
+ adapter->rx_ring.page_size = (u32)opt.def * 1024;
+ }
+ }
+
+ { /* Interrupt Moderate Timer */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Interrupt Moderate Timer",
+ .err = "using default of "
+ __MODULE_STRING(INT_MOD_DEFAULT_CNT),
+ .def = INT_MOD_DEFAULT_CNT,
+ .arg = { .r = { .min = INT_MOD_MIN_CNT,
+ .max = INT_MOD_MAX_CNT} }
+ };
+ int val;
+ if (num_int_mod_timer > bd) {
+ val = int_mod_timer[bd];
+ atl1e_validate_option(&val, &opt, pdev);
+ adapter->hw.imt = (u16) val;
+ } else
+ adapter->hw.imt = (u16)(opt.def);
+ }
+
+ { /* MediaType */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Speed/Duplex Selection",
+ .err = "using default of "
+ __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR),
+ .def = MEDIA_TYPE_AUTO_SENSOR,
+ .arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR,
+ .max = MEDIA_TYPE_10M_HALF} }
+ };
+ int val;
+ if (num_media_type > bd) {
+ val = media_type[bd];
+ atl1e_validate_option(&val, &opt, pdev);
+ adapter->hw.media_type = (u16) val;
+ } else
+ adapter->hw.media_type = (u16)(opt.def);
+
+ }
+}
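Because each parameter is an array indexed by board number, per-adapter values are given as comma-separated lists at load time; for example, a hypothetical `modprobe atl1e tx_desc_cnt=256,128 int_mod_timer=100` would request 256 descriptors for the first board, 128 for the second, and leave the remaining boards and parameters at the defaults validated above.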
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 3ab61e40e86a..cb8be490e5ae 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -911,9 +911,8 @@ au1000_adjust_link(struct net_device *dev)
if(phydev->link != aup->old_link) {
// link state changed
- if (phydev->link) // link went up
- netif_tx_schedule_all(dev);
- else { // link went down
+ if (!phydev->link) {
+ /* link went down */
aup->old_speed = 0;
aup->old_duplex = -1;
}
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index a6a3da89f590..a8ec60e1ed75 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -357,7 +357,6 @@ static void bfin_mac_adjust_link(struct net_device *dev)
if (!lp->old_link) {
new_state = 1;
lp->old_link = 1;
- netif_tx_schedule_all(dev);
}
} else if (lp->old_link) {
new_state = 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9737c06045d6..a641eeaa2a2f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5041,6 +5041,7 @@ static int bond_check_params(struct bond_params *params)
}
static struct lock_class_key bonding_netdev_xmit_lock_key;
+static struct lock_class_key bonding_netdev_addr_lock_key;
static void bond_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
@@ -5052,6 +5053,8 @@ static void bond_set_lockdep_class_one(struct net_device *dev,
static void bond_set_lockdep_class(struct net_device *dev)
{
+ lockdep_set_class(&dev->addr_list_lock,
+ &bonding_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
}
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index fbd4280c102c..a7800e559090 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -945,10 +945,8 @@ static void cpmac_adjust_link(struct net_device *dev)
if (!priv->oldlink) {
new_state = 1;
priv->oldlink = 1;
- netif_tx_schedule_all(dev);
}
} else if (priv->oldlink) {
- netif_tx_stop_all_queues(dev);
new_state = 1;
priv->oldlink = 0;
priv->oldspeed = 0;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 952e10d686ec..0b0f1c407a7e 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -888,19 +888,22 @@ dm9000_rx(struct net_device *dev)
dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
}
- if (rxhdr.RxStatus & 0xbf) {
+ /* rxhdr.RxStatus is identical to RSR register. */
+ if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
+ RSR_PLE | RSR_RWTO |
+ RSR_LCS | RSR_RF)) {
GoodPacket = false;
- if (rxhdr.RxStatus & 0x01) {
+ if (rxhdr.RxStatus & RSR_FOE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "fifo error\n");
dev->stats.rx_fifo_errors++;
}
- if (rxhdr.RxStatus & 0x02) {
+ if (rxhdr.RxStatus & RSR_CE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "crc error\n");
dev->stats.rx_crc_errors++;
}
- if (rxhdr.RxStatus & 0x80) {
+ if (rxhdr.RxStatus & RSR_RF) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "length error\n");
dev->stats.rx_length_errors++;
@@ -1067,7 +1070,7 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
- iow(db, DM9000_EPCR, 0xc); /* Issue phyxcer read command */
+ iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock,flags);
@@ -1118,7 +1121,7 @@ dm9000_phy_write(struct net_device *dev,
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
- iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */
+ iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 31feae1ea390..19e317eaf5bc 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -90,10 +90,13 @@ struct e1000_adapter;
#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
#define PFX "e1000: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __FUNCTION__ , ## args))
+
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+do { \
+ if (NETIF_MSG_##nlevel & adapter->msg_enable) \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, \
+ adapter->netdev->name, __func__, ##args); \
+} while (0)
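The do { ... } while (0) wrapper is the usual way to make a multi-statement macro behave as a single statement, so an invocation followed by a semicolon composes cleanly with the surrounding control flow; a minimal, hypothetical call site (assuming adapter and link_lost are in scope, not taken from this patch):

    if (link_lost)
            DPRINTK(LINK, ERR, "link lost\n");
    else
            netif_carrier_on(adapter->netdev);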
#define E1000_MAX_INTR 10
@@ -151,9 +154,9 @@ struct e1000_adapter;
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE -1
+#define E1000_MNG_VLAN_NONE (-1)
/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
@@ -165,9 +168,13 @@ struct e1000_buffer {
u16 next_to_watch;
};
+struct e1000_ps_page {
+ struct page *ps_page[PS_PAGE_BUFFERS];
+};
-struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
-struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma {
+ u64 ps_page_dma[PS_PAGE_BUFFERS];
+};
struct e1000_tx_ring {
/* pointer to the descriptor ring memory */
@@ -217,13 +224,13 @@ struct e1000_rx_ring {
u16 rdt;
};
-#define E1000_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+#define E1000_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) \
+ ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
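As a quick sanity check on the reflowed E1000_DESC_UNUSED macro: for a ring with count = 256, next_to_clean = 10 and next_to_use = 200, clean is not greater than use, so count is added and the result is 256 + 10 - 200 - 1 = 65 unused descriptors (arbitrary example values).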
-#define E1000_RX_DESC_PS(R, i) \
+#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
-#define E1000_RX_DESC_EXT(R, i) \
+#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
@@ -246,9 +253,7 @@ struct e1000_adapter {
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
-#ifdef CONFIG_E1000_NAPI
spinlock_t tx_queue_lock;
-#endif
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
@@ -286,22 +291,16 @@ struct e1000_adapter {
bool detect_tx_hung;
/* RX */
-#ifdef CONFIG_E1000_NAPI
- bool (*clean_rx) (struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int *work_done, int work_to_do);
-#else
- bool (*clean_rx) (struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring);
-#endif
- void (*alloc_rx_buf) (struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int cleaned_count);
+ bool (*clean_rx)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+ void (*alloc_rx_buf)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
struct e1000_rx_ring *rx_ring; /* One per active queue */
-#ifdef CONFIG_E1000_NAPI
struct napi_struct napi;
struct net_device *polling_netdev; /* One per active queue */
-#endif
+
int num_tx_queues;
int num_rx_queues;
@@ -317,7 +316,6 @@ struct e1000_adapter {
u64 gorcl_old;
u16 rx_ps_bsize0;
-
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -342,6 +340,10 @@ struct e1000_adapter {
bool quad_port_a;
unsigned long flags;
u32 eeprom_wol;
+
+ /* for ioport free */
+ int bars;
+ int need_ioport;
};
enum e1000_state_t {
@@ -353,9 +355,18 @@ enum e1000_state_t {
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
-
#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index a3f6a9c72ec8..6a3893acfe04 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -29,21 +29,8 @@
/* ethtool support for e1000 */
#include "e1000.h"
-
#include <asm/uaccess.h>
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-
-
struct e1000_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -112,8 +99,8 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-static int
-e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int e1000_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -162,7 +149,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_EXTERNAL;
}
- if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
+ if (er32(STATUS) & E1000_STATUS_LU) {
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
&adapter->link_duplex);
@@ -185,8 +172,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static int
-e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int e1000_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -231,9 +218,8 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void
-e1000_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static void e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -251,9 +237,8 @@ e1000_get_pauseparam(struct net_device *netdev,
}
}
-static int
-e1000_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static int e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -289,15 +274,13 @@ e1000_set_pauseparam(struct net_device *netdev,
return retval;
}
-static u32
-e1000_get_rx_csum(struct net_device *netdev)
+static u32 e1000_get_rx_csum(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
return adapter->rx_csum;
}
-static int
-e1000_set_rx_csum(struct net_device *netdev, u32 data)
+static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->rx_csum = data;
@@ -309,18 +292,17 @@ e1000_set_rx_csum(struct net_device *netdev, u32 data)
return 0;
}
-static u32
-e1000_get_tx_csum(struct net_device *netdev)
+static u32 e1000_get_tx_csum(struct net_device *netdev)
{
return (netdev->features & NETIF_F_HW_CSUM) != 0;
}
-static int
-e1000_set_tx_csum(struct net_device *netdev, u32 data)
+static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
- if (adapter->hw.mac_type < e1000_82543) {
+ if (hw->mac_type < e1000_82543) {
if (!data)
return -EINVAL;
return 0;
@@ -334,12 +316,13 @@ e1000_set_tx_csum(struct net_device *netdev, u32 data)
return 0;
}
-static int
-e1000_set_tso(struct net_device *netdev, u32 data)
+static int e1000_set_tso(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if ((adapter->hw.mac_type < e1000_82544) ||
- (adapter->hw.mac_type == e1000_82547))
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((hw->mac_type < e1000_82544) ||
+ (hw->mac_type == e1000_82547))
return data ? -EINVAL : 0;
if (data)
@@ -357,30 +340,26 @@ e1000_set_tso(struct net_device *netdev, u32 data)
return 0;
}
-static u32
-e1000_get_msglevel(struct net_device *netdev)
+static u32 e1000_get_msglevel(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
-static void
-e1000_set_msglevel(struct net_device *netdev, u32 data)
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
-static int
-e1000_get_regs_len(struct net_device *netdev)
+static int e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32
return E1000_REGS_LEN * sizeof(u32);
}
-static void
-e1000_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -391,22 +370,22 @@ e1000_get_regs(struct net_device *netdev,
regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
- regs_buff[0] = E1000_READ_REG(hw, CTRL);
- regs_buff[1] = E1000_READ_REG(hw, STATUS);
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
- regs_buff[2] = E1000_READ_REG(hw, RCTL);
- regs_buff[3] = E1000_READ_REG(hw, RDLEN);
- regs_buff[4] = E1000_READ_REG(hw, RDH);
- regs_buff[5] = E1000_READ_REG(hw, RDT);
- regs_buff[6] = E1000_READ_REG(hw, RDTR);
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN);
+ regs_buff[4] = er32(RDH);
+ regs_buff[5] = er32(RDT);
+ regs_buff[6] = er32(RDTR);
- regs_buff[7] = E1000_READ_REG(hw, TCTL);
- regs_buff[8] = E1000_READ_REG(hw, TDLEN);
- regs_buff[9] = E1000_READ_REG(hw, TDH);
- regs_buff[10] = E1000_READ_REG(hw, TDT);
- regs_buff[11] = E1000_READ_REG(hw, TIDV);
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDLEN);
+ regs_buff[9] = er32(TDH);
+ regs_buff[10] = er32(TDT);
+ regs_buff[11] = er32(TIDV);
- regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
+ regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */
if (hw->phy_type == e1000_phy_igp) {
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
IGP01E1000_PHY_AGC_A);
@@ -464,20 +443,20 @@ e1000_get_regs(struct net_device *netdev,
if (hw->mac_type >= e1000_82540 &&
hw->mac_type < e1000_82571 &&
hw->media_type == e1000_media_type_copper) {
- regs_buff[26] = E1000_READ_REG(hw, MANC);
+ regs_buff[26] = er32(MANC);
}
}
-static int
-e1000_get_eeprom_len(struct net_device *netdev)
+static int e1000_get_eeprom_len(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- return adapter->hw.eeprom.word_size * 2;
+ struct e1000_hw *hw = &adapter->hw;
+
+ return hw->eeprom.word_size * 2;
}
-static int
-e1000_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static int e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -504,10 +483,12 @@ e1000_get_eeprom(struct net_device *netdev,
last_word - first_word + 1,
eeprom_buff);
else {
- for (i = 0; i < last_word - first_word + 1; i++)
- if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
- &eeprom_buff[i])))
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
break;
+ }
}
/* Device's eeprom is always little-endian, word addressable */
@@ -521,9 +502,8 @@ e1000_get_eeprom(struct net_device *netdev,
return ret_val;
}
-static int
-e1000_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static int e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -584,11 +564,11 @@ e1000_set_eeprom(struct net_device *netdev,
return ret_val;
}
-static void
-e1000_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+static void e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
char firmware_version[32];
u16 eeprom_data;
@@ -597,8 +577,8 @@ e1000_get_drvinfo(struct net_device *netdev,
/* EEPROM image version # is reported as firmware version # for
* 8257{1|2|3} controllers */
- e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
- switch (adapter->hw.mac_type) {
+ e1000_read_eeprom(hw, 5, 1, &eeprom_data);
+ switch (hw->mac_type) {
case e1000_82571:
case e1000_82572:
case e1000_82573:
@@ -619,12 +599,12 @@ e1000_get_drvinfo(struct net_device *netdev,
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
-static void
-e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_mac_type mac_type = adapter->hw.mac_type;
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
struct e1000_tx_ring *txdr = adapter->tx_ring;
struct e1000_rx_ring *rxdr = adapter->rx_ring;
@@ -640,12 +620,12 @@ e1000_get_ringparam(struct net_device *netdev,
ring->rx_jumbo_pending = 0;
}
-static int
-e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_mac_type mac_type = adapter->hw.mac_type;
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
struct e1000_tx_ring *txdr, *tx_old;
struct e1000_rx_ring *rxdr, *rx_old;
int i, err;
@@ -691,9 +671,11 @@ e1000_set_ringparam(struct net_device *netdev,
if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
- if ((err = e1000_setup_all_rx_resources(adapter)))
+ err = e1000_setup_all_rx_resources(adapter);
+ if (err)
goto err_setup_rx;
- if ((err = e1000_setup_all_tx_resources(adapter)))
+ err = e1000_setup_all_tx_resources(adapter);
+ if (err)
goto err_setup_tx;
/* save the new, restore the old in order to free it,
@@ -707,7 +689,8 @@ e1000_set_ringparam(struct net_device *netdev,
kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
- if ((err = e1000_up(adapter)))
+ err = e1000_up(adapter);
+ if (err)
goto err_setup;
}
@@ -728,12 +711,13 @@ err_setup:
return err;
}
-static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
- int reg, u32 mask, u32 write)
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
{
+ struct e1000_hw *hw = &adapter->hw;
static const u32 test[] =
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
- u8 __iomem *address = adapter->hw.hw_addr + reg;
+ u8 __iomem *address = hw->hw_addr + reg;
u32 read;
int i;
@@ -751,10 +735,11 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
return false;
}
-static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
- int reg, u32 mask, u32 write)
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
{
- u8 __iomem *address = adapter->hw.hw_addr + reg;
+ struct e1000_hw *hw = &adapter->hw;
+ u8 __iomem *address = hw->hw_addr + reg;
u32 read;
writel(write & mask, address);
@@ -772,7 +757,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
#define REG_PATTERN_TEST(reg, mask, write) \
do { \
if (reg_pattern_test(adapter, data, \
- (adapter->hw.mac_type >= e1000_82543) \
+ (hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg, \
mask, write)) \
return 1; \
@@ -781,22 +766,22 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
#define REG_SET_AND_CHECK(reg, mask, write) \
do { \
if (reg_set_and_check(adapter, data, \
- (adapter->hw.mac_type >= e1000_82543) \
+ (hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg, \
mask, write)) \
return 1; \
} while (0)
-static int
-e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
u32 value, before, after;
u32 i, toggle;
+ struct e1000_hw *hw = &adapter->hw;
/* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
/* there are several bits on newer hardware that are r/w */
case e1000_82571:
case e1000_82572:
@@ -812,10 +797,10 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
break;
}
- before = E1000_READ_REG(&adapter->hw, STATUS);
- value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
- E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
- after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
+ before = er32(STATUS);
+ value = (er32(STATUS) & toggle);
+ ew32(STATUS, toggle);
+ after = er32(STATUS) & toggle;
if (value != after) {
DPRINTK(DRV, ERR, "failed STATUS register test got: "
"0x%08X expected: 0x%08X\n", after, value);
@@ -823,9 +808,9 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
return 1;
}
/* restore previous status */
- E1000_WRITE_REG(&adapter->hw, STATUS, before);
+ ew32(STATUS, before);
- if (adapter->hw.mac_type != e1000_ich8lan) {
+ if (hw->mac_type != e1000_ich8lan) {
REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
@@ -845,20 +830,20 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
- before = (adapter->hw.mac_type == e1000_ich8lan ?
+ before = (hw->mac_type == e1000_ich8lan ?
0x06C3B33E : 0x06DFB3FE);
REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
- if (adapter->hw.mac_type >= e1000_82543) {
+ if (hw->mac_type >= e1000_82543) {
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
- if (adapter->hw.mac_type != e1000_ich8lan)
+ if (hw->mac_type != e1000_ich8lan)
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
- value = (adapter->hw.mac_type == e1000_ich8lan ?
+ value = (hw->mac_type == e1000_ich8lan ?
E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
for (i = 0; i < value; i++) {
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -874,7 +859,7 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
- value = (adapter->hw.mac_type == e1000_ich8lan ?
+ value = (hw->mac_type == e1000_ich8lan ?
E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
for (i = 0; i < value; i++)
REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
@@ -883,9 +868,9 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
return 0;
}
-static int
-e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
u16 temp;
u16 checksum = 0;
u16 i;
@@ -893,7 +878,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
/* Read and add up the contents of the EEPROM */
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
- if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
+ if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) {
*data = 1;
break;
}
@@ -901,30 +886,30 @@ e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16) EEPROM_SUM) && !(*data))
+ if ((checksum != (u16)EEPROM_SUM) && !(*data))
*data = 2;
return *data;
}
-static irqreturn_t
-e1000_test_intr(int irq, void *data)
+static irqreturn_t e1000_test_intr(int irq, void *data)
{
- struct net_device *netdev = (struct net_device *) data;
+ struct net_device *netdev = (struct net_device *)data;
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
- adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR);
+ adapter->test_icr |= er32(ICR);
return IRQ_HANDLED;
}
-static int
-e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
struct net_device *netdev = adapter->netdev;
u32 mask, i = 0;
bool shared_int = true;
u32 irq = adapter->pdev->irq;
+ struct e1000_hw *hw = &adapter->hw;
*data = 0;
@@ -942,13 +927,13 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
(shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
- E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
+ ew32(IMC, 0xFFFFFFFF);
msleep(10);
/* Test each interrupt */
for (; i < 10; i++) {
- if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
+ if (hw->mac_type == e1000_ich8lan && i == 8)
continue;
/* Interrupt to test */
@@ -962,8 +947,8 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
* test failed.
*/
adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMC, mask);
- E1000_WRITE_REG(&adapter->hw, ICS, mask);
+ ew32(IMC, mask);
+ ew32(ICS, mask);
msleep(10);
if (adapter->test_icr & mask) {
@@ -979,8 +964,8 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
* test failed.
*/
adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMS, mask);
- E1000_WRITE_REG(&adapter->hw, ICS, mask);
+ ew32(IMS, mask);
+ ew32(ICS, mask);
msleep(10);
if (!(adapter->test_icr & mask)) {
@@ -996,8 +981,8 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
* test failed.
*/
adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
- E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
+ ew32(IMC, ~mask & 0x00007FFF);
+ ew32(ICS, ~mask & 0x00007FFF);
msleep(10);
if (adapter->test_icr) {
@@ -1008,7 +993,7 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
/* Disable all the interrupts */
- E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
+ ew32(IMC, 0xFFFFFFFF);
msleep(10);
/* Unhook test interrupt handler */
@@ -1017,8 +1002,7 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
return *data;
}
-static void
-e1000_free_desc_rings(struct e1000_adapter *adapter)
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
{
struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
@@ -1064,9 +1048,9 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
return;
}
-static int
-e1000_setup_desc_rings(struct e1000_adapter *adapter)
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
@@ -1078,41 +1062,39 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!txdr->count)
txdr->count = E1000_DEFAULT_TXD;
- if (!(txdr->buffer_info = kcalloc(txdr->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL))) {
+ txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!txdr->buffer_info) {
ret_val = 1;
goto err_nomem;
}
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size,
- &txdr->dma))) {
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
}
memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = txdr->next_to_clean = 0;
- E1000_WRITE_REG(&adapter->hw, TDBAL,
- ((u64) txdr->dma & 0x00000000FFFFFFFF));
- E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32));
- E1000_WRITE_REG(&adapter->hw, TDLEN,
- txdr->count * sizeof(struct e1000_tx_desc));
- E1000_WRITE_REG(&adapter->hw, TDH, 0);
- E1000_WRITE_REG(&adapter->hw, TDT, 0);
- E1000_WRITE_REG(&adapter->hw, TCTL,
- E1000_TCTL_PSP | E1000_TCTL_EN |
- E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
- E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+ ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH, ((u64)txdr->dma >> 32));
+ ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
for (i = 0; i < txdr->count; i++) {
struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
struct sk_buff *skb;
unsigned int size = 1024;
- if (!(skb = alloc_skb(size, GFP_KERNEL))) {
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
ret_val = 3;
goto err_nomem;
}
@@ -1135,40 +1117,40 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!rxdr->count)
rxdr->count = E1000_DEFAULT_RXD;
- if (!(rxdr->buffer_info = kcalloc(rxdr->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL))) {
+ rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!rxdr->buffer_info) {
ret_val = 4;
goto err_nomem;
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ if (!rxdr->desc) {
ret_val = 5;
goto err_nomem;
}
memset(rxdr->desc, 0, rxdr->size);
rxdr->next_to_use = rxdr->next_to_clean = 0;
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
- E1000_WRITE_REG(&adapter->hw, RDBAL,
- ((u64) rxdr->dma & 0xFFFFFFFF));
- E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32));
- E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
- E1000_WRITE_REG(&adapter->hw, RDH, 0);
- E1000_WRITE_REG(&adapter->hw, RDT, 0);
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
+ ew32(RDBAH, ((u64)rxdr->dma >> 32));
+ ew32(RDLEN, rxdr->size);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+ ew32(RCTL, rctl);
for (i = 0; i < rxdr->count; i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
struct sk_buff *skb;
- if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
- GFP_KERNEL))) {
+ skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
ret_val = 6;
goto err_nomem;
}
@@ -1189,73 +1171,74 @@ err_nomem:
return ret_val;
}
-static void
-e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
+
/* Write out to PHY registers 29 and 30 to disable the Receiver. */
- e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
- e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
- e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
- e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
+ e1000_write_phy_reg(hw, 29, 0x001F);
+ e1000_write_phy_reg(hw, 30, 0x8FFC);
+ e1000_write_phy_reg(hw, 29, 0x001A);
+ e1000_write_phy_reg(hw, 30, 0x8FF0);
}
-static void
-e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u16 phy_reg;
/* Because we reset the PHY above, we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock. This
* value defaults back to a 2.5MHz clock when the PHY is reset.
*/
- e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
phy_reg |= M88E1000_EPSCR_TX_CLK_25;
- e1000_write_phy_reg(&adapter->hw,
+ e1000_write_phy_reg(hw,
M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
/* In addition, because of the s/w reset above, we need to enable
* CRS on TX. This must be set for both full and half duplex
* operation.
*/
- e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- e1000_write_phy_reg(&adapter->hw,
+ e1000_write_phy_reg(hw,
M88E1000_PHY_SPEC_CTRL, phy_reg);
}
-static int
-e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg;
u16 phy_reg;
/* Setup the Device Control Register for PHY loopback test. */
- ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl_reg = er32(CTRL);
ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */
E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
E1000_CTRL_FD); /* Force Duplex to FULL */
- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
+ ew32(CTRL, ctrl_reg);
/* Read the PHY Specific Control Register (0x10) */
- e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
/* Clear Auto-Crossover bits in PHY Specific Control Register
* (bits 6:5).
*/
phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
- e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
/* Perform software reset on the PHY */
- e1000_phy_reset(&adapter->hw);
+ e1000_phy_reset(hw);
/* Have to setup TX_CLK and TX_CRS after software reset */
e1000_phy_reset_clk_and_crs(adapter);
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8100);
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);
/* Wait for reset to complete. */
udelay(500);
@@ -1267,55 +1250,55 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
e1000_phy_disable_receiver(adapter);
/* Set the loopback bit in the PHY control register. */
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
phy_reg |= MII_CR_LOOPBACK;
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
/* Setup TX_CLK and TX_CRS one more time. */
e1000_phy_reset_clk_and_crs(adapter);
/* Check Phy Configuration */
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
if (phy_reg != 0x4100)
return 9;
- e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
if (phy_reg != 0x0070)
return 10;
- e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
+ e1000_read_phy_reg(hw, 29, &phy_reg);
if (phy_reg != 0x001A)
return 11;
return 0;
}
-static int
-e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
u32 stat_reg = 0;
- adapter->hw.autoneg = false;
+ hw->autoneg = false;
- if (adapter->hw.phy_type == e1000_phy_m88) {
+ if (hw->phy_type == e1000_phy_m88) {
/* Auto-MDI/MDIX Off */
- e1000_write_phy_reg(&adapter->hw,
+ e1000_write_phy_reg(hw,
M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
/* autoneg off */
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
- } else if (adapter->hw.phy_type == e1000_phy_gg82563)
- e1000_write_phy_reg(&adapter->hw,
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
+ } else if (hw->phy_type == e1000_phy_gg82563)
+ e1000_write_phy_reg(hw,
GG82563_PHY_KMRN_MODE_CTRL,
0x1CC);
- ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl_reg = er32(CTRL);
- if (adapter->hw.phy_type == e1000_phy_ife) {
+ if (hw->phy_type == e1000_phy_ife) {
/* force 100, set loopback */
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100);
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
@@ -1325,10 +1308,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FD); /* Force Duplex to FULL */
} else {
/* force 1000, set loopback */
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
/* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl_reg = er32(CTRL);
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
@@ -1336,23 +1319,23 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FD); /* Force Duplex to FULL */
}
- if (adapter->hw.media_type == e1000_media_type_copper &&
- adapter->hw.phy_type == e1000_phy_m88)
+ if (hw->media_type == e1000_media_type_copper &&
+ hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic is half
* duplex link is detected. */
- stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
+ stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
}
- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
+ ew32(CTRL, ctrl_reg);
/* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
- if (adapter->hw.phy_type == e1000_phy_m88)
+ if (hw->phy_type == e1000_phy_m88)
e1000_phy_disable_receiver(adapter);
udelay(500);
@@ -1360,15 +1343,15 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
return 0;
}
-static int
-e1000_set_phy_loopback(struct e1000_adapter *adapter)
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u16 phy_reg = 0;
u16 count = 0;
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82543:
- if (adapter->hw.media_type == e1000_media_type_copper) {
+ if (hw->media_type == e1000_media_type_copper) {
/* Attempt to setup Loopback mode on Non-integrated PHY.
* Some PHY registers get corrupted at random, so
* attempt this 10 times.
@@ -1402,9 +1385,9 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
/* Default PHY loopback work is to read the MII
* control register and assert bit 14 (loopback mode).
*/
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
phy_reg |= MII_CR_LOOPBACK;
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
return 0;
break;
}
@@ -1412,8 +1395,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
return 8;
}
-static int
-e1000_setup_loopback_test(struct e1000_adapter *adapter)
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
@@ -1431,14 +1413,14 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
case e1000_82572:
#define E1000_SERDES_LB_ON 0x410
e1000_set_phy_loopback(adapter);
- E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_ON);
+ ew32(SCTL, E1000_SERDES_LB_ON);
msleep(10);
return 0;
break;
default:
- rctl = E1000_READ_REG(hw, RCTL);
+ rctl = er32(RCTL);
rctl |= E1000_RCTL_LBM_TCVR;
- E1000_WRITE_REG(hw, RCTL, rctl);
+ ew32(RCTL, rctl);
return 0;
}
} else if (hw->media_type == e1000_media_type_copper)
@@ -1447,16 +1429,15 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
return 7;
}
-static void
-e1000_loopback_cleanup(struct e1000_adapter *adapter)
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
u16 phy_reg;
- rctl = E1000_READ_REG(hw, RCTL);
+ rctl = er32(RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- E1000_WRITE_REG(hw, RCTL, rctl);
+ ew32(RCTL, rctl);
switch (hw->mac_type) {
case e1000_82571:
@@ -1464,7 +1445,7 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
- E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
+ ew32(SCTL, E1000_SERDES_LB_OFF);
msleep(10);
break;
}
@@ -1489,8 +1470,8 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
}
}
-static void
-e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
@@ -1499,8 +1480,8 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}
-static int
-e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
{
frame_size &= ~1;
if (*(skb->data + 3) == 0xFF) {
@@ -1512,16 +1493,16 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
return 13;
}
-static int
-e1000_run_loopback_test(struct e1000_adapter *adapter)
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
int i, j, k, l, lc, good_cnt, ret_val=0;
unsigned long time;
- E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
+ ew32(RDT, rxdr->count - 1);
/* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
@@ -1544,7 +1525,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
PCI_DMA_TODEVICE);
if (unlikely(++k == txdr->count)) k = 0;
}
- E1000_WRITE_REG(&adapter->hw, TDT, k);
+ ew32(TDT, k);
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
@@ -1577,21 +1558,24 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
return ret_val;
}
-static int
-e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
+
/* PHY loopback cannot be performed if SoL/IDER
* sessions are active */
- if (e1000_check_phy_reset_block(&adapter->hw)) {
+ if (e1000_check_phy_reset_block(hw)) {
DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
"when SoL/IDER is active.\n");
*data = 0;
goto out;
}
- if ((*data = e1000_setup_desc_rings(adapter)))
+ *data = e1000_setup_desc_rings(adapter);
+ if (*data)
goto out;
- if ((*data = e1000_setup_loopback_test(adapter)))
+ *data = e1000_setup_loopback_test(adapter);
+ if (*data)
goto err_loopback;
*data = e1000_run_loopback_test(adapter);
e1000_loopback_cleanup(adapter);
@@ -1602,38 +1586,37 @@ out:
return *data;
}
-static int
-e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
*data = 0;
- if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
+ if (hw->media_type == e1000_media_type_internal_serdes) {
int i = 0;
- adapter->hw.serdes_link_down = true;
+ hw->serdes_link_down = true;
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes */
do {
- e1000_check_for_link(&adapter->hw);
- if (!adapter->hw.serdes_link_down)
+ e1000_check_for_link(hw);
+ if (!hw->serdes_link_down)
return *data;
msleep(20);
} while (i++ < 3750);
*data = 1;
} else {
- e1000_check_for_link(&adapter->hw);
- if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
+ e1000_check_for_link(hw);
+ if (hw->autoneg) /* if auto_neg is set wait for it */
msleep(4000);
- if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
*data = 1;
}
}
return *data;
}
-static int
-e1000_get_sset_count(struct net_device *netdev, int sset)
+static int e1000_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -1645,11 +1628,11 @@ e1000_get_sset_count(struct net_device *netdev, int sset)
}
}
-static void
-e1000_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
+static void e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
bool if_running = netif_running(netdev);
set_bit(__E1000_TESTING, &adapter->flags);
@@ -1657,9 +1640,9 @@ e1000_diag_test(struct net_device *netdev,
/* Offline tests */
/* save speed, duplex, autoneg settings */
- u16 autoneg_advertised = adapter->hw.autoneg_advertised;
- u8 forced_speed_duplex = adapter->hw.forced_speed_duplex;
- u8 autoneg = adapter->hw.autoneg;
+ u16 autoneg_advertised = hw->autoneg_advertised;
+ u8 forced_speed_duplex = hw->forced_speed_duplex;
+ u8 autoneg = hw->autoneg;
DPRINTK(HW, INFO, "offline testing starting\n");
@@ -1692,9 +1675,9 @@ e1000_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
/* restore speed, duplex, autoneg settings */
- adapter->hw.autoneg_advertised = autoneg_advertised;
- adapter->hw.forced_speed_duplex = forced_speed_duplex;
- adapter->hw.autoneg = autoneg;
+ hw->autoneg_advertised = autoneg_advertised;
+ hw->forced_speed_duplex = forced_speed_duplex;
+ hw->autoneg = autoneg;
e1000_reset(adapter);
clear_bit(__E1000_TESTING, &adapter->flags);
@@ -1717,7 +1700,8 @@ e1000_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000);
}
-static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+ struct ethtool_wolinfo *wol)
{
struct e1000_hw *hw = &adapter->hw;
int retval = 1; /* fail by default */
@@ -1742,7 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82571EB_COPPER:
/* Wake events not supported on port B */
- if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
+ if (er32(STATUS) & E1000_STATUS_FUNC_1) {
wol->supported = 0;
break;
}
@@ -1766,7 +1750,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */
- if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 &&
+ if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;
@@ -1778,10 +1762,11 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
return retval;
}
-static void
-e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void e1000_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC;
@@ -1793,7 +1778,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return;
/* apply any specific unsupported masks here */
- switch (adapter->hw.device_id) {
+ switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* KSP3 does not suppport UCAST wake-ups */
wol->supported &= ~WAKE_UCAST;
@@ -1818,8 +1803,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return;
}
-static int
-e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1863,61 +1847,60 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* bit defines for adapter->led_status */
#define E1000_LED_ON 0
-static void
-e1000_led_blink_callback(unsigned long data)
+static void e1000_led_blink_callback(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_hw *hw = &adapter->hw;
if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
- e1000_led_off(&adapter->hw);
+ e1000_led_off(hw);
else
- e1000_led_on(&adapter->hw);
+ e1000_led_on(hw);
mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
}
-static int
-e1000_phys_id(struct net_device *netdev, u32 data)
+static int e1000_phys_id(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
if (!data)
data = INT_MAX;
- if (adapter->hw.mac_type < e1000_82571) {
+ if (hw->mac_type < e1000_82571) {
if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function = e1000_led_blink_callback;
- adapter->blink_timer.data = (unsigned long) adapter;
+ adapter->blink_timer.data = (unsigned long)adapter;
}
- e1000_setup_led(&adapter->hw);
+ e1000_setup_led(hw);
mod_timer(&adapter->blink_timer, jiffies);
msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
- } else if (adapter->hw.phy_type == e1000_phy_ife) {
+ } else if (hw->phy_type == e1000_phy_ife) {
if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function = e1000_led_blink_callback;
- adapter->blink_timer.data = (unsigned long) adapter;
+ adapter->blink_timer.data = (unsigned long)adapter;
}
mod_timer(&adapter->blink_timer, jiffies);
msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
} else {
- e1000_blink_led_start(&adapter->hw);
+ e1000_blink_led_start(hw);
msleep_interruptible(data * 1000);
}
- e1000_led_off(&adapter->hw);
+ e1000_led_off(hw);
clear_bit(E1000_LED_ON, &adapter->led_status);
- e1000_cleanup_led(&adapter->hw);
+ e1000_cleanup_led(hw);
return 0;
}
-static int
-e1000_nway_reset(struct net_device *netdev)
+static int e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
@@ -1925,9 +1908,8 @@ e1000_nway_reset(struct net_device *netdev)
return 0;
}
-static void
-e1000_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
@@ -1941,8 +1923,8 @@ e1000_get_ethtool_stats(struct net_device *netdev,
/* BUG_ON(i != E1000_STATS_LEN); */
}
-static void
-e1000_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
{
u8 *p = data;
int i;
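
The e1000_hw.c hunks that follow replace E1000_READ_REG(hw, REG) / E1000_WRITE_REG(hw, REG, val) with er32() / ew32(), and drop the argument from E1000_WRITE_FLUSH(). A rough sketch of what such helpers could look like is shown below; this is an assumption for illustration only — the real definitions live in the driver's osdep header, which is not part of this excerpt, and they additionally remap register offsets for pre-82543 parts:

	/* sketch: assumes hw->hw_addr is the ioremap()ed register BAR and that
	 * a local "struct e1000_hw *hw" is in scope at every call site */
	#define er32(reg)	readl(hw->hw_addr + E1000_##reg)
	#define ew32(reg, val)	writel((val), hw->hw_addr + E1000_##reg)

	/* MMIO writes are posted; reading STATUS back pushes them to the device */
	#define E1000_WRITE_FLUSH()	er32(STATUS)
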
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 9a4b6cbddf2c..9d6edf3e73f9 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -42,48 +42,65 @@ static void e1000_release_software_semaphore(struct e1000_hw *hw);
static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
static s32 e1000_check_downshift(struct e1000_hw *hw);
-static s32 e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity);
static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
static void e1000_clear_vfta(struct e1000_hw *hw);
static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
- bool link_up);
+ bool link_up);
static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
-static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, u16 *max_length);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length);
static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
static s32 e1000_get_software_flag(struct e1000_hw *hw);
static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
static s32 e1000_id_led_init(struct e1000_hw *hw);
-static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, u32 cnf_base_addr, u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ u32 cnf_base_addr,
+ u32 cnf_size);
static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
static void e1000_init_rx_addrs(struct e1000_hw *hw);
static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
-static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum);
-static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
+ struct e1000_host_mng_command_header
+ *hdr);
static s32 e1000_mng_write_commit(struct e1000_hw *hw);
-static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
-static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
-static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
+ u8 byte);
static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
-static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 *data);
-static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 data);
-static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
static void e1000_release_software_flag(struct e1000_hw *hw);
static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
@@ -101,23 +118,21 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
- u16 count);
+ u16 count);
static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
-static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw,
- u16 offset, u16 words,
- u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
-static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data,
- u16 count);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
- u16 phy_data);
+ u16 phy_data);
static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
- u16 *phy_data);
+ u16 *phy_data);
static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
static void e1000_release_eeprom(struct e1000_hw *hw);
@@ -127,8 +142,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
static s32 e1000_set_phy_mode(struct e1000_hw *hw);
static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
-static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
- u16 duplex);
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
/* IGP cable length table */
@@ -159,8 +173,7 @@ u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static s32
-e1000_set_phy_type(struct e1000_hw *hw)
+static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_set_phy_type");
@@ -210,8 +223,7 @@ e1000_set_phy_type(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
-e1000_phy_init_script(struct e1000_hw *hw)
+static void e1000_phy_init_script(struct e1000_hw *hw)
{
u32 ret_val;
u16 phy_saved_data;
@@ -306,8 +318,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_set_mac_type(struct e1000_hw *hw)
+s32 e1000_set_mac_type(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_set_mac_type");
@@ -474,8 +485,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
* **************************************************************************/
-void
-e1000_set_media_type(struct e1000_hw *hw)
+void e1000_set_media_type(struct e1000_hw *hw)
{
u32 status;
@@ -510,7 +520,7 @@ e1000_set_media_type(struct e1000_hw *hw)
hw->media_type = e1000_media_type_copper;
break;
default:
- status = E1000_READ_REG(hw, STATUS);
+ status = er32(STATUS);
if (status & E1000_STATUS_TBIMODE) {
hw->media_type = e1000_media_type_fiber;
/* tbi_compatibility not valid on fiber */
@@ -528,8 +538,7 @@ e1000_set_media_type(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_reset_hw(struct e1000_hw *hw)
+s32 e1000_reset_hw(struct e1000_hw *hw)
{
u32 ctrl;
u32 ctrl_ext;
@@ -559,15 +568,15 @@ e1000_reset_hw(struct e1000_hw *hw)
/* Clear interrupt mask to stop board from generating interrupts */
DEBUGOUT("Masking off all interrupts\n");
- E1000_WRITE_REG(hw, IMC, 0xffffffff);
+ ew32(IMC, 0xffffffff);
/* Disable the Transmit and Receive units. Then delay to allow
* any pending transactions to complete before we hit the MAC with
* the global reset.
*/
- E1000_WRITE_REG(hw, RCTL, 0);
- E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP);
- E1000_WRITE_FLUSH(hw);
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH();
/* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
hw->tbi_compatibility_on = false;
@@ -577,11 +586,11 @@ e1000_reset_hw(struct e1000_hw *hw)
*/
msleep(10);
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
/* Must reset the PHY before resetting the MAC */
if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
- E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
msleep(5);
}
@@ -590,12 +599,12 @@ e1000_reset_hw(struct e1000_hw *hw)
if (hw->mac_type == e1000_82573) {
timeout = 10;
- extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
do {
- E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
break;
@@ -610,9 +619,9 @@ e1000_reset_hw(struct e1000_hw *hw)
/* Workaround for ICH8 bit corruption issue in FIFO memory */
if (hw->mac_type == e1000_ich8lan) {
/* Set Tx and Rx buffer allocation to 8k apiece. */
- E1000_WRITE_REG(hw, PBA, E1000_PBA_8K);
+ ew32(PBA, E1000_PBA_8K);
/* Set Packet Buffer Size to 16k. */
- E1000_WRITE_REG(hw, PBS, E1000_PBS_16K);
+ ew32(PBS, E1000_PBS_16K);
}
/* Issue a global reset to the MAC. This will reset the chip's
@@ -636,7 +645,7 @@ e1000_reset_hw(struct e1000_hw *hw)
case e1000_82545_rev_3:
case e1000_82546_rev_3:
/* Reset is performed on a shadow of the control register */
- E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
+ ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
break;
case e1000_ich8lan:
if (!hw->phy_reset_disable &&
@@ -649,11 +658,11 @@ e1000_reset_hw(struct e1000_hw *hw)
}
e1000_get_software_flag(hw);
- E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
msleep(5);
break;
default:
- E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
break;
}
@@ -668,10 +677,10 @@ e1000_reset_hw(struct e1000_hw *hw)
case e1000_82544:
/* Wait for reset to complete */
udelay(10);
- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
/* Wait for EEPROM reload */
msleep(2);
break;
@@ -685,10 +694,10 @@ e1000_reset_hw(struct e1000_hw *hw)
case e1000_82573:
if (!e1000_is_onboard_nvm_eeprom(hw)) {
udelay(10);
- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
}
/* fall through */
default:
@@ -701,27 +710,27 @@ e1000_reset_hw(struct e1000_hw *hw)
/* Disable HW ARPs on ASF enabled adapters */
if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
- manc = E1000_READ_REG(hw, MANC);
+ manc = er32(MANC);
manc &= ~(E1000_MANC_ARP_EN);
- E1000_WRITE_REG(hw, MANC, manc);
+ ew32(MANC, manc);
}
if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
e1000_phy_init_script(hw);
/* Configure activity LED after PHY reset */
- led_ctrl = E1000_READ_REG(hw, LEDCTL);
+ led_ctrl = er32(LEDCTL);
led_ctrl &= IGP_ACTIVITY_LED_MASK;
led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+ ew32(LEDCTL, led_ctrl);
}
/* Clear interrupt mask to stop board from generating interrupts */
DEBUGOUT("Masking off all interrupts\n");
- E1000_WRITE_REG(hw, IMC, 0xffffffff);
+ ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
- icr = E1000_READ_REG(hw, ICR);
+ icr = er32(ICR);
/* If MWI was previously enabled, reenable it. */
if (hw->mac_type == e1000_82542_rev2_0) {
@@ -730,9 +739,9 @@ e1000_reset_hw(struct e1000_hw *hw)
}
if (hw->mac_type == e1000_ich8lan) {
- u32 kab = E1000_READ_REG(hw, KABGTXD);
+ u32 kab = er32(KABGTXD);
kab |= E1000_KABGTXD_BGSQLBIAS;
- E1000_WRITE_REG(hw, KABGTXD, kab);
+ ew32(KABGTXD, kab);
}
return E1000_SUCCESS;
@@ -747,8 +756,7 @@ e1000_reset_hw(struct e1000_hw *hw)
* This function contains hardware limitation workarounds for PCI-E adapters
*
*****************************************************************************/
-static void
-e1000_initialize_hardware_bits(struct e1000_hw *hw)
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw)
{
if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
/* Settings common to all PCI-express silicon */
@@ -758,22 +766,22 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
u32 reg_txdctl, reg_txdctl1;
/* link autonegotiation/sync workarounds */
- reg_tarc0 = E1000_READ_REG(hw, TARC0);
+ reg_tarc0 = er32(TARC0);
reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
/* Enable not-done TX descriptor counting */
- reg_txdctl = E1000_READ_REG(hw, TXDCTL);
+ reg_txdctl = er32(TXDCTL);
reg_txdctl |= E1000_TXDCTL_COUNT_DESC;
- E1000_WRITE_REG(hw, TXDCTL, reg_txdctl);
- reg_txdctl1 = E1000_READ_REG(hw, TXDCTL1);
+ ew32(TXDCTL, reg_txdctl);
+ reg_txdctl1 = er32(TXDCTL1);
reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC;
- E1000_WRITE_REG(hw, TXDCTL1, reg_txdctl1);
+ ew32(TXDCTL1, reg_txdctl1);
switch (hw->mac_type) {
case e1000_82571:
case e1000_82572:
/* Clear PHY TX compatible mode bits */
- reg_tarc1 = E1000_READ_REG(hw, TARC1);
+ reg_tarc1 = er32(TARC1);
reg_tarc1 &= ~((1 << 30)|(1 << 29));
/* link autonegotiation/sync workarounds */
@@ -783,25 +791,25 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24));
/* Multiple read bit is reversed polarity */
- reg_tctl = E1000_READ_REG(hw, TCTL);
+ reg_tctl = er32(TCTL);
if (reg_tctl & E1000_TCTL_MULR)
reg_tarc1 &= ~(1 << 28);
else
reg_tarc1 |= (1 << 28);
- E1000_WRITE_REG(hw, TARC1, reg_tarc1);
+ ew32(TARC1, reg_tarc1);
break;
case e1000_82573:
- reg_ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ reg_ctrl_ext = er32(CTRL_EXT);
reg_ctrl_ext &= ~(1 << 23);
reg_ctrl_ext |= (1 << 22);
/* TX byte count fix */
- reg_ctrl = E1000_READ_REG(hw, CTRL);
+ reg_ctrl = er32(CTRL);
reg_ctrl &= ~(1 << 29);
- E1000_WRITE_REG(hw, CTRL_EXT, reg_ctrl_ext);
- E1000_WRITE_REG(hw, CTRL, reg_ctrl);
+ ew32(CTRL_EXT, reg_ctrl_ext);
+ ew32(CTRL, reg_ctrl);
break;
case e1000_80003es2lan:
 /* improve small packet performance for fiber/serdes */
@@ -811,14 +819,14 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
}
/* Multiple read bit is reversed polarity */
- reg_tctl = E1000_READ_REG(hw, TCTL);
- reg_tarc1 = E1000_READ_REG(hw, TARC1);
+ reg_tctl = er32(TCTL);
+ reg_tarc1 = er32(TARC1);
if (reg_tctl & E1000_TCTL_MULR)
reg_tarc1 &= ~(1 << 28);
else
reg_tarc1 |= (1 << 28);
- E1000_WRITE_REG(hw, TARC1, reg_tarc1);
+ ew32(TARC1, reg_tarc1);
break;
case e1000_ich8lan:
/* Reduce concurrent DMA requests to 3 from 4 */
@@ -827,16 +835,16 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
(hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))
reg_tarc0 |= ((1 << 29)|(1 << 28));
- reg_ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ reg_ctrl_ext = er32(CTRL_EXT);
reg_ctrl_ext |= (1 << 22);
- E1000_WRITE_REG(hw, CTRL_EXT, reg_ctrl_ext);
+ ew32(CTRL_EXT, reg_ctrl_ext);
/* workaround TX hang with TSO=on */
reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23));
/* Multiple read bit is reversed polarity */
- reg_tctl = E1000_READ_REG(hw, TCTL);
- reg_tarc1 = E1000_READ_REG(hw, TARC1);
+ reg_tctl = er32(TCTL);
+ reg_tarc1 = er32(TARC1);
if (reg_tctl & E1000_TCTL_MULR)
reg_tarc1 &= ~(1 << 28);
else
@@ -845,13 +853,13 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
/* workaround TX hang with TSO=on */
reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24));
- E1000_WRITE_REG(hw, TARC1, reg_tarc1);
+ ew32(TARC1, reg_tarc1);
break;
default:
break;
}
- E1000_WRITE_REG(hw, TARC0, reg_tarc0);
+ ew32(TARC0, reg_tarc0);
}
}
@@ -866,8 +874,7 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
* configuration and flow control settings. Clears all on-chip counters. Leaves
* the transmit and receive units disabled and uninitialized.
*****************************************************************************/
-s32
-e1000_init_hw(struct e1000_hw *hw)
+s32 e1000_init_hw(struct e1000_hw *hw)
{
u32 ctrl;
u32 i;
@@ -883,9 +890,9 @@ e1000_init_hw(struct e1000_hw *hw)
((hw->revision_id < 3) ||
((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
(hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) {
- reg_data = E1000_READ_REG(hw, STATUS);
+ reg_data = er32(STATUS);
reg_data &= ~0x80000000;
- E1000_WRITE_REG(hw, STATUS, reg_data);
+ ew32(STATUS, reg_data);
}
/* Initialize Identification LED */
@@ -906,7 +913,7 @@ e1000_init_hw(struct e1000_hw *hw)
/* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
if (hw->mac_type != e1000_ich8lan) {
if (hw->mac_type < e1000_82545_rev_3)
- E1000_WRITE_REG(hw, VET, 0);
+ ew32(VET, 0);
e1000_clear_vfta(hw);
}
@@ -914,8 +921,8 @@ e1000_init_hw(struct e1000_hw *hw)
if (hw->mac_type == e1000_82542_rev2_0) {
DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
- E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST);
- E1000_WRITE_FLUSH(hw);
+ ew32(RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH();
msleep(5);
}
@@ -926,8 +933,8 @@ e1000_init_hw(struct e1000_hw *hw)
/* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
if (hw->mac_type == e1000_82542_rev2_0) {
- E1000_WRITE_REG(hw, RCTL, 0);
- E1000_WRITE_FLUSH(hw);
+ ew32(RCTL, 0);
+ E1000_WRITE_FLUSH();
msleep(1);
if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
e1000_pci_set_mwi(hw);
@@ -942,7 +949,7 @@ e1000_init_hw(struct e1000_hw *hw)
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from
 * occurring when accessing our register space */
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
/* Set the PCI priority bit correctly in the CTRL register. This
@@ -951,8 +958,8 @@ e1000_init_hw(struct e1000_hw *hw)
* 82542 and 82543 silicon.
*/
if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
- ctrl = E1000_READ_REG(hw, CTRL);
- E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
}
switch (hw->mac_type) {
@@ -975,9 +982,9 @@ e1000_init_hw(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
if (hw->mac_type > e1000_82544) {
- ctrl = E1000_READ_REG(hw, TXDCTL);
+ ctrl = er32(TXDCTL);
ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
- E1000_WRITE_REG(hw, TXDCTL, ctrl);
+ ew32(TXDCTL, ctrl);
}
if (hw->mac_type == e1000_82573) {
@@ -989,21 +996,21 @@ e1000_init_hw(struct e1000_hw *hw)
break;
case e1000_80003es2lan:
/* Enable retransmit on late collisions */
- reg_data = E1000_READ_REG(hw, TCTL);
+ reg_data = er32(TCTL);
reg_data |= E1000_TCTL_RTLC;
- E1000_WRITE_REG(hw, TCTL, reg_data);
+ ew32(TCTL, reg_data);
/* Configure Gigabit Carry Extend Padding */
- reg_data = E1000_READ_REG(hw, TCTL_EXT);
+ reg_data = er32(TCTL_EXT);
reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
- E1000_WRITE_REG(hw, TCTL_EXT, reg_data);
+ ew32(TCTL_EXT, reg_data);
/* Configure Transmit Inter-Packet Gap */
- reg_data = E1000_READ_REG(hw, TIPG);
+ reg_data = er32(TIPG);
reg_data &= ~E1000_TIPG_IPGT_MASK;
reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
- E1000_WRITE_REG(hw, TIPG, reg_data);
+ ew32(TIPG, reg_data);
reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
reg_data &= ~0x00100000;
@@ -1012,17 +1019,17 @@ e1000_init_hw(struct e1000_hw *hw)
case e1000_82571:
case e1000_82572:
case e1000_ich8lan:
- ctrl = E1000_READ_REG(hw, TXDCTL1);
+ ctrl = er32(TXDCTL1);
ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
- E1000_WRITE_REG(hw, TXDCTL1, ctrl);
+ ew32(TXDCTL1, ctrl);
break;
}
if (hw->mac_type == e1000_82573) {
- u32 gcr = E1000_READ_REG(hw, GCR);
+ u32 gcr = er32(GCR);
gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
- E1000_WRITE_REG(hw, GCR, gcr);
+ ew32(GCR, gcr);
}
/* Clear all of the statistics registers (clear on read). It is
@@ -1039,11 +1046,11 @@ e1000_init_hw(struct e1000_hw *hw)
if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
/* Relaxed ordering must be disabled to avoid a parity
* error crash in a PCI slot. */
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ ew32(CTRL_EXT, ctrl_ext);
}
return ret_val;
@@ -1054,8 +1061,7 @@ e1000_init_hw(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code.
*****************************************************************************/
-static s32
-e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
{
u16 eeprom_data;
s32 ret_val;
@@ -1100,8 +1106,7 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
* established. Assumes the hardware has previously been reset and the
* transmitter and receiver are not enabled.
*****************************************************************************/
-s32
-e1000_setup_link(struct e1000_hw *hw)
+s32 e1000_setup_link(struct e1000_hw *hw)
{
u32 ctrl_ext;
s32 ret_val;
@@ -1176,7 +1181,7 @@ e1000_setup_link(struct e1000_hw *hw)
}
ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
SWDPIO__EXT_SHIFT);
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ ew32(CTRL_EXT, ctrl_ext);
}
/* Call the necessary subroutine to configure the link. */
@@ -1193,12 +1198,12 @@ e1000_setup_link(struct e1000_hw *hw)
/* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
if (hw->mac_type != e1000_ich8lan) {
- E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
- E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
- E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
}
- E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
+ ew32(FCTTV, hw->fc_pause_time);
/* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
@@ -1207,18 +1212,18 @@ e1000_setup_link(struct e1000_hw *hw)
* registers will be set to 0.
*/
if (!(hw->fc & E1000_FC_TX_PAUSE)) {
- E1000_WRITE_REG(hw, FCRTL, 0);
- E1000_WRITE_REG(hw, FCRTH, 0);
+ ew32(FCRTL, 0);
+ ew32(FCRTH, 0);
} else {
/* We need to set up the Receive Threshold high and low water marks
* as well as (optionally) enabling the transmission of XON frames.
*/
if (hw->fc_send_xon) {
- E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
- E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
} else {
- E1000_WRITE_REG(hw, FCRTL, hw->fc_low_water);
- E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
}
}
return ret_val;
@@ -1233,8 +1238,7 @@ e1000_setup_link(struct e1000_hw *hw)
* link. Assumes the hardware has been previously reset and the transmitter
* and receiver are not enabled.
*****************************************************************************/
-static s32
-e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
{
u32 ctrl;
u32 status;
@@ -1251,7 +1255,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* loopback mode is disabled during initialization.
*/
if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
- E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+ ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK);
/* On adapters with a MAC newer than 82544, SWDP 1 will be
* set when the optics detect a signal. On older adapters, it will be
@@ -1259,7 +1263,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* If we're on serdes media, adjust the output amplitude to value
* set in the EEPROM.
*/
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
if (hw->media_type == e1000_media_type_fiber)
signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
@@ -1330,9 +1334,9 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
DEBUGOUT("Auto-negotiation enabled\n");
- E1000_WRITE_REG(hw, TXCW, txcw);
- E1000_WRITE_REG(hw, CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
+ ew32(TXCW, txcw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
hw->txcw = txcw;
msleep(1);
@@ -1344,11 +1348,11 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* For internal serdes, we just assume a signal is present, then poll.
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
- (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+ (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
DEBUGOUT("Looking for Link\n");
for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
msleep(10);
- status = E1000_READ_REG(hw, STATUS);
+ status = er32(STATUS);
if (status & E1000_STATUS_LU) break;
}
if (i == (LINK_UP_TIMEOUT / 10)) {
@@ -1380,8 +1384,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_copper_link_preconfig(struct e1000_hw *hw)
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
@@ -1389,7 +1392,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
DEBUGFUNC("e1000_copper_link_preconfig");
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to what
* the PHY speed and duplex configuration is. In addition, we need to
* perform a hardware reset on the PHY to take it out of reset.
@@ -1397,10 +1400,10 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
} else {
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
ret_val = e1000_phy_hw_reset(hw);
if (ret_val)
return ret_val;
@@ -1440,8 +1443,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*********************************************************************/
-static s32
-e1000_copper_link_igp_setup(struct e1000_hw *hw)
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
{
u32 led_ctrl;
s32 ret_val;
@@ -1462,10 +1464,10 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
msleep(15);
if (hw->mac_type != e1000_ich8lan) {
/* Configure activity LED after PHY reset */
- led_ctrl = E1000_READ_REG(hw, LEDCTL);
+ led_ctrl = er32(LEDCTL);
led_ctrl &= IGP_ACTIVITY_LED_MASK;
led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+ ew32(LEDCTL, led_ctrl);
}
/* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
@@ -1587,8 +1589,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*********************************************************************/
-static s32
-e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
{
s32 ret_val;
u16 phy_data;
@@ -1679,9 +1680,9 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- reg_data = E1000_READ_REG(hw, CTRL_EXT);
+ reg_data = er32(CTRL_EXT);
reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
- E1000_WRITE_REG(hw, CTRL_EXT, reg_data);
+ ew32(CTRL_EXT, reg_data);
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
&phy_data);
@@ -1735,8 +1736,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*********************************************************************/
-static s32
-e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
{
s32 ret_val;
u16 phy_data;
@@ -1839,8 +1839,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*********************************************************************/
-static s32
-e1000_copper_link_autoneg(struct e1000_hw *hw)
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
{
s32 ret_val;
u16 phy_data;
@@ -1910,8 +1909,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_copper_link_postconfig(struct e1000_hw *hw)
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
s32 ret_val;
DEBUGFUNC("e1000_copper_link_postconfig");
@@ -1948,8 +1946,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_setup_copper_link(struct e1000_hw *hw)
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
{
s32 ret_val;
u16 i;
@@ -2062,8 +2059,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
{
s32 ret_val = E1000_SUCCESS;
u32 tipg;
@@ -2078,10 +2074,10 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
return ret_val;
/* Configure Transmit Inter-Packet Gap */
- tipg = E1000_READ_REG(hw, TIPG);
+ tipg = er32(TIPG);
tipg &= ~E1000_TIPG_IPGT_MASK;
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
- E1000_WRITE_REG(hw, TIPG, tipg);
+ ew32(TIPG, tipg);
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
@@ -2098,8 +2094,7 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
return ret_val;
}
-static s32
-e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 reg_data;
@@ -2114,10 +2109,10 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
return ret_val;
/* Configure Transmit Inter-Packet Gap */
- tipg = E1000_READ_REG(hw, TIPG);
+ tipg = er32(TIPG);
tipg &= ~E1000_TIPG_IPGT_MASK;
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
- E1000_WRITE_REG(hw, TIPG, tipg);
+ ew32(TIPG, tipg);
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
@@ -2135,8 +2130,7 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-s32
-e1000_phy_setup_autoneg(struct e1000_hw *hw)
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
{
s32 ret_val;
u16 mii_autoneg_adv_reg;
@@ -2284,8 +2278,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
@@ -2302,7 +2295,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
DEBUGOUT1("hw->fc = %d\n", hw->fc);
/* Read the Device Control Register. */
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
/* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
@@ -2357,7 +2350,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
e1000_config_collision_dist(hw);
/* Write the configured values back to the Device Control Reg. */
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
if ((hw->phy_type == e1000_phy_m88) ||
(hw->phy_type == e1000_phy_gg82563)) {
@@ -2535,8 +2528,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
* Link should have been established previously. Reads the speed and duplex
* information from the Device Status register.
******************************************************************************/
-void
-e1000_config_collision_dist(struct e1000_hw *hw)
+void e1000_config_collision_dist(struct e1000_hw *hw)
{
u32 tctl, coll_dist;
@@ -2547,13 +2539,13 @@ e1000_config_collision_dist(struct e1000_hw *hw)
else
coll_dist = E1000_COLLISION_DISTANCE;
- tctl = E1000_READ_REG(hw, TCTL);
+ tctl = er32(TCTL);
tctl &= ~E1000_TCTL_COLD;
tctl |= coll_dist << E1000_COLD_SHIFT;
- E1000_WRITE_REG(hw, TCTL, tctl);
- E1000_WRITE_FLUSH(hw);
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
}
/******************************************************************************
@@ -2565,8 +2557,7 @@ e1000_config_collision_dist(struct e1000_hw *hw)
* The contents of the PHY register containing the needed information need to
* be passed in.
******************************************************************************/
-static s32
-e1000_config_mac_to_phy(struct e1000_hw *hw)
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
@@ -2582,7 +2573,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
/* Read the Device Control Register and set the bits to Force Speed
* and Duplex.
*/
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
@@ -2609,7 +2600,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
ctrl |= E1000_CTRL_SPD_100;
/* Write the configured values back to the Device Control Reg. */
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
return E1000_SUCCESS;
}
@@ -2624,15 +2615,14 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
* by the PHY rather than the MAC. Software must also configure these
* bits when link is forced on a fiber connection.
*****************************************************************************/
-s32
-e1000_force_mac_fc(struct e1000_hw *hw)
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
u32 ctrl;
DEBUGFUNC("e1000_force_mac_fc");
/* Get the current configuration of the Device Control Register */
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
/* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
@@ -2676,7 +2666,7 @@ e1000_force_mac_fc(struct e1000_hw *hw)
if (hw->mac_type == e1000_82542_rev2_0)
ctrl &= (~E1000_CTRL_TFCE);
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
return E1000_SUCCESS;
}
@@ -2691,8 +2681,7 @@ e1000_force_mac_fc(struct e1000_hw *hw)
* based on the flow control negotiated by the PHY. In TBI mode, the TFCE
 * and RFCE bits will be automatically set to the negotiated flow control mode.
*****************************************************************************/
-static s32
-e1000_config_fc_after_link_up(struct e1000_hw *hw)
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
{
s32 ret_val;
u16 mii_status_reg;
@@ -2896,8 +2885,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* Called by any function that needs to check the link status of the adapter.
*****************************************************************************/
-s32
-e1000_check_for_link(struct e1000_hw *hw)
+s32 e1000_check_for_link(struct e1000_hw *hw)
{
u32 rxcw = 0;
u32 ctrl;
@@ -2910,8 +2898,8 @@ e1000_check_for_link(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_for_link");
- ctrl = E1000_READ_REG(hw, CTRL);
- status = E1000_READ_REG(hw, STATUS);
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
 /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
* set when the optics detect a signal. On older adapters, it will be
@@ -2919,7 +2907,7 @@ e1000_check_for_link(struct e1000_hw *hw)
*/
if ((hw->media_type == e1000_media_type_fiber) ||
(hw->media_type == e1000_media_type_internal_serdes)) {
- rxcw = E1000_READ_REG(hw, RXCW);
+ rxcw = er32(RXCW);
if (hw->media_type == e1000_media_type_fiber) {
signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
@@ -2965,11 +2953,11 @@ e1000_check_for_link(struct e1000_hw *hw)
(!hw->autoneg) &&
(hw->forced_speed_duplex == e1000_10_full ||
hw->forced_speed_duplex == e1000_10_half)) {
- E1000_WRITE_REG(hw, IMC, 0xffffffff);
+ ew32(IMC, 0xffffffff);
ret_val = e1000_polarity_reversal_workaround(hw);
- icr = E1000_READ_REG(hw, ICR);
- E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC));
- E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK);
+ icr = er32(ICR);
+ ew32(ICS, (icr & ~E1000_ICS_LSC));
+ ew32(IMS, IMS_ENABLE_MASK);
}
} else {
@@ -3034,9 +3022,9 @@ e1000_check_for_link(struct e1000_hw *hw)
*/
if (hw->tbi_compatibility_on) {
/* If we previously were in the mode, turn it off. */
- rctl = E1000_READ_REG(hw, RCTL);
+ rctl = er32(RCTL);
rctl &= ~E1000_RCTL_SBP;
- E1000_WRITE_REG(hw, RCTL, rctl);
+ ew32(RCTL, rctl);
hw->tbi_compatibility_on = false;
}
} else {
@@ -3047,9 +3035,9 @@ e1000_check_for_link(struct e1000_hw *hw)
*/
if (!hw->tbi_compatibility_on) {
hw->tbi_compatibility_on = true;
- rctl = E1000_READ_REG(hw, RCTL);
+ rctl = er32(RCTL);
rctl |= E1000_RCTL_SBP;
- E1000_WRITE_REG(hw, RCTL, rctl);
+ ew32(RCTL, rctl);
}
}
}
@@ -3073,12 +3061,12 @@ e1000_check_for_link(struct e1000_hw *hw)
DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
- E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+ ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
/* Force link-up and also force full-duplex. */
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
/* Configure Flow Control after forcing link up. */
ret_val = e1000_config_fc_after_link_up(hw);
@@ -3096,8 +3084,8 @@ e1000_check_for_link(struct e1000_hw *hw)
(hw->media_type == e1000_media_type_internal_serdes)) &&
(ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
- E1000_WRITE_REG(hw, TXCW, hw->txcw);
- E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
+ ew32(TXCW, hw->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
hw->serdes_link_down = false;
}
@@ -3105,10 +3093,10 @@ e1000_check_for_link(struct e1000_hw *hw)
* based on MAC synchronization for internal serdes media type.
*/
else if ((hw->media_type == e1000_media_type_internal_serdes) &&
- !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+ !(E1000_TXCW_ANE & er32(TXCW))) {
/* SYNCH bit and IV bit are sticky. */
udelay(10);
- if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
+ if (E1000_RXCW_SYNCH & er32(RXCW)) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_link_down = false;
DEBUGOUT("SERDES: Link is up.\n");
@@ -3119,8 +3107,8 @@ e1000_check_for_link(struct e1000_hw *hw)
}
}
if ((hw->media_type == e1000_media_type_internal_serdes) &&
- (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
- hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS));
+ (E1000_TXCW_ANE & er32(TXCW))) {
+ hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
}
return E1000_SUCCESS;
}
@@ -3132,10 +3120,7 @@ e1000_check_for_link(struct e1000_hw *hw)
* speed - Speed of the connection
* duplex - Duplex setting of the connection
*****************************************************************************/
-s32
-e1000_get_speed_and_duplex(struct e1000_hw *hw,
- u16 *speed,
- u16 *duplex)
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
u32 status;
s32 ret_val;
@@ -3144,7 +3129,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
DEBUGFUNC("e1000_get_speed_and_duplex");
if (hw->mac_type >= e1000_82543) {
- status = E1000_READ_REG(hw, STATUS);
+ status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000) {
*speed = SPEED_1000;
DEBUGOUT("1000 Mbs, ");
@@ -3214,8 +3199,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_wait_autoneg(struct e1000_hw *hw)
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
s32 ret_val;
u16 i;
@@ -3249,15 +3233,13 @@ e1000_wait_autoneg(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
* ctrl - Device control register's current value
******************************************************************************/
-static void
-e1000_raise_mdi_clk(struct e1000_hw *hw,
- u32 *ctrl)
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
/* Raise the clock input to the Management Data Clock (by setting the MDC
* bit), and then delay 10 microseconds.
*/
- E1000_WRITE_REG(hw, CTRL, (*ctrl | E1000_CTRL_MDC));
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
udelay(10);
}
@@ -3267,15 +3249,13 @@ e1000_raise_mdi_clk(struct e1000_hw *hw,
* hw - Struct containing variables accessed by shared code
* ctrl - Device control register's current value
******************************************************************************/
-static void
-e1000_lower_mdi_clk(struct e1000_hw *hw,
- u32 *ctrl)
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
/* Lower the clock input to the Management Data Clock (by clearing the MDC
* bit), and then delay 10 microseconds.
*/
- E1000_WRITE_REG(hw, CTRL, (*ctrl & ~E1000_CTRL_MDC));
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
udelay(10);
}
@@ -3288,10 +3268,7 @@ e1000_lower_mdi_clk(struct e1000_hw *hw,
*
* Bits are shifted out in MSB to LSB order.
******************************************************************************/
-static void
-e1000_shift_out_mdi_bits(struct e1000_hw *hw,
- u32 data,
- u16 count)
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
{
u32 ctrl;
u32 mask;
@@ -3303,7 +3280,7 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
mask = 0x01;
mask <<= (count - 1);
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
/* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
@@ -3319,8 +3296,8 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
else
ctrl &= ~E1000_CTRL_MDIO;
- E1000_WRITE_REG(hw, CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
udelay(10);
@@ -3338,8 +3315,7 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
*
 * Bits are shifted in, MSB to LSB order.
******************************************************************************/
-static u16
-e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
{
u32 ctrl;
u16 data = 0;
@@ -3352,14 +3328,14 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
* by raising the input to the Management Data Clock (setting the MDC bit),
* and then reading the value of the MDIO bit.
*/
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
/* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
ctrl &= ~E1000_CTRL_MDIO_DIR;
ctrl &= ~E1000_CTRL_MDIO;
- E1000_WRITE_REG(hw, CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
/* Raise and Lower the clock before reading in the data. This accounts for
* the turnaround bits. The first clock occurred when we clocked out the
@@ -3371,7 +3347,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
for (data = 0, i = 0; i < 16; i++) {
data = data << 1;
e1000_raise_mdi_clk(hw, &ctrl);
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
/* Check to see if we shifted in a "1". */
if (ctrl & E1000_CTRL_MDIO)
data |= 1;
@@ -3384,8 +3360,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
return data;
}
-static s32
-e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync = 0;
u32 swmask = mask;
@@ -3404,7 +3379,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
if (e1000_get_hw_eeprom_semaphore(hw))
return -E1000_ERR_SWFW_SYNC;
- swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
+ swfw_sync = er32(SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask))) {
break;
}
@@ -3422,14 +3397,13 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
}
swfw_sync |= swmask;
- E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
+ ew32(SW_FW_SYNC, swfw_sync);
e1000_put_hw_eeprom_semaphore(hw);
return E1000_SUCCESS;
}
-static void
-e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@@ -3451,9 +3425,9 @@ e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS);
/* empty */
- swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
+ swfw_sync = er32(SW_FW_SYNC);
swfw_sync &= ~swmask;
- E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
+ ew32(SW_FW_SYNC, swfw_sync);
e1000_put_hw_eeprom_semaphore(hw);
}
@@ -3464,10 +3438,7 @@ e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
******************************************************************************/
-s32
-e1000_read_phy_reg(struct e1000_hw *hw,
- u32 reg_addr,
- u16 *phy_data)
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
u16 swfw;
@@ -3475,7 +3446,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
DEBUGFUNC("e1000_read_phy_reg");
if ((hw->mac_type == e1000_80003es2lan) &&
- (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
swfw = E1000_SWFW_PHY1_SM;
} else {
swfw = E1000_SWFW_PHY0_SM;
@@ -3523,9 +3494,8 @@ e1000_read_phy_reg(struct e1000_hw *hw,
return ret_val;
}
-static s32
-e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
- u16 *phy_data)
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 *phy_data)
{
u32 i;
u32 mdic = 0;
@@ -3547,12 +3517,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
(phy_addr << E1000_MDIC_PHY_SHIFT) |
(E1000_MDIC_OP_READ));
- E1000_WRITE_REG(hw, MDIC, mdic);
+ ew32(MDIC, mdic);
/* Poll the ready bit to see if the MDI read completed */
for (i = 0; i < 64; i++) {
udelay(50);
- mdic = E1000_READ_REG(hw, MDIC);
+ mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY) break;
}
if (!(mdic & E1000_MDIC_READY)) {
@@ -3563,7 +3533,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
DEBUGOUT("MDI Error\n");
return -E1000_ERR_PHY;
}
- *phy_data = (u16) mdic;
+ *phy_data = (u16)mdic;
} else {
/* We must first send a preamble through the MDIO pin to signal the
* beginning of an MII instruction. This is done by sending 32
@@ -3603,9 +3573,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* reg_addr - address of the PHY register to write
* data - data to write to the PHY
******************************************************************************/
-s32
-e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr,
- u16 phy_data)
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
u16 swfw;
@@ -3613,7 +3581,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr,
DEBUGFUNC("e1000_write_phy_reg");
if ((hw->mac_type == e1000_80003es2lan) &&
- (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
swfw = E1000_SWFW_PHY1_SM;
} else {
swfw = E1000_SWFW_PHY0_SM;
@@ -3661,9 +3629,8 @@ e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr,
return ret_val;
}
-static s32
-e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
- u16 phy_data)
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data)
{
u32 i;
u32 mdic = 0;
@@ -3681,17 +3648,17 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* for the PHY register in the MDI Control register. The MAC will take
* care of interfacing with the PHY to send the desired data.
*/
- mdic = (((u32) phy_data) |
+ mdic = (((u32)phy_data) |
(reg_addr << E1000_MDIC_REG_SHIFT) |
(phy_addr << E1000_MDIC_PHY_SHIFT) |
(E1000_MDIC_OP_WRITE));
- E1000_WRITE_REG(hw, MDIC, mdic);
+ ew32(MDIC, mdic);
 /* Poll the ready bit to see if the MDI write completed */
for (i = 0; i < 641; i++) {
udelay(5);
- mdic = E1000_READ_REG(hw, MDIC);
+ mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY) break;
}
if (!(mdic & E1000_MDIC_READY)) {
@@ -3715,7 +3682,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
(PHY_OP_WRITE << 12) | (PHY_SOF << 14));
mdic <<= 16;
- mdic |= (u32) phy_data;
+ mdic |= (u32)phy_data;
e1000_shift_out_mdi_bits(hw, mdic, 32);
}
@@ -3723,17 +3690,14 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
return E1000_SUCCESS;
}
-static s32
-e1000_read_kmrn_reg(struct e1000_hw *hw,
- u32 reg_addr,
- u16 *data)
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data)
{
u32 reg_val;
u16 swfw;
DEBUGFUNC("e1000_read_kmrn_reg");
if ((hw->mac_type == e1000_80003es2lan) &&
- (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
swfw = E1000_SWFW_PHY1_SM;
} else {
swfw = E1000_SWFW_PHY0_SM;
@@ -3745,28 +3709,25 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
E1000_KUMCTRLSTA_OFFSET) |
E1000_KUMCTRLSTA_REN;
- E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
+ ew32(KUMCTRLSTA, reg_val);
udelay(2);
/* Read the data returned */
- reg_val = E1000_READ_REG(hw, KUMCTRLSTA);
+ reg_val = er32(KUMCTRLSTA);
*data = (u16)reg_val;
e1000_swfw_sync_release(hw, swfw);
return E1000_SUCCESS;
}
-static s32
-e1000_write_kmrn_reg(struct e1000_hw *hw,
- u32 reg_addr,
- u16 data)
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data)
{
u32 reg_val;
u16 swfw;
DEBUGFUNC("e1000_write_kmrn_reg");
if ((hw->mac_type == e1000_80003es2lan) &&
- (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
swfw = E1000_SWFW_PHY1_SM;
} else {
swfw = E1000_SWFW_PHY0_SM;
@@ -3776,7 +3737,7 @@ e1000_write_kmrn_reg(struct e1000_hw *hw,
reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
E1000_KUMCTRLSTA_OFFSET) | data;
- E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
+ ew32(KUMCTRLSTA, reg_val);
udelay(2);
e1000_swfw_sync_release(hw, swfw);
@@ -3788,8 +3749,7 @@ e1000_write_kmrn_reg(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-s32
-e1000_phy_hw_reset(struct e1000_hw *hw)
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
u32 ctrl, ctrl_ext;
u32 led_ctrl;
@@ -3808,7 +3768,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
if (hw->mac_type > e1000_82543) {
if ((hw->mac_type == e1000_80003es2lan) &&
- (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
swfw = E1000_SWFW_PHY1_SM;
} else {
swfw = E1000_SWFW_PHY0_SM;
@@ -3823,17 +3783,17 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
* and deassert. For e1000_82571 hardware and later, we instead delay
* for 50us between and 10ms after the deassertion.
*/
- ctrl = E1000_READ_REG(hw, CTRL);
- E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
- E1000_WRITE_FLUSH(hw);
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
if (hw->mac_type < e1000_82571)
msleep(10);
else
udelay(100);
- E1000_WRITE_REG(hw, CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
if (hw->mac_type >= e1000_82571)
mdelay(10);
@@ -3843,24 +3803,24 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
/* Read the Extended Device Control Register, assert the PHY_RESET_DIR
* bit to put the PHY into reset. Then, take it out of reset.
*/
- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
msleep(10);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
}
udelay(150);
if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
/* Configure activity LED after PHY reset */
- led_ctrl = E1000_READ_REG(hw, LEDCTL);
+ led_ctrl = er32(LEDCTL);
led_ctrl &= IGP_ACTIVITY_LED_MASK;
led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+ ew32(LEDCTL, led_ctrl);
}
/* Wait for FW to finish PHY configuration. */
@@ -3882,8 +3842,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
*
* Sets bit 15 of the MII Control register
******************************************************************************/
-s32
-e1000_phy_reset(struct e1000_hw *hw)
+s32 e1000_phy_reset(struct e1000_hw *hw)
{
s32 ret_val;
u16 phy_data;
@@ -3934,8 +3893,7 @@ e1000_phy_reset(struct e1000_hw *hw)
*
* hw - struct containing variables accessed by shared code
******************************************************************************/
-void
-e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
{
s32 reg;
u16 phy_data;
@@ -3948,8 +3906,8 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
do {
/* Disable link */
- reg = E1000_READ_REG(hw, PHY_CTRL);
- E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ reg = er32(PHY_CTRL);
+ ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
/* Write VR power-down enable - bits 9:8 should be 10b */
@@ -3964,8 +3922,8 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
break;
/* Issue PHY reset and repeat at most one more time */
- reg = E1000_READ_REG(hw, CTRL);
- E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST);
+ reg = er32(CTRL);
+ ew32(CTRL, reg | E1000_CTRL_PHY_RST);
retry++;
} while (retry);
@@ -3987,8 +3945,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
*
* hw - struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
{
s32 ret_val;
s32 reg;
@@ -4024,8 +3981,8 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
mdelay(5);
}
/* Disable GigE link negotiation */
- reg = E1000_READ_REG(hw, PHY_CTRL);
- E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ reg = er32(PHY_CTRL);
+ ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
/* unable to acquire PCS lock */
@@ -4040,8 +3997,7 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_detect_gig_phy(struct e1000_hw *hw)
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
{
s32 phy_init_status, ret_val;
u16 phy_id_high, phy_id_low;
@@ -4076,14 +4032,14 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- hw->phy_id = (u32) (phy_id_high << 16);
+ hw->phy_id = (u32)(phy_id_high << 16);
udelay(20);
ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
if (ret_val)
return ret_val;
- hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
- hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
+ hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
+ hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
switch (hw->mac_type) {
case e1000_82543:
@@ -4136,8 +4092,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static s32
-e1000_phy_reset_dsp(struct e1000_hw *hw)
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
DEBUGFUNC("e1000_phy_reset_dsp");
@@ -4163,9 +4118,8 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
* phy_info - PHY information structure
******************************************************************************/
-static s32
-e1000_phy_igp_get_info(struct e1000_hw *hw,
- struct e1000_phy_info *phy_info)
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
{
s32 ret_val;
u16 phy_data, min_length, max_length, average;
@@ -4240,9 +4194,8 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
* hw - Struct containing variables accessed by shared code
* phy_info - PHY information structure
******************************************************************************/
-static s32
-e1000_phy_ife_get_info(struct e1000_hw *hw,
- struct e1000_phy_info *phy_info)
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
{
s32 ret_val;
u16 phy_data;
@@ -4290,9 +4243,8 @@ e1000_phy_ife_get_info(struct e1000_hw *hw,
* hw - Struct containing variables accessed by shared code
* phy_info - PHY information structure
******************************************************************************/
-static s32
-e1000_phy_m88_get_info(struct e1000_hw *hw,
- struct e1000_phy_info *phy_info)
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
{
s32 ret_val;
u16 phy_data;
@@ -4369,9 +4321,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
* hw - Struct containing variables accessed by shared code
* phy_info - PHY information structure
******************************************************************************/
-s32
-e1000_phy_get_info(struct e1000_hw *hw,
- struct e1000_phy_info *phy_info)
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
{
s32 ret_val;
u16 phy_data;
@@ -4415,8 +4365,7 @@ e1000_phy_get_info(struct e1000_hw *hw,
return e1000_phy_m88_get_info(hw, phy_info);
}
-s32
-e1000_validate_mdi_setting(struct e1000_hw *hw)
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_validate_mdi_settings");
@@ -4436,11 +4385,10 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_init_eeprom_params(struct e1000_hw *hw)
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u32 eecd = E1000_READ_REG(hw, EECD);
+ u32 eecd = er32(EECD);
s32 ret_val = E1000_SUCCESS;
u16 eeprom_size;
@@ -4542,7 +4490,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
/* Ensure that the Autonomous FLASH update bit is cleared due to
* Flash update issue on parts which use a FLASH for NVM. */
eecd &= ~E1000_EECD_AUPDEN;
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
}
break;
case e1000_80003es2lan:
@@ -4626,16 +4574,14 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
* eecd - EECD's current value
*****************************************************************************/
-static void
-e1000_raise_ee_clk(struct e1000_hw *hw,
- u32 *eecd)
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
/* Raise the clock input to the EEPROM (by setting the SK bit), and then
* wait <delay> microseconds.
*/
*eecd = *eecd | E1000_EECD_SK;
- E1000_WRITE_REG(hw, EECD, *eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
udelay(hw->eeprom.delay_usec);
}
@@ -4645,16 +4591,14 @@ e1000_raise_ee_clk(struct e1000_hw *hw,
* hw - Struct containing variables accessed by shared code
* eecd - EECD's current value
*****************************************************************************/
-static void
-e1000_lower_ee_clk(struct e1000_hw *hw,
- u32 *eecd)
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
/* Lower the clock input to the EEPROM (by clearing the SK bit), and then
* wait 50 microseconds.
*/
*eecd = *eecd & ~E1000_EECD_SK;
- E1000_WRITE_REG(hw, EECD, *eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
udelay(hw->eeprom.delay_usec);
}
@@ -4665,10 +4609,7 @@ e1000_lower_ee_clk(struct e1000_hw *hw,
* data - data to send to the EEPROM
* count - number of bits to shift out
*****************************************************************************/
-static void
-e1000_shift_out_ee_bits(struct e1000_hw *hw,
- u16 data,
- u16 count)
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd;
@@ -4679,7 +4620,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
* In order to do this, "data" must be broken down into bits.
*/
mask = 0x01 << (count - 1);
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
if (eeprom->type == e1000_eeprom_microwire) {
eecd &= ~E1000_EECD_DO;
} else if (eeprom->type == e1000_eeprom_spi) {
@@ -4696,8 +4637,8 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
if (data & mask)
eecd |= E1000_EECD_DI;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(eeprom->delay_usec);
@@ -4710,7 +4651,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eecd &= ~E1000_EECD_DI;
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
}
/******************************************************************************
@@ -4718,9 +4659,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static u16
-e1000_shift_in_ee_bits(struct e1000_hw *hw,
- u16 count)
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
{
u32 eecd;
u32 i;
@@ -4733,7 +4672,7 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
* always be clear.
*/
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
data = 0;
@@ -4742,7 +4681,7 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
data = data << 1;
e1000_raise_ee_clk(hw, &eecd);
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
eecd &= ~(E1000_EECD_DI);
if (eecd & E1000_EECD_DO)
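e1000_shift_out_ee_bits() and e1000_shift_in_ee_bits() are the two halves of the bit-banged EEPROM protocol: data is driven on DI (or sampled on DO) around clock edges generated by e1000_raise_ee_clk()/e1000_lower_ee_clk(). A hedged illustration of how a Microwire read strings them together; the opcode constant and bit widths come from the eeprom_info fields and are assumptions here, not a drop-in replacement for e1000_read_eeprom():

	/* Illustration only: read one 16-bit word at 'offset' from a Microwire part. */
	e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
				eeprom->opcode_bits);            /* READ opcode       */
	e1000_shift_out_ee_bits(hw, offset, eeprom->address_bits); /* word address     */
	data = e1000_shift_in_ee_bits(hw, 16);                   /* clock the word in  */
	e1000_standby_eeprom(hw);                                /* drop CS afterwards */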
@@ -4762,8 +4701,7 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
* Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
* function should be called before issuing a command to the EEPROM.
*****************************************************************************/
-static s32
-e1000_acquire_eeprom(struct e1000_hw *hw)
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd, i=0;
@@ -4772,23 +4710,23 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
return -E1000_ERR_SWFW_SYNC;
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
if (hw->mac_type != e1000_82573) {
/* Request EEPROM Access */
if (hw->mac_type > e1000_82544) {
eecd |= E1000_EECD_REQ;
- E1000_WRITE_REG(hw, EECD, eecd);
- eecd = E1000_READ_REG(hw, EECD);
+ ew32(EECD, eecd);
+ eecd = er32(EECD);
while ((!(eecd & E1000_EECD_GNT)) &&
(i < E1000_EEPROM_GRANT_ATTEMPTS)) {
i++;
udelay(5);
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
}
if (!(eecd & E1000_EECD_GNT)) {
eecd &= ~E1000_EECD_REQ;
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
DEBUGOUT("Could not acquire EEPROM grant\n");
e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
return -E1000_ERR_EEPROM;
@@ -4801,15 +4739,15 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
if (eeprom->type == e1000_eeprom_microwire) {
/* Clear SK and DI */
eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
/* Set CS */
eecd |= E1000_EECD_CS;
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
} else if (eeprom->type == e1000_eeprom_spi) {
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
udelay(1);
}
@@ -4821,46 +4759,45 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
-e1000_standby_eeprom(struct e1000_hw *hw)
+static void e1000_standby_eeprom(struct e1000_hw *hw)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd;
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
if (eeprom->type == e1000_eeprom_microwire) {
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(eeprom->delay_usec);
/* Clock high */
eecd |= E1000_EECD_SK;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(eeprom->delay_usec);
/* Select EEPROM */
eecd |= E1000_EECD_CS;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(eeprom->delay_usec);
/* Clock low */
eecd &= ~E1000_EECD_SK;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(eeprom->delay_usec);
} else if (eeprom->type == e1000_eeprom_spi) {
/* Toggle CS to flush commands */
eecd |= E1000_EECD_CS;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(eeprom->delay_usec);
eecd &= ~E1000_EECD_CS;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(eeprom->delay_usec);
}
}
@@ -4870,20 +4807,19 @@ e1000_standby_eeprom(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
-e1000_release_eeprom(struct e1000_hw *hw)
+static void e1000_release_eeprom(struct e1000_hw *hw)
{
u32 eecd;
DEBUGFUNC("e1000_release_eeprom");
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
if (hw->eeprom.type == e1000_eeprom_spi) {
eecd |= E1000_EECD_CS; /* Pull CS high */
eecd &= ~E1000_EECD_SK; /* Lower SCK */
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
udelay(hw->eeprom.delay_usec);
} else if (hw->eeprom.type == e1000_eeprom_microwire) {
@@ -4892,25 +4828,25 @@ e1000_release_eeprom(struct e1000_hw *hw)
/* CS on Microwire is active-high */
eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
/* Rising edge of clock */
eecd |= E1000_EECD_SK;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(hw->eeprom.delay_usec);
/* Falling edge of clock */
eecd &= ~E1000_EECD_SK;
- E1000_WRITE_REG(hw, EECD, eecd);
- E1000_WRITE_FLUSH(hw);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(hw->eeprom.delay_usec);
}
/* Stop requesting EEPROM access */
if (hw->mac_type > e1000_82544) {
eecd &= ~E1000_EECD_REQ;
- E1000_WRITE_REG(hw, EECD, eecd);
+ ew32(EECD, eecd);
}
e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
@@ -4921,8 +4857,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static s32
-e1000_spi_eeprom_ready(struct e1000_hw *hw)
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
{
u16 retry_count = 0;
u8 spi_stat_reg;
@@ -4967,11 +4902,7 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
-s32
-e1000_read_eeprom(struct e1000_hw *hw,
- u16 offset,
- u16 words,
- u16 *data)
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
@@ -5068,11 +4999,8 @@ e1000_read_eeprom(struct e1000_hw *hw,
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
-static s32
-e1000_read_eeprom_eerd(struct e1000_hw *hw,
- u16 offset,
- u16 words,
- u16 *data)
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
u32 i, eerd = 0;
s32 error = 0;
@@ -5081,13 +5009,13 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
E1000_EEPROM_RW_REG_START;
- E1000_WRITE_REG(hw, EERD, eerd);
+ ew32(EERD, eerd);
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
if (error) {
break;
}
- data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
+ data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
}
@@ -5102,11 +5030,8 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
-static s32
-e1000_write_eeprom_eewr(struct e1000_hw *hw,
- u16 offset,
- u16 words,
- u16 *data)
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
u32 register_value = 0;
u32 i = 0;
@@ -5125,7 +5050,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
break;
}
- E1000_WRITE_REG(hw, EEWR, register_value);
+ ew32(EEWR, register_value);
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
@@ -5143,8 +5068,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static s32
-e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
{
u32 attempts = 100000;
u32 i, reg = 0;
@@ -5152,9 +5076,9 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
for (i = 0; i < attempts; i++) {
if (eerd == E1000_EEPROM_POLL_READ)
- reg = E1000_READ_REG(hw, EERD);
+ reg = er32(EERD);
else
- reg = E1000_READ_REG(hw, EEWR);
+ reg = er32(EEWR);
if (reg & E1000_EEPROM_RW_REG_DONE) {
done = E1000_SUCCESS;
@@ -5171,8 +5095,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
*
* hw - Struct containing variables accessed by shared code
****************************************************************************/
-static bool
-e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
{
u32 eecd = 0;
@@ -5182,7 +5105,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
return false;
if (hw->mac_type == e1000_82573) {
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
/* Isolate bits 15 & 16 */
eecd = ((eecd >> 15) & 0x03);
@@ -5204,8 +5127,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
* If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
* valid.
*****************************************************************************/
-s32
-e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
{
u16 checksum = 0;
u16 i, eeprom_data;
@@ -5252,7 +5174,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
checksum += eeprom_data;
}
- if (checksum == (u16) EEPROM_SUM)
+ if (checksum == (u16)EEPROM_SUM)
return E1000_SUCCESS;
else {
DEBUGOUT("EEPROM Checksum Invalid\n");
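The rule being enforced here is simple: the 64 words at EEPROM offsets 0x00-0x3F must sum, with 16-bit wraparound, to EEPROM_SUM (0xBABA); word 0x3F is the compensating checksum written by e1000_update_eeprom_checksum() below. A standalone sketch of the same check over a hypothetical in-memory image:

	/* Hypothetical helper for illustration; 'image' is a raw 64-word copy of
	 * the EEPROM, not the driver's accessor. */
	static int eeprom_image_checksum_ok(const u16 image[64])
	{
		u16 sum = 0;
		int i;

		for (i = 0; i < 64; i++)
			sum += image[i];	/* 16-bit overflow is intentional */
		return sum == 0xBABA;		/* EEPROM_SUM */
	}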
@@ -5268,8 +5190,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
* Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
* Writes the difference to word offset 63 of the EEPROM.
*****************************************************************************/
-s32
-e1000_update_eeprom_checksum(struct e1000_hw *hw)
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
{
u32 ctrl_ext;
u16 checksum = 0;
@@ -5284,7 +5205,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
}
checksum += eeprom_data;
}
- checksum = (u16) EEPROM_SUM - checksum;
+ checksum = (u16)EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
DEBUGOUT("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
@@ -5294,9 +5215,9 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
e1000_commit_shadow_ram(hw);
/* Reload the EEPROM, or else modifications will not appear
* until after next adapter reset. */
- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ ew32(CTRL_EXT, ctrl_ext);
msleep(10);
}
return E1000_SUCCESS;
@@ -5313,11 +5234,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
* If e1000_update_eeprom_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
*****************************************************************************/
-s32
-e1000_write_eeprom(struct e1000_hw *hw,
- u16 offset,
- u16 words,
- u16 *data)
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
@@ -5370,11 +5287,8 @@ e1000_write_eeprom(struct e1000_hw *hw,
* data - pointer to array of 8 bit words to be written to the EEPROM
*
*****************************************************************************/
-static s32
-e1000_write_eeprom_spi(struct e1000_hw *hw,
- u16 offset,
- u16 words,
- u16 *data)
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u16 widx = 0;
@@ -5436,11 +5350,8 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
* data - pointer to array of 16 bit words to be written to the EEPROM
*
*****************************************************************************/
-static s32
-e1000_write_eeprom_microwire(struct e1000_hw *hw,
- u16 offset,
- u16 words,
- u16 *data)
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd;
@@ -5484,7 +5395,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
* If DO does not go high in 10 milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
if (eecd & E1000_EECD_DO) break;
udelay(50);
}
@@ -5523,8 +5434,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
-static s32
-e1000_commit_shadow_ram(struct e1000_hw *hw)
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw)
{
u32 attempts = 100000;
u32 eecd = 0;
@@ -5539,9 +5449,9 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
if (hw->mac_type == e1000_82573) {
/* The flop register will be used to determine if flash type is STM */
- flop = E1000_READ_REG(hw, FLOP);
+ flop = er32(FLOP);
for (i=0; i < attempts; i++) {
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
if ((eecd & E1000_EECD_FLUPD) == 0) {
break;
}
@@ -5554,14 +5464,14 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
/* If STM opcode located in bits 15:8 of flop, reset firmware */
if ((flop & 0xFF00) == E1000_STM_OPCODE) {
- E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
+ ew32(HICR, E1000_HICR_FW_RESET);
}
/* Perform the flash update */
- E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
+ ew32(EECD, eecd | E1000_EECD_FLUPD);
for (i=0; i < attempts; i++) {
- eecd = E1000_READ_REG(hw, EECD);
+ eecd = er32(EECD);
if ((eecd & E1000_EECD_FLUPD) == 0) {
break;
}
@@ -5577,7 +5487,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
/* We're writing to the opposite bank so if we're on bank 1,
* write to bank 0 etc. We also need to erase the segment that
* is going to be written */
- if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) {
+ if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
new_bank_offset = hw->flash_bank_size * 2;
old_bank_offset = 0;
e1000_erase_ich8_4k_segment(hw, 1);
@@ -5687,8 +5597,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_read_mac_addr(struct e1000_hw * hw)
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
{
u16 offset;
u16 eeprom_data, i;
@@ -5701,8 +5610,8 @@ e1000_read_mac_addr(struct e1000_hw * hw)
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
- hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
- hw->perm_mac_addr[i+1] = (u8) (eeprom_data >> 8);
+ hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
+ hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8);
}
switch (hw->mac_type) {
@@ -5712,7 +5621,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
case e1000_82546_rev_3:
case e1000_82571:
case e1000_80003es2lan:
- if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
hw->perm_mac_addr[5] ^= 0x01;
break;
}
@@ -5731,8 +5640,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
*****************************************************************************/
-static void
-e1000_init_rx_addrs(struct e1000_hw *hw)
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
{
u32 i;
u32 rar_num;
@@ -5758,9 +5666,9 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
DEBUGOUT("Clearing RAR[1-15]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
}
@@ -5770,9 +5678,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
* mc_addr - the multicast address to hash
*****************************************************************************/
-u32
-e1000_hash_mc_addr(struct e1000_hw *hw,
- u8 *mc_addr)
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value = 0;
@@ -5787,37 +5693,37 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
case 0:
if (hw->mac_type == e1000_ich8lan) {
/* [47:38] i.e. 0x158 for above example address */
- hash_value = ((mc_addr[4] >> 6) | (((u16) mc_addr[5]) << 2));
+ hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2));
} else {
/* [47:36] i.e. 0x563 for above example address */
- hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+ hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
}
break;
case 1:
if (hw->mac_type == e1000_ich8lan) {
/* [46:37] i.e. 0x2B1 for above example address */
- hash_value = ((mc_addr[4] >> 5) | (((u16) mc_addr[5]) << 3));
+ hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3));
} else {
/* [46:35] i.e. 0xAC6 for above example address */
- hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+ hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
}
break;
case 2:
if (hw->mac_type == e1000_ich8lan) {
/*[45:36] i.e. 0x163 for above example address */
- hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+ hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
} else {
/* [45:34] i.e. 0x5D8 for above example address */
- hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+ hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
}
break;
case 3:
if (hw->mac_type == e1000_ich8lan) {
/* [43:34] i.e. 0x18D for above example address */
- hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+ hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
} else {
/* [43:32] i.e. 0x634 for above example address */
- hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+ hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
}
break;
}
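The hash is built entirely from the last two bytes of the destination MAC address; hw->mc_filter_type only selects which window of those bits becomes the 12-bit (10-bit on ICH8) table index, and the function masks the result to the table width further down in the source. A worked example for the default filter type 0 on a non-ICH8 part, using a purely hypothetical address:

	/* Hypothetical multicast address, for illustration only. */
	u8 mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	u32 hash_value;

	/* Filter type 0, non-ICH8: bits [47:36] of the address. */
	hash_value = (mc_addr[4] >> 4) | ((u16)mc_addr[5] << 4);	/* 0xFB0 */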
@@ -5835,9 +5741,7 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*****************************************************************************/
-void
-e1000_mta_set(struct e1000_hw *hw,
- u32 hash_value)
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
@@ -5868,12 +5772,12 @@ e1000_mta_set(struct e1000_hw *hw,
if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
} else {
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
}
@@ -5884,20 +5788,16 @@ e1000_mta_set(struct e1000_hw *hw,
* addr - Address to put into receive address register
* index - Receive address register to write
*****************************************************************************/
-void
-e1000_rar_set(struct e1000_hw *hw,
- u8 *addr,
- u32 index)
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
- rar_low = ((u32) addr[0] |
- ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
/* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
* unit hang.
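The packing above simply re-orders the six network-order address bytes into the little-endian RAL/RAH register pair (the address-valid bit is set in rar_high further down in the function). For a hypothetical address 00:11:22:33:44:55 the result is:

	/* Worked example, illustrative values only. */
	rar_low  = 0x00 | (0x11 << 8) | (0x22 << 16) | (0x33 << 24);	/* 0x33221100 */
	rar_high = 0x44 | (0x55 << 8);					/* 0x00005544 */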
@@ -5930,9 +5830,9 @@ e1000_rar_set(struct e1000_hw *hw,
}
E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
/******************************************************************************
@@ -5942,10 +5842,7 @@ e1000_rar_set(struct e1000_hw *hw,
* offset - Offset in VLAN filer table to write
* value - Value to write into VLAN filter table
*****************************************************************************/
-void
-e1000_write_vfta(struct e1000_hw *hw,
- u32 offset,
- u32 value)
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
u32 temp;
@@ -5955,12 +5852,12 @@ e1000_write_vfta(struct e1000_hw *hw,
if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
} else {
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
}
@@ -5969,8 +5866,7 @@ e1000_write_vfta(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
-e1000_clear_vfta(struct e1000_hw *hw)
+static void e1000_clear_vfta(struct e1000_hw *hw)
{
u32 offset;
u32 vfta_value = 0;
@@ -5999,12 +5895,11 @@ e1000_clear_vfta(struct e1000_hw *hw)
* manageability unit */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
}
-static s32
-e1000_id_led_init(struct e1000_hw * hw)
+static s32 e1000_id_led_init(struct e1000_hw *hw)
{
u32 ledctl;
const u32 ledctl_mask = 0x000000FF;
@@ -6020,7 +5915,7 @@ e1000_id_led_init(struct e1000_hw * hw)
return E1000_SUCCESS;
}
- ledctl = E1000_READ_REG(hw, LEDCTL);
+ ledctl = er32(LEDCTL);
hw->ledctl_default = ledctl;
hw->ledctl_mode1 = hw->ledctl_default;
hw->ledctl_mode2 = hw->ledctl_default;
@@ -6086,8 +5981,7 @@ e1000_id_led_init(struct e1000_hw * hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_setup_led(struct e1000_hw *hw)
+s32 e1000_setup_led(struct e1000_hw *hw)
{
u32 ledctl;
s32 ret_val = E1000_SUCCESS;
@@ -6118,7 +6012,7 @@ e1000_setup_led(struct e1000_hw *hw)
/* Fall Through */
default:
if (hw->media_type == e1000_media_type_fiber) {
- ledctl = E1000_READ_REG(hw, LEDCTL);
+ ledctl = er32(LEDCTL);
/* Save current LEDCTL settings */
hw->ledctl_default = ledctl;
/* Turn off LED0 */
@@ -6127,9 +6021,9 @@ e1000_setup_led(struct e1000_hw *hw)
E1000_LEDCTL_LED0_MODE_MASK);
ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
E1000_LEDCTL_LED0_MODE_SHIFT);
- E1000_WRITE_REG(hw, LEDCTL, ledctl);
+ ew32(LEDCTL, ledctl);
} else if (hw->media_type == e1000_media_type_copper)
- E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+ ew32(LEDCTL, hw->ledctl_mode1);
break;
}
@@ -6145,8 +6039,7 @@ e1000_setup_led(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_blink_led_start(struct e1000_hw *hw)
+s32 e1000_blink_led_start(struct e1000_hw *hw)
{
s16 i;
u32 ledctl_blink = 0;
@@ -6170,7 +6063,7 @@ e1000_blink_led_start(struct e1000_hw *hw)
ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
}
- E1000_WRITE_REG(hw, LEDCTL, ledctl_blink);
+ ew32(LEDCTL, ledctl_blink);
return E1000_SUCCESS;
}
@@ -6180,8 +6073,7 @@ e1000_blink_led_start(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_cleanup_led(struct e1000_hw *hw)
+s32 e1000_cleanup_led(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
@@ -6210,7 +6102,7 @@ e1000_cleanup_led(struct e1000_hw *hw)
break;
}
/* Restore LEDCTL settings */
- E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default);
+ ew32(LEDCTL, hw->ledctl_default);
break;
}
@@ -6222,10 +6114,9 @@ e1000_cleanup_led(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_led_on(struct e1000_hw *hw)
+s32 e1000_led_on(struct e1000_hw *hw)
{
- u32 ctrl = E1000_READ_REG(hw, CTRL);
+ u32 ctrl = er32(CTRL);
DEBUGFUNC("e1000_led_on");
@@ -6257,13 +6148,13 @@ e1000_led_on(struct e1000_hw *hw)
e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
} else if (hw->media_type == e1000_media_type_copper) {
- E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2);
+ ew32(LEDCTL, hw->ledctl_mode2);
return E1000_SUCCESS;
}
break;
}
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
return E1000_SUCCESS;
}
@@ -6273,10 +6164,9 @@ e1000_led_on(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-s32
-e1000_led_off(struct e1000_hw *hw)
+s32 e1000_led_off(struct e1000_hw *hw)
{
- u32 ctrl = E1000_READ_REG(hw, CTRL);
+ u32 ctrl = er32(CTRL);
DEBUGFUNC("e1000_led_off");
@@ -6308,13 +6198,13 @@ e1000_led_off(struct e1000_hw *hw)
e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
} else if (hw->media_type == e1000_media_type_copper) {
- E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+ ew32(LEDCTL, hw->ledctl_mode1);
return E1000_SUCCESS;
}
break;
}
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
return E1000_SUCCESS;
}
@@ -6324,98 +6214,97 @@ e1000_led_off(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
-e1000_clear_hw_cntrs(struct e1000_hw *hw)
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
{
volatile u32 temp;
- temp = E1000_READ_REG(hw, CRCERRS);
- temp = E1000_READ_REG(hw, SYMERRS);
- temp = E1000_READ_REG(hw, MPC);
- temp = E1000_READ_REG(hw, SCC);
- temp = E1000_READ_REG(hw, ECOL);
- temp = E1000_READ_REG(hw, MCC);
- temp = E1000_READ_REG(hw, LATECOL);
- temp = E1000_READ_REG(hw, COLC);
- temp = E1000_READ_REG(hw, DC);
- temp = E1000_READ_REG(hw, SEC);
- temp = E1000_READ_REG(hw, RLEC);
- temp = E1000_READ_REG(hw, XONRXC);
- temp = E1000_READ_REG(hw, XONTXC);
- temp = E1000_READ_REG(hw, XOFFRXC);
- temp = E1000_READ_REG(hw, XOFFTXC);
- temp = E1000_READ_REG(hw, FCRUC);
+ temp = er32(CRCERRS);
+ temp = er32(SYMERRS);
+ temp = er32(MPC);
+ temp = er32(SCC);
+ temp = er32(ECOL);
+ temp = er32(MCC);
+ temp = er32(LATECOL);
+ temp = er32(COLC);
+ temp = er32(DC);
+ temp = er32(SEC);
+ temp = er32(RLEC);
+ temp = er32(XONRXC);
+ temp = er32(XONTXC);
+ temp = er32(XOFFRXC);
+ temp = er32(XOFFTXC);
+ temp = er32(FCRUC);
if (hw->mac_type != e1000_ich8lan) {
- temp = E1000_READ_REG(hw, PRC64);
- temp = E1000_READ_REG(hw, PRC127);
- temp = E1000_READ_REG(hw, PRC255);
- temp = E1000_READ_REG(hw, PRC511);
- temp = E1000_READ_REG(hw, PRC1023);
- temp = E1000_READ_REG(hw, PRC1522);
- }
-
- temp = E1000_READ_REG(hw, GPRC);
- temp = E1000_READ_REG(hw, BPRC);
- temp = E1000_READ_REG(hw, MPRC);
- temp = E1000_READ_REG(hw, GPTC);
- temp = E1000_READ_REG(hw, GORCL);
- temp = E1000_READ_REG(hw, GORCH);
- temp = E1000_READ_REG(hw, GOTCL);
- temp = E1000_READ_REG(hw, GOTCH);
- temp = E1000_READ_REG(hw, RNBC);
- temp = E1000_READ_REG(hw, RUC);
- temp = E1000_READ_REG(hw, RFC);
- temp = E1000_READ_REG(hw, ROC);
- temp = E1000_READ_REG(hw, RJC);
- temp = E1000_READ_REG(hw, TORL);
- temp = E1000_READ_REG(hw, TORH);
- temp = E1000_READ_REG(hw, TOTL);
- temp = E1000_READ_REG(hw, TOTH);
- temp = E1000_READ_REG(hw, TPR);
- temp = E1000_READ_REG(hw, TPT);
+ temp = er32(PRC64);
+ temp = er32(PRC127);
+ temp = er32(PRC255);
+ temp = er32(PRC511);
+ temp = er32(PRC1023);
+ temp = er32(PRC1522);
+ }
+
+ temp = er32(GPRC);
+ temp = er32(BPRC);
+ temp = er32(MPRC);
+ temp = er32(GPTC);
+ temp = er32(GORCL);
+ temp = er32(GORCH);
+ temp = er32(GOTCL);
+ temp = er32(GOTCH);
+ temp = er32(RNBC);
+ temp = er32(RUC);
+ temp = er32(RFC);
+ temp = er32(ROC);
+ temp = er32(RJC);
+ temp = er32(TORL);
+ temp = er32(TORH);
+ temp = er32(TOTL);
+ temp = er32(TOTH);
+ temp = er32(TPR);
+ temp = er32(TPT);
if (hw->mac_type != e1000_ich8lan) {
- temp = E1000_READ_REG(hw, PTC64);
- temp = E1000_READ_REG(hw, PTC127);
- temp = E1000_READ_REG(hw, PTC255);
- temp = E1000_READ_REG(hw, PTC511);
- temp = E1000_READ_REG(hw, PTC1023);
- temp = E1000_READ_REG(hw, PTC1522);
+ temp = er32(PTC64);
+ temp = er32(PTC127);
+ temp = er32(PTC255);
+ temp = er32(PTC511);
+ temp = er32(PTC1023);
+ temp = er32(PTC1522);
}
- temp = E1000_READ_REG(hw, MPTC);
- temp = E1000_READ_REG(hw, BPTC);
+ temp = er32(MPTC);
+ temp = er32(BPTC);
if (hw->mac_type < e1000_82543) return;
- temp = E1000_READ_REG(hw, ALGNERRC);
- temp = E1000_READ_REG(hw, RXERRC);
- temp = E1000_READ_REG(hw, TNCRS);
- temp = E1000_READ_REG(hw, CEXTERR);
- temp = E1000_READ_REG(hw, TSCTC);
- temp = E1000_READ_REG(hw, TSCTFC);
+ temp = er32(ALGNERRC);
+ temp = er32(RXERRC);
+ temp = er32(TNCRS);
+ temp = er32(CEXTERR);
+ temp = er32(TSCTC);
+ temp = er32(TSCTFC);
if (hw->mac_type <= e1000_82544) return;
- temp = E1000_READ_REG(hw, MGTPRC);
- temp = E1000_READ_REG(hw, MGTPDC);
- temp = E1000_READ_REG(hw, MGTPTC);
+ temp = er32(MGTPRC);
+ temp = er32(MGTPDC);
+ temp = er32(MGTPTC);
if (hw->mac_type <= e1000_82547_rev_2) return;
- temp = E1000_READ_REG(hw, IAC);
- temp = E1000_READ_REG(hw, ICRXOC);
+ temp = er32(IAC);
+ temp = er32(ICRXOC);
if (hw->mac_type == e1000_ich8lan) return;
- temp = E1000_READ_REG(hw, ICRXPTC);
- temp = E1000_READ_REG(hw, ICRXATC);
- temp = E1000_READ_REG(hw, ICTXPTC);
- temp = E1000_READ_REG(hw, ICTXATC);
- temp = E1000_READ_REG(hw, ICTXQEC);
- temp = E1000_READ_REG(hw, ICTXQMTC);
- temp = E1000_READ_REG(hw, ICRXDMTC);
+ temp = er32(ICRXPTC);
+ temp = er32(ICRXATC);
+ temp = er32(ICTXPTC);
+ temp = er32(ICTXATC);
+ temp = er32(ICTXQEC);
+ temp = er32(ICTXQMTC);
+ temp = er32(ICRXDMTC);
}
/******************************************************************************
@@ -6428,8 +6317,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
* current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
* before calling this function.
*****************************************************************************/
-void
-e1000_reset_adaptive(struct e1000_hw *hw)
+void e1000_reset_adaptive(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_reset_adaptive");
@@ -6442,7 +6330,7 @@ e1000_reset_adaptive(struct e1000_hw *hw)
hw->ifs_ratio = IFS_RATIO;
}
hw->in_ifs_mode = false;
- E1000_WRITE_REG(hw, AIT, 0);
+ ew32(AIT, 0);
} else {
DEBUGOUT("Not in Adaptive IFS mode!\n");
}
@@ -6456,8 +6344,7 @@ e1000_reset_adaptive(struct e1000_hw *hw)
* tx_packets - Number of transmits since last callback
* total_collisions - Number of collisions since last callback
*****************************************************************************/
-void
-e1000_update_adaptive(struct e1000_hw *hw)
+void e1000_update_adaptive(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_update_adaptive");
@@ -6470,14 +6357,14 @@ e1000_update_adaptive(struct e1000_hw *hw)
hw->current_ifs_val = hw->ifs_min_val;
else
hw->current_ifs_val += hw->ifs_step_size;
- E1000_WRITE_REG(hw, AIT, hw->current_ifs_val);
+ ew32(AIT, hw->current_ifs_val);
}
}
} else {
if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
hw->current_ifs_val = 0;
hw->in_ifs_mode = false;
- E1000_WRITE_REG(hw, AIT, 0);
+ ew32(AIT, 0);
}
}
} else {
@@ -6492,11 +6379,8 @@ e1000_update_adaptive(struct e1000_hw *hw)
* frame_len - The length of the frame in question
* mac_addr - The Ethernet destination address of the frame in question
*****************************************************************************/
-void
-e1000_tbi_adjust_stats(struct e1000_hw *hw,
- struct e1000_hw_stats *stats,
- u32 frame_len,
- u8 *mac_addr)
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr)
{
u64 carry_bit;
@@ -6527,7 +6411,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
- if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff))
/* Broadcast packet */
stats->bprc++;
else if (*mac_addr & 0x01)
@@ -6570,8 +6454,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-void
-e1000_get_bus_info(struct e1000_hw *hw)
+void e1000_get_bus_info(struct e1000_hw *hw)
{
s32 ret_val;
u16 pci_ex_link_status;
@@ -6605,7 +6488,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
hw->bus_width = e1000_bus_width_pciex_1;
break;
default:
- status = E1000_READ_REG(hw, STATUS);
+ status = er32(STATUS);
hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
e1000_bus_type_pcix : e1000_bus_type_pci;
@@ -6645,10 +6528,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
* offset - offset to write to
* value - value to write
*****************************************************************************/
-static void
-e1000_write_reg_io(struct e1000_hw *hw,
- u32 offset,
- u32 value)
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
{
unsigned long io_addr = hw->io_base;
unsigned long io_data = hw->io_base + 4;
@@ -6672,10 +6552,8 @@ e1000_write_reg_io(struct e1000_hw *hw,
* register to the minimum and maximum range.
* For IGP PHYs, the function calculates the range from the AGC registers.
*****************************************************************************/
-static s32
-e1000_get_cable_length(struct e1000_hw *hw,
- u16 *min_length,
- u16 *max_length)
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length)
{
s32 ret_val;
u16 agc_value = 0;
@@ -6863,9 +6741,8 @@ e1000_get_cable_length(struct e1000_hw *hw,
* return 0. If the link speed is 1000 Mbps the polarity status is in the
* IGP01E1000_PHY_PCS_INIT_REG.
*****************************************************************************/
-static s32
-e1000_check_polarity(struct e1000_hw *hw,
- e1000_rev_polarity *polarity)
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity)
{
s32 ret_val;
u16 phy_data;
@@ -6939,8 +6816,7 @@ e1000_check_polarity(struct e1000_hw *hw,
* Link Health register. In IGP this bit is latched high, so the driver must
* read it immediately after link is established.
*****************************************************************************/
-static s32
-e1000_check_downshift(struct e1000_hw *hw)
+static s32 e1000_check_downshift(struct e1000_hw *hw)
{
s32 ret_val;
u16 phy_data;
@@ -6985,9 +6861,7 @@ e1000_check_downshift(struct e1000_hw *hw)
*
****************************************************************************/
-static s32
-e1000_config_dsp_after_link_change(struct e1000_hw *hw,
- bool link_up)
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
{
s32 ret_val;
u16 phy_data, phy_saved_data, speed, duplex, i;
@@ -7173,8 +7047,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
****************************************************************************/
-static s32
-e1000_set_phy_mode(struct e1000_hw *hw)
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
{
s32 ret_val;
u16 eeprom_data;
@@ -7218,9 +7091,7 @@ e1000_set_phy_mode(struct e1000_hw *hw)
*
****************************************************************************/
-static s32
-e1000_set_d3_lplu_state(struct e1000_hw *hw,
- bool active)
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
u32 phy_ctrl = 0;
s32 ret_val;
@@ -7242,7 +7113,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
/* MAC writes into PHY register based on the state transition
* and start auto-negotiation. SW driver can overwrite the settings
* in CSR PHY power control E1000_PHY_CTRL register. */
- phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+ phy_ctrl = er32(PHY_CTRL);
} else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
if (ret_val)
@@ -7259,7 +7130,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
} else {
if (hw->mac_type == e1000_ich8lan) {
phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
- E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ ew32(PHY_CTRL, phy_ctrl);
} else {
phy_data &= ~IGP02E1000_PM_D3_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
@@ -7310,7 +7181,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
} else {
if (hw->mac_type == e1000_ich8lan) {
phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
- E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ ew32(PHY_CTRL, phy_ctrl);
} else {
phy_data |= IGP02E1000_PM_D3_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
@@ -7348,9 +7219,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
*
****************************************************************************/
-static s32
-e1000_set_d0_lplu_state(struct e1000_hw *hw,
- bool active)
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
{
u32 phy_ctrl = 0;
s32 ret_val;
@@ -7361,7 +7230,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
return E1000_SUCCESS;
if (hw->mac_type == e1000_ich8lan) {
- phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+ phy_ctrl = er32(PHY_CTRL);
} else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
if (ret_val)
@@ -7371,7 +7240,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
if (!active) {
if (hw->mac_type == e1000_ich8lan) {
phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
- E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ ew32(PHY_CTRL, phy_ctrl);
} else {
phy_data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
@@ -7412,7 +7281,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
if (hw->mac_type == e1000_ich8lan) {
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
- E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ ew32(PHY_CTRL, phy_ctrl);
} else {
phy_data |= IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
@@ -7439,8 +7308,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static s32
-e1000_set_vco_speed(struct e1000_hw *hw)
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
{
s32 ret_val;
u16 default_page = 0;
@@ -7503,8 +7371,7 @@ e1000_set_vco_speed(struct e1000_hw *hw)
*
* returns: - E1000_SUCCESS.
****************************************************************************/
-static s32
-e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer)
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer)
{
u8 i;
u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
@@ -7514,7 +7381,7 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer)
offset = (offset >> 2);
for (i = 0; i < length; i++) {
- *((u32 *) buffer + i) =
+ *((u32 *)buffer + i) =
E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
}
return E1000_SUCCESS;
@@ -7530,21 +7397,20 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer)
* timeout
* - E1000_SUCCESS for success.
****************************************************************************/
-static s32
-e1000_mng_enable_host_if(struct e1000_hw * hw)
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
u32 hicr;
u8 i;
/* Check that the host interface is enabled. */
- hicr = E1000_READ_REG(hw, HICR);
+ hicr = er32(HICR);
if ((hicr & E1000_HICR_EN) == 0) {
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
/* check the previous command is completed */
for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
- hicr = E1000_READ_REG(hw, HICR);
+ hicr = er32(HICR);
if (!(hicr & E1000_HICR_C))
break;
mdelay(1);
@@ -7564,9 +7430,8 @@ e1000_mng_enable_host_if(struct e1000_hw * hw)
*
* returns - E1000_SUCCESS for success.
****************************************************************************/
-static s32
-e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer,
- u16 length, u16 offset, u8 *sum)
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum)
{
u8 *tmp;
u8 *bufptr = buffer;
@@ -7632,9 +7497,8 @@ e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer,
*
* returns - E1000_SUCCESS for success.
****************************************************************************/
-static s32
-e1000_mng_write_cmd_header(struct e1000_hw * hw,
- struct e1000_host_mng_command_header * hdr)
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
{
u16 i;
u8 sum;
@@ -7648,7 +7512,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
sum = hdr->checksum;
hdr->checksum = 0;
- buffer = (u8 *) hdr;
+ buffer = (u8 *)hdr;
i = length;
while (i--)
sum += buffer[i];
@@ -7658,8 +7522,8 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
length >>= 2;
/* The device driver writes the relevant command block into the ram area. */
for (i = 0; i < length; i++) {
- E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i));
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i));
+ E1000_WRITE_FLUSH();
}
return E1000_SUCCESS;
@@ -7672,14 +7536,13 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
*
* returns - E1000_SUCCESS for success.
****************************************************************************/
-static s32
-e1000_mng_write_commit(struct e1000_hw * hw)
+static s32 e1000_mng_write_commit(struct e1000_hw *hw)
{
u32 hicr;
- hicr = E1000_READ_REG(hw, HICR);
+ hicr = er32(HICR);
/* Setting this bit tells the ARC that a new command is pending. */
- E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C);
+ ew32(HICR, hicr | E1000_HICR_C);
return E1000_SUCCESS;
}
@@ -7690,12 +7553,11 @@ e1000_mng_write_commit(struct e1000_hw * hw)
*
* returns - true when the mode is IAMT or false.
****************************************************************************/
-bool
-e1000_check_mng_mode(struct e1000_hw *hw)
+bool e1000_check_mng_mode(struct e1000_hw *hw)
{
u32 fwsm;
- fwsm = E1000_READ_REG(hw, FWSM);
+ fwsm = er32(FWSM);
if (hw->mac_type == e1000_ich8lan) {
if ((fwsm & E1000_FWSM_MODE_MASK) ==
@@ -7712,9 +7574,7 @@ e1000_check_mng_mode(struct e1000_hw *hw)
/*****************************************************************************
* This function writes the DHCP info.
****************************************************************************/
-s32
-e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer,
- u16 length)
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
s32 ret_val;
struct e1000_host_mng_command_header hdr;
@@ -7744,8 +7604,7 @@ e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer,
*
* returns - checksum of buffer contents.
****************************************************************************/
-static u8
-e1000_calculate_mng_checksum(char *buffer, u32 length)
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
{
u8 sum = 0;
u32 i;
@@ -7756,7 +7615,7 @@ e1000_calculate_mng_checksum(char *buffer, u32 length)
for (i=0; i < length; i++)
sum += buffer[i];
- return (u8) (0 - sum);
+ return (u8)(0 - sum);
}
/*****************************************************************************
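e1000_calculate_mng_checksum() returns the two's complement of the byte-wise sum, so an intact buffer plus its checksum byte sums to zero modulo 256; that is presumably what the management firmware verifies on the other side of the host interface. A hedged sketch of the invariant, where buf and len are hypothetical:

	/* Illustration of the invariant, not driver code. */
	u8 csum = e1000_calculate_mng_checksum(buf, len);
	u8 verify = csum;
	u32 i;

	for (i = 0; i < len; i++)
		verify += buf[i];
	/* verify == 0 here for an unmodified buffer. */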
@@ -7764,8 +7623,7 @@ e1000_calculate_mng_checksum(char *buffer, u32 length)
*
* returns - true for packet filtering or false.
****************************************************************************/
-bool
-e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
/* called in init as well as watchdog timer functions */
@@ -7806,21 +7664,20 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
* returns: - true/false
*
*****************************************************************************/
-u32
-e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
{
u32 manc;
u32 fwsm, factps;
if (hw->asf_firmware_present) {
- manc = E1000_READ_REG(hw, MANC);
+ manc = er32(MANC);
if (!(manc & E1000_MANC_RCV_TCO_EN) ||
!(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
return false;
if (e1000_arc_subsystem_valid(hw)) {
- fwsm = E1000_READ_REG(hw, FWSM);
- factps = E1000_READ_REG(hw, FACTPS);
+ fwsm = er32(FWSM);
+ factps = er32(FACTPS);
if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
@@ -7832,8 +7689,7 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw)
return false;
}
-static s32
-e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
{
s32 ret_val;
u16 mii_status_reg;
@@ -7926,8 +7782,7 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
* returns: - none.
*
***************************************************************************/
-static void
-e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
{
u32 ctrl;
@@ -7936,9 +7791,9 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
if (hw->bus_type != e1000_bus_type_pci_express)
return;
- ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl = er32(CTRL);
ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
- E1000_WRITE_REG(hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
}
/*******************************************************************************
@@ -7952,8 +7807,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
* E1000_SUCCESS master requests disabled.
*
******************************************************************************/
-s32
-e1000_disable_pciex_master(struct e1000_hw *hw)
+s32 e1000_disable_pciex_master(struct e1000_hw *hw)
{
s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
@@ -7965,7 +7819,7 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
e1000_set_pci_express_master_disable(hw);
while (timeout) {
- if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+ if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
break;
else
udelay(100);
@@ -7990,8 +7844,7 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
* E1000_SUCCESS at any other case.
*
******************************************************************************/
-static s32
-e1000_get_auto_rd_done(struct e1000_hw *hw)
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
s32 timeout = AUTO_READ_DONE_TIMEOUT;
@@ -8007,7 +7860,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
case e1000_80003es2lan:
case e1000_ich8lan:
while (timeout) {
- if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD)
+ if (er32(EECD) & E1000_EECD_AUTO_RD)
break;
else msleep(1);
timeout--;
@@ -8038,8 +7891,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
* E1000_SUCCESS at any other case.
*
***************************************************************************/
-static s32
-e1000_get_phy_cfg_done(struct e1000_hw *hw)
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
u32 cfg_mask = E1000_EEPROM_CFG_DONE;
@@ -8052,13 +7904,13 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
break;
case e1000_80003es2lan:
/* Separate *_CFG_DONE_* bit for each port */
- if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
/* Fall Through */
case e1000_82571:
case e1000_82572:
while (timeout) {
- if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask)
+ if (er32(EEMNGCTL) & cfg_mask)
break;
else
msleep(1);
@@ -8085,8 +7937,7 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
* E1000_SUCCESS at any other case.
*
***************************************************************************/
-static s32
-e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
{
s32 timeout;
u32 swsm;
@@ -8105,11 +7956,11 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
/* Get the FW semaphore. */
timeout = hw->eeprom.word_size + 1;
while (timeout) {
- swsm = E1000_READ_REG(hw, SWSM);
+ swsm = er32(SWSM);
swsm |= E1000_SWSM_SWESMBI;
- E1000_WRITE_REG(hw, SWSM, swsm);
+ ew32(SWSM, swsm);
/* if we managed to set the bit we got the semaphore. */
- swsm = E1000_READ_REG(hw, SWSM);
+ swsm = er32(SWSM);
if (swsm & E1000_SWSM_SWESMBI)
break;
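The loop above is the usual set-then-read-back handshake: software writes SWESMBI, and it owns the firmware semaphore only if the bit reads back set, since the write does not take effect while firmware holds the lock. A condensed, hypothetical restatement of a single claim attempt (not a drop-in replacement for the loop):

	/* Hypothetical helper, for illustration; one claim attempt. */
	static bool e1000_try_claim_fw_semaphore(struct e1000_hw *hw)
	{
		u32 swsm = er32(SWSM);

		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
		/* The bit only sticks if firmware is not holding the semaphore. */
		return (er32(SWSM) & E1000_SWSM_SWESMBI) != 0;
	}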
@@ -8135,8 +7986,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
* returns: - None.
*
***************************************************************************/
-static void
-e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
{
u32 swsm;
@@ -8145,13 +7995,13 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
if (!hw->eeprom_semaphore_present)
return;
- swsm = E1000_READ_REG(hw, SWSM);
+ swsm = er32(SWSM);
if (hw->mac_type == e1000_80003es2lan) {
/* Release both semaphores. */
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
} else
swsm &= ~(E1000_SWSM_SWESMBI);
- E1000_WRITE_REG(hw, SWSM, swsm);
+ ew32(SWSM, swsm);
}
/***************************************************************************
@@ -8164,8 +8014,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
* E1000_SUCCESS in any other case.
*
***************************************************************************/
-static s32
-e1000_get_software_semaphore(struct e1000_hw *hw)
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
{
s32 timeout = hw->eeprom.word_size + 1;
u32 swsm;
@@ -8177,7 +8026,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
}
while (timeout) {
- swsm = E1000_READ_REG(hw, SWSM);
+ swsm = er32(SWSM);
/* If SMBI bit cleared, it is now set and we hold the semaphore */
if (!(swsm & E1000_SWSM_SMBI))
break;
@@ -8200,8 +8049,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
-static void
-e1000_release_software_semaphore(struct e1000_hw *hw)
+static void e1000_release_software_semaphore(struct e1000_hw *hw)
{
u32 swsm;
@@ -8211,10 +8059,10 @@ e1000_release_software_semaphore(struct e1000_hw *hw)
return;
}
- swsm = E1000_READ_REG(hw, SWSM);
+ swsm = er32(SWSM);
/* Release the SW semaphores.*/
swsm &= ~E1000_SWSM_SMBI;
- E1000_WRITE_REG(hw, SWSM, swsm);
+ ew32(SWSM, swsm);
}
/******************************************************************************
@@ -8228,26 +8076,24 @@ e1000_release_software_semaphore(struct e1000_hw *hw)
* E1000_SUCCESS
*
*****************************************************************************/
-s32
-e1000_check_phy_reset_block(struct e1000_hw *hw)
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
{
u32 manc = 0;
u32 fwsm = 0;
if (hw->mac_type == e1000_ich8lan) {
- fwsm = E1000_READ_REG(hw, FWSM);
+ fwsm = er32(FWSM);
return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
: E1000_BLK_PHY_RESET;
}
if (hw->mac_type > e1000_82547_rev_2)
- manc = E1000_READ_REG(hw, MANC);
+ manc = er32(MANC);
return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
E1000_BLK_PHY_RESET : E1000_SUCCESS;
}
-static u8
-e1000_arc_subsystem_valid(struct e1000_hw *hw)
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
{
u32 fwsm;
@@ -8261,7 +8107,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
case e1000_82572:
case e1000_82573:
case e1000_80003es2lan:
- fwsm = E1000_READ_REG(hw, FWSM);
+ fwsm = er32(FWSM);
if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
return true;
break;
@@ -8283,8 +8129,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
* returns: E1000_SUCCESS
*
*****************************************************************************/
-static s32
-e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
u32 gcr_reg = 0;
@@ -8297,19 +8142,19 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
return E1000_SUCCESS;
if (no_snoop) {
- gcr_reg = E1000_READ_REG(hw, GCR);
+ gcr_reg = er32(GCR);
gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
gcr_reg |= no_snoop;
- E1000_WRITE_REG(hw, GCR, gcr_reg);
+ ew32(GCR, gcr_reg);
}
if (hw->mac_type == e1000_ich8lan) {
u32 ctrl_ext;
- E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
+ ew32(GCR, PCI_EX_82566_SNOOP_ALL);
- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ ew32(CTRL_EXT, ctrl_ext);
}
return E1000_SUCCESS;
@@ -8324,8 +8169,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
-static s32
-e1000_get_software_flag(struct e1000_hw *hw)
+static s32 e1000_get_software_flag(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
u32 extcnf_ctrl;
@@ -8334,11 +8178,11 @@ e1000_get_software_flag(struct e1000_hw *hw)
if (hw->mac_type == e1000_ich8lan) {
while (timeout) {
- extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
- E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
break;
mdelay(1);
@@ -8363,17 +8207,16 @@ e1000_get_software_flag(struct e1000_hw *hw)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
-static void
-e1000_release_software_flag(struct e1000_hw *hw)
+static void e1000_release_software_flag(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
DEBUGFUNC("e1000_release_software_flag");
if (hw->mac_type == e1000_ich8lan) {
- extcnf_ctrl= E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl= er32(EXTCNF_CTRL);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
- E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
}
return;
@@ -8388,9 +8231,8 @@ e1000_release_software_flag(struct e1000_hw *hw)
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
-static s32
-e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
s32 error = E1000_SUCCESS;
u32 flash_bank = 0;
@@ -8405,7 +8247,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
* to be updated with each read.
*/
/* Value of bit 22 corresponds to the flash bank we're on. */
- flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+ flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
/* Adjust offset appropriately if we're on bank 1 - adjust for word size */
bank_offset = flash_bank * (hw->flash_bank_size * 2);
@@ -8444,9 +8286,8 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
* words - number of words to write
* data - words to write to the EEPROM
*****************************************************************************/
-static s32
-e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
u32 i = 0;
s32 error = E1000_SUCCESS;
@@ -8491,8 +8332,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
*
* hw - The pointer to the hw structure
****************************************************************************/
-static s32
-e1000_ich8_cycle_init(struct e1000_hw *hw)
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
{
union ich8_hws_flash_status hsfsts;
s32 error = E1000_ERR_EEPROM;
@@ -8558,8 +8398,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
*
* hw - The pointer to the hw structure
****************************************************************************/
-static s32
-e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
{
union ich8_hws_flash_ctrl hsflctl;
union ich8_hws_flash_status hsfsts;
@@ -8593,9 +8432,8 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
* size - Size of data to read, 1=byte 2=word
* data - Pointer to the word to store the value read.
*****************************************************************************/
-static s32
-e1000_read_ich8_data(struct e1000_hw *hw, u32 index,
- u32 size, u16* data)
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 *data)
{
union ich8_hws_flash_status hsfsts;
union ich8_hws_flash_ctrl hsflctl;
@@ -8672,9 +8510,8 @@ e1000_read_ich8_data(struct e1000_hw *hw, u32 index,
* size - Size of data to read, 1=byte 2=word
* data - The byte(s) to write to the NVM.
*****************************************************************************/
-static s32
-e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
- u16 data)
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 data)
{
union ich8_hws_flash_status hsfsts;
union ich8_hws_flash_ctrl hsflctl;
@@ -8747,8 +8584,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
* index - The index of the byte to read.
* data - Pointer to a byte to store the value read.
*****************************************************************************/
-static s32
-e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data)
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
{
s32 status = E1000_SUCCESS;
u16 word = 0;
@@ -8770,8 +8606,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data)
* index - The index of the byte to write.
* byte - The byte to write to the NVM.
*****************************************************************************/
-static s32
-e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
{
s32 error = E1000_SUCCESS;
s32 program_retries = 0;
@@ -8803,8 +8638,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
* index - The index of the byte to read.
* data - The byte to write to the NVM.
*****************************************************************************/
-static s32
-e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
{
s32 status = E1000_SUCCESS;
u16 word = (u16)data;
@@ -8821,8 +8655,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
* index - The starting byte index of the word to read.
* data - Pointer to a word to store the value read.
*****************************************************************************/
-static s32
-e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
{
s32 status = E1000_SUCCESS;
status = e1000_read_ich8_data(hw, index, 2, data);
@@ -8840,8 +8673,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
* amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
* bank size may be 4, 8 or 64 KBytes
*****************************************************************************/
-static s32
-e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
{
union ich8_hws_flash_status hsfsts;
union ich8_hws_flash_ctrl hsflctl;
@@ -8930,9 +8762,9 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
return error;
}
-static s32
-e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
- u32 cnf_base_addr, u32 cnf_size)
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ u32 cnf_base_addr,
+ u32 cnf_size)
{
u32 ret_val = E1000_SUCCESS;
u16 word_addr, reg_data, reg_addr;
@@ -8972,8 +8804,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
*
* hw: Struct containing variables accessed by shared code
*****************************************************************************/
-static s32
-e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
{
u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
@@ -8981,32 +8812,32 @@ e1000_init_lcd_from_nvm(struct e1000_hw *hw)
return E1000_SUCCESS;
/* Check if SW needs configure the PHY */
- reg_data = E1000_READ_REG(hw, FEXTNVM);
+ reg_data = er32(FEXTNVM);
if (!(reg_data & FEXTNVM_SW_CONFIG))
return E1000_SUCCESS;
/* Wait for basic configuration to complete before proceeding */
loop = 0;
do {
- reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE;
+ reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
udelay(100);
loop++;
} while ((!reg_data) && (loop < 50));
/* Clear the Init Done bit for the next init event */
- reg_data = E1000_READ_REG(hw, STATUS);
+ reg_data = er32(STATUS);
reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
- E1000_WRITE_REG(hw, STATUS, reg_data);
+ ew32(STATUS, reg_data);
/* Make sure HW does not configure LCD from PHY extended configuration
before SW configuration */
- reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+ reg_data = er32(EXTCNF_CTRL);
if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
- reg_data = E1000_READ_REG(hw, EXTCNF_SIZE);
+ reg_data = er32(EXTCNF_SIZE);
cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
cnf_size >>= 16;
if (cnf_size) {
- reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+ reg_data = er32(EXTCNF_CTRL);
cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
/* cnf_base_addr is in DWORD */
cnf_base_addr >>= 16;
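
The bulk of the e1000_hw.c churn above is a mechanical conversion from the long-hand E1000_READ_REG(hw, REG)/E1000_WRITE_REG(hw, REG, val) accessors to the shorter er32()/ew32() helpers, which pick up a `struct e1000_hw *hw` variable from the enclosing scope; that is also why many of the e1000_main.c hunks below add a local `hw = &adapter->hw`. A minimal sketch of what such wrappers amount to, assuming plain MMIO accesses off hw->hw_addr (the driver's real definitions live in its osdep header and additionally handle the older 82542 register layout):

#include <linux/io.h>	/* readl()/writel() */

/* Sketch only: these rely on a variable named "hw" being in scope. */
#define er32(reg)		readl(hw->hw_addr + E1000_##reg)
#define ew32(reg, val)		writel((val), hw->hw_addr + E1000_##reg)

/* Posted-write flush: read any register back; no argument is needed since
 * "hw" again comes from the caller's scope, matching E1000_WRITE_FLUSH(). */
#define E1000_WRITE_FLUSH()	er32(STATUS)
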
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index cf12b05cd011..ad6da7b67e55 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,12 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#ifndef CONFIG_E1000_NAPI
-#define DRIVERNAPI
-#else
-#define DRIVERNAPI "-NAPI"
-#endif
-#define DRV_VERSION "7.3.20-k2"DRIVERNAPI
+#define DRV_VERSION "7.3.20-k3-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -138,7 +133,6 @@ static irqreturn_t e1000_intr(int irq, void *data);
static irqreturn_t e1000_intr_msi(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
-#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@@ -146,12 +140,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
-#else
-static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring);
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring);
-#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
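
With the CONFIG_E1000_NAPI conditionals dropped, the NAPI path is the only receive path left: e1000_clean() is the poll callback registered through netif_napi_add() in probe (weight 64, see the probe hunk further down) and bracketed by napi_enable()/napi_disable() in the up/down paths. For orientation, a hypothetical skeleton of a poll routine with that signature -- not the driver's actual body, and the helper that exits polling mode is deliberately left as a comment because its name changed across kernel releases:

/* Hypothetical NAPI poll skeleton; e1000_clean() itself is not shown here. */
static int example_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter =
		container_of(napi, struct e1000_adapter, napi);
	int work_done = 0;

	/* reclaim completed Tx descriptors, then process up to "budget"
	 * received frames, accumulating the count in work_done */
	e1000_clean_tx_irq(adapter, adapter->tx_ring);
	e1000_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);

	/* if less than the budget was used, leave polling mode and
	 * re-enable the device interrupt (version-specific helper omitted) */
	return work_done;
}
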
@@ -232,8 +220,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
* loaded. All it does is register with the PCI subsystem.
**/
-static int __init
-e1000_init_module(void)
+static int __init e1000_init_module(void)
{
int ret;
printk(KERN_INFO "%s - version %s\n",
@@ -261,8 +248,7 @@ module_init(e1000_init_module);
* from memory.
**/
-static void __exit
-e1000_exit_module(void)
+static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
}
@@ -271,12 +257,13 @@ module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
irq_handler_t handler = e1000_intr;
int irq_flags = IRQF_SHARED;
int err;
- if (adapter->hw.mac_type >= e1000_82571) {
+ if (hw->mac_type >= e1000_82571) {
adapter->have_msi = !pci_enable_msi(adapter->pdev);
if (adapter->have_msi) {
handler = e1000_intr_msi;
@@ -311,11 +298,12 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* @adapter: board private structure
**/
-static void
-e1000_irq_disable(struct e1000_adapter *adapter)
+static void e1000_irq_disable(struct e1000_adapter *adapter)
{
- E1000_WRITE_REG(&adapter->hw, IMC, ~0);
- E1000_WRITE_FLUSH(&adapter->hw);
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
synchronize_irq(adapter->pdev->irq);
}
@@ -324,22 +312,23 @@ e1000_irq_disable(struct e1000_adapter *adapter)
* @adapter: board private structure
**/
-static void
-e1000_irq_enable(struct e1000_adapter *adapter)
+static void e1000_irq_enable(struct e1000_adapter *adapter)
{
- E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
- E1000_WRITE_FLUSH(&adapter->hw);
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMS, IMS_ENABLE_MASK);
+ E1000_WRITE_FLUSH();
}
-static void
-e1000_update_mng_vlan(struct e1000_adapter *adapter)
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u16 vid = adapter->hw.mng_cookie.vlan_id;
+ u16 vid = hw->mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
if (adapter->vlgrp) {
if (!vlan_group_get_device(adapter->vlgrp, vid)) {
- if (adapter->hw.mng_cookie.status &
+ if (hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
e1000_vlan_rx_add_vid(netdev, vid);
adapter->mng_vlan_id = vid;
@@ -366,26 +355,24 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
*
**/
-static void
-e1000_release_hw_control(struct e1000_adapter *adapter)
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
u32 ctrl_ext;
u32 swsm;
+ struct e1000_hw *hw = &adapter->hw;
/* Let firmware take over control of h/w */
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82573:
- swsm = E1000_READ_REG(&adapter->hw, SWSM);
- E1000_WRITE_REG(&adapter->hw, SWSM,
- swsm & ~E1000_SWSM_DRV_LOAD);
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
break;
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
case e1000_ich8lan:
- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
break;
default:
break;
@@ -403,37 +390,36 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
*
**/
-static void
-e1000_get_hw_control(struct e1000_adapter *adapter)
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
u32 ctrl_ext;
u32 swsm;
+ struct e1000_hw *hw = &adapter->hw;
/* Let firmware know the driver has taken over */
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82573:
- swsm = E1000_READ_REG(&adapter->hw, SWSM);
- E1000_WRITE_REG(&adapter->hw, SWSM,
- swsm | E1000_SWSM_DRV_LOAD);
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
break;
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
case e1000_ich8lan:
- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
- ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
break;
default:
break;
}
}
-static void
-e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
+
if (adapter->en_mng_pt) {
- u32 manc = E1000_READ_REG(&adapter->hw, MANC);
+ u32 manc = er32(MANC);
/* disable hardware interception of ARP */
manc &= ~(E1000_MANC_ARP_EN);
@@ -441,37 +427,38 @@ e1000_init_manageability(struct e1000_adapter *adapter)
/* enable receiving management packets to the host */
/* this will probably generate destination unreachable messages
* from the host OS, but the packets will be handled on SMBUS */
- if (adapter->hw.has_manc2h) {
- u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
+ if (hw->has_manc2h) {
+ u32 manc2h = er32(MANC2H);
manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
manc2h |= E1000_MNG2HOST_PORT_623;
manc2h |= E1000_MNG2HOST_PORT_664;
- E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
+ ew32(MANC2H, manc2h);
}
- E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ ew32(MANC, manc);
}
}
-static void
-e1000_release_manageability(struct e1000_adapter *adapter)
+static void e1000_release_manageability(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
+
if (adapter->en_mng_pt) {
- u32 manc = E1000_READ_REG(&adapter->hw, MANC);
+ u32 manc = er32(MANC);
/* re-enable hardware interception of ARP */
manc |= E1000_MANC_ARP_EN;
- if (adapter->hw.has_manc2h)
+ if (hw->has_manc2h)
manc &= ~E1000_MANC_EN_MNG2HOST;
/* don't explicitly have to mess with MANC2H since
* MANC has an enable disable that gates MANC2H */
- E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ ew32(MANC, manc);
}
}
@@ -506,18 +493,19 @@ static void e1000_configure(struct e1000_adapter *adapter)
int e1000_up(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
+
/* hardware has been reset, we need to reload some things */
e1000_configure(adapter);
clear_bit(__E1000_DOWN, &adapter->flags);
-#ifdef CONFIG_E1000_NAPI
napi_enable(&adapter->napi);
-#endif
+
e1000_irq_enable(adapter);
/* fire a link change interrupt to start the watchdog */
- E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
+ ew32(ICS, E1000_ICS_LSC);
return 0;
}
@@ -533,30 +521,33 @@ int e1000_up(struct e1000_adapter *adapter)
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u16 mii_reg = 0;
/* Just clear the power down bit to wake the phy back up */
- if (adapter->hw.media_type == e1000_media_type_copper) {
+ if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its
* settings across a power-down/up cycle */
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
}
}
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
+
/* Power down the PHY so no link is implied when interface is down *
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
* (c) SoL/IDER session is active */
- if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
- adapter->hw.media_type == e1000_media_type_copper) {
+ if (!adapter->wol && hw->mac_type >= e1000_82540 &&
+ hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82540:
case e1000_82545:
case e1000_82545_rev_3:
@@ -566,8 +557,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
case e1000_82541_rev_2:
case e1000_82547:
case e1000_82547_rev_2:
- if (E1000_READ_REG(&adapter->hw, MANC) &
- E1000_MANC_SMBUS_EN)
+ if (er32(MANC) & E1000_MANC_SMBUS_EN)
goto out;
break;
case e1000_82571:
@@ -575,24 +565,23 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
case e1000_82573:
case e1000_80003es2lan:
case e1000_ich8lan:
- if (e1000_check_mng_mode(&adapter->hw) ||
- e1000_check_phy_reset_block(&adapter->hw))
+ if (e1000_check_mng_mode(hw) ||
+ e1000_check_phy_reset_block(hw))
goto out;
break;
default:
goto out;
}
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
mdelay(1);
}
out:
return;
}
-void
-e1000_down(struct e1000_adapter *adapter)
+void e1000_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -600,9 +589,8 @@ e1000_down(struct e1000_adapter *adapter)
* reschedule our watchdog timer */
set_bit(__E1000_DOWN, &adapter->flags);
-#ifdef CONFIG_E1000_NAPI
napi_disable(&adapter->napi);
-#endif
+
e1000_irq_disable(adapter);
del_timer_sync(&adapter->tx_fifo_stall_timer);
@@ -620,8 +608,7 @@ e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
-void
-e1000_reinit_locked(struct e1000_adapter *adapter)
+void e1000_reinit_locked(struct e1000_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
@@ -631,9 +618,9 @@ e1000_reinit_locked(struct e1000_adapter *adapter)
clear_bit(__E1000_RESETTING, &adapter->flags);
}
-void
-e1000_reset(struct e1000_adapter *adapter)
+void e1000_reset(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u32 pba = 0, tx_space, min_tx_space, min_rx_space;
u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
bool legacy_pba_adjust = false;
@@ -642,7 +629,7 @@ e1000_reset(struct e1000_adapter *adapter)
* To take effect CTRL.RST is required.
*/
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
@@ -683,16 +670,16 @@ e1000_reset(struct e1000_adapter *adapter)
if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
pba -= 8; /* allocate more FIFO for Tx */
- if (adapter->hw.mac_type == e1000_82547) {
+ if (hw->mac_type == e1000_82547) {
adapter->tx_fifo_head = 0;
adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
adapter->tx_fifo_size =
(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
atomic_set(&adapter->tx_fifo_stall, 0);
}
- } else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
/* adjust PBA for jumbo frames */
- E1000_WRITE_REG(&adapter->hw, PBA, pba);
+ ew32(PBA, pba);
/* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
@@ -700,7 +687,7 @@ e1000_reset(struct e1000_adapter *adapter)
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
* expressed in KB. */
- pba = E1000_READ_REG(&adapter->hw, PBA);
+ pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
@@ -723,7 +710,7 @@ e1000_reset(struct e1000_adapter *adapter)
pba = pba - (min_tx_space - tx_space);
/* PCI/PCIx hardware has PBA alignment constraints */
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82545 ... e1000_82546_rev_3:
pba &= ~(E1000_PBA_8K - 1);
break;
@@ -734,7 +721,7 @@ e1000_reset(struct e1000_adapter *adapter)
/* if short on rx space, rx wins and must trump tx
* adjustment or use Early Receive if available */
if (pba < min_rx_space) {
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82573:
/* ERT enabled in e1000_configure_rx */
break;
@@ -746,7 +733,7 @@ e1000_reset(struct e1000_adapter *adapter)
}
}
- E1000_WRITE_REG(&adapter->hw, PBA, pba);
+ ew32(PBA, pba);
/* flow control settings */
/* Set the FC high water mark to 90% of the FIFO size.
@@ -759,54 +746,54 @@ e1000_reset(struct e1000_adapter *adapter)
if (pba < E1000_PBA_16K)
fc_high_water_mark = (pba * 1024) - 1600;
- adapter->hw.fc_high_water = fc_high_water_mark;
- adapter->hw.fc_low_water = fc_high_water_mark - 8;
- if (adapter->hw.mac_type == e1000_80003es2lan)
- adapter->hw.fc_pause_time = 0xFFFF;
+ hw->fc_high_water = fc_high_water_mark;
+ hw->fc_low_water = fc_high_water_mark - 8;
+ if (hw->mac_type == e1000_80003es2lan)
+ hw->fc_pause_time = 0xFFFF;
else
- adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
- adapter->hw.fc_send_xon = 1;
- adapter->hw.fc = adapter->hw.original_fc;
+ hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+ hw->fc_send_xon = 1;
+ hw->fc = hw->original_fc;
/* Allow time for pending master requests to run */
- e1000_reset_hw(&adapter->hw);
- if (adapter->hw.mac_type >= e1000_82544)
- E1000_WRITE_REG(&adapter->hw, WUC, 0);
+ e1000_reset_hw(hw);
+ if (hw->mac_type >= e1000_82544)
+ ew32(WUC, 0);
- if (e1000_init_hw(&adapter->hw))
+ if (e1000_init_hw(hw))
DPRINTK(PROBE, ERR, "Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
- if (adapter->hw.mac_type >= e1000_82544 &&
- adapter->hw.mac_type <= e1000_82547_rev_2 &&
- adapter->hw.autoneg == 1 &&
- adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
- u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ if (hw->mac_type >= e1000_82544 &&
+ hw->mac_type <= e1000_82547_rev_2 &&
+ hw->autoneg == 1 &&
+ hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
* can cause a loss of link at power off or driver unload */
ctrl &= ~E1000_CTRL_SWDPIN3;
- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
}
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
- E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
+ ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
- e1000_reset_adaptive(&adapter->hw);
- e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+ e1000_reset_adaptive(hw);
+ e1000_phy_get_info(hw, &adapter->phy_info);
if (!adapter->smart_power_down &&
- (adapter->hw.mac_type == e1000_82571 ||
- adapter->hw.mac_type == e1000_82572)) {
+ (hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572)) {
u16 phy_data = 0;
/* speed up time to link by disabling smart power down, ignore
* the return value of this function because there is nothing
* different we would do if it failed */
- e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
&phy_data);
phy_data &= ~IGP02E1000_PM_SPD;
- e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+ e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
phy_data);
}
@@ -865,13 +852,49 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
printk(KERN_ERR "to enable this network device.\n");
printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
"to your hardware vendor\n");
- printk(KERN_ERR "or Intel Customer Support: linux-nics@intel.com\n");
+ printk(KERN_ERR "or Intel Customer Support.\n");
printk(KERN_ERR "/*********************/\n");
kfree(data);
}
/**
+ * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
+ * @pdev: PCI device information struct
+ *
+ * Return true if an adapter needs ioport resources
+ **/
+static int e1000_is_need_ioport(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541ER_LOM:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* e1000_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in e1000_pci_tbl
@@ -882,37 +905,51 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-
-static int __devinit
-e1000_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit e1000_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *netdev;
struct e1000_adapter *adapter;
+ struct e1000_hw *hw;
static int cards_found = 0;
static int global_quad_port_a = 0; /* global ksp3 port a indication */
int i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, need_ioport;
DECLARE_MAC_BUF(mac);
- if ((err = pci_enable_device(pdev)))
+ /* do not allocate ioport bars when not needed */
+ need_ioport = e1000_is_need_ioport(pdev);
+ if (need_ioport) {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+ err = pci_enable_device(pdev);
+ } else {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_enable_device(pdev);
+ }
+ if (err)
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
- (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
- E1000_ERR("No usable DMA configuration, aborting\n");
- goto err_dma;
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ E1000_ERR("No usable DMA configuration, "
+ "aborting\n");
+ goto err_dma;
+ }
}
pci_using_dac = 0;
}
- if ((err = pci_request_regions(pdev, e1000_driver_name)))
+ err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
+ if (err)
goto err_pci_reg;
pci_set_master(pdev);
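
Resource handling in probe changes shape here: instead of claiming every PCI region, the driver builds a BAR bitmask with pci_select_bars() -- memory BARs always, I/O BARs only for the legacy parts whitelisted by e1000_is_need_ioport() -- and hands it to pci_request_selected_regions(); the matching error and remove paths switch to pci_release_selected_regions() further down. A condensed sketch of the pattern as it appears in this hunk (error handling trimmed):

/* Sketch of the selected-BAR request pattern used above. */
int bars = need_ioport
	? pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO)
	: pci_select_bars(pdev, IORESOURCE_MEM);

err = pci_enable_device(pdev);
if (!err)
	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);

/* ... on teardown or any later failure ... */
pci_release_selected_regions(pdev, bars);
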
@@ -928,21 +965,27 @@ e1000_probe(struct pci_dev *pdev,
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
- adapter->hw.back = adapter;
adapter->msg_enable = (1 << debug) - 1;
+ adapter->bars = bars;
+ adapter->need_ioport = need_ioport;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
err = -EIO;
- adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
- pci_resource_len(pdev, BAR_0));
- if (!adapter->hw.hw_addr)
+ hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+ pci_resource_len(pdev, BAR_0));
+ if (!hw->hw_addr)
goto err_ioremap;
- for (i = BAR_1; i <= BAR_5; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
- adapter->hw.io_base = pci_resource_start(pdev, i);
- break;
+ if (adapter->need_ioport) {
+ for (i = BAR_1; i <= BAR_5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ hw->io_base = pci_resource_start(pdev, i);
+ break;
+ }
}
}
@@ -957,9 +1000,7 @@ e1000_probe(struct pci_dev *pdev,
e1000_set_ethtool_ops(netdev);
netdev->tx_timeout = &e1000_tx_timeout;
netdev->watchdog_timeo = 5 * HZ;
-#ifdef CONFIG_E1000_NAPI
netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
-#endif
netdev->vlan_rx_register = e1000_vlan_rx_register;
netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
@@ -972,49 +1013,50 @@ e1000_probe(struct pci_dev *pdev,
/* setup the private structure */
- if ((err = e1000_sw_init(adapter)))
+ err = e1000_sw_init(adapter);
+ if (err)
goto err_sw_init;
err = -EIO;
/* Flash BAR mapping must happen after e1000_sw_init
* because it depends on mac_type */
- if ((adapter->hw.mac_type == e1000_ich8lan) &&
+ if ((hw->mac_type == e1000_ich8lan) &&
(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- adapter->hw.flash_address =
+ hw->flash_address =
ioremap(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
- if (!adapter->hw.flash_address)
+ if (!hw->flash_address)
goto err_flashmap;
}
- if (e1000_check_phy_reset_block(&adapter->hw))
+ if (e1000_check_phy_reset_block(hw))
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
- if (adapter->hw.mac_type >= e1000_82543) {
+ if (hw->mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- if (adapter->hw.mac_type == e1000_ich8lan)
+ if (hw->mac_type == e1000_ich8lan)
netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
}
- if ((adapter->hw.mac_type >= e1000_82544) &&
- (adapter->hw.mac_type != e1000_82547))
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
- if (adapter->hw.mac_type > e1000_82547_rev_2)
+ if (hw->mac_type > e1000_82547_rev_2)
netdev->features |= NETIF_F_TSO6;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_LLTX;
- adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
/* initialize eeprom parameters */
- if (e1000_init_eeprom_params(&adapter->hw)) {
+ if (e1000_init_eeprom_params(hw)) {
E1000_ERR("EEPROM initialization failed\n");
goto err_eeprom;
}
@@ -1022,10 +1064,10 @@ e1000_probe(struct pci_dev *pdev,
/* before reading the EEPROM, reset the controller to
* put the device in a known good starting state */
- e1000_reset_hw(&adapter->hw);
+ e1000_reset_hw(hw);
/* make sure the EEPROM is good */
- if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
+ if (e1000_validate_eeprom_checksum(hw) < 0) {
DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
/*
@@ -1036,24 +1078,24 @@ e1000_probe(struct pci_dev *pdev,
* interface after manually setting a hw addr using
* `ip set address`
*/
- memset(adapter->hw.mac_addr, 0, netdev->addr_len);
+ memset(hw->mac_addr, 0, netdev->addr_len);
} else {
/* copy the MAC address out of the EEPROM */
- if (e1000_read_mac_addr(&adapter->hw))
+ if (e1000_read_mac_addr(hw))
DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr))
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
- e1000_get_bus_info(&adapter->hw);
+ e1000_get_bus_info(hw);
init_timer(&adapter->tx_fifo_stall_timer);
adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
- adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
+ adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &e1000_watchdog;
@@ -1061,7 +1103,7 @@ e1000_probe(struct pci_dev *pdev,
init_timer(&adapter->phy_info_timer);
adapter->phy_info_timer.function = &e1000_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long) adapter;
+ adapter->phy_info_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, e1000_reset_task);
@@ -1072,18 +1114,18 @@ e1000_probe(struct pci_dev *pdev,
* enable the ACPI Magic Packet filter
*/
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
break;
case e1000_82544:
- e1000_read_eeprom(&adapter->hw,
+ e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
eeprom_apme_mask = E1000_EEPROM_82544_APM;
break;
case e1000_ich8lan:
- e1000_read_eeprom(&adapter->hw,
+ e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
break;
@@ -1091,14 +1133,14 @@ e1000_probe(struct pci_dev *pdev,
case e1000_82546_rev_3:
case e1000_82571:
case e1000_80003es2lan:
- if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
- e1000_read_eeprom(&adapter->hw,
+ if (er32(STATUS) & E1000_STATUS_FUNC_1){
+ e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
break;
}
/* Fall Through */
default:
- e1000_read_eeprom(&adapter->hw,
+ e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
break;
}
@@ -1117,7 +1159,7 @@ e1000_probe(struct pci_dev *pdev,
case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
- if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
@@ -1140,8 +1182,6 @@ e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
/* print bus type/speed/width info */
- {
- struct e1000_hw *hw = &adapter->hw;
DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
(hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
@@ -1154,11 +1194,10 @@ e1000_probe(struct pci_dev *pdev,
(hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
(hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
"32-bit"));
- }
printk("%s\n", print_mac(mac, netdev->dev_addr));
- if (adapter->hw.bus_type == e1000_bus_type_pci_express) {
+ if (hw->bus_type == e1000_bus_type_pci_express) {
DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
"longer be supported by this driver in the future.\n",
pdev->vendor, pdev->device);
@@ -1173,8 +1212,8 @@ e1000_probe(struct pci_dev *pdev,
* DRV_LOAD until the interface is up. For all other cases,
* let the f/w know that the h/w is now under the control
* of the driver. */
- if (adapter->hw.mac_type != e1000_82573 ||
- !e1000_check_mng_mode(&adapter->hw))
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
e1000_get_hw_control(adapter);
/* tell the stack to leave us alone until e1000_open() is called */
@@ -1182,7 +1221,8 @@ e1000_probe(struct pci_dev *pdev,
netif_stop_queue(netdev);
strcpy(netdev->name, "eth%d");
- if ((err = register_netdev(netdev)))
+ err = register_netdev(netdev);
+ if (err)
goto err_register;
DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -1193,28 +1233,24 @@ e1000_probe(struct pci_dev *pdev,
err_register:
e1000_release_hw_control(adapter);
err_eeprom:
- if (!e1000_check_phy_reset_block(&adapter->hw))
- e1000_phy_hw_reset(&adapter->hw);
+ if (!e1000_check_phy_reset_block(hw))
+ e1000_phy_hw_reset(hw);
- if (adapter->hw.flash_address)
- iounmap(adapter->hw.flash_address);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
err_flashmap:
-#ifdef CONFIG_E1000_NAPI
for (i = 0; i < adapter->num_rx_queues; i++)
dev_put(&adapter->polling_netdev[i]);
-#endif
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
-#ifdef CONFIG_E1000_NAPI
kfree(adapter->polling_netdev);
-#endif
err_sw_init:
- iounmap(adapter->hw.hw_addr);
+ iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, bars);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -1231,14 +1267,12 @@ err_dma:
* memory.
**/
-static void __devexit
-e1000_remove(struct pci_dev *pdev)
+static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_E1000_NAPI
+ struct e1000_hw *hw = &adapter->hw;
int i;
-#endif
cancel_work_sync(&adapter->reset_task);
@@ -1248,26 +1282,22 @@ e1000_remove(struct pci_dev *pdev)
* would have already happened in close and is redundant. */
e1000_release_hw_control(adapter);
-#ifdef CONFIG_E1000_NAPI
for (i = 0; i < adapter->num_rx_queues; i++)
dev_put(&adapter->polling_netdev[i]);
-#endif
unregister_netdev(netdev);
- if (!e1000_check_phy_reset_block(&adapter->hw))
- e1000_phy_hw_reset(&adapter->hw);
+ if (!e1000_check_phy_reset_block(hw))
+ e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
-#ifdef CONFIG_E1000_NAPI
kfree(adapter->polling_netdev);
-#endif
- iounmap(adapter->hw.hw_addr);
- if (adapter->hw.flash_address)
- iounmap(adapter->hw.flash_address);
- pci_release_regions(pdev);
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev, adapter->bars);
free_netdev(netdev);
@@ -1283,15 +1313,12 @@ e1000_remove(struct pci_dev *pdev)
* OS network device settings (MTU size).
**/
-static int __devinit
-e1000_sw_init(struct e1000_adapter *adapter)
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
-#ifdef CONFIG_E1000_NAPI
int i;
-#endif
/* PCI config space info */
@@ -1349,14 +1376,12 @@ e1000_sw_init(struct e1000_adapter *adapter)
return -ENOMEM;
}
-#ifdef CONFIG_E1000_NAPI
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->polling_netdev[i].priv = adapter;
dev_hold(&adapter->polling_netdev[i]);
set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
}
spin_lock_init(&adapter->tx_queue_lock);
-#endif
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -1377,8 +1402,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
* intended for Multiqueue, but should work fine with a single queue.
**/
-static int __devinit
-e1000_alloc_queues(struct e1000_adapter *adapter)
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct e1000_tx_ring), GFP_KERNEL);
@@ -1392,7 +1416,6 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
return -ENOMEM;
}
-#ifdef CONFIG_E1000_NAPI
adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
sizeof(struct net_device),
GFP_KERNEL);
@@ -1401,7 +1424,6 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
kfree(adapter->rx_ring);
return -ENOMEM;
}
-#endif
return E1000_SUCCESS;
}
@@ -1419,10 +1441,10 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
* and the stack is notified that the interface is ready.
**/
-static int
-e1000_open(struct net_device *netdev)
+static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
int err;
/* disallow open during test */
@@ -1442,15 +1464,15 @@ e1000_open(struct net_device *netdev)
e1000_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if ((adapter->hw.mng_cookie.status &
+ if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
e1000_update_mng_vlan(adapter);
}
/* If AMT is enabled, let the firmware know that the network
* interface is now open */
- if (adapter->hw.mac_type == e1000_82573 &&
- e1000_check_mng_mode(&adapter->hw))
+ if (hw->mac_type == e1000_82573 &&
+ e1000_check_mng_mode(hw))
e1000_get_hw_control(adapter);
/* before we allocate an interrupt, we must be ready to handle it.
@@ -1466,16 +1488,14 @@ e1000_open(struct net_device *netdev)
/* From here on the code is the same as e1000_up() */
clear_bit(__E1000_DOWN, &adapter->flags);
-#ifdef CONFIG_E1000_NAPI
napi_enable(&adapter->napi);
-#endif
e1000_irq_enable(adapter);
netif_start_queue(netdev);
/* fire a link status change interrupt to start the watchdog */
- E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
+ ew32(ICS, E1000_ICS_LSC);
return E1000_SUCCESS;
@@ -1503,10 +1523,10 @@ err_setup_tx:
* hardware, and all transmit and receive resources are freed.
**/
-static int
-e1000_close(struct net_device *netdev)
+static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
@@ -1518,7 +1538,7 @@ e1000_close(struct net_device *netdev)
/* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it) */
- if ((adapter->hw.mng_cookie.status &
+ if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!(adapter->vlgrp &&
vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
@@ -1527,8 +1547,8 @@ e1000_close(struct net_device *netdev)
/* If AMT is enabled, let the firmware know that the network
* interface is now closed */
- if (adapter->hw.mac_type == e1000_82573 &&
- e1000_check_mng_mode(&adapter->hw))
+ if (hw->mac_type == e1000_82573 &&
+ e1000_check_mng_mode(hw))
e1000_release_hw_control(adapter);
return 0;
@@ -1540,17 +1560,17 @@ e1000_close(struct net_device *netdev)
* @start: address of beginning of memory
* @len: length of memory
**/
-static bool
-e1000_check_64k_bound(struct e1000_adapter *adapter,
- void *start, unsigned long len)
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
+ unsigned long len)
{
- unsigned long begin = (unsigned long) start;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long begin = (unsigned long)start;
unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory
* write location to cross 64k boundary due to errata 23 */
- if (adapter->hw.mac_type == e1000_82545 ||
- adapter->hw.mac_type == e1000_82546) {
+ if (hw->mac_type == e1000_82545 ||
+ hw->mac_type == e1000_82546) {
return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
}
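
The XOR test above works because begin ^ (end - 1) has a bit set exactly where the first and last byte addresses differ; any difference at bit 16 or above means the buffer straddles a 64 KB line, which errata 23 forbids on first-rev 82545/82546 parts. A tiny worked check with made-up addresses:

/* Hypothetical values: a 4 KB buffer that starts 1 KB below a 64 KB line. */
unsigned long begin = 0xfc00;			/* 63 KB                     */
unsigned long end   = begin + 4096;		/* 0x10c00, past 0x10000     */
int crosses = ((begin ^ (end - 1)) >> 16) != 0;	/* 0xfc00 ^ 0x10bff has bit  */
						/* 16 set, so crosses == 1   */
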
@@ -1565,9 +1585,8 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
* Return 0 on success, negative on failure
**/
-static int
-e1000_setup_tx_resources(struct e1000_adapter *adapter,
- struct e1000_tx_ring *txdr)
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr)
{
struct pci_dev *pdev = adapter->pdev;
int size;
@@ -1641,8 +1660,7 @@ setup_tx_desc_die:
* Return 0 on success, negative on failure
**/
-int
-e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1668,8 +1686,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
* Configure the Tx unit of the MAC after a reset.
**/
-static void
-e1000_configure_tx(struct e1000_adapter *adapter)
+static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
struct e1000_hw *hw = &adapter->hw;
@@ -1684,18 +1701,18 @@ e1000_configure_tx(struct e1000_adapter *adapter)
tdba = adapter->tx_ring[0].dma;
tdlen = adapter->tx_ring[0].count *
sizeof(struct e1000_tx_desc);
- E1000_WRITE_REG(hw, TDLEN, tdlen);
- E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
- E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, TDT, 0);
- E1000_WRITE_REG(hw, TDH, 0);
+ ew32(TDLEN, tdlen);
+ ew32(TDBAH, (tdba >> 32));
+ ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
+ ew32(TDT, 0);
+ ew32(TDH, 0);
adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
break;
}
/* Set the default values for the Tx Inter Packet Gap timer */
- if (adapter->hw.mac_type <= e1000_82547_rev_2 &&
+ if (hw->mac_type <= e1000_82547_rev_2 &&
(hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes))
tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
@@ -1720,34 +1737,34 @@ e1000_configure_tx(struct e1000_adapter *adapter)
}
tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
- E1000_WRITE_REG(hw, TIPG, tipg);
+ ew32(TIPG, tipg);
/* Set the Tx Interrupt Delay register */
- E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+ ew32(TIDV, adapter->tx_int_delay);
if (hw->mac_type >= e1000_82540)
- E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
+ ew32(TADV, adapter->tx_abs_int_delay);
/* Program the Transmit Control Register */
- tctl = E1000_READ_REG(hw, TCTL);
+ tctl = er32(TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
- tarc = E1000_READ_REG(hw, TARC0);
+ tarc = er32(TARC0);
/* set the speed mode bit, we'll clear it if we're not at
* gigabit link later */
tarc |= (1 << 21);
- E1000_WRITE_REG(hw, TARC0, tarc);
+ ew32(TARC0, tarc);
} else if (hw->mac_type == e1000_80003es2lan) {
- tarc = E1000_READ_REG(hw, TARC0);
+ tarc = er32(TARC0);
tarc |= 1;
- E1000_WRITE_REG(hw, TARC0, tarc);
- tarc = E1000_READ_REG(hw, TARC1);
+ ew32(TARC0, tarc);
+ tarc = er32(TARC1);
tarc |= 1;
- E1000_WRITE_REG(hw, TARC1, tarc);
+ ew32(TARC1, tarc);
}
e1000_config_collision_dist(hw);
@@ -1770,7 +1787,7 @@ e1000_configure_tx(struct e1000_adapter *adapter)
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = 1;
- E1000_WRITE_REG(hw, TCTL, tctl);
+ ew32(TCTL, tctl);
}
@@ -1782,10 +1799,10 @@ e1000_configure_tx(struct e1000_adapter *adapter)
* Returns 0 on success, negative on failure
**/
-static int
-e1000_setup_rx_resources(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rxdr)
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr)
{
+ struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;
@@ -1818,7 +1835,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
return -ENOMEM;
}
- if (adapter->hw.mac_type <= e1000_82547_rev_2)
+ if (hw->mac_type <= e1000_82547_rev_2)
desc_len = sizeof(struct e1000_rx_desc);
else
desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1887,8 +1904,7 @@ setup_rx_desc_die:
* Return 0 on success, negative on failure
**/
-int
-e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1913,24 +1929,24 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
**/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
-static void
-e1000_setup_rctl(struct e1000_adapter *adapter)
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u32 rctl, rfctl;
u32 psrctl = 0;
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
u32 pages = 0;
#endif
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
- if (adapter->hw.tbi_compatibility_on == 1)
+ if (hw->tbi_compatibility_on == 1)
rctl |= E1000_RCTL_SBP;
else
rctl &= ~E1000_RCTL_SBP;
@@ -1983,7 +1999,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
/* allocations using alloc_page take too long for regular MTU
* so only enable packet split for jumbo frames */
pages = PAGE_USE_COUNT(adapter->netdev->mtu);
- if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) &&
+ if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
adapter->rx_ps_pages = pages;
else
@@ -1991,14 +2007,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
#endif
if (adapter->rx_ps_pages) {
/* Configure extra packet-split registers */
- rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
+ rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
/* disable packet split support for IPv6 extension headers,
* because some malformed IPv6 headers can hang the RX */
rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
E1000_RFCTL_NEW_IPV6_EXT_DIS);
- E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
+ ew32(RFCTL, rfctl);
rctl |= E1000_RCTL_DTYP_PS;
@@ -2018,10 +2034,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
break;
}
- E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
+ ew32(PSRCTL, psrctl);
}
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ ew32(RCTL, rctl);
}
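
PAGE_USE_COUNT() above is a ceiling division of a size by PAGE_SIZE, and the pages <= 3 test is what confines packet split to jumbo MTUs on 82571 and newer parts. A quick worked check, assuming a 4 KB PAGE_SIZE (illustrative values only):

/* Illustrative only: pages consumed by an MTU when PAGE_SIZE == 4096. */
unsigned int std_pages   = (1500 >> 12) + ((1500 & 4095) ? 1 : 0); /* = 1 */
unsigned int jumbo_pages = (9000 >> 12) + ((9000 & 4095) ? 1 : 0); /* = 3 */
/* 3 <= 3, so a 9000-byte MTU still qualifies for packet split (with LPE set) */
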
/**
@@ -2031,8 +2047,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
* Configure the Rx unit of the MAC after a reset.
**/
-static void
-e1000_configure_rx(struct e1000_adapter *adapter)
+static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
struct e1000_hw *hw = &adapter->hw;
@@ -2052,30 +2067,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
}
/* disable receives while setting up the descriptors */
- rctl = E1000_READ_REG(hw, RCTL);
- E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
/* set the Receive Delay Timer Register */
- E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
+ ew32(RDTR, adapter->rx_int_delay);
if (hw->mac_type >= e1000_82540) {
- E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
+ ew32(RADV, adapter->rx_abs_int_delay);
if (adapter->itr_setting != 0)
- E1000_WRITE_REG(hw, ITR,
- 1000000000 / (adapter->itr * 256));
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
}
if (hw->mac_type >= e1000_82571) {
- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
/* Reset delay timers after every interrupt */
ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
-#ifdef CONFIG_E1000_NAPI
/* Auto-Mask interrupts upon ICR access */
ctrl_ext |= E1000_CTRL_EXT_IAME;
- E1000_WRITE_REG(hw, IAM, 0xffffffff);
-#endif
- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
+ ew32(IAM, 0xffffffff);
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -2084,11 +2096,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
case 1:
default:
rdba = adapter->rx_ring[0].dma;
- E1000_WRITE_REG(hw, RDLEN, rdlen);
- E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
- E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, RDT, 0);
- E1000_WRITE_REG(hw, RDH, 0);
+ ew32(RDLEN, rdlen);
+ ew32(RDBAH, (rdba >> 32));
+ ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
+ ew32(RDT, 0);
+ ew32(RDH, 0);
adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
break;
@@ -2096,7 +2108,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable 82543 Receive Checksum Offload for TCP and UDP */
if (hw->mac_type >= e1000_82543) {
- rxcsum = E1000_READ_REG(hw, RXCSUM);
+ rxcsum = er32(RXCSUM);
if (adapter->rx_csum) {
rxcsum |= E1000_RXCSUM_TUOFL;
@@ -2110,17 +2122,17 @@ e1000_configure_rx(struct e1000_adapter *adapter)
rxcsum &= ~E1000_RXCSUM_TUOFL;
/* don't need to clear IPPCSE as it defaults to 0 */
}
- E1000_WRITE_REG(hw, RXCSUM, rxcsum);
+ ew32(RXCSUM, rxcsum);
}
/* enable early receives on 82573, only takes effect if using > 2048
* byte total frame size, for example only for jumbo frames */
#define E1000_ERT_2048 0x100
if (hw->mac_type == e1000_82573)
- E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
+ ew32(ERT, E1000_ERT_2048);
/* Enable Receives */
- E1000_WRITE_REG(hw, RCTL, rctl);
+ ew32(RCTL, rctl);
}
/**
@@ -2131,9 +2143,8 @@ e1000_configure_rx(struct e1000_adapter *adapter)
* Free all transmit software resources
**/
-static void
-e1000_free_tx_resources(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring)
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
{
struct pci_dev *pdev = adapter->pdev;
@@ -2154,8 +2165,7 @@ e1000_free_tx_resources(struct e1000_adapter *adapter,
* Free all transmit software resources
**/
-void
-e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -2163,9 +2173,8 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
-static void
-e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
- struct e1000_buffer *buffer_info)
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+ struct e1000_buffer *buffer_info)
{
if (buffer_info->dma) {
pci_unmap_page(adapter->pdev,
@@ -2187,10 +2196,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @tx_ring: ring to be cleaned
**/
-static void
-e1000_clean_tx_ring(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring)
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
{
+ struct e1000_hw *hw = &adapter->hw;
struct e1000_buffer *buffer_info;
unsigned long size;
unsigned int i;
@@ -2213,8 +2222,8 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
tx_ring->next_to_clean = 0;
tx_ring->last_tx_tso = 0;
- writel(0, adapter->hw.hw_addr + tx_ring->tdh);
- writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+ writel(0, hw->hw_addr + tx_ring->tdh);
+ writel(0, hw->hw_addr + tx_ring->tdt);
}
/**
@@ -2222,8 +2231,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
* @adapter: board private structure
**/
-static void
-e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2239,9 +2247,8 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
* Free all receive software resources
**/
-static void
-e1000_free_rx_resources(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring)
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
@@ -2266,8 +2273,7 @@ e1000_free_rx_resources(struct e1000_adapter *adapter,
* Free all receive software resources
**/
-void
-e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -2281,10 +2287,10 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @rx_ring: ring to free buffers from
**/
-static void
-e1000_clean_rx_ring(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring)
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
{
+ struct e1000_hw *hw = &adapter->hw;
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
@@ -2331,8 +2337,8 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
- writel(0, adapter->hw.hw_addr + rx_ring->rdh);
- writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+ writel(0, hw->hw_addr + rx_ring->rdh);
+ writel(0, hw->hw_addr + rx_ring->rdt);
}
/**
@@ -2340,8 +2346,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
* @adapter: board private structure
**/
-static void
-e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2352,38 +2357,38 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
* and memory write and invalidate disabled for certain operations
*/
-static void
-e1000_enter_82542_rst(struct e1000_adapter *adapter)
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 rctl;
- e1000_pci_clear_mwi(&adapter->hw);
+ e1000_pci_clear_mwi(hw);
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl = er32(RCTL);
rctl |= E1000_RCTL_RST;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- E1000_WRITE_FLUSH(&adapter->hw);
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
mdelay(5);
if (netif_running(netdev))
e1000_clean_all_rx_rings(adapter);
}
-static void
-e1000_leave_82542_rst(struct e1000_adapter *adapter)
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 rctl;
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl = er32(RCTL);
rctl &= ~E1000_RCTL_RST;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- E1000_WRITE_FLUSH(&adapter->hw);
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
mdelay(5);
- if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
- e1000_pci_set_mwi(&adapter->hw);
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
if (netif_running(netdev)) {
/* No need to loop, because 82542 supports only 1 queue */
@@ -2401,10 +2406,10 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
* Returns 0 on success, negative on failure
**/
-static int
-e1000_set_mac(struct net_device *netdev, void *p)
+static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
@@ -2412,19 +2417,19 @@ e1000_set_mac(struct net_device *netdev, void *p)
/* 82542 2.0 needs to be in reset to write receive address registers */
- if (adapter->hw.mac_type == e1000_82542_rev2_0)
+ if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
- e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+ e1000_rar_set(hw, hw->mac_addr, 0);
/* With 82571 controllers, LAA may be overwritten (with the default)
* due to controller reset from the other port. */
- if (adapter->hw.mac_type == e1000_82571) {
+ if (hw->mac_type == e1000_82571) {
/* activate the work around */
- adapter->hw.laa_is_present = 1;
+ hw->laa_is_present = 1;
/* Hold a copy of the LAA in RAR[14]. This is done so that
* between the time RAR[0] gets clobbered and the time it
@@ -2432,11 +2437,11 @@ e1000_set_mac(struct net_device *netdev, void *p)
* of the RARs and no incoming packets directed to this port
* are dropped. Eventually the LAA will be in RAR[0] and
* RAR[14] */
- e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
+ e1000_rar_set(hw, hw->mac_addr,
E1000_RAR_ENTRIES - 1);
}
- if (adapter->hw.mac_type == e1000_82542_rev2_0)
+ if (hw->mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
return 0;
@@ -2452,8 +2457,7 @@ e1000_set_mac(struct net_device *netdev, void *p)
* promiscuous mode, and all-multi behavior.
**/
-static void
-e1000_set_rx_mode(struct net_device *netdev)
+static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2466,16 +2470,16 @@ e1000_set_rx_mode(struct net_device *netdev)
E1000_NUM_MTA_REGISTERS_ICH8LAN :
E1000_NUM_MTA_REGISTERS;
- if (adapter->hw.mac_type == e1000_ich8lan)
+ if (hw->mac_type == e1000_ich8lan)
rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
/* reserve RAR[14] for LAA over-write work-around */
- if (adapter->hw.mac_type == e1000_82571)
+ if (hw->mac_type == e1000_82571)
rar_entries--;
/* Check for Promiscuous and All Multicast modes */
- rctl = E1000_READ_REG(hw, RCTL);
+ rctl = er32(RCTL);
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
@@ -2498,7 +2502,7 @@ e1000_set_rx_mode(struct net_device *netdev)
uc_ptr = netdev->uc_list;
}
- E1000_WRITE_REG(hw, RCTL, rctl);
+ ew32(RCTL, rctl);
/* 82542 2.0 needs to be in reset to write receive address registers */
@@ -2524,9 +2528,9 @@ e1000_set_rx_mode(struct net_device *netdev)
mc_ptr = mc_ptr->next;
} else {
E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
}
WARN_ON(uc_ptr != NULL);
@@ -2535,7 +2539,7 @@ e1000_set_rx_mode(struct net_device *netdev)
for (i = 0; i < mta_reg_count; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
- E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_FLUSH();
}
/* load any remaining addresses into the hash table */
@@ -2552,11 +2556,11 @@ e1000_set_rx_mode(struct net_device *netdev)
/* Need to wait a few seconds after link up to get diagnostic information from
* the phy */
-static void
-e1000_update_phy_info(unsigned long data)
+static void e1000_update_phy_info(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
- e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_phy_get_info(hw, &adapter->phy_info);
}
/**
@@ -2564,33 +2568,25 @@ e1000_update_phy_info(unsigned long data)
* @data: pointer to adapter cast into an unsigned long
**/
-static void
-e1000_82547_tx_fifo_stall(unsigned long data)
+static void e1000_82547_tx_fifo_stall(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 tctl;
if (atomic_read(&adapter->tx_fifo_stall)) {
- if ((E1000_READ_REG(&adapter->hw, TDT) ==
- E1000_READ_REG(&adapter->hw, TDH)) &&
- (E1000_READ_REG(&adapter->hw, TDFT) ==
- E1000_READ_REG(&adapter->hw, TDFH)) &&
- (E1000_READ_REG(&adapter->hw, TDFTS) ==
- E1000_READ_REG(&adapter->hw, TDFHS))) {
- tctl = E1000_READ_REG(&adapter->hw, TCTL);
- E1000_WRITE_REG(&adapter->hw, TCTL,
- tctl & ~E1000_TCTL_EN);
- E1000_WRITE_REG(&adapter->hw, TDFT,
- adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, TDFH,
- adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, TDFTS,
- adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, TDFHS,
- adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
- E1000_WRITE_FLUSH(&adapter->hw);
+ if ((er32(TDT) == er32(TDH)) &&
+ (er32(TDFT) == er32(TDFH)) &&
+ (er32(TDFTS) == er32(TDFHS))) {
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ ew32(TDFT, adapter->tx_head_addr);
+ ew32(TDFH, adapter->tx_head_addr);
+ ew32(TDFTS, adapter->tx_head_addr);
+ ew32(TDFHS, adapter->tx_head_addr);
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
adapter->tx_fifo_head = 0;
atomic_set(&adapter->tx_fifo_stall, 0);
@@ -2605,45 +2601,45 @@ e1000_82547_tx_fifo_stall(unsigned long data)
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-static void
-e1000_watchdog(unsigned long data)
+static void e1000_watchdog(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_ring *txdr = adapter->tx_ring;
u32 link, tctl;
s32 ret_val;
- ret_val = e1000_check_for_link(&adapter->hw);
+ ret_val = e1000_check_for_link(hw);
if ((ret_val == E1000_ERR_PHY) &&
- (adapter->hw.phy_type == e1000_phy_igp_3) &&
- (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ (hw->phy_type == e1000_phy_igp_3) &&
+ (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
/* See e1000_kumeran_lock_loss_workaround() */
DPRINTK(LINK, INFO,
"Gigabit has been disabled, downgrading speed\n");
}
- if (adapter->hw.mac_type == e1000_82573) {
- e1000_enable_tx_pkt_filtering(&adapter->hw);
- if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
+ if (hw->mac_type == e1000_82573) {
+ e1000_enable_tx_pkt_filtering(hw);
+ if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
e1000_update_mng_vlan(adapter);
}
- if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
- !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
- link = !adapter->hw.serdes_link_down;
+ if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ !(er32(TXCW) & E1000_TXCW_ANE))
+ link = !hw->serdes_link_down;
else
- link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
+ link = er32(STATUS) & E1000_STATUS_LU;
if (link) {
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
bool txb2b = true;
- e1000_get_speed_and_duplex(&adapter->hw,
+ e1000_get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl = er32(CTRL);
DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
"Flow Control: %s\n",
adapter->link_speed,
@@ -2671,19 +2667,19 @@ e1000_watchdog(unsigned long data)
break;
}
- if ((adapter->hw.mac_type == e1000_82571 ||
- adapter->hw.mac_type == e1000_82572) &&
+ if ((hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) &&
!txb2b) {
u32 tarc0;
- tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
+ tarc0 = er32(TARC0);
tarc0 &= ~(1 << 21);
- E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
+ ew32(TARC0, tarc0);
}
/* disable TSO for pcie and 10/100 speeds, to avoid
* some hardware issues */
if (!adapter->tso_force &&
- adapter->hw.bus_type == e1000_bus_type_pci_express){
+ hw->bus_type == e1000_bus_type_pci_express) {
switch (adapter->link_speed) {
case SPEED_10:
case SPEED_100:
@@ -2704,9 +2700,9 @@ e1000_watchdog(unsigned long data)
/* enable transmits in the hardware, need to do this
* after setting TARC0 */
- tctl = E1000_READ_REG(&adapter->hw, TCTL);
+ tctl = er32(TCTL);
tctl |= E1000_TCTL_EN;
- E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+ ew32(TCTL, tctl);
netif_carrier_on(netdev);
netif_wake_queue(netdev);
@@ -2714,10 +2710,9 @@ e1000_watchdog(unsigned long data)
adapter->smartspeed = 0;
} else {
/* make sure the receive unit is started */
- if (adapter->hw.rx_needs_kicking) {
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl = E1000_READ_REG(hw, RCTL);
- E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
+ if (hw->rx_needs_kicking) {
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl | E1000_RCTL_EN);
}
}
} else {
@@ -2734,7 +2729,7 @@ e1000_watchdog(unsigned long data)
* disable receives in the ISR and
* reset device here in the watchdog
*/
- if (adapter->hw.mac_type == e1000_80003es2lan)
+ if (hw->mac_type == e1000_80003es2lan)
/* reset device */
schedule_work(&adapter->reset_task);
}
@@ -2744,9 +2739,9 @@ e1000_watchdog(unsigned long data)
e1000_update_stats(adapter);
- adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
- adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
+ hw->collision_delta = adapter->stats.colc - adapter->colc_old;
adapter->colc_old = adapter->stats.colc;
adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
@@ -2754,7 +2749,7 @@ e1000_watchdog(unsigned long data)
adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
adapter->gotcl_old = adapter->stats.gotcl;
- e1000_update_adaptive(&adapter->hw);
+ e1000_update_adaptive(hw);
if (!netif_carrier_ok(netdev)) {
if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
@@ -2768,15 +2763,15 @@ e1000_watchdog(unsigned long data)
}
/* Cause software interrupt to ensure rx ring is cleaned */
- E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
+ ew32(ICS, E1000_ICS_RXDMT0);
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = true;
/* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0] */
- if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
- e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+ if (hw->mac_type == e1000_82571 && hw->laa_is_present)
+ e1000_rar_set(hw, hw->mac_addr, 0);
/* Reset the timer */
mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
@@ -2806,9 +2801,7 @@ enum latency_range {
* @bytes: the number of bytes during this measurement interval
**/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
- u16 itr_setting,
- int packets,
- int bytes)
+ u16 itr_setting, int packets, int bytes)
{
unsigned int retval = itr_setting;
struct e1000_hw *hw = &adapter->hw;
@@ -2913,7 +2906,7 @@ set_itr_now:
min(adapter->itr + (new_itr >> 2), new_itr) :
new_itr;
adapter->itr = new_itr;
- E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+ ew32(ITR, 1000000000 / (new_itr * 256));
}
return;
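[Editor's note] The value written to ITR above is derived from the target interrupt rate. Assuming the register counts in units of 256 nanoseconds, which the 10^9 / (itr * 256) expression implies, the conversion works out as in this illustrative helper (not part of the patch):

/* Convert a target interrupt rate (interrupts per second) into an ITR
 * register value, assuming the register is programmed in 256 ns units.
 */
static u32 itr_to_reg(u32 ints_per_sec)
{
	/* e.g. 8000 ints/s -> 1000000000 / (8000 * 256) ~= 488,
	 * i.e. at most one interrupt every ~125 us */
	return 1000000000 / (ints_per_sec * 256);
}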
@@ -2926,9 +2919,8 @@ set_itr_now:
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static int
-e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
- struct sk_buff *skb)
+static int e1000_tso(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -2999,9 +2991,8 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return false;
}
-static bool
-e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
- struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -3038,11 +3029,13 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_MAX_TXD_PWR 12
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
-static int
-e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
- struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
- unsigned int nr_frags, unsigned int mss)
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
{
+ struct e1000_hw *hw = &adapter->hw;
struct e1000_buffer *buffer_info;
unsigned int len = skb->len;
unsigned int offset = 0, size, count = 0, i;
@@ -3073,7 +3066,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
* The fix is to make sure that the first descriptor of a
* packet is smaller than 2048 - 16 - 16 (or 2016) bytes
*/
- if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
(size > 2015) && count == 0))
size = 2015;
@@ -3145,10 +3138,11 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return count;
}
-static void
-e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
- int tx_flags, int count)
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, int tx_flags,
+ int count)
{
+ struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_desc *tx_desc = NULL;
struct e1000_buffer *buffer_info;
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3194,7 +3188,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
wmb();
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+ writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail
* at a time; it synchronizes IO on IA64/Altix systems */
mmiowb();
@@ -3212,8 +3206,8 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
-static int
-e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
{
u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
@@ -3239,19 +3233,19 @@ no_fifo_stall_required:
}
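[Editor's note] For context on the tx_fifo_stall timer converted earlier: the 82547 erratum being worked around is that a transmit packet which wraps the on-chip Tx FIFO while running at half duplex can hang the controller, so the driver stalls the queue whenever a header-padded frame would not fit in the remaining linear FIFO space and lets the timer reset the FIFO pointers once it drains. A simplified sketch of the wrap check, with names and conditions condensed from the code above for illustration only:

/* Simplified illustration of the 82547 FIFO wrap check (not the exact
 * driver logic): stall when the header-padded frame cannot fit into the
 * space left between the FIFO head and the end of the FIFO.
 */
static bool fifo_stall_needed(u32 fifo_size, u32 fifo_head, u32 skb_len)
{
	u32 fifo_space = fifo_size - fifo_head;
	u32 fifo_pkt_len = ALIGN(skb_len + E1000_FIFO_HDR, E1000_FIFO_HDR);

	return fifo_pkt_len >= E1000_82547_PAD_LEN + fifo_space;
}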
#define MINIMUM_DHCP_PACKET_SIZE 282
-static int
-e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
{
struct e1000_hw *hw = &adapter->hw;
u16 length, offset;
if (vlan_tx_tag_present(skb)) {
- if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
- ( adapter->hw.mng_cookie.status &
+ if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
+ (hw->mng_cookie.status &
 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)))
return 0;
}
if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
- struct ethhdr *eth = (struct ethhdr *) skb->data;
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
if ((htons(ETH_P_IP) == eth->h_proto)) {
const struct iphdr *ip =
(struct iphdr *)((u8 *)skb->data+14);
@@ -3304,10 +3298,10 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
}
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
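[Editor's note] TXD_USE_COUNT() simply computes how many descriptors of at most 2^X bytes are needed to cover S bytes. A quick worked example using the E1000_MAX_TXD_PWR value defined earlier in this patch:

/* Worked example only: with E1000_MAX_TXD_PWR == 12 (4096 bytes per
 * descriptor), a 9018-byte linear region needs
 * TXD_USE_COUNT(9018, 12) == (9018 >> 12) + 1 == 3 descriptors.
 */
static unsigned int txd_use_count(unsigned int size, unsigned int max_txd_pwr)
{
	return (size >> max_txd_pwr) + 1;
}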
-static int
-e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_ring *tx_ring;
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
@@ -3333,7 +3327,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* 82571 and newer doesn't need the workaround that limited descriptor
* length to 4kB */
- if (adapter->hw.mac_type >= e1000_82571)
+ if (hw->mac_type >= e1000_82571)
max_per_txd = 8192;
mss = skb_shinfo(skb)->gso_size;
@@ -3353,7 +3347,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* frags into skb->data */
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
unsigned int pull_size;
case e1000_82544:
/* Make sure we have room to chop off 4 bytes,
@@ -3402,7 +3396,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* work-around for errata 10, which applies to all controllers
* in PCI-X mode, so add one more descriptor to the count
*/
- if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
(len > 2015)))
count++;
@@ -3414,8 +3408,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += nr_frags;
- if (adapter->hw.tx_pkt_filtering &&
- (adapter->hw.mac_type == e1000_82573))
+ if (hw->tx_pkt_filtering &&
+ (hw->mac_type == e1000_82573))
e1000_transfer_dhcp_info(adapter, skb);
if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
@@ -3429,7 +3423,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- if (unlikely(adapter->hw.mac_type == e1000_82547)) {
+ if (unlikely(hw->mac_type == e1000_82547)) {
if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
@@ -3482,8 +3476,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* @netdev: network interface device structure
**/
-static void
-e1000_tx_timeout(struct net_device *netdev)
+static void e1000_tx_timeout(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3492,8 +3485,7 @@ e1000_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-static void
-e1000_reset_task(struct work_struct *work)
+static void e1000_reset_task(struct work_struct *work)
{
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
@@ -3509,8 +3501,7 @@ e1000_reset_task(struct work_struct *work)
* The statistics are actually updated from the timer callback.
**/
-static struct net_device_stats *
-e1000_get_stats(struct net_device *netdev)
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3526,10 +3517,10 @@ e1000_get_stats(struct net_device *netdev)
* Returns 0 on success, negative on failure
**/
-static int
-e1000_change_mtu(struct net_device *netdev, int new_mtu)
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
u16 eeprom_data = 0;
@@ -3540,7 +3531,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Adapter-specific max frame size limits. */
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
case e1000_ich8lan:
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
@@ -3552,9 +3543,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* Jumbo Frames not supported if:
* - this is not an 82573L device
* - ASPM is enabled in any way (0x1A bits 3:2) */
- e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
+ e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
&eeprom_data);
- if ((adapter->hw.device_id != E1000_DEV_ID_82573L) ||
+ if ((hw->device_id != E1000_DEV_ID_82573L) ||
(eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR,
@@ -3601,13 +3592,13 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
/* adjust allocation if LPE protects us, and we aren't using SBP */
- if (!adapter->hw.tbi_compatibility_on &&
+ if (!hw->tbi_compatibility_on &&
((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
netdev->mtu = new_mtu;
- adapter->hw.max_frame_size = max_frame;
+ hw->max_frame_size = max_frame;
if (netif_running(netdev))
e1000_reinit_locked(adapter);
@@ -3620,8 +3611,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
* @adapter: board private structure
**/
-void
-e1000_update_stats(struct e1000_adapter *adapter)
+void e1000_update_stats(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -3646,89 +3636,89 @@ e1000_update_stats(struct e1000_adapter *adapter)
* be written while holding adapter->stats_lock
*/
- adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
- adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
- adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
- adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
- adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
- adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
- adapter->stats.roc += E1000_READ_REG(hw, ROC);
-
- if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
- adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
- adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
- adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
- adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
- adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
- }
-
- adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
- adapter->stats.mpc += E1000_READ_REG(hw, MPC);
- adapter->stats.scc += E1000_READ_REG(hw, SCC);
- adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
- adapter->stats.mcc += E1000_READ_REG(hw, MCC);
- adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
- adapter->stats.dc += E1000_READ_REG(hw, DC);
- adapter->stats.sec += E1000_READ_REG(hw, SEC);
- adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
- adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
- adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
- adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
- adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
- adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
- adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
- adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
- adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
- adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
- adapter->stats.ruc += E1000_READ_REG(hw, RUC);
- adapter->stats.rfc += E1000_READ_REG(hw, RFC);
- adapter->stats.rjc += E1000_READ_REG(hw, RJC);
- adapter->stats.torl += E1000_READ_REG(hw, TORL);
- adapter->stats.torh += E1000_READ_REG(hw, TORH);
- adapter->stats.totl += E1000_READ_REG(hw, TOTL);
- adapter->stats.toth += E1000_READ_REG(hw, TOTH);
- adapter->stats.tpr += E1000_READ_REG(hw, TPR);
-
- if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
- adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
- adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
- adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
- adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
- adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
- }
-
- adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
- adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
+ adapter->stats.crcerrs += er32(CRCERRS);
+ adapter->stats.gprc += er32(GPRC);
+ adapter->stats.gorcl += er32(GORCL);
+ adapter->stats.gorch += er32(GORCH);
+ adapter->stats.bprc += er32(BPRC);
+ adapter->stats.mprc += er32(MPRC);
+ adapter->stats.roc += er32(ROC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.prc64 += er32(PRC64);
+ adapter->stats.prc127 += er32(PRC127);
+ adapter->stats.prc255 += er32(PRC255);
+ adapter->stats.prc511 += er32(PRC511);
+ adapter->stats.prc1023 += er32(PRC1023);
+ adapter->stats.prc1522 += er32(PRC1522);
+ }
+
+ adapter->stats.symerrs += er32(SYMERRS);
+ adapter->stats.mpc += er32(MPC);
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+ adapter->stats.sec += er32(SEC);
+ adapter->stats.rlec += er32(RLEC);
+ adapter->stats.xonrxc += er32(XONRXC);
+ adapter->stats.xontxc += er32(XONTXC);
+ adapter->stats.xoffrxc += er32(XOFFRXC);
+ adapter->stats.xofftxc += er32(XOFFTXC);
+ adapter->stats.fcruc += er32(FCRUC);
+ adapter->stats.gptc += er32(GPTC);
+ adapter->stats.gotcl += er32(GOTCL);
+ adapter->stats.gotch += er32(GOTCH);
+ adapter->stats.rnbc += er32(RNBC);
+ adapter->stats.ruc += er32(RUC);
+ adapter->stats.rfc += er32(RFC);
+ adapter->stats.rjc += er32(RJC);
+ adapter->stats.torl += er32(TORL);
+ adapter->stats.torh += er32(TORH);
+ adapter->stats.totl += er32(TOTL);
+ adapter->stats.toth += er32(TOTH);
+ adapter->stats.tpr += er32(TPR);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.ptc64 += er32(PTC64);
+ adapter->stats.ptc127 += er32(PTC127);
+ adapter->stats.ptc255 += er32(PTC255);
+ adapter->stats.ptc511 += er32(PTC511);
+ adapter->stats.ptc1023 += er32(PTC1023);
+ adapter->stats.ptc1522 += er32(PTC1522);
+ }
+
+ adapter->stats.mptc += er32(MPTC);
+ adapter->stats.bptc += er32(BPTC);
/* used for adaptive IFS */
- hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
+ hw->tx_packet_delta = er32(TPT);
adapter->stats.tpt += hw->tx_packet_delta;
- hw->collision_delta = E1000_READ_REG(hw, COLC);
+ hw->collision_delta = er32(COLC);
adapter->stats.colc += hw->collision_delta;
if (hw->mac_type >= e1000_82543) {
- adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
- adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
- adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
- adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
- adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
- adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
+ adapter->stats.algnerrc += er32(ALGNERRC);
+ adapter->stats.rxerrc += er32(RXERRC);
+ adapter->stats.tncrs += er32(TNCRS);
+ adapter->stats.cexterr += er32(CEXTERR);
+ adapter->stats.tsctc += er32(TSCTC);
+ adapter->stats.tsctfc += er32(TSCTFC);
}
if (hw->mac_type > e1000_82547_rev_2) {
- adapter->stats.iac += E1000_READ_REG(hw, IAC);
- adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
-
- if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
- adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
- adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
- adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
- adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
- adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
- adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+ adapter->stats.iac += er32(IAC);
+ adapter->stats.icrxoc += er32(ICRXOC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.icrxptc += er32(ICRXPTC);
+ adapter->stats.icrxatc += er32(ICRXATC);
+ adapter->stats.ictxptc += er32(ICTXPTC);
+ adapter->stats.ictxatc += er32(ICTXATC);
+ adapter->stats.ictxqec += er32(ICTXQEC);
+ adapter->stats.ictxqmtc += er32(ICTXQMTC);
+ adapter->stats.icrxdmtc += er32(ICRXDMTC);
}
}
@@ -3756,7 +3746,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
adapter->net_stats.tx_window_errors = adapter->stats.latecol;
adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
- if (adapter->hw.bad_tx_carr_stats_fd &&
+ if (hw->bad_tx_carr_stats_fd &&
adapter->link_duplex == FULL_DUPLEX) {
adapter->net_stats.tx_carrier_errors = 0;
adapter->stats.tncrs = 0;
@@ -3779,10 +3769,10 @@ e1000_update_stats(struct e1000_adapter *adapter)
}
/* Management Stats */
- if (adapter->hw.has_smbus) {
- adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC);
- adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC);
- adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC);
+ if (hw->has_smbus) {
+ adapter->stats.mgptc += er32(MGTPTC);
+ adapter->stats.mgprc += er32(MGTPRC);
+ adapter->stats.mgpdc += er32(MGTPDC);
}
spin_unlock_irqrestore(&adapter->stats_lock, flags);
@@ -3794,16 +3784,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
* @data: pointer to a network interface device structure
**/
-static irqreturn_t
-e1000_intr_msi(int irq, void *data)
+static irqreturn_t e1000_intr_msi(int irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
-#ifndef CONFIG_E1000_NAPI
- int i;
-#endif
- u32 icr = E1000_READ_REG(hw, ICR);
+ u32 icr = er32(ICR);
/* in NAPI mode read ICR disables interrupts using IAM */
@@ -3813,17 +3799,16 @@ e1000_intr_msi(int irq, void *data)
* link down event; disable receives here in the ISR and reset
* adapter in watchdog */
if (netif_carrier_ok(netdev) &&
- (adapter->hw.mac_type == e1000_80003es2lan)) {
+ (hw->mac_type == e1000_80003es2lan)) {
/* disable receives */
- u32 rctl = E1000_READ_REG(hw, RCTL);
- E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->flags))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_E1000_NAPI
if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -3832,20 +3817,6 @@ e1000_intr_msi(int irq, void *data)
__netif_rx_schedule(netdev, &adapter->napi);
} else
e1000_irq_enable(adapter);
-#else
- adapter->total_tx_bytes = 0;
- adapter->total_rx_bytes = 0;
- adapter->total_tx_packets = 0;
- adapter->total_rx_packets = 0;
-
- for (i = 0; i < E1000_MAX_INTR; i++)
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
- !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
- break;
-
- if (likely(adapter->itr_setting & 3))
- e1000_set_itr(adapter);
-#endif
return IRQ_HANDLED;
}
@@ -3856,20 +3827,16 @@ e1000_intr_msi(int irq, void *data)
* @data: pointer to a network interface device structure
**/
-static irqreturn_t
-e1000_intr(int irq, void *data)
+static irqreturn_t e1000_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 rctl, icr = E1000_READ_REG(hw, ICR);
-#ifndef CONFIG_E1000_NAPI
- int i;
-#endif
+ u32 rctl, icr = er32(ICR);
+
if (unlikely(!icr))
return IRQ_NONE; /* Not our interrupt */
-#ifdef CONFIG_E1000_NAPI
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
* not set, then the adapter didn't send an interrupt */
if (unlikely(hw->mac_type >= e1000_82571 &&
@@ -3878,7 +3845,6 @@ e1000_intr(int irq, void *data)
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
* need for the IMC write */
-#endif
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
@@ -3888,21 +3854,20 @@ e1000_intr(int irq, void *data)
* reset adapter in watchdog
*/
if (netif_carrier_ok(netdev) &&
- (adapter->hw.mac_type == e1000_80003es2lan)) {
+ (hw->mac_type == e1000_80003es2lan)) {
/* disable receives */
- rctl = E1000_READ_REG(hw, RCTL);
- E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->flags))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_E1000_NAPI
if (unlikely(hw->mac_type < e1000_82571)) {
/* disable interrupts, without the synchronize_irq bit */
- E1000_WRITE_REG(hw, IMC, ~0);
- E1000_WRITE_FLUSH(hw);
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
}
if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
adapter->total_tx_bytes = 0;
@@ -3914,48 +3879,15 @@ e1000_intr(int irq, void *data)
/* this really should not happen! if it does it is basically a
* bug, but not a hard error, so enable ints and continue */
e1000_irq_enable(adapter);
-#else
- /* Writing IMC and IMS is needed for 82547.
- * Due to Hub Link bus being occupied, an interrupt
- * de-assertion message is not able to be sent.
- * When an interrupt assertion message is generated later,
- * two messages are re-ordered and sent out.
- * That causes APIC to think 82547 is in de-assertion
- * state, while 82547 is in assertion state, resulting
- * in dead lock. Writing IMC forces 82547 into
- * de-assertion state.
- */
- if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
- E1000_WRITE_REG(hw, IMC, ~0);
-
- adapter->total_tx_bytes = 0;
- adapter->total_rx_bytes = 0;
- adapter->total_tx_packets = 0;
- adapter->total_rx_packets = 0;
- for (i = 0; i < E1000_MAX_INTR; i++)
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
- !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
- break;
-
- if (likely(adapter->itr_setting & 3))
- e1000_set_itr(adapter);
-
- if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
- e1000_irq_enable(adapter);
-
-#endif
return IRQ_HANDLED;
}
-#ifdef CONFIG_E1000_NAPI
/**
* e1000_clean - NAPI Rx polling callback
* @adapter: board private structure
**/
-
-static int
-e1000_clean(struct napi_struct *napi, int budget)
+static int e1000_clean(struct napi_struct *napi, int budget)
{
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
struct net_device *poll_dev = adapter->netdev;
@@ -3991,23 +3923,19 @@ e1000_clean(struct napi_struct *napi, int budget)
return work_done;
}
-#endif
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
**/
-
-static bool
-e1000_clean_tx_irq(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring)
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct e1000_buffer *buffer_info;
unsigned int i, eop;
-#ifdef CONFIG_E1000_NAPI
unsigned int count = 0;
-#endif
bool cleaned = false;
unsigned int total_tx_bytes=0, total_tx_packets=0;
@@ -4039,11 +3967,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
-#ifdef CONFIG_E1000_NAPI
#define E1000_TX_WEIGHT 64
/* weight of a sort for tx, to avoid endless transmit cleanup */
- if (count++ == E1000_TX_WEIGHT) break;
-#endif
+ if (count++ == E1000_TX_WEIGHT)
+ break;
}
tx_ring->next_to_clean = i;
@@ -4068,8 +3995,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (tx_ring->buffer_info[eop].dma &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
(adapter->tx_timeout_factor * HZ))
- && !(E1000_READ_REG(&adapter->hw, STATUS) &
- E1000_STATUS_TXOFF)) {
+ && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
@@ -4085,8 +4011,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
" next_to_watch.status <%x>\n",
(unsigned long)((tx_ring - adapter->tx_ring) /
sizeof(struct e1000_tx_ring)),
- readl(adapter->hw.hw_addr + tx_ring->tdh),
- readl(adapter->hw.hw_addr + tx_ring->tdt),
+ readl(hw->hw_addr + tx_ring->tdh),
+ readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
tx_ring->next_to_clean,
tx_ring->buffer_info[eop].time_stamp,
@@ -4111,17 +4037,16 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
-static void
-e1000_rx_checksum(struct e1000_adapter *adapter,
- u32 status_err, u32 csum,
- struct sk_buff *skb)
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+ u32 csum, struct sk_buff *skb)
{
+ struct e1000_hw *hw = &adapter->hw;
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
skb->ip_summed = CHECKSUM_NONE;
/* 82543 or newer only */
- if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
+ if (unlikely(hw->mac_type < e1000_82543)) return;
/* Ignore Checksum bit is set */
if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
/* TCP/UDP checksum error bit is set */
@@ -4131,7 +4056,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
return;
}
/* TCP/UDP Checksum has not been calculated */
- if (adapter->hw.mac_type <= e1000_82547_rev_2) {
+ if (hw->mac_type <= e1000_82547_rev_2) {
if (!(status & E1000_RXD_STAT_TCPCS))
return;
} else {
@@ -4142,7 +4067,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
if (likely(status & E1000_RXD_STAT_TCPCS)) {
/* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
+ } else if (hw->mac_type > e1000_82547_rev_2) {
/* IP fragment with UDP payload */
/* Hardware complements the payload checksum, so we undo it
* and then put the value in host order for further stack use.
@@ -4158,17 +4083,11 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
**/
-
-static bool
-#ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int *work_done, int work_to_do)
-#else
-e1000_clean_rx_irq(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring)
-#endif
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc, *next_rxd;
@@ -4189,11 +4108,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct sk_buff *skb;
u8 status;
-#ifdef CONFIG_E1000_NAPI
if (*work_done >= work_to_do)
break;
(*work_done)++;
-#endif
+
status = rx_desc->status;
skb = buffer_info->skb;
buffer_info->skb = NULL;
@@ -4226,11 +4144,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
last_byte = *(skb->data + length - 1);
- if (TBI_ACCEPT(&adapter->hw, status,
- rx_desc->errors, length, last_byte)) {
+ if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+ last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
- e1000_tbi_adjust_stats(&adapter->hw,
- &adapter->stats,
+ e1000_tbi_adjust_stats(hw, &adapter->stats,
length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
flags);
@@ -4280,7 +4197,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
le16_to_cpu(rx_desc->csum), skb);
skb->protocol = eth_type_trans(skb, netdev);
-#ifdef CONFIG_E1000_NAPI
+
if (unlikely(adapter->vlgrp &&
(status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
@@ -4288,15 +4205,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
} else {
netif_receive_skb(skb);
}
-#else /* CONFIG_E1000_NAPI */
- if (unlikely(adapter->vlgrp &&
- (status & E1000_RXD_STAT_VP))) {
- vlan_hwaccel_rx(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special));
- } else {
- netif_rx(skb);
- }
-#endif /* CONFIG_E1000_NAPI */
+
netdev->last_rx = jiffies;
next_desc:
@@ -4330,15 +4239,9 @@ next_desc:
* @adapter: board private structure
**/
-static bool
-#ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int *work_done, int work_to_do)
-#else
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring)
-#endif
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
{
union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
struct net_device *netdev = adapter->netdev;
@@ -4361,11 +4264,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
while (staterr & E1000_RXD_STAT_DD) {
ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i];
-#ifdef CONFIG_E1000_NAPI
+
if (unlikely(*work_done >= work_to_do))
break;
(*work_done)++;
-#endif
+
skb = buffer_info->skb;
/* in the packet split case this is header only */
@@ -4438,7 +4341,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
}
for (j = 0; j < adapter->rx_ps_pages; j++) {
- if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
+ length = le16_to_cpu(rx_desc->wb.upper.length[j]);
+ if (!length)
break;
pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
PAGE_SIZE, PCI_DMA_FROMDEVICE);
@@ -4466,21 +4370,14 @@ copydone:
if (likely(rx_desc->wb.upper.header_status &
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
adapter->rx_hdr_split++;
-#ifdef CONFIG_E1000_NAPI
+
if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->wb.middle.vlan));
} else {
netif_receive_skb(skb);
}
-#else /* CONFIG_E1000_NAPI */
- if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
- vlan_hwaccel_rx(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->wb.middle.vlan));
- } else {
- netif_rx(skb);
- }
-#endif /* CONFIG_E1000_NAPI */
+
netdev->last_rx = jiffies;
next_desc:
@@ -4517,11 +4414,11 @@ next_desc:
* @adapter: address of board private structure
**/
-static void
-e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int cleaned_count)
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
@@ -4619,7 +4516,7 @@ map_skb:
* applicable for weak-ordered memory model archs,
* such as IA-64). */
wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+ writel(i, hw->hw_addr + rx_ring->rdt);
}
}
@@ -4628,11 +4525,11 @@ map_skb:
* @adapter: address of board private structure
**/
-static void
-e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int cleaned_count)
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_packet_split *rx_desc;
@@ -4717,7 +4614,7 @@ no_buffers:
* descriptors are 32 bytes...so we increment tail
* twice as much.
*/
- writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
+ writel(i<<1, hw->hw_addr + rx_ring->rdt);
}
}
@@ -4726,49 +4623,49 @@ no_buffers:
* @adapter:
**/
-static void
-e1000_smartspeed(struct e1000_adapter *adapter)
+static void e1000_smartspeed(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
u16 phy_status;
u16 phy_ctrl;
- if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
- !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
+ if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
+ !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
return;
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
* we assume back-to-back */
- e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
- e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
- e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
if (phy_ctrl & CR_1000T_MS_ENABLE) {
phy_ctrl &= ~CR_1000T_MS_ENABLE;
- e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL,
phy_ctrl);
adapter->smartspeed++;
- if (!e1000_phy_setup_autoneg(&adapter->hw) &&
- !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL,
&phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
+ e1000_write_phy_reg(hw, PHY_CTRL,
phy_ctrl);
}
}
return;
} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
/* If still no link, perhaps using 2/3 pair cable */
- e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
phy_ctrl |= CR_1000T_MS_ENABLE;
- e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
- if (!e1000_phy_setup_autoneg(&adapter->hw) &&
- !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
}
}
/* Restart process after E1000_SMARTSPEED_MAX iterations */
@@ -4783,8 +4680,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
* @cmd:
**/
-static int
-e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCGMIIPHY:
@@ -4803,28 +4699,29 @@ e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @cmd:
**/
-static int
-e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
struct mii_ioctl_data *data = if_mii(ifr);
int retval;
u16 mii_reg;
u16 spddplx;
unsigned long flags;
- if (adapter->hw.media_type != e1000_media_type_copper)
+ if (hw->media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
switch (cmd) {
case SIOCGMIIPHY:
- data->phy_id = adapter->hw.phy_addr;
+ data->phy_id = hw->phy_addr;
break;
case SIOCGMIIREG:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
spin_lock_irqsave(&adapter->stats_lock, flags);
- if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
&data->val_out)) {
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
@@ -4838,20 +4735,20 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -EFAULT;
mii_reg = data->val_in;
spin_lock_irqsave(&adapter->stats_lock, flags);
- if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
+ if (e1000_write_phy_reg(hw, data->reg_num,
mii_reg)) {
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
spin_unlock_irqrestore(&adapter->stats_lock, flags);
- if (adapter->hw.media_type == e1000_media_type_copper) {
+ if (hw->media_type == e1000_media_type_copper) {
switch (data->reg_num) {
case PHY_CTRL:
if (mii_reg & MII_CR_POWER_DOWN)
break;
if (mii_reg & MII_CR_AUTO_NEG_EN) {
- adapter->hw.autoneg = 1;
- adapter->hw.autoneg_advertised = 0x2F;
+ hw->autoneg = 1;
+ hw->autoneg_advertised = 0x2F;
} else {
if (mii_reg & 0x40)
spddplx = SPEED_1000;
@@ -4874,7 +4771,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case M88E1000_PHY_SPEC_CTRL:
case M88E1000_EXT_PHY_SPEC_CTRL:
- if (e1000_phy_reset(&adapter->hw))
+ if (e1000_phy_reset(hw))
return -EIO;
break;
}
@@ -4897,8 +4794,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return E1000_SUCCESS;
}
-void
-e1000_pci_set_mwi(struct e1000_hw *hw)
+void e1000_pci_set_mwi(struct e1000_hw *hw)
{
struct e1000_adapter *adapter = hw->back;
int ret_val = pci_set_mwi(adapter->pdev);
@@ -4907,30 +4803,26 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}
-void
-e1000_pci_clear_mwi(struct e1000_hw *hw)
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
struct e1000_adapter *adapter = hw->back;
pci_clear_mwi(adapter->pdev);
}
-int
-e1000_pcix_get_mmrbc(struct e1000_hw *hw)
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
struct e1000_adapter *adapter = hw->back;
return pcix_get_mmrbc(adapter->pdev);
}
-void
-e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
struct e1000_adapter *adapter = hw->back;
pcix_set_mmrbc(adapter->pdev, mmrbc);
}
-s32
-e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct e1000_adapter *adapter = hw->back;
u16 cap_offset;
@@ -4944,16 +4836,16 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
return E1000_SUCCESS;
}
-void
-e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
outl(value, port);
}
-static void
-e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+static void e1000_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl;
if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -4962,22 +4854,22 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
if (grp) {
/* enable VLAN tag insert/strip */
- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl = er32(CTRL);
ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
/* enable VLAN receive filtering */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl = er32(RCTL);
rctl &= ~E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ ew32(RCTL, rctl);
e1000_update_mng_vlan(adapter);
}
} else {
/* disable VLAN tag insert/strip */
- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl = er32(CTRL);
ctrl &= ~E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
if (adapter->mng_vlan_id !=
@@ -4993,27 +4885,27 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
e1000_irq_enable(adapter);
}
-static void
-e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
u32 vfta, index;
- if ((adapter->hw.mng_cookie.status &
+ if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id))
return;
/* add VID to filter table */
index = (vid >> 5) & 0x7F;
- vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
vfta |= (1 << (vid & 0x1F));
- e1000_write_vfta(&adapter->hw, index, vfta);
+ e1000_write_vfta(hw, index, vfta);
}
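[Editor's note] The VFTA manipulated above is a 128-entry array of 32-bit registers forming a 4096-bit VLAN filter bitmap: bits 11:5 of the VID select the register and bits 4:0 select the bit within it. An illustrative decomposition (not part of the patch):

/* Locate the VFTA register/bit for a given VLAN ID.
 * Example: VID 1234 -> index (1234 >> 5) & 0x7F == 38, bit 1234 & 0x1F == 18.
 */
static void vfta_locate(u16 vid, u32 *index, u32 *bit)
{
	*index = (vid >> 5) & 0x7F;	/* one of 128 32-bit VFTA registers */
	*bit = vid & 0x1F;		/* bit within that register */
}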
-static void
-e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
u32 vfta, index;
if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -5022,7 +4914,7 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
- if ((adapter->hw.mng_cookie.status &
+ if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
@@ -5032,13 +4924,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
/* remove VID from filter table */
index = (vid >> 5) & 0x7F;
- vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
vfta &= ~(1 << (vid & 0x1F));
- e1000_write_vfta(&adapter->hw, index, vfta);
+ e1000_write_vfta(hw, index, vfta);
}
-static void
-e1000_restore_vlan(struct e1000_adapter *adapter)
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
@@ -5052,13 +4943,14 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
}
}
-int
-e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
- adapter->hw.autoneg = 0;
+ struct e1000_hw *hw = &adapter->hw;
+
+ hw->autoneg = 0;
/* Fiber NICs only allow 1000 Mbps full duplex */
- if ((adapter->hw.media_type == e1000_media_type_fiber) &&
+ if ((hw->media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
@@ -5066,20 +4958,20 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
switch (spddplx) {
case SPEED_10 + DUPLEX_HALF:
- adapter->hw.forced_speed_duplex = e1000_10_half;
+ hw->forced_speed_duplex = e1000_10_half;
break;
case SPEED_10 + DUPLEX_FULL:
- adapter->hw.forced_speed_duplex = e1000_10_full;
+ hw->forced_speed_duplex = e1000_10_full;
break;
case SPEED_100 + DUPLEX_HALF:
- adapter->hw.forced_speed_duplex = e1000_100_half;
+ hw->forced_speed_duplex = e1000_100_half;
break;
case SPEED_100 + DUPLEX_FULL:
- adapter->hw.forced_speed_duplex = e1000_100_full;
+ hw->forced_speed_duplex = e1000_100_full;
break;
case SPEED_1000 + DUPLEX_FULL:
- adapter->hw.autoneg = 1;
- adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ hw->autoneg = 1;
+ hw->autoneg_advertised = ADVERTISE_1000_FULL;
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
@@ -5089,11 +4981,11 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
return 0;
}
-static int
-e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
@@ -5113,7 +5005,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
return retval;
#endif
- status = E1000_READ_REG(&adapter->hw, STATUS);
+ status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5123,40 +5015,40 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & E1000_WUFC_MC) {
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl = er32(RCTL);
rctl |= E1000_RCTL_MPE;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ ew32(RCTL, rctl);
}
- if (adapter->hw.mac_type >= e1000_82540) {
- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ if (hw->mac_type >= e1000_82540) {
+ ctrl = er32(CTRL);
/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC |
E1000_CTRL_EN_PHY_PWR_MGMT;
- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ ew32(CTRL, ctrl);
}
- if (adapter->hw.media_type == e1000_media_type_fiber ||
- adapter->hw.media_type == e1000_media_type_internal_serdes) {
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
/* keep the laser running in D3 */
- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
- E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
+ ew32(CTRL_EXT, ctrl_ext);
}
/* Allow time for pending master requests to run */
- e1000_disable_pciex_master(&adapter->hw);
+ e1000_disable_pciex_master(hw);
- E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
- E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
+ ew32(WUC, E1000_WUC_PME_EN);
+ ew32(WUFC, wufc);
pci_enable_wake(pdev, PCI_D3hot, 1);
pci_enable_wake(pdev, PCI_D3cold, 1);
} else {
- E1000_WRITE_REG(&adapter->hw, WUC, 0);
- E1000_WRITE_REG(&adapter->hw, WUFC, 0);
+ ew32(WUC, 0);
+ ew32(WUFC, 0);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
}
@@ -5169,8 +5061,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3cold, 1);
}
- if (adapter->hw.phy_type == e1000_phy_igp_3)
- e1000_phy_powerdown_workaround(&adapter->hw);
+ if (hw->phy_type == e1000_phy_igp_3)
+ e1000_phy_powerdown_workaround(hw);
if (netif_running(netdev))
e1000_free_irq(adapter);
@@ -5187,16 +5079,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
}
#ifdef CONFIG_PM
-static int
-e1000_resume(struct pci_dev *pdev)
+static int e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
u32 err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if ((err = pci_enable_device(pdev))) {
+
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
return err;
}
@@ -5205,12 +5102,15 @@ e1000_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
- return err;
+ if (netif_running(netdev)) {
+ err = e1000_request_irq(adapter);
+ if (err)
+ return err;
+ }
e1000_power_up_phy(adapter);
e1000_reset(adapter);
- E1000_WRITE_REG(&adapter->hw, WUS, ~0);
+ ew32(WUS, ~0);
e1000_init_manageability(adapter);
@@ -5223,8 +5123,8 @@ e1000_resume(struct pci_dev *pdev)
* DRV_LOAD until the interface is up. For all other cases,
* let the f/w know that the h/w is now under the control
* of the driver. */
- if (adapter->hw.mac_type != e1000_82573 ||
- !e1000_check_mng_mode(&adapter->hw))
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
e1000_get_hw_control(adapter);
return 0;
@@ -5242,16 +5142,12 @@ static void e1000_shutdown(struct pci_dev *pdev)
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
-static void
-e1000_netpoll(struct net_device *netdev)
+static void e1000_netpoll(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev);
-#ifndef CONFIG_E1000_NAPI
- adapter->clean_rx(adapter, adapter->rx_ring);
-#endif
enable_irq(adapter->pdev->irq);
}
#endif
@@ -5264,7 +5160,8 @@ e1000_netpoll(struct net_device *netdev)
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
-static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev->priv;
@@ -5290,8 +5187,14 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
- if (pci_enable_device(pdev)) {
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -5301,7 +5204,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3cold, 0);
e1000_reset(adapter);
- E1000_WRITE_REG(&adapter->hw, WUS, ~0);
+ ew32(WUS, ~0);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -5318,6 +5221,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
e1000_init_manageability(adapter);
@@ -5334,8 +5238,8 @@ static void e1000_io_resume(struct pci_dev *pdev)
* DRV_LOAD until the interface is up. For all other cases,
* let the f/w know that the h/w is now under the control
* of the driver. */
- if (adapter->hw.mac_type != e1000_82573 ||
- !e1000_check_mng_mode(&adapter->hw))
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
e1000_get_hw_control(adapter);
}
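
Both the resume path and the slot-reset handler above now pick the PCI enable call based on adapter->need_ioport: legacy parts that still require I/O port access get pci_enable_device(), everything else gets the lighter pci_enable_device_mem(). A minimal sketch of that decision as a helper; the helper name is illustrative, not something the driver defines.

#include <linux/pci.h>

/* Hypothetical helper mirroring the enable logic in e1000_resume() and
 * e1000_io_slot_reset(): legacy adapters still need I/O ports, newer ones
 * are MMIO-only and can skip claiming I/O resources. */
static int e1000_pci_enable(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->need_ioport)
		return pci_enable_device(pdev);

	return pci_enable_device_mem(pdev);
}
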
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 365626d3177e..d9298522f5ae 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -55,13 +55,13 @@
#define DEBUGOUT7 DEBUGOUT3
-#define E1000_WRITE_REG(a, reg, value) ( \
- writel((value), ((a)->hw_addr + \
- (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))))
+#define er32(reg) \
+ (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg)))
-#define E1000_READ_REG(a, reg) ( \
- readl((a)->hw_addr + \
- (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))
+#define ew32(reg, value) \
+ (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg))))
#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), ((a)->hw_addr + \
@@ -96,7 +96,7 @@
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset)))
-#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
+#define E1000_WRITE_FLUSH() er32(STATUS)
#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
writel((value), ((a)->flash_address + reg)))
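
The new er32()/ew32() macros drop the explicit hw argument and instead expand against a local variable named hw, which is why every converted function in e1000_main.c gains a "struct e1000_hw *hw = &adapter->hw;" line. A small sketch of a caller after the conversion; the function name here is only illustrative.

/* er32()/ew32() pick up the local "hw" pointer implicitly. */
static void e1000_example_toggle_vme(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;	/* required for er32()/ew32() */
	u32 ctrl;

	ctrl = er32(CTRL);		/* was E1000_READ_REG(&adapter->hw, CTRL) */
	ctrl |= E1000_CTRL_VME;
	ew32(CTRL, ctrl);		/* was E1000_WRITE_REG(&adapter->hw, CTRL, ctrl) */
}
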
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e6565ce686bc..b9f90a5d3d4d 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -213,10 +213,9 @@ struct e1000_option {
} arg;
};
-static int __devinit
-e1000_validate_option(unsigned int *value,
- const struct e1000_option *opt,
- struct e1000_adapter *adapter)
+static int __devinit e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -278,8 +277,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* in a variable in the adapter structure.
**/
-void __devinit
-e1000_check_options(struct e1000_adapter *adapter)
+void __devinit e1000_check_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
@@ -551,8 +549,7 @@ e1000_check_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on fiber adapters
**/
-static void __devinit
-e1000_check_fiber_options(struct e1000_adapter *adapter)
+static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
@@ -579,8 +576,7 @@ e1000_check_fiber_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on copper adapters
**/
-static void __devinit
-e1000_check_copper_options(struct e1000_adapter *adapter)
+static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
{
unsigned int speed, dplx, an;
int bd = adapter->bd_number;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 869544b8c05c..9c0f56b3c518 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4067,8 +4067,6 @@ static void e1000_netpoll(struct net_device *netdev)
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev);
- e1000_clean_tx_irq(adapter);
-
enable_irq(adapter->pdev->irq);
}
#endif
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 32a4f17d35fc..ecd5c71a7a8a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -2,12 +2,6 @@
* Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
*
- * This version of the driver is specific to the FADS implementation,
- * since the board contains control registers external to the processor
- * for the control of the LevelOne LXT970 transceiver. The MPC860T manual
- * describes connections using the internal parallel port I/O, which
- * is basically all of Port D.
- *
* Right now, I am very wasteful with the buffers. I allocate memory
* pages and then divide them into 2K frame buffers. This way I know I
* have buffers large enough to hold one frame within one buffer descriptor.
@@ -49,17 +43,9 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
- defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
-#else
-#include <asm/8xx_immap.h>
-#include <asm/mpc8xx.h>
-#include "commproc.h"
-#endif
#if defined(CONFIG_FEC2)
#define FEC_MAX_PORTS 2
@@ -67,7 +53,7 @@
#define FEC_MAX_PORTS 1
#endif
-#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272)
+#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt
#endif
@@ -1235,14 +1221,9 @@ static phy_info_t const * const phy_info[] = {
/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
-#ifdef CONFIG_RPXCLASSIC
-static void
-mii_link_interrupt(void *dev_id);
-#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id);
#endif
-#endif
#if defined(CONFIG_M5272)
/*
@@ -1795,24 +1776,6 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
panic("Could not allocate FEC IRQ!");
-
-#ifdef CONFIG_RPXCLASSIC
- /* Make Port C, bit 15 an input that causes interrupts.
- */
- immap->im_ioport.iop_pcpar &= ~0x0001;
- immap->im_ioport.iop_pcdir &= ~0x0001;
- immap->im_ioport.iop_pcso &= ~0x0001;
- immap->im_ioport.iop_pcint |= 0x0001;
- cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);
-
- /* Make LEDS reflect Link status.
- */
- *((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
-#endif
-#ifdef CONFIG_FADS
- if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
- panic("Could not allocate MII IRQ!");
-#endif
}
static void __inline__ fec_get_mac(struct net_device *dev)
@@ -1821,16 +1784,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
bd = (bd_t *)__res;
memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN);
-
-#ifdef CONFIG_RPXCLASSIC
- /* The Embedded Planet boards have only one MAC address in
- * the EEPROM, but can have two Ethernet ports. For the
- * FEC port, we create another address by setting one of
- * the address bits above something that would have (up to
- * now) been allocated.
- */
- dev->dev_addr[3] |= 0x80;
-#endif
}
static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
@@ -2109,13 +2062,8 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
/* This interrupt occurs when the PHY detects a link change.
*/
#ifdef HAVE_mii_link_interrupt
-#ifdef CONFIG_RPXCLASSIC
-static void
-mii_link_interrupt(void *dev_id)
-#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id)
-#endif
{
struct net_device *dev = dev_id;
struct fec_enet_private *fep = netdev_priv(dev);
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index ae9ecb7df22b..4e4f68304e82 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -197,9 +197,6 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
if (priv->link == PHY_DOWN) {
new_state = 1;
priv->link = phydev->link;
- netif_tx_schedule_all(dev);
- netif_carrier_on(dev);
- netif_start_queue(dev);
}
} else if (priv->link) {
@@ -207,8 +204,6 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
priv->link = PHY_DOWN;
priv->speed = 0;
priv->duplex = -1;
- netif_stop_queue(dev);
- netif_carrier_off(dev);
}
if (new_state && netif_msg_link(priv))
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
index 1ffbe0756a0c..d4a305ee3455 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/fs_enet/Makefile
@@ -8,12 +8,7 @@ fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
-ifeq ($(CONFIG_PPC_CPM_NEW_BINDING),y)
obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
-else
-fs_enet-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
-fs_enet-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
-endif
fs_enet-objs := fs_enet-main.o $(fs_enet-m)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 445763e5648e..9a51ec8293cc 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -36,26 +36,18 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
-#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
-#endif
-
#include "fs_enet.h"
/*************************************************/
-#ifndef CONFIG_PPC_CPM_NEW_BINDING
-static char version[] __devinitdata =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
-#endif
-
MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -738,9 +730,6 @@ static void generic_adjust_link(struct net_device *dev)
if (!fep->oldlink) {
new_state = 1;
fep->oldlink = 1;
- netif_tx_schedule_all(dev);
- netif_carrier_on(dev);
- netif_start_queue(dev);
}
if (new_state)
@@ -750,8 +739,6 @@ static void generic_adjust_link(struct net_device *dev)
fep->oldlink = 0;
fep->oldspeed = 0;
fep->oldduplex = -1;
- netif_carrier_off(dev);
- netif_stop_queue(dev);
}
if (new_state && netif_msg_link(fep))
@@ -826,6 +813,8 @@ static int fs_enet_open(struct net_device *dev)
}
phy_start(fep->phydev);
+ netif_start_queue(dev);
+
return 0;
}
@@ -958,190 +947,6 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);
-#ifndef CONFIG_PPC_CPM_NEW_BINDING
-static struct net_device *fs_init_instance(struct device *dev,
- struct fs_platform_info *fpi)
-{
- struct net_device *ndev = NULL;
- struct fs_enet_private *fep = NULL;
- int privsize, i, r, err = 0, registered = 0;
-
- fpi->fs_no = fs_get_id(fpi);
- /* guard */
- if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
- return ERR_PTR(-EINVAL);
-
- privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
- (fpi->rx_ring + fpi->tx_ring));
-
- ndev = alloc_etherdev(privsize);
- if (!ndev) {
- err = -ENOMEM;
- goto err;
- }
-
- fep = netdev_priv(ndev);
-
- fep->dev = dev;
- dev_set_drvdata(dev, ndev);
- fep->fpi = fpi;
- if (fpi->init_ioports)
- fpi->init_ioports((struct fs_platform_info *)fpi);
-
-#ifdef CONFIG_FS_ENET_HAS_FEC
- if (fs_get_fec_index(fpi->fs_no) >= 0)
- fep->ops = &fs_fec_ops;
-#endif
-
-#ifdef CONFIG_FS_ENET_HAS_SCC
- if (fs_get_scc_index(fpi->fs_no) >=0)
- fep->ops = &fs_scc_ops;
-#endif
-
-#ifdef CONFIG_FS_ENET_HAS_FCC
- if (fs_get_fcc_index(fpi->fs_no) >= 0)
- fep->ops = &fs_fcc_ops;
-#endif
-
- if (fep->ops == NULL) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s No matching ops found (%d).\n",
- ndev->name, fpi->fs_no);
- err = -EINVAL;
- goto err;
- }
-
- r = (*fep->ops->setup_data)(ndev);
- if (r != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s setup_data failed\n",
- ndev->name);
- err = r;
- goto err;
- }
-
- /* point rx_skbuff, tx_skbuff */
- fep->rx_skbuff = (struct sk_buff **)&fep[1];
- fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
-
- /* init locks */
- spin_lock_init(&fep->lock);
- spin_lock_init(&fep->tx_lock);
-
- /*
- * Set the Ethernet address.
- */
- for (i = 0; i < 6; i++)
- ndev->dev_addr[i] = fpi->macaddr[i];
-
- r = (*fep->ops->allocate_bd)(ndev);
-
- if (fep->ring_base == NULL) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
- err = r;
- goto err;
- }
-
- /*
- * Set receive and transmit descriptor base.
- */
- fep->rx_bd_base = fep->ring_base;
- fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
-
- /* initialize ring size variables */
- fep->tx_ring = fpi->tx_ring;
- fep->rx_ring = fpi->rx_ring;
-
- /*
- * The FEC Ethernet specific entries in the device structure.
- */
- ndev->open = fs_enet_open;
- ndev->hard_start_xmit = fs_enet_start_xmit;
- ndev->tx_timeout = fs_timeout;
- ndev->watchdog_timeo = 2 * HZ;
- ndev->stop = fs_enet_close;
- ndev->get_stats = fs_enet_get_stats;
- ndev->set_multicast_list = fs_set_multicast_list;
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- ndev->poll_controller = fs_enet_netpoll;
-#endif
-
- netif_napi_add(ndev, &fep->napi,
- fs_enet_rx_napi, fpi->napi_weight);
-
- ndev->ethtool_ops = &fs_ethtool_ops;
- ndev->do_ioctl = fs_ioctl;
-
- init_timer(&fep->phy_timer_list);
-
- netif_carrier_off(ndev);
-
- err = register_netdev(ndev);
- if (err != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s register_netdev failed.\n", ndev->name);
- goto err;
- }
- registered = 1;
-
-
- return ndev;
-
-err:
- if (ndev != NULL) {
- if (registered)
- unregister_netdev(ndev);
-
- if (fep && fep->ops) {
- (*fep->ops->free_bd)(ndev);
- (*fep->ops->cleanup_data)(ndev);
- }
-
- free_netdev(ndev);
- }
-
- dev_set_drvdata(dev, NULL);
-
- return ERR_PTR(err);
-}
-
-static int fs_cleanup_instance(struct net_device *ndev)
-{
- struct fs_enet_private *fep;
- const struct fs_platform_info *fpi;
- struct device *dev;
-
- if (ndev == NULL)
- return -EINVAL;
-
- fep = netdev_priv(ndev);
- if (fep == NULL)
- return -EINVAL;
-
- fpi = fep->fpi;
-
- unregister_netdev(ndev);
-
- dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
- (void __force *)fep->ring_base, fep->ring_mem_addr);
-
- /* reset it */
- (*fep->ops->cleanup_data)(ndev);
-
- dev = fep->dev;
- if (dev != NULL) {
- dev_set_drvdata(dev, NULL);
- fep->dev = NULL;
- }
-
- free_netdev(ndev);
-
- return 0;
-}
-#endif
-
/**************************************************************************************/
/* handy pointer to the immap */
@@ -1168,7 +973,6 @@ static void cleanup_immap(void)
/**************************************************************************************/
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
static int __devinit find_phy(struct device_node *np,
struct fs_platform_info *fpi)
{
@@ -1408,121 +1212,6 @@ static void __exit fs_cleanup(void)
of_unregister_platform_driver(&fs_enet_driver);
cleanup_immap();
}
-#else
-static int __devinit fs_enet_probe(struct device *dev)
-{
- struct net_device *ndev;
-
- /* no fixup - no device */
- if (dev->platform_data == NULL) {
- printk(KERN_INFO "fs_enet: "
- "probe called with no platform data; "
- "remove unused devices\n");
- return -ENODEV;
- }
-
- ndev = fs_init_instance(dev, dev->platform_data);
- if (IS_ERR(ndev))
- return PTR_ERR(ndev);
- return 0;
-}
-
-static int fs_enet_remove(struct device *dev)
-{
- return fs_cleanup_instance(dev_get_drvdata(dev));
-}
-
-static struct device_driver fs_enet_fec_driver = {
- .name = "fsl-cpm-fec",
- .bus = &platform_bus_type,
- .probe = fs_enet_probe,
- .remove = fs_enet_remove,
-#ifdef CONFIG_PM
-/* .suspend = fs_enet_suspend, TODO */
-/* .resume = fs_enet_resume, TODO */
-#endif
-};
-
-static struct device_driver fs_enet_scc_driver = {
- .name = "fsl-cpm-scc",
- .bus = &platform_bus_type,
- .probe = fs_enet_probe,
- .remove = fs_enet_remove,
-#ifdef CONFIG_PM
-/* .suspend = fs_enet_suspend, TODO */
-/* .resume = fs_enet_resume, TODO */
-#endif
-};
-
-static struct device_driver fs_enet_fcc_driver = {
- .name = "fsl-cpm-fcc",
- .bus = &platform_bus_type,
- .probe = fs_enet_probe,
- .remove = fs_enet_remove,
-#ifdef CONFIG_PM
-/* .suspend = fs_enet_suspend, TODO */
-/* .resume = fs_enet_resume, TODO */
-#endif
-};
-
-static int __init fs_init(void)
-{
- int r;
-
- printk(KERN_INFO
- "%s", version);
-
- r = setup_immap();
- if (r != 0)
- return r;
-
-#ifdef CONFIG_FS_ENET_HAS_FCC
- /* let's insert mii stuff */
- r = fs_enet_mdio_bb_init();
-
- if (r != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- "BB PHY init failed.\n");
- return r;
- }
- r = driver_register(&fs_enet_fcc_driver);
- if (r != 0)
- goto err;
-#endif
-
-#ifdef CONFIG_FS_ENET_HAS_FEC
- r = fs_enet_mdio_fec_init();
- if (r != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- "FEC PHY init failed.\n");
- return r;
- }
-
- r = driver_register(&fs_enet_fec_driver);
- if (r != 0)
- goto err;
-#endif
-
-#ifdef CONFIG_FS_ENET_HAS_SCC
- r = driver_register(&fs_enet_scc_driver);
- if (r != 0)
- goto err;
-#endif
-
- return 0;
-err:
- cleanup_immap();
- return r;
-}
-
-static void __exit fs_cleanup(void)
-{
- driver_unregister(&fs_enet_fec_driver);
- driver_unregister(&fs_enet_fcc_driver);
- driver_unregister(&fs_enet_scc_driver);
- cleanup_immap();
-}
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index e05389c49bbb..db46d2e72329 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -138,10 +138,6 @@ struct fs_enet_private {
};
/***************************************************************************/
-#ifndef CONFIG_PPC_CPM_NEW_BINDING
-int fs_enet_mdio_bb_init(void);
-int fs_enet_mdio_fec_init(void);
-#endif
void fs_init_bds(struct net_device *dev);
void fs_cleanup_bds(struct net_device *dev);
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 8268b3535b30..0a97fc2d97ec 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -33,6 +33,7 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/of_device.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
@@ -42,10 +43,6 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
-#include <asm/of_device.h>
-#endif
-
#include "fs_enet.h"
/*************************************************/
@@ -87,7 +84,6 @@ static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
static int do_pd_setup(struct fs_enet_private *fep)
{
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
struct of_device *ofdev = to_of_device(fep->dev);
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
@@ -125,44 +121,6 @@ out_fccp:
iounmap(fep->fcc.fccp);
out:
return ret;
-#else
- struct platform_device *pdev = to_platform_device(fep->dev);
- struct resource *r;
-
- /* Fill out IRQ field */
- fep->interrupt = platform_get_irq(pdev, 0);
- if (fep->interrupt < 0)
- return -EINVAL;
-
- /* Attach the memory for the FCC Parameter RAM */
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
- fep->fcc.ep = ioremap(r->start, r->end - r->start + 1);
- if (fep->fcc.ep == NULL)
- return -EINVAL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
- fep->fcc.fccp = ioremap(r->start, r->end - r->start + 1);
- if (fep->fcc.fccp == NULL)
- return -EINVAL;
-
- if (fep->fpi->fcc_regs_c) {
- fep->fcc.fcccp = (void __iomem *)fep->fpi->fcc_regs_c;
- } else {
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "fcc_regs_c");
- fep->fcc.fcccp = ioremap(r->start,
- r->end - r->start + 1);
- }
-
- if (fep->fcc.fcccp == NULL)
- return -EINVAL;
-
- fep->fcc.mem = (void __iomem *)fep->fpi->mem_offset;
- if (fep->fcc.mem == NULL)
- return -EINVAL;
-
- return 0;
-#endif
}
#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
@@ -173,17 +131,6 @@ out:
static int setup_data(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
-#ifndef CONFIG_PPC_CPM_NEW_BINDING
- struct fs_platform_info *fpi = fep->fpi;
-
- fpi->cp_command = (fpi->cp_page << 26) |
- (fpi->cp_block << 21) |
- (12 << 6);
-
- fep->fcc.idx = fs_get_fcc_index(fpi->fs_no);
- if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
- return -EINVAL;
-#endif
if (do_pd_setup(fep) != 0)
return -EINVAL;
@@ -304,9 +251,6 @@ static void restart(struct net_device *dev)
fcc_enet_t __iomem *ep = fep->fcc.ep;
dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
u16 paddrh, paddrm, paddrl;
-#ifndef CONFIG_PPC_CPM_NEW_BINDING
- u16 mem_addr;
-#endif
const unsigned char *mac;
int i;
@@ -338,19 +282,10 @@ static void restart(struct net_device *dev)
* this area.
*/
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);
W16(ep, fen_padptr, fpi->dpram_offset + 64);
-#else
- mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */
-
- W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff));
- W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff));
-
- W16(ep, fen_padptr, mem_addr + 64);
-#endif
/* fill with special symbol... */
memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 8a311d1e435b..0a7d1c5c6524 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -32,6 +32,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
@@ -43,10 +44,6 @@
#include <asm/cpm1.h>
#endif
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
-#include <asm/of_device.h>
-#endif
-
#include "fs_enet.h"
#include "fec.h"
@@ -99,7 +96,6 @@ static int whack_reset(fec_t __iomem *fecp)
static int do_pd_setup(struct fs_enet_private *fep)
{
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
struct of_device *ofdev = to_of_device(fep->dev);
fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
@@ -111,23 +107,6 @@ static int do_pd_setup(struct fs_enet_private *fep)
return -EINVAL;
return 0;
-#else
- struct platform_device *pdev = to_platform_device(fep->dev);
- struct resource *r;
-
- /* Fill out IRQ field */
- fep->interrupt = platform_get_irq_byname(pdev,"interrupt");
- if (fep->interrupt < 0)
- return -EINVAL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- fep->fec.fecp = ioremap(r->start, r->end - r->start + 1);
-
- if(fep->fec.fecp == NULL)
- return -EINVAL;
-
- return 0;
-#endif
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index e3557eca7b6d..029b3c7ef29c 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -32,6 +32,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
@@ -43,10 +44,6 @@
#include <asm/cpm1.h>
#endif
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
-#include <linux/of_platform.h>
-#endif
-
#include "fs_enet.h"
/*************************************************/
@@ -99,7 +96,6 @@ static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
static int do_pd_setup(struct fs_enet_private *fep)
{
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
struct of_device *ofdev = to_of_device(fep->dev);
fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
@@ -115,27 +111,6 @@ static int do_pd_setup(struct fs_enet_private *fep)
iounmap(fep->scc.sccp);
return -EINVAL;
}
-#else
- struct platform_device *pdev = to_platform_device(fep->dev);
- struct resource *r;
-
- /* Fill out IRQ field */
- fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
- if (fep->interrupt < 0)
- return -EINVAL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- fep->scc.sccp = ioremap(r->start, r->end - r->start + 1);
-
- if (fep->scc.sccp == NULL)
- return -EINVAL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
- fep->scc.ep = ioremap(r->start, r->end - r->start + 1);
-
- if (fep->scc.ep == NULL)
- return -EINVAL;
-#endif
return 0;
}
@@ -149,16 +124,6 @@ static int setup_data(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
-#ifndef CONFIG_PPC_CPM_NEW_BINDING
- struct fs_platform_info *fpi = fep->fpi;
-
- fep->scc.idx = fs_get_scc_index(fpi->fs_no);
- if ((unsigned int)fep->fcc.idx >= 4) /* max 4 SCCs */
- return -EINVAL;
-
- fpi->cp_command = fep->fcc.idx << 6;
-#endif
-
do_pd_setup(fep);
fep->scc.hthi = 0;
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 1620030cd33c..be4b72f4f49a 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -22,10 +22,7 @@
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
-
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
#include <linux/of_platform.h>
-#endif
#include "fs_enet.h"
@@ -110,7 +107,6 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = mdio_read,
};
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
struct device_node *np)
{
@@ -271,106 +267,3 @@ static void fs_enet_mdio_bb_exit(void)
module_init(fs_enet_mdio_bb_init);
module_exit(fs_enet_mdio_bb_exit);
-#else
-static int __devinit fs_mii_bitbang_init(struct bb_info *bitbang,
- struct fs_mii_bb_platform_info *fmpi)
-{
- bitbang->dir = (u32 __iomem *)fmpi->mdio_dir.offset;
- bitbang->dat = (u32 __iomem *)fmpi->mdio_dat.offset;
- bitbang->mdio_msk = 1U << (31 - fmpi->mdio_dat.bit);
- bitbang->mdc_msk = 1U << (31 - fmpi->mdc_dat.bit);
-
- return 0;
-}
-
-static int __devinit fs_enet_mdio_probe(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct fs_mii_bb_platform_info *pdata;
- struct mii_bus *new_bus;
- struct bb_info *bitbang;
- int err = 0;
-
- if (NULL == dev)
- return -EINVAL;
-
- bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
-
- if (NULL == bitbang)
- return -ENOMEM;
-
- bitbang->ctrl.ops = &bb_ops;
-
- new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
-
- if (NULL == new_bus)
- return -ENOMEM;
-
- new_bus->name = "BB MII Bus",
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
-
- new_bus->phy_mask = ~0x9;
- pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
-
- if (NULL == pdata) {
- printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
- return -ENODEV;
- }
-
- /*set up workspace*/
- fs_mii_bitbang_init(bitbang, pdata);
-
- new_bus->priv = bitbang;
-
- new_bus->irq = pdata->irq;
-
- new_bus->dev = dev;
- dev_set_drvdata(dev, new_bus);
-
- err = mdiobus_register(new_bus);
-
- if (0 != err) {
- printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
- new_bus->name);
- goto bus_register_fail;
- }
-
- return 0;
-
-bus_register_fail:
- free_mdio_bitbang(new_bus);
- kfree(bitbang);
-
- return err;
-}
-
-static int fs_enet_mdio_remove(struct device *dev)
-{
- struct mii_bus *bus = dev_get_drvdata(dev);
-
- mdiobus_unregister(bus);
-
- dev_set_drvdata(dev, NULL);
-
- free_mdio_bitbang(bus);
-
- return 0;
-}
-
-static struct device_driver fs_enet_bb_mdio_driver = {
- .name = "fsl-bb-mdio",
- .bus = &platform_bus_type,
- .probe = fs_enet_mdio_probe,
- .remove = fs_enet_mdio_remove,
-};
-
-int fs_enet_mdio_bb_init(void)
-{
- return driver_register(&fs_enet_bb_mdio_driver);
-}
-
-void fs_enet_mdio_bb_exit(void)
-{
- driver_unregister(&fs_enet_bb_mdio_driver);
-}
-#endif
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 8f6a43b0e0ff..695f74cc4398 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -31,15 +31,12 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
-#include <linux/of_platform.h>
-#endif
-
#include "fs_enet.h"
#include "fec.h"
@@ -51,52 +48,6 @@
#define FEC_MII_LOOPS 10000
-#ifndef CONFIG_PPC_CPM_NEW_BINDING
-static int match_has_phy (struct device *dev, void* data)
-{
- struct platform_device* pdev = container_of(dev, struct platform_device, dev);
- struct fs_platform_info* fpi;
- if(strcmp(pdev->name, (char*)data))
- {
- return 0;
- }
-
- fpi = pdev->dev.platform_data;
- if((fpi)&&(fpi->has_phy))
- return 1;
- return 0;
-}
-
-static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi)
-{
- struct resource *r;
- fec_t __iomem *fecp;
- char* name = "fsl-cpm-fec";
-
- /* we need fec in order to be useful */
- struct platform_device *fec_pdev =
- container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy),
- struct platform_device, dev);
-
- if(fec_pdev == NULL) {
- printk(KERN_ERR"Unable to find PHY for %s", name);
- return -ENODEV;
- }
-
- r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs");
-
- fec->fecp = fecp = ioremap(r->start,sizeof(fec_t));
- fec->mii_speed = fmpi->mii_speed;
-
- setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
- setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
- out_be32(&fecp->fec_ievent, FEC_ENET_MII);
- out_be32(&fecp->fec_mii_speed, fec->mii_speed);
-
- return 0;
-}
-#endif
-
static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
{
struct fec_info* fec = bus->priv;
@@ -151,7 +102,6 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
return 0;
}
-#ifdef CONFIG_PPC_CPM_NEW_BINDING
static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
{
const u32 *data;
@@ -286,95 +236,3 @@ static void fs_enet_mdio_fec_exit(void)
module_init(fs_enet_mdio_fec_init);
module_exit(fs_enet_mdio_fec_exit);
-#else
-static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct fs_mii_fec_platform_info *pdata;
- struct mii_bus *new_bus;
- struct fec_info *fec;
- int err = 0;
- if (NULL == dev)
- return -EINVAL;
- new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
-
- if (NULL == new_bus)
- return -ENOMEM;
-
- fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
-
- if (NULL == fec)
- return -ENOMEM;
-
- new_bus->name = "FEC MII Bus",
- new_bus->read = &fs_enet_fec_mii_read,
- new_bus->write = &fs_enet_fec_mii_write,
- new_bus->reset = &fs_enet_fec_mii_reset,
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
-
- pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
-
- if (NULL == pdata) {
- printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id);
- return -ENODEV;
- }
-
- /*set up workspace*/
-
- fs_mii_fec_init(fec, pdata);
- new_bus->priv = fec;
-
- new_bus->irq = pdata->irq;
-
- new_bus->dev = dev;
- dev_set_drvdata(dev, new_bus);
-
- err = mdiobus_register(new_bus);
-
- if (0 != err) {
- printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
- new_bus->name);
- goto bus_register_fail;
- }
-
- return 0;
-
-bus_register_fail:
- kfree(new_bus);
-
- return err;
-}
-
-
-static int fs_enet_fec_mdio_remove(struct device *dev)
-{
- struct mii_bus *bus = dev_get_drvdata(dev);
-
- mdiobus_unregister(bus);
-
- dev_set_drvdata(dev, NULL);
- kfree(bus->priv);
-
- bus->priv = NULL;
- kfree(bus);
-
- return 0;
-}
-
-static struct device_driver fs_enet_fec_mdio_driver = {
- .name = "fsl-cpm-fec-mdio",
- .bus = &platform_bus_type,
- .probe = fs_enet_fec_mdio_probe,
- .remove = fs_enet_fec_mdio_remove,
-};
-
-int fs_enet_mdio_fec_init(void)
-{
- return driver_register(&fs_enet_fec_mdio_driver);
-}
-
-void fs_enet_mdio_fec_exit(void)
-{
- driver_unregister(&fs_enet_fec_mdio_driver);
-}
-#endif
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 39b45e901be6..b8394cf134e8 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -134,6 +134,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int l
static void gfar_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
+#ifdef CONFIG_PM
+static void gfar_halt_nodisable(struct net_device *dev);
+#endif
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
@@ -207,6 +210,7 @@ static int gfar_probe(struct platform_device *pdev)
spin_lock_init(&priv->txlock);
spin_lock_init(&priv->rxlock);
+ spin_lock_init(&priv->bflock);
platform_set_drvdata(pdev, dev);
@@ -378,6 +382,103 @@ static int gfar_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ u32 tempval;
+
+ int magic_packet = priv->wol_en &&
+ (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ netif_device_detach(dev);
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&priv->txlock, flags);
+ spin_lock(&priv->rxlock);
+
+ gfar_halt_nodisable(dev);
+
+ /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ tempval = gfar_read(&priv->regs->maccfg1);
+
+ tempval &= ~MACCFG1_TX_EN;
+
+ if (!magic_packet)
+ tempval &= ~MACCFG1_RX_EN;
+
+ gfar_write(&priv->regs->maccfg1, tempval);
+
+ spin_unlock(&priv->rxlock);
+ spin_unlock_irqrestore(&priv->txlock, flags);
+
+#ifdef CONFIG_GFAR_NAPI
+ napi_disable(&priv->napi);
+#endif
+
+ if (magic_packet) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&priv->regs->imask, IMASK_MAG);
+
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&priv->regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&priv->regs->maccfg2, tempval);
+ } else {
+ phy_stop(priv->phydev);
+ }
+ }
+
+ return 0;
+}
+
+static int gfar_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ u32 tempval;
+ int magic_packet = priv->wol_en &&
+ (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ if (!netif_running(dev)) {
+ netif_device_attach(dev);
+ return 0;
+ }
+
+ if (!magic_packet && priv->phydev)
+ phy_start(priv->phydev);
+
+ /* Disable Magic Packet mode, in case something
+ * else woke us up.
+ */
+
+ spin_lock_irqsave(&priv->txlock, flags);
+ spin_lock(&priv->rxlock);
+
+ tempval = gfar_read(&priv->regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&priv->regs->maccfg2, tempval);
+
+ gfar_start(dev);
+
+ spin_unlock(&priv->rxlock);
+ spin_unlock_irqrestore(&priv->txlock, flags);
+
+ netif_device_attach(dev);
+
+#ifdef CONFIG_GFAR_NAPI
+ napi_enable(&priv->napi);
+#endif
+
+ return 0;
+}
+#else
+#define gfar_suspend NULL
+#define gfar_resume NULL
+#endif
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
@@ -534,8 +635,9 @@ static void init_registers(struct net_device *dev)
}
+#ifdef CONFIG_PM
/* Halt the receive and transmit queues */
-void gfar_halt(struct net_device *dev)
+static void gfar_halt_nodisable(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->regs;
@@ -558,6 +660,15 @@ void gfar_halt(struct net_device *dev)
(IEVENT_GRSC | IEVENT_GTSC)))
cpu_relax();
}
+}
+#endif
+
+/* Halt the receive and transmit queues */
+void gfar_halt(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->regs;
+ u32 tempval;
/* Disable Rx and Tx */
tempval = gfar_read(&regs->maccfg1);
@@ -1909,7 +2020,12 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
u32 events = gfar_read(&priv->regs->ievent);
/* Clear IEVENT */
- gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
+ gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
+
+ /* Magic Packet is not an error. */
+ if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ (events & IEVENT_MAG))
+ events &= ~IEVENT_MAG;
/* Hmm... */
if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
@@ -1977,6 +2093,8 @@ MODULE_ALIAS("platform:fsl-gianfar");
static struct platform_driver gfar_driver = {
.probe = gfar_probe,
.remove = gfar_remove,
+ .suspend = gfar_suspend,
+ .resume = gfar_resume,
.driver = {
.name = "fsl-gianfar",
.owner = THIS_MODULE,
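
The suspend path added above keeps the controller partially alive only when wake-on-LAN is in use: transmit is always stopped, while receive stays enabled and the magic-packet interrupt is unmasked only if priv->wol_en is set and the hardware advertises FSL_GIANFAR_DEV_HAS_MAGIC_PACKET. A condensed sketch of that decision using the register helpers from the driver; the helper name is illustrative.

/* Condensed from gfar_suspend(): arm (or not) magic-packet wake-up. */
static void gfar_arm_wol(struct gfar_private *priv, int magic_packet)
{
	u32 tempval;

	tempval = gfar_read(&priv->regs->maccfg1);
	tempval &= ~MACCFG1_TX_EN;		/* never transmit while suspended */
	if (!magic_packet)
		tempval &= ~MACCFG1_RX_EN;	/* no WoL: stop the receiver too */
	gfar_write(&priv->regs->maccfg1, tempval);

	if (magic_packet) {
		gfar_write(&priv->regs->imask, IMASK_MAG);	/* wake on Magic Packet only */
		tempval = gfar_read(&priv->regs->maccfg2);
		gfar_write(&priv->regs->maccfg2, tempval | MACCFG2_MPEN);
	}
}
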
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index bead71cb2b16..d59df98bd636 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -157,6 +157,7 @@ extern const char gfar_driver_version[];
#define MACCFG2_GMII 0x00000200
#define MACCFG2_HUGEFRAME 0x00000020
#define MACCFG2_LENGTHCHECK 0x00000010
+#define MACCFG2_MPEN 0x00000008
#define ECNTRL_INIT_SETTINGS 0x00001000
#define ECNTRL_TBI_MODE 0x00000020
@@ -229,6 +230,7 @@ extern const char gfar_driver_version[];
#define IEVENT_CRL 0x00020000
#define IEVENT_XFUN 0x00010000
#define IEVENT_RXB0 0x00008000
+#define IEVENT_MAG 0x00000800
#define IEVENT_GRSC 0x00000100
#define IEVENT_RXF0 0x00000080
#define IEVENT_FIR 0x00000008
@@ -241,7 +243,8 @@ extern const char gfar_driver_version[];
#define IEVENT_ERR_MASK \
(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
- | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR)
+ | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
+ | IEVENT_MAG)
#define IMASK_INIT_CLEAR 0x00000000
#define IMASK_BABR 0x80000000
@@ -259,6 +262,7 @@ extern const char gfar_driver_version[];
#define IMASK_CRL 0x00020000
#define IMASK_XFUN 0x00010000
#define IMASK_RXB0 0x00008000
+#define IMASK_MAG 0x00000800
#define IMASK_GTSC 0x00000100
#define IMASK_RXFEN0 0x00000080
#define IMASK_FIR 0x00000008
@@ -726,10 +730,14 @@ struct gfar_private {
unsigned int fifo_starve;
unsigned int fifo_starve_off;
+ /* Bitfield update lock */
+ spinlock_t bflock;
+
unsigned char vlan_enable:1,
rx_csum_enable:1,
extended_hash:1,
- bd_stash_en:1;
+ bd_stash_en:1,
+ wol_en:1; /* Wake-on-LAN enabled */
unsigned short padding;
unsigned int interruptTransmit;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6007147cc1e9..fb7d3ccc0fdc 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -479,14 +479,13 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
{
struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
int err = 0;
if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
return -EOPNOTSUPP;
if (dev->flags & IFF_UP) {
- unsigned long flags;
-
/* Halt TX and RX, and process the frames which
* have already been received */
spin_lock_irqsave(&priv->txlock, flags);
@@ -502,7 +501,9 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
stop_gfar(dev);
}
+ spin_lock_irqsave(&priv->bflock, flags);
priv->rx_csum_enable = data;
+ spin_unlock_irqrestore(&priv->bflock, flags);
if (dev->flags & IFF_UP)
err = startup_gfar(dev);
@@ -564,6 +565,38 @@ static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
priv->msg_enable = data;
}
+#ifdef CONFIG_PM
+static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ wol->wolopts != 0)
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->bflock, flags);
+ priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
+ spin_unlock_irqrestore(&priv->bflock, flags);
+
+ return 0;
+}
+#endif
const struct ethtool_ops gfar_ethtool_ops = {
.get_settings = gfar_gsettings,
@@ -585,4 +618,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_tx_csum = gfar_set_tx_csum,
.get_msglevel = gfar_get_msglevel,
.set_msglevel = gfar_set_msglevel,
+#ifdef CONFIG_PM
+ .get_wol = gfar_get_wol,
+ .set_wol = gfar_set_wol,
+#endif
};
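
wol_en joins vlan_enable, rx_csum_enable and the other one-bit flags in a shared bitfield (see the gianfar.h hunk above), so concurrent read-modify-write of any of them would race without a lock; that is what the new bflock spinlock protects in gfar_set_wol() and gfar_set_rx_csum(). A minimal sketch of the locked update, with an illustrative helper name.

/* Bitfield flags share one storage unit, so updates take priv->bflock. */
static void gfar_set_wol_en(struct gfar_private *priv, int enable)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->bflock, flags);
	priv->wol_en = enable ? 1 : 0;
	spin_unlock_irqrestore(&priv->bflock, flags);
}
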
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index b6500b2aacf2..58f4b1d7bf1f 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -123,6 +123,7 @@ static LIST_HEAD(bpq_devices);
* off into a separate class since they always nest.
*/
static struct lock_class_key bpq_netdev_xmit_lock_key;
+static struct lock_class_key bpq_netdev_addr_lock_key;
static void bpq_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
@@ -133,6 +134,7 @@ static void bpq_set_lockdep_class_one(struct net_device *dev,
static void bpq_set_lockdep_class(struct net_device *dev)
{
+ lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
}
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index c2c4f49d7578..8239939554bc 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -262,7 +262,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
}
outw(Perf_Page, ioaddr + HP_PAGING);
- NS8390_init(dev, 0);
+ NS8390p_init(dev, 0);
/* Leave the 8390 and HP chip reset. */
outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 8281209ededf..0a8c64930ad3 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -389,7 +389,7 @@ static void __init
hp_init_card(struct net_device *dev)
{
int irq = dev->irq;
- NS8390_init(dev, 0);
+ NS8390p_init(dev, 0);
outb_p(irqmap[irq&0x0f] | HP_RUN,
dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
return;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 00527805e4f1..e5a6e2e84540 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
*/
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -52,7 +53,9 @@
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
+#include <asm/iommu.h>
#include <asm/uaccess.h>
+#include <asm/firmware.h>
#include <linux/seq_file.h>
#include "ibmveth.h"
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
+
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
u32 i;
u32 count = pool->size - atomic_read(&pool->available);
u32 buffers_added = 0;
+ struct sk_buff *skb;
+ unsigned int free_index, index;
+ u64 correlator;
+ unsigned long lpar_rc;
+ dma_addr_t dma_addr;
mb();
for(i = 0; i < count; ++i) {
- struct sk_buff *skb;
- unsigned int free_index, index;
- u64 correlator;
union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
- dma_addr_t dma_addr;
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_addr))
+ goto failure;
+
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_SUCCESS) {
- pool->free_map[free_index] = index;
- pool->skbuff[index] = NULL;
- if (pool->consumer_index == 0)
- pool->consumer_index = pool->size - 1;
- else
- pool->consumer_index--;
- dma_unmap_single(&adapter->vdev->dev,
- pool->dma_addr[index], pool->buff_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- adapter->replenish_add_buff_failure++;
- break;
- } else {
+ if (lpar_rc != H_SUCCESS)
+ goto failure;
+ else {
buffers_added++;
adapter->replenish_add_buff_success++;
}
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
mb();
atomic_add(buffers_added, &(pool->available));
+ return;
+
+failure:
+ pool->free_map[free_index] = index;
+ pool->skbuff[index] = NULL;
+ if (pool->consumer_index == 0)
+ pool->consumer_index = pool->size - 1;
+ else
+ pool->consumer_index--;
+ if (!dma_mapping_error(dma_addr))
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
}
/* replenish routine */
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
adapter->replenish_task_cycles++;
- for(i = 0; i < IbmVethNumBufferPools; i++)
+ for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
if(adapter->rx_buff_pool[i].active)
ibmveth_replenish_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
@@ -472,6 +487,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
if (adapter->rx_buff_pool[i].active)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
+
+ if (adapter->bounce_buffer != NULL) {
+ if (!dma_mapping_error(adapter->bounce_buffer_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->bounce_buffer);
+ adapter->bounce_buffer = NULL;
+ }
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -607,6 +634,24 @@ static int ibmveth_open(struct net_device *netdev)
return rc;
}
+ adapter->bounce_buffer =
+ kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ ibmveth_error_printk("unable to allocate bounce buffer\n");
+ ibmveth_cleanup(adapter);
+ napi_disable(&adapter->napi);
+ return -ENOMEM;
+ }
+ adapter->bounce_buffer_dma =
+ dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+ netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+ ibmveth_error_printk("unable to map bounce buffer\n");
+ ibmveth_cleanup(adapter);
+ napi_disable(&adapter->napi);
+ return -ENOMEM;
+ }
+
ibmveth_debug_printk("initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
@@ -853,10 +898,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_packets = 0;
unsigned int tx_send_failed = 0;
unsigned int tx_map_failed = 0;
+ int used_bounce = 0;
+ unsigned long data_dma_addr;
desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
- desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
+ data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -875,12 +922,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
buf[1] = 0;
}
- if (dma_mapping_error(desc.fields.address)) {
- ibmveth_error_printk("tx: unable to map xmit buffer\n");
+ if (dma_mapping_error(data_dma_addr)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ ibmveth_error_printk("tx: unable to map xmit buffer\n");
+ skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+ skb->len);
+ desc.fields.address = adapter->bounce_buffer_dma;
tx_map_failed++;
- tx_dropped++;
- goto out;
- }
+ used_bounce = 1;
+ } else
+ desc.fields.address = data_dma_addr;
/* send the frame. Arbitrarily set retrycount to 1024 */
correlator = 0;
@@ -904,8 +955,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
}
- dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
- skb->len, DMA_TO_DEVICE);
+ if (!used_bounce)
+ dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
+ skb->len, DMA_TO_DEVICE);
out: spin_lock_irqsave(&adapter->stats_lock, flags);
netdev->stats.tx_dropped += tx_dropped;
@@ -1053,9 +1105,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
struct ibmveth_adapter *adapter = dev->priv;
+ struct vio_dev *viodev = adapter->vdev;
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
- int reinit = 0;
- int i, rc;
+ int i;
if (new_mtu < IBMVETH_MAX_MTU)
return -EINVAL;
@@ -1067,23 +1119,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
if (i == IbmVethNumBufferPools)
return -EINVAL;
+ /* Deactivate all the buffer pools so that the next loop can activate
+ only the buffer pools necessary to hold the new MTU */
+ for (i = 0; i < IbmVethNumBufferPools; i++)
+ if (adapter->rx_buff_pool[i].active) {
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+ adapter->rx_buff_pool[i].active = 0;
+ }
+
/* Look for an active buffer pool that can hold the new MTU */
for(i = 0; i<IbmVethNumBufferPools; i++) {
- if (!adapter->rx_buff_pool[i].active) {
- adapter->rx_buff_pool[i].active = 1;
- reinit = 1;
- }
+ adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
- if (reinit && netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
adapter->pool_config = 1;
ibmveth_close(adapter->netdev);
adapter->pool_config = 0;
dev->mtu = new_mtu;
- if ((rc = ibmveth_open(adapter->netdev)))
- return rc;
- } else
- dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
+ return ibmveth_open(adapter->netdev);
+ }
+ dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
return 0;
}
}
@@ -1098,6 +1161,46 @@ static void ibmveth_poll_controller(struct net_device *dev)
}
#endif
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+ struct ibmveth_adapter *adapter;
+ unsigned long ret;
+ int i;
+ int rxqentries = 1;
+
+ /* netdev inits at probe time along with the structures we need below*/
+ if (netdev == NULL)
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+ adapter = netdev_priv(netdev);
+
+ ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+ for (i = 0; i < IbmVethNumBufferPools; i++) {
+ /* add the size of the active receive buffers */
+ if (adapter->rx_buff_pool[i].active)
+ ret +=
+ adapter->rx_buff_pool[i].size *
+ IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+ buff_size);
+ rxqentries += adapter->rx_buff_pool[i].size;
+ }
+ /* add the size of the receive queue entries */
+ ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+ return ret;
+}
+
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
int rc, i;
@@ -1242,6 +1345,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
ibmveth_proc_unregister_adapter(adapter);
free_netdev(netdev);
+ dev_set_drvdata(&dev->dev, NULL);
+
return 0;
}
@@ -1402,14 +1507,15 @@ const char * buf, size_t count)
return -EPERM;
}
- pool->active = 0;
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
+ pool->active = 0;
adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
}
+ pool->active = 0;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
@@ -1485,6 +1591,7 @@ static struct vio_driver ibmveth_driver = {
.id_table = ibmveth_device_table,
.probe = ibmveth_probe,
.remove = ibmveth_remove,
+ .get_desired_dma = ibmveth_get_desired_dma,
.driver = {
.name = ibmveth_driver_name,
.owner = THIS_MODULE,
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 41f61cd18852..d28186948752 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -93,9 +93,12 @@ static inline long h_illan_attributes(unsigned long unit_address,
plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
#define IbmVethNumBufferPools 5
+#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MAX_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_BUFF_LIST_SIZE 4096
+#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
@@ -143,6 +146,8 @@ struct ibmveth_adapter {
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
+ void *bounce_buffer;
+ dma_addr_t bounce_buffer_dma;
/* adapter specific stats */
u64 replenish_task_cycles;
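The ibmveth changes above add a pre-allocated, pre-mapped bounce buffer so a transmit is no longer dropped when dma_map_single() fails (which can happen under CMO entitlement pressure); the frame is copied into the bounce buffer and sent from its fixed DMA address instead. Below is a minimal sketch of that fallback pattern, not the driver itself: the example_ names are hypothetical, and it uses the one-argument dma_mapping_error() form that this patch itself uses.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_tx_ctx {
	struct device *dev;
	void *bounce_buffer;		/* kmalloc'ed when the device is opened */
	dma_addr_t bounce_buffer_dma;	/* mapped once, DMA_BIDIRECTIONAL */
};

/* Map the skb for transmit; on mapping failure, fall back to the
 * pre-mapped bounce buffer instead of dropping the frame. */
static dma_addr_t example_map_for_tx(struct example_tx_ctx *ctx,
				     struct sk_buff *skb, int *used_bounce)
{
	dma_addr_t addr = dma_map_single(ctx->dev, skb->data, skb->len,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(addr)) {
		/* copy the linear data and send from the bounce buffer */
		skb_copy_from_linear_data(skb, ctx->bounce_buffer, skb->len);
		*used_bounce = 1;	/* caller must not unmap this address */
		return ctx->bounce_buffer_dma;
	}

	*used_bounce = 0;
	return addr;
}

In the real driver the bounce buffer is unmapped and freed at cleanup time, as the ibmveth_cleanup() hunk above shows.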
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 1b7cb29fe68e..b602c4dd0d14 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -385,7 +385,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *rx_ring = &adapter->rx_ring[i];
- rx_ring->buddy = 0;
+ rx_ring->buddy = NULL;
igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
adapter->eims_enable_mask |= rx_ring->eims_value;
if (rx_ring->itr_val)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index be7b723c924f..e5f3da8468cc 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -70,8 +70,6 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
- board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
board_82598 },
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 0496d16f9de5..daba82bbcb56 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -164,9 +164,7 @@ static void macb_handle_link_change(struct net_device *dev)
}
if (phydev->link != bp->link) {
- if (phydev->link)
- netif_tx_schedule_all(dev);
- else {
+ if (!phydev->link) {
bp->speed = 0;
bp->duplex = -1;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index efbc15567dd3..42394505bb50 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -276,6 +276,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
* separate class since they always nest.
*/
static struct lock_class_key macvlan_netdev_xmit_lock_key;
+static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
@@ -295,6 +296,8 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
static void macvlan_set_lockdep_class(struct net_device *dev)
{
+ lockdep_set_class(&dev->addr_list_lock,
+ &macvlan_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 0b32648a2136..4cb364e67dc6 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -287,7 +287,7 @@ int meth_reset(struct net_device *dev)
/* Initial mode: 10 | Half-duplex | Accept normal packets */
priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
- if (dev->flags | IFF_PROMISC)
+ if (dev->flags & IFF_PROMISC)
priv->mac_ctrl |= METH_PROMISC;
mace->eth.mac_ctrl = priv->mac_ctrl;
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 70dff94a8bc6..04d5bc69a6f8 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -67,6 +67,8 @@ enum {
CMD_STAT_BAD_INDEX = 0x0a,
/* FW image corrupted: */
CMD_STAT_BAD_NVMEM = 0x0b,
+ /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
+ CMD_STAT_ICM_ERROR = 0x0c,
/* Attempt to modify a QP/EE which is not in the presumed state: */
CMD_STAT_BAD_QP_STATE = 0x10,
/* Bad segment parameters (Address/Size): */
@@ -119,6 +121,7 @@ static int mlx4_status_to_errno(u8 status)
[CMD_STAT_BAD_RES_STATE] = -EBADF,
[CMD_STAT_BAD_INDEX] = -EBADF,
[CMD_STAT_BAD_NVMEM] = -EFAULT,
+ [CMD_STAT_ICM_ERROR] = -ENFILE,
[CMD_STAT_BAD_QP_STATE] = -EINVAL,
[CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
[CMD_STAT_REG_BOUND] = -EBUSY,
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index e141a1513f07..ea3a09aaa844 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -33,6 +33,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 2b5006b9be67..57278224ba1e 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -46,6 +46,10 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
+static int enable_qos;
+module_param(enable_qos, bool, 0444);
+MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
@@ -198,7 +202,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
-#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97
+#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
@@ -373,12 +377,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- if (dev_cap->bmme_flags & 1)
- mlx4_dbg(dev, "Base MM extensions: yes "
- "(flags %d, rsvd L_Key %08x)\n",
- dev_cap->bmme_flags, dev_cap->reserved_lkey);
- else
- mlx4_dbg(dev, "Base MM extensions: no\n");
+ mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+ dev_cap->bmme_flags, dev_cap->reserved_lkey);
/*
* Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
@@ -737,6 +737,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
+ /* Enable QoS support if module parameter set */
+ if (enable_qos)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index a0e046c149b7..fbf0e22be122 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -98,7 +98,7 @@ struct mlx4_dev_cap {
int cmpt_entry_sz;
int mtt_entry_sz;
int resize_srq;
- u8 bmme_flags;
+ u32 bmme_flags;
u32 reserved_lkey;
u64 max_icm_sz;
int max_gso_sz;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index d3736013fe9b..8e1d24cda1b0 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -158,6 +158,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
+ dev->caps.bmme_flags = dev_cap->bmme_flags;
+ dev->caps.reserved_lkey = dev_cap->reserved_lkey;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index a4023c2dd050..78038499cff5 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -118,6 +118,7 @@ struct mlx4_bitmap {
struct mlx4_buddy {
unsigned long **bits;
+ unsigned int *num_free;
int max_order;
spinlock_t lock;
};
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 03a9abcce524..a3c04c5f12c2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -47,7 +47,7 @@ struct mlx4_mpt_entry {
__be32 flags;
__be32 qpn;
__be32 key;
- __be32 pd;
+ __be32 pd_flags;
__be64 start;
__be64 length;
__be32 lkey;
@@ -61,11 +61,15 @@ struct mlx4_mpt_entry {
} __attribute__((packed));
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
#define MLX4_MPT_FLAG_REGION (1 << 8)
+#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 26)
+#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
+
#define MLX4_MTT_FLAG_PRESENT 1
#define MLX4_MPT_STATUS_SW 0xF0
@@ -79,23 +83,26 @@ static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
spin_lock(&buddy->lock);
- for (o = order; o <= buddy->max_order; ++o) {
- m = 1 << (buddy->max_order - o);
- seg = find_first_bit(buddy->bits[o], m);
- if (seg < m)
- goto found;
- }
+ for (o = order; o <= buddy->max_order; ++o)
+ if (buddy->num_free[o]) {
+ m = 1 << (buddy->max_order - o);
+ seg = find_first_bit(buddy->bits[o], m);
+ if (seg < m)
+ goto found;
+ }
spin_unlock(&buddy->lock);
return -1;
found:
clear_bit(seg, buddy->bits[o]);
+ --buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
set_bit(seg ^ 1, buddy->bits[o]);
+ ++buddy->num_free[o];
}
spin_unlock(&buddy->lock);
@@ -113,11 +120,13 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
while (test_bit(seg ^ 1, buddy->bits[order])) {
clear_bit(seg ^ 1, buddy->bits[order]);
+ --buddy->num_free[order];
seg >>= 1;
++order;
}
set_bit(seg, buddy->bits[order]);
+ ++buddy->num_free[order];
spin_unlock(&buddy->lock);
}
@@ -131,7 +140,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
GFP_KERNEL);
- if (!buddy->bits)
+ buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
+ GFP_KERNEL);
+ if (!buddy->bits || !buddy->num_free)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
@@ -143,6 +154,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
}
set_bit(0, buddy->bits[buddy->max_order]);
+ buddy->num_free[buddy->max_order] = 1;
return 0;
@@ -150,9 +162,10 @@ err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
kfree(buddy->bits[i]);
+err_out:
kfree(buddy->bits);
+ kfree(buddy->num_free);
-err_out:
return -ENOMEM;
}
@@ -164,6 +177,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
kfree(buddy->bits[i]);
kfree(buddy->bits);
+ kfree(buddy->num_free);
}
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
@@ -314,21 +328,30 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
memset(mpt_entry, 0, sizeof *mpt_entry);
- mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS |
- MLX4_MPT_FLAG_MIO |
+ mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
MLX4_MPT_FLAG_REGION |
mr->access);
mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
- mpt_entry->pd = cpu_to_be32(mr->pd);
+ mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->start = cpu_to_be64(mr->iova);
mpt_entry->length = cpu_to_be64(mr->size);
mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_seg = 0;
- } else
+ } else {
mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ }
+
+ if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+ /* fast register MR in free state */
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+ mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG);
+ } else {
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+ }
err = mlx4_SW2HW_MPT(dev, mailbox,
key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
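The mr.c hunks above add a per-order free counter to the MTT buddy allocator so mlx4_buddy_alloc() can skip orders that have no free blocks instead of scanning their bitmaps with find_first_bit(). A minimal, self-contained user-space model of that bookkeeping follows (hypothetical, for illustration only; locking and the kernel bitmap helpers are omitted):

#include <stdio.h>

#define MAX_ORDER 4			/* 2^4 = 16 segments total */
#define NSEGS (1u << MAX_ORDER)

static unsigned char bits[MAX_ORDER + 1][NSEGS];	/* 1 = free block */
static unsigned int num_free[MAX_ORDER + 1];		/* free blocks per order */

static int buddy_alloc(int order)
{
	int o, seg;

	for (o = order; o <= MAX_ORDER; ++o) {
		if (!num_free[o])		/* cheap skip, no bitmap scan */
			continue;
		for (seg = 0; seg < (int)(NSEGS >> o); ++seg)
			if (bits[o][seg])
				goto found;
	}
	return -1;
found:
	bits[o][seg] = 0;
	--num_free[o];
	while (o > order) {			/* split down, leaving free buddies */
		--o;
		seg <<= 1;
		bits[o][seg ^ 1] = 1;
		++num_free[o];
	}
	return seg << order;			/* first segment of the block */
}

int main(void)
{
	bits[MAX_ORDER][0] = 1;			/* start with one maximal free block */
	num_free[MAX_ORDER] = 1;
	printf("order-1 alloc at seg %d\n", buddy_alloc(1));	/* 0 */
	printf("order-0 alloc at seg %d\n", buddy_alloc(0));	/* 2 */
	return 0;
}

Splitting a larger block decrements the counter at the order it came from and increments it at every order where a buddy is left behind, which mirrors the ++buddy->num_free[o] in the split loop of the patch above.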
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 3a93c5f0f7ab..aa616892d09c 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -91,6 +91,13 @@ EXPORT_SYMBOL_GPL(mlx4_uar_free);
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
+ if (dev->caps.num_uars <= 128) {
+ mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
+ dev->caps.num_uars);
+ mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
+ return -ENODEV;
+ }
+
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
max(128, dev->caps.reserved_uars));
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 83a877f3a553..8a97a0066a88 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2112,7 +2112,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
mv643xx_eth_irq(dev->irq, dev);
- wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_CAUSE_EXT);
+ wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
}
#endif
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index b3981ed972bf..3ab0e5289f7a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -125,7 +125,6 @@ struct myri10ge_cmd {
struct myri10ge_rx_buf {
struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
- u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */
struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
struct myri10ge_rx_buffer_state *info;
struct page *page;
@@ -140,7 +139,6 @@ struct myri10ge_rx_buf {
struct myri10ge_tx_buf {
struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
- u8 __iomem *wc_fifo; /* w/c send fifo address */
struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
char *req_bytes;
struct myri10ge_tx_buffer_state *info;
@@ -332,10 +330,6 @@ MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
static int myri10ge_reset_recover = 1;
-static int myri10ge_wcfifo = 0;
-module_param(myri10ge_wcfifo, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
-
static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
@@ -1218,14 +1212,8 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
/* copy 8 descriptors to the firmware at a time */
if ((idx & 7) == 7) {
- if (rx->wc_fifo == NULL)
- myri10ge_submit_8rx(&rx->lanai[idx - 7],
- &rx->shadow[idx - 7]);
- else {
- mb();
- myri10ge_pio_copy(rx->wc_fifo,
- &rx->shadow[idx - 7], 64);
- }
+ myri10ge_submit_8rx(&rx->lanai[idx - 7],
+ &rx->shadow[idx - 7]);
}
}
}
@@ -2229,18 +2217,6 @@ static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
(mgp->sram + cmd.data0);
- if (myri10ge_wcfifo && mgp->wc_enabled) {
- ss->tx.wc_fifo = (u8 __iomem *)
- mgp->sram + MXGEFW_ETH_SEND_4 + 64 * slice;
- ss->rx_small.wc_fifo = (u8 __iomem *)
- mgp->sram + MXGEFW_ETH_RECV_SMALL + 64 * slice;
- ss->rx_big.wc_fifo = (u8 __iomem *)
- mgp->sram + MXGEFW_ETH_RECV_BIG + 64 * slice;
- } else {
- ss->tx.wc_fifo = NULL;
- ss->rx_small.wc_fifo = NULL;
- ss->rx_big.wc_fifo = NULL;
- }
return status;
}
@@ -2573,27 +2549,6 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
mb();
}
-static inline void
-myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
- struct mcp_kreq_ether_send *src, int cnt)
-{
- tx->req += cnt;
- mb();
- while (cnt >= 4) {
- myri10ge_pio_copy(tx->wc_fifo, src, 64);
- mb();
- src += 4;
- cnt -= 4;
- }
- if (cnt > 0) {
- /* pad it to 64 bytes. The src is 64 bytes bigger than it
- * needs to be so that we don't overrun it */
- myri10ge_pio_copy(tx->wc_fifo + MXGEFW_ETH_SEND_OFFSET(cnt),
- src, 64);
- mb();
- }
-}
-
/*
* Transmit a packet. We need to split the packet so that a single
* segment does not cross myri10ge->tx_boundary, so this makes segment
@@ -2830,10 +2785,7 @@ again:
MXGEFW_FLAGS_FIRST)));
idx = ((count - 1) + tx->req) & tx->mask;
tx->info[idx].last = 1;
- if (tx->wc_fifo == NULL)
- myri10ge_submit_req(tx, tx->req_list, count);
- else
- myri10ge_submit_req_wc(tx, tx->req_list, count);
+ myri10ge_submit_req(tx, tx->req_list, count);
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
tx->stop_queue++;
@@ -3768,14 +3720,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (mgp->sram_size > mgp->board_span) {
dev_err(&pdev->dev, "board span %ld bytes too small\n",
mgp->board_span);
- goto abort_with_wc;
+ goto abort_with_mtrr;
}
- mgp->sram = ioremap(mgp->iomem_base, mgp->board_span);
+ mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
if (mgp->sram == NULL) {
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
mgp->board_span, mgp->iomem_base);
status = -ENXIO;
- goto abort_with_wc;
+ goto abort_with_mtrr;
}
memcpy_fromio(mgp->eeprom_strings,
mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE,
@@ -3876,7 +3828,7 @@ abort_with_firmware:
abort_with_ioremap:
iounmap(mgp->sram);
-abort_with_wc:
+abort_with_mtrr:
#ifdef CONFIG_MTRR
if (mgp->mtrr >= 0)
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 14126973bd12..2fec6122c7fa 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -355,7 +355,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
/* Read the 16 bytes of station address PROM.
- We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We must first initialize registers, similar to NS8390p_init(eifdev, 0).
We can't reliably read the SAPROM address without this.
(I learned the hard way!). */
{
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index 8f7256346922..332df75a9ab6 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -404,7 +404,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
/* Read the 16 bytes of station address PROM.
We must first initialize registers, similar to
- NS8390_init(eifdev, 0).
+ NS8390p_init(eifdev, 0).
We can't reliably read the SAPROM address without this.
(I learned the hard way!). */
{
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index a07cdc6f7384..8e7c4c910d2a 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -32,4 +32,4 @@
obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
- netxen_nic_isr.o netxen_nic_ethtool.o netxen_nic_niu.o
+ netxen_nic_ethtool.o netxen_nic_niu.o netxen_nic_ctx.o
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index da4c4fb97064..8e736614407d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -54,6 +54,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/vmalloc.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -63,10 +64,12 @@
#include "netxen_nic_hw.h"
-#define _NETXEN_NIC_LINUX_MAJOR 3
-#define _NETXEN_NIC_LINUX_MINOR 4
-#define _NETXEN_NIC_LINUX_SUBVERSION 18
-#define NETXEN_NIC_LINUX_VERSIONID "3.4.18"
+#define _NETXEN_NIC_LINUX_MAJOR 4
+#define _NETXEN_NIC_LINUX_MINOR 0
+#define _NETXEN_NIC_LINUX_SUBVERSION 0
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.0"
+
+#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define NETXEN_NUM_FLASH_SECTORS (64)
#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
@@ -84,7 +87,7 @@
#define TX_RINGSIZE \
(sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
#define RCV_BUFFSIZE \
- (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
+ (sizeof(struct netxen_rx_buffer) * rds_ring->max_rx_desc_count)
#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
#define NETXEN_NETDEV_STATUS 0x1
@@ -111,6 +114,13 @@
#define NX_P2_C0 0x24
#define NX_P2_C1 0x25
+#define NX_P3_A0 0x30
+#define NX_P3_A2 0x30
+#define NX_P3_B0 0x40
+#define NX_P3_B1 0x41
+
+#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
+#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
#define FIRST_PAGE_GROUP_START 0
#define FIRST_PAGE_GROUP_END 0x100000
@@ -125,6 +135,16 @@
#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
+#define P2_MAX_MTU (8000)
+#define P3_MAX_MTU (9600)
+#define NX_ETHERMTU 1500
+#define NX_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define NX_RX_NORMAL_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU)
+#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU)
+#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU)
+#define NX_CT_DEFAULT_RX_BUF_LEN 2048
+
#define MAX_RX_BUFFER_LENGTH 1760
#define MAX_RX_JUMBO_BUFFER_LENGTH 8062
#define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512)
@@ -132,7 +152,6 @@
#define RX_JUMBO_DMA_MAP_LEN \
(MAX_RX_JUMBO_BUFFER_LENGTH - 2)
#define RX_LRO_DMA_MAP_LEN (MAX_RX_LRO_BUFFER_LENGTH - 2)
-#define NETXEN_ROM_ROUNDUP 0x80000000ULL
/*
* Maximum number of ring contexts
@@ -140,16 +159,16 @@
#define MAX_RING_CTX 1
/* Opcodes to be used with the commands */
-enum {
- TX_ETHER_PKT = 0x01,
-/* The following opcodes are for IP checksum */
- TX_TCP_PKT,
- TX_UDP_PKT,
- TX_IP_PKT,
- TX_TCP_LSO,
- TX_IPSEC,
- TX_IPSEC_CMD
-};
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
/* The following opcodes are for internal consumption. */
#define NETXEN_CONTROL_OP 0x10
@@ -191,6 +210,7 @@ enum {
#define MAX_RCV_DESCRIPTORS 16384
#define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4)
#define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4)
+#define MAX_RCV_DESCRIPTORS_10G 8192
#define MAX_JUMBO_RCV_DESCRIPTORS 1024
#define MAX_LRO_RCV_DESCRIPTORS 64
#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
@@ -219,8 +239,6 @@ enum {
#define MPORT_MULTI_FUNCTION_MODE 0x2222
#include "netxen_nic_phan_reg.h"
-extern unsigned long long netxen_dma_mask;
-extern unsigned long last_schedule_time;
/*
* NetXen host-peg signal message structure
@@ -289,7 +307,7 @@ struct netxen_ring_ctx {
#define netxen_set_cmd_desc_port(cmd_desc, var) \
((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \
- ((cmd_desc)->port_ctxid |= ((var) & 0xF0))
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
#define netxen_set_cmd_desc_flags(cmd_desc, val) \
(cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \
@@ -377,8 +395,8 @@ struct rcv_desc {
};
/* opcode field in status_desc */
-#define RCV_NIC_PKT (0xA)
-#define STATUS_NIC_PKT ((RCV_NIC_PKT) << 12)
+#define NETXEN_NIC_RXPKT_DESC 0x04
+#define NETXEN_OLD_RXPKT_DESC 0x3f
/* for status field in status_desc */
#define STATUS_NEED_CKSUM (1)
@@ -410,6 +428,8 @@ struct rcv_desc {
(((sts_data) >> 28) & 0xFFFF)
#define netxen_get_sts_prot(sts_data) \
(((sts_data) >> 44) & 0x0F)
+#define netxen_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
#define netxen_get_sts_opcode(sts_data) \
(((sts_data) >> 58) & 0x03F)
@@ -424,17 +444,30 @@ struct rcv_desc {
struct status_desc {
/* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
- 28-43 reference_handle, 44-47 protocol, 48-52 unused
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
53-55 desc_cnt, 56-57 owner, 58-63 opcode
*/
__le64 status_desc_data;
- __le32 hash_value;
- u8 hash_type;
- u8 msg_type;
- u8 unused;
- /* Bit pattern: 0-6 lro_count indicates frag sequence,
- 7 last_frag indicates last frag */
- u8 lro;
+ union {
+ struct {
+ __le32 hash_value;
+ u8 hash_type;
+ u8 msg_type;
+ u8 unused;
+ union {
+ /* Bit pattern: 0-6 lro_count indicates frag
+ * sequence, 7 last_frag indicates last frag
+ */
+ u8 lro;
+
+ /* chained buffers */
+ u8 nr_frags;
+ };
+ };
+ struct {
+ __le16 frag_handles[4];
+ };
+ };
} __attribute__ ((aligned(16)));
enum {
@@ -464,7 +497,20 @@ typedef enum {
NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d,
NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e,
- NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f
+ NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f,
+
+ NETXEN_BRDTYPE_P3_REF_QG = 0x0021,
+ NETXEN_BRDTYPE_P3_HMEZ = 0x0022,
+ NETXEN_BRDTYPE_P3_10G_CX4_LP = 0x0023,
+ NETXEN_BRDTYPE_P3_4_GB = 0x0024,
+ NETXEN_BRDTYPE_P3_IMEZ = 0x0025,
+ NETXEN_BRDTYPE_P3_10G_SFP_PLUS = 0x0026,
+ NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,
+ NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,
+ NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,
+ NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
+ NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
+
} netxen_brdtype_t;
typedef enum {
@@ -747,6 +793,7 @@ struct netxen_cmd_buffer {
/* In rx_buffer, we do not need multiple fragments as is a single buffer */
struct netxen_rx_buffer {
+ struct list_head list;
struct sk_buff *skb;
u64 dma;
u16 ref_handle;
@@ -765,7 +812,6 @@ struct netxen_rx_buffer {
* contains interrupt info as well shared hardware info.
*/
struct netxen_hardware_context {
- struct pci_dev *pdev;
void __iomem *pci_base0;
void __iomem *pci_base1;
void __iomem *pci_base2;
@@ -773,15 +819,20 @@ struct netxen_hardware_context {
unsigned long first_page_group_start;
void __iomem *db_base;
unsigned long db_len;
+ unsigned long pci_len0;
+
+ u8 cut_through;
+ int qdr_sn_window;
+ int ddr_mn_window;
+ unsigned long mn_win_crb;
+ unsigned long ms_win_crb;
u8 revision_id;
u16 board_type;
struct netxen_board_info boardcfg;
- u32 xg_linkup;
- u32 qg_linksup;
+ u32 linkup;
/* Address of cmd ring in Phantom */
struct cmd_desc_type0 *cmd_desc_head;
- struct pci_dev *cmd_desc_pdev;
dma_addr_t cmd_desc_phys_addr;
struct netxen_adapter *adapter;
int pci_func;
@@ -813,17 +864,17 @@ struct netxen_adapter_stats {
* Rcv Descriptor Context. One such per Rcv Descriptor. There may
* be one Rcv Descriptor for normal packets, one for jumbo and may be others.
*/
-struct netxen_rcv_desc_ctx {
+struct nx_host_rds_ring {
u32 flags;
u32 producer;
- u32 rcv_pending; /* Num of bufs posted in phantom */
dma_addr_t phys_addr;
- struct pci_dev *phys_pdev;
+ u32 crb_rcv_producer; /* reg offset */
struct rcv_desc *desc_head; /* address of rx ring in Phantom */
u32 max_rx_desc_count;
u32 dma_size;
u32 skb_size;
struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */
+ struct list_head free_list;
int begin_alloc;
};
@@ -834,17 +885,319 @@ struct netxen_rcv_desc_ctx {
* present elsewhere.
*/
struct netxen_recv_context {
- struct netxen_rcv_desc_ctx rcv_desc[NUM_RCV_DESC_RINGS];
- u32 status_rx_producer;
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS];
u32 status_rx_consumer;
+ u32 crb_sts_consumer; /* reg offset */
dma_addr_t rcv_status_desc_phys_addr;
- struct pci_dev *rcv_status_desc_pdev;
struct status_desc *rcv_status_desc_head;
};
-#define NETXEN_NIC_MSI_ENABLED 0x02
-#define NETXEN_DMA_MASK 0xfffffffe
-#define NETXEN_DB_MAPSIZE_BYTES 0x1000
+/* New HW context creation */
+
+#define NX_OS_CRB_RETRY_COUNT 4000
+#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define NX_CDRP_CLEAR 0x00000000
+#define NX_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the NX_CDRP_CMD_BIT cleared
+ * in the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_RSP(rsp) (rsp)
+#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0)
+
+#define NX_CDRP_RSP_OK 0x00000001
+#define NX_CDRP_RSP_FAIL 0x00000002
+#define NX_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the NX_CDRP_CMD_BIT set in
+ * the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd))
+#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0)
+
+#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define NX_CDRP_CMD_SET_MTU 0x00000012
+#define NX_CDRP_CMD_MAX 0x00000013
+
+#define NX_RCODE_SUCCESS 0
+#define NX_RCODE_NO_HOST_MEM 1
+#define NX_RCODE_NO_HOST_RESOURCE 2
+#define NX_RCODE_NO_CARD_CRB 3
+#define NX_RCODE_NO_CARD_MEM 4
+#define NX_RCODE_NO_CARD_RESOURCE 5
+#define NX_RCODE_INVALID_ARGS 6
+#define NX_RCODE_INVALID_ACTION 7
+#define NX_RCODE_INVALID_STATE 8
+#define NX_RCODE_NOT_SUPPORTED 9
+#define NX_RCODE_NOT_PERMITTED 10
+#define NX_RCODE_NOT_READY 11
+#define NX_RCODE_DOES_NOT_EXIST 12
+#define NX_RCODE_ALREADY_EXISTS 13
+#define NX_RCODE_BAD_SIGNATURE 14
+#define NX_RCODE_CMD_NOT_IMPL 15
+#define NX_RCODE_CMD_INVALID 16
+#define NX_RCODE_TIMEOUT 17
+#define NX_RCODE_CMD_FAILED 18
+#define NX_RCODE_MAX_EXCEEDED 19
+#define NX_RCODE_MAX 20
+
+#define NX_DESTROY_CTX_RESET 0
+#define NX_DESTROY_CTX_D3_RESET 1
+#define NX_DESTROY_CTX_MAX 2
+
+/*
+ * Capabilities
+ */
+#define NX_CAP_BIT(class, bit) (1 << bit)
+#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0)
+#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1)
+#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2)
+#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3)
+#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4)
+#define NX_CAP0_LRO NX_CAP_BIT(0, 5)
+#define NX_CAP0_LSO NX_CAP_BIT(0, 6)
+#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7)
+#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8)
+
+/*
+ * Context state
+ */
+#define NX_HOST_CTX_STATE_FREED 0
+#define NX_HOST_CTX_STATE_ALLOCATED 1
+#define NX_HOST_CTX_STATE_ACTIVE 2
+#define NX_HOST_CTX_STATE_DISABLED 3
+#define NX_HOST_CTX_STATE_QUIESCED 4
+#define NX_HOST_CTX_STATE_MAX 5
+
+/*
+ * Rx context
+ */
+
+typedef struct {
+ u64 host_phys_addr; /* Ring base addr */
+ u32 ring_size; /* Ring entries */
+ u16 msi_index;
+ u16 rsvd; /* Padding */
+} nx_hostrq_sds_ring_t;
+
+typedef struct {
+ u64 host_phys_addr; /* Ring base addr */
+ u64 buff_size; /* Packet buffer size */
+ u32 ring_size; /* Ring entries */
+ u32 ring_kind; /* Class of ring */
+} nx_hostrq_rds_ring_t;
+
+typedef struct {
+ u64 host_rsp_dma_addr; /* Response dma'd here */
+ u32 capabilities[4]; /* Flag bit vector */
+ u32 host_int_crb_mode; /* Interrupt crb usage */
+ u32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ u32 rds_ring_offset; /* Offset to RDS config */
+ u32 sds_ring_offset; /* Offset to SDS config */
+ u16 num_rds_rings; /* Count of RDS rings */
+ u16 num_sds_rings; /* Count of SDS rings */
+ u16 rsvd1; /* Padding */
+ u16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+} nx_hostrq_rx_ctx_t;
+
+typedef struct {
+ u32 host_producer_crb; /* Crb to use */
+ u32 rsvd1; /* Padding */
+} nx_cardrsp_rds_ring_t;
+
+typedef struct {
+ u32 host_consumer_crb; /* Crb to use */
+ u32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_sds_ring_t;
+
+typedef struct {
+ /* These ring offsets are relative to data[0] below */
+ u32 rds_ring_offset; /* Offset to RDS config */
+ u32 sds_ring_offset; /* Offset to SDS config */
+ u32 host_ctx_state; /* Starting State */
+ u32 num_fn_per_port; /* How many PCI fn share the port */
+ u16 num_rds_rings; /* Count of RDS rings */
+ u16 num_sds_rings; /* Count of SDS rings */
+ u16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrs_sds_rings */
+ char data[0];
+} nx_cardrsp_rx_ctx_t;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
+
+/*
+ * Tx context
+ */
+
+typedef struct {
+ u64 host_phys_addr; /* Ring base addr */
+ u32 ring_size; /* Ring entries */
+ u32 rsvd; /* Padding */
+} nx_hostrq_cds_ring_t;
+
+typedef struct {
+ u64 host_rsp_dma_addr; /* Response dma'd here */
+ u64 cmd_cons_dma_addr; /* */
+ u64 dummy_dma_addr; /* */
+ u32 capabilities[4]; /* Flag bit vector */
+ u32 host_int_crb_mode; /* Interrupt crb usage */
+ u32 rsvd1; /* Padding */
+ u16 rsvd2; /* Padding */
+ u16 interrupt_ctl;
+ u16 msi_index;
+ u16 rsvd3; /* Padding */
+ nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+} nx_hostrq_tx_ctx_t;
+
+typedef struct {
+ u32 host_producer_crb; /* Crb to use */
+ u32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_cds_ring_t;
+
+typedef struct {
+ u32 host_ctx_state; /* Starting state */
+ u16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+} nx_cardrsp_tx_ctx_t;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define NX_HOST_RDS_CRB_MODE_UNIQUE 0
+#define NX_HOST_RDS_CRB_MODE_SHARED 1
+#define NX_HOST_RDS_CRB_MODE_CUSTOM 2
+#define NX_HOST_RDS_CRB_MODE_MAX 3
+
+#define NX_HOST_INT_CRB_MODE_UNIQUE 0
+#define NX_HOST_INT_CRB_MODE_SHARED 1
+#define NX_HOST_INT_CRB_MODE_NORX 2
+#define NX_HOST_INT_CRB_MODE_NOTX 3
+#define NX_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P2 16
+#define MC_COUNT_P3 38
+
+#define NETXEN_MAC_NOOP 0
+#define NETXEN_MAC_ADD 1
+#define NETXEN_MAC_DEL 2
+
+typedef struct nx_mac_list_s {
+ struct nx_mac_list_s *next;
+ uint8_t mac_addr[MAX_ADDR_LEN];
+} nx_mac_list_t;
+
+/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
+ * adjusted based on configured MTU.
+ */
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define NETXEN_NIC_INTR_DEFAULT 0x04
+
+typedef union {
+ struct {
+ uint16_t rx_packets;
+ uint16_t rx_time_us;
+ uint16_t tx_packets;
+ uint16_t tx_time_us;
+ } data;
+ uint64_t word;
+} nx_nic_intr_coalesce_data_t;
+
+typedef struct {
+ uint16_t stats_time_us;
+ uint16_t rate_sample_time;
+ uint16_t flags;
+ uint16_t rsvd_1;
+ uint32_t low_threshold;
+ uint32_t high_threshold;
+ nx_nic_intr_coalesce_data_t normal;
+ nx_nic_intr_coalesce_data_t low;
+ nx_nic_intr_coalesce_data_t high;
+ nx_nic_intr_coalesce_data_t irq;
+} nx_nic_intr_coalesce_t;
+
+typedef struct {
+ u64 qhdr;
+ u64 req_hdr;
+ u64 words[6];
+} nx_nic_req_t;
+
+typedef struct {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+} nx_mac_req_t;
+
+#define MAX_PENDING_DESC_BLOCK_SIZE 64
+
+#define NETXEN_NIC_MSI_ENABLED 0x02
+#define NETXEN_NIC_MSIX_ENABLED 0x04
+#define NETXEN_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER 8
+#define NETXEN_MSIX_TBL_SPACE 8192
+#define NETXEN_PCI_REG_MSIX_TBL 0x44
+
+#define NETXEN_DB_MAPSIZE_BYTES 0x1000
+
+#define NETXEN_NETDEV_WEIGHT 120
+#define NETXEN_ADAPTER_UP_MAGIC 777
+#define NETXEN_NIC_PEG_TUNE 0
struct netxen_dummy_dma {
void *addr;
@@ -854,46 +1207,65 @@ struct netxen_dummy_dma {
struct netxen_adapter {
struct netxen_hardware_context ahw;
- struct netxen_adapter *master;
struct net_device *netdev;
struct pci_dev *pdev;
+ int pci_using_dac;
struct napi_struct napi;
struct net_device_stats net_stats;
- unsigned char mac_addr[ETH_ALEN];
int mtu;
int portnum;
u8 physical_port;
+ u16 tx_context_id;
+
+ uint8_t mc_enabled;
+ uint8_t max_mc_count;
+ nx_mac_list_t *mac_list;
+
+ struct netxen_legacy_intr_set legacy_intr;
+ u32 crb_intr_mask;
struct work_struct watchdog_task;
struct timer_list watchdog_timer;
struct work_struct tx_timeout_task;
u32 curr_window;
+ u32 crb_win;
+ rwlock_t adapter_lock;
+
+ uint64_t dma_mask;
u32 cmd_producer;
__le32 *cmd_consumer;
u32 last_cmd_consumer;
+ u32 crb_addr_cmd_producer;
+ u32 crb_addr_cmd_consumer;
u32 max_tx_desc_count;
u32 max_rx_desc_count;
u32 max_jumbo_rx_desc_count;
u32 max_lro_rx_desc_count;
+ int max_rds_rings;
+
u32 flags;
u32 irq;
int driver_mismatch;
u32 temp;
+ u32 fw_major;
+
+ u8 msix_supported;
+ u8 max_possible_rss_rings;
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
struct netxen_adapter_stats stats;
- u16 portno;
u16 link_speed;
u16 link_duplex;
u16 state;
u16 link_autoneg;
int rx_csum;
int status;
- spinlock_t stats_lock;
struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */
@@ -905,25 +1277,33 @@ struct netxen_adapter {
int is_up;
struct netxen_dummy_dma dummy_dma;
+ nx_nic_intr_coalesce_t coal;
/* Context interface shared between card and host */
struct netxen_ring_ctx *ctx_desc;
- struct pci_dev *ctx_desc_pdev;
dma_addr_t ctx_desc_phys_addr;
int intr_scheme;
int msi_mode;
int (*enable_phy_interrupts) (struct netxen_adapter *);
int (*disable_phy_interrupts) (struct netxen_adapter *);
- void (*handle_phy_intr) (struct netxen_adapter *);
int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
int (*set_mtu) (struct netxen_adapter *, int);
int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t);
- int (*unset_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t);
int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
int (*init_port) (struct netxen_adapter *, int);
- void (*init_niu) (struct netxen_adapter *);
int (*stop_port) (struct netxen_adapter *);
+
+ int (*hw_read_wx)(struct netxen_adapter *, ulong, void *, int);
+ int (*hw_write_wx)(struct netxen_adapter *, ulong, void *, int);
+ int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int);
+ int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int);
+ int (*pci_write_immediate)(struct netxen_adapter *, u64, u32);
+ u32 (*pci_read_immediate)(struct netxen_adapter *, u64);
+ void (*pci_write_normalize)(struct netxen_adapter *, u64, u32);
+ u32 (*pci_read_normalize)(struct netxen_adapter *, u64);
+ unsigned long (*pci_set_window)(struct netxen_adapter *,
+ unsigned long long);
}; /* netxen_adapter structure */
/*
@@ -988,8 +1368,6 @@ int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter);
-void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter);
-void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter);
int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
__u32 * readval);
int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
@@ -998,27 +1376,61 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
/* Functions available from netxen_nic_hw.c */
int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu);
-void netxen_nic_init_niu_gb(struct netxen_adapter *adapter);
-void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw);
void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val);
int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off);
void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value);
-void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value);
+void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value);
+void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value);
+void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value);
int netxen_nic_get_board_info(struct netxen_adapter *adapter);
-int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
- int len);
-int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
- int len);
+
+int netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len);
+int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len);
+int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size);
+int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size);
+int netxen_nic_pci_write_immediate_128M(struct netxen_adapter *adapter,
+ u64 off, u32 data);
+u32 netxen_nic_pci_read_immediate_128M(struct netxen_adapter *adapter, u64 off);
+void netxen_nic_pci_write_normalize_128M(struct netxen_adapter *adapter,
+ u64 off, u32 data);
+u32 netxen_nic_pci_read_normalize_128M(struct netxen_adapter *adapter, u64 off);
+unsigned long netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
+ unsigned long long addr);
+void netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter,
+ u32 wndw);
+
+int netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len);
+int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len);
+int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size);
+int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size);
void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
unsigned long off, int data);
+int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter,
+ u64 off, u32 data);
+u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off);
+void netxen_nic_pci_write_normalize_2M(struct netxen_adapter *adapter,
+ u64 off, u32 data);
+u32 netxen_nic_pci_read_normalize_2M(struct netxen_adapter *adapter, u64 off);
+unsigned long netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
+ unsigned long long addr);
/* Functions from netxen_nic_init.c */
void netxen_free_adapter_offload(struct netxen_adapter *adapter);
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
+int netxen_receive_peg_ready(struct netxen_adapter *adapter);
int netxen_load_firmware(struct netxen_adapter *adapter);
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
+
int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
u8 *bytes, size_t size);
@@ -1032,33 +1444,43 @@ void netxen_halt_pegs(struct netxen_adapter *adapter);
int netxen_rom_se(struct netxen_adapter *adapter, int addr);
-/* Functions from netxen_nic_isr.c */
-void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
-void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
- struct pci_dev **used_dev);
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter);
+void netxen_free_sw_resources(struct netxen_adapter *adapter);
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter);
+void netxen_release_tx_buffers(struct netxen_adapter *adapter);
+
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
int netxen_init_firmware(struct netxen_adapter *adapter);
-void netxen_free_hw_resources(struct netxen_adapter *adapter);
void netxen_tso_check(struct netxen_adapter *adapter,
struct cmd_desc_type0 *desc, struct sk_buff *skb);
-int netxen_nic_hw_resources(struct netxen_adapter *adapter);
void netxen_nic_clear_stats(struct netxen_adapter *adapter);
void netxen_watchdog_task(struct work_struct *work);
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
u32 ringid);
int netxen_process_cmd_ring(struct netxen_adapter *adapter);
u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
-void netxen_nic_set_multi(struct net_device *netdev);
+void netxen_p2_nic_set_multi(struct net_device *netdev);
+void netxen_p3_nic_set_multi(struct net_device *netdev);
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
+
+u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+
int netxen_nic_set_mac(struct net_device *netdev, void *p);
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
+void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ uint32_t crb_producer);
/*
* NetXen Board information
*/
-#define NETXEN_MAX_SHORT_NAME 16
+#define NETXEN_MAX_SHORT_NAME 32
struct netxen_brdinfo {
netxen_brdtype_t brdtype; /* type of board */
long ports; /* max no of physical ports */
@@ -1072,6 +1494,17 @@ static const struct netxen_brdinfo netxen_boards[] = {
{NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
{NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
{NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
+ {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig "},
+ {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"},
+ {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"},
+ {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
+ {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
+ {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
+ {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"},
+ {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
+ {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
@@ -1097,7 +1530,7 @@ dma_watchdog_shutdown_request(struct netxen_adapter *adapter)
u32 ctrl;
/* check if already inactive */
- if (netxen_nic_hw_read_wx(adapter,
+ if (adapter->hw_read_wx(adapter,
NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
printk(KERN_ERR "failed to read dma watchdog status\n");
@@ -1117,7 +1550,7 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter)
{
u32 ctrl;
- if (netxen_nic_hw_read_wx(adapter,
+ if (adapter->hw_read_wx(adapter,
NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
printk(KERN_ERR "failed to read dma watchdog status\n");
@@ -1129,7 +1562,7 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
{
u32 ctrl;
- if (netxen_nic_hw_read_wx(adapter,
+ if (adapter->hw_read_wx(adapter,
NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
printk(KERN_ERR "failed to read dma watchdog status\n");
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
new file mode 100644
index 000000000000..64babc59e699
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2003 - 2008 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ * info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ */
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+#include "netxen_nic_phan_reg.h"
+
+#define NXHAL_VERSION 1
+
+static int
+netxen_api_lock(struct netxen_adapter *adapter)
+{
+ u32 done = 0, timeout = 0;
+
+ for (;;) {
+ /* Acquire PCIE HW semaphore5 */
+ netxen_nic_read_w0(adapter,
+ NETXEN_PCIE_REG(PCIE_SEM5_LOCK), &done);
+
+ if (done == 1)
+ break;
+
+ if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
+ printk(KERN_ERR "%s: lock timeout.\n", __func__);
+ return -1;
+ }
+
+ msleep(1);
+ }
+
+#if 0
+ netxen_nic_write_w1(adapter,
+ NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
+#endif
+ return 0;
+}
+
+static int
+netxen_api_unlock(struct netxen_adapter *adapter)
+{
+ u32 val;
+
+ /* Release PCIE HW semaphore5 */
+ netxen_nic_read_w0(adapter,
+ NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK), &val);
+ return 0;
+}
+
+static u32
+netxen_poll_rsp(struct netxen_adapter *adapter)
+{
+ u32 raw_rsp, rsp = NX_CDRP_RSP_OK;
+ int timeout = 0;
+
+ do {
+ /* give at least 1ms for firmware to respond */
+ msleep(1);
+
+ if (++timeout > NX_OS_CRB_RETRY_COUNT)
+ return NX_CDRP_RSP_TIMEOUT;
+
+ netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET,
+ &raw_rsp);
+
+ rsp = le32_to_cpu(raw_rsp);
+ } while (!NX_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
+static u32
+netxen_issue_cmd(struct netxen_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+ u32 rsp;
+ u32 signature = 0;
+ u32 rcode = NX_RCODE_SUCCESS;
+
+ signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+ /* Acquire semaphore before accessing CRB */
+ if (netxen_api_lock(adapter))
+ return NX_RCODE_TIMEOUT;
+
+ netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET,
+ cpu_to_le32(signature));
+
+ netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET,
+ cpu_to_le32(arg1));
+
+ netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET,
+ cpu_to_le32(arg2));
+
+ netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET,
+ cpu_to_le32(arg3));
+
+ netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET,
+ cpu_to_le32(NX_CDRP_FORM_CMD(cmd)));
+
+ rsp = netxen_poll_rsp(adapter);
+
+ if (rsp == NX_CDRP_RSP_TIMEOUT) {
+ printk(KERN_ERR "%s: card response timeout.\n",
+ netxen_nic_driver_name);
+
+ rcode = NX_RCODE_TIMEOUT;
+ } else if (rsp == NX_CDRP_RSP_FAIL) {
+ netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode);
+ rcode = le32_to_cpu(rcode);
+
+ printk(KERN_ERR "%s: failed card response code:0x%x\n",
+ netxen_nic_driver_name, rcode);
+ }
+
+ /* Release semaphore */
+ netxen_api_unlock(adapter);
+
+ return rcode;
+}
+
+u32
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
+{
+ u32 rcode = NX_RCODE_SUCCESS;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+
+ if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+ rcode = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ recv_ctx->context_id,
+ mtu,
+ 0,
+ NX_CDRP_CMD_SET_MTU);
+
+ return rcode;
+}
+
+static int
+nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
+{
+ void *addr;
+ nx_hostrq_rx_ctx_t *prq;
+ nx_cardrsp_rx_ctx_t *prsp;
+ nx_hostrq_rds_ring_t *prq_rds;
+ nx_hostrq_sds_ring_t *prq_sds;
+ nx_cardrsp_rds_ring_t *prsp_rds;
+ nx_cardrsp_sds_ring_t *prsp_sds;
+ struct nx_host_rds_ring *rds_ring;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg;
+
+ int err;
+
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+
+ /* only one sds ring for now */
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = 1;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &hostrq_phys_addr);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = (nx_hostrq_rx_ctx_t *)addr;
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &cardrsp_phys_addr);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = (nx_cardrsp_rx_ctx_t *)addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
+ cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = 0;
+ prq->sds_ring_offset = prq->rds_ring_offset +
+ (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+
+ prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + prq->rds_ring_offset);
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->max_rx_desc_count);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + prq->sds_ring_offset);
+
+ prq_sds[0].host_phys_addr =
+ cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
+ prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count);
+ /* only one msix vector for now */
+ prq_sds[0].msi_index = cpu_to_le32(0);
+
+ /* now byteswap offsets */
+ prq->rds_ring_offset = cpu_to_le32(prq->rds_ring_offset);
+ prq->sds_ring_offset = cpu_to_le32(prq->sds_ring_offset);
+
+ phys_addr = hostrq_phys_addr;
+ err = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ (u32)(phys_addr & 0xffffffff),
+ rq_size,
+ NX_CDRP_CMD_CREATE_RX_CTX);
+ if (err) {
+ printk(KERN_WARNING
+ "Failed to create rx ctx in firmware, err=%d\n", err);
+ goto out_free_rsp;
+ }
+
+
+ prsp_rds = ((nx_cardrsp_rds_ring_t *)
+ &prsp->data[prsp->rds_ring_offset]);
+
+ for (i = 0; i < le32_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
+ }
+
+ prsp_sds = ((nx_cardrsp_sds_ring_t *)
+ &prsp->data[prsp->sds_ring_offset]);
+ reg = le32_to_cpu(prsp_sds[0].host_consumer_crb);
+ recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);
+
+ reg = le32_to_cpu(prsp_sds[0].interrupt_crb);
+ adapter->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = le16_to_cpu(prsp->virt_port);
+
+out_free_rsp:
+ pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+
+ if (netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ recv_ctx->context_id,
+ NX_DESTROY_CTX_RESET,
+ 0,
+ NX_CDRP_CMD_DESTROY_RX_CTX)) {
+
+ printk(KERN_WARNING
+ "%s: Failed to destroy rx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
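+/*
+ * Create the transmit context: describe the command (CDS) ring to the
+ * firmware with NX_CDRP_CMD_CREATE_TX_CTX and, on success, record the
+ * command-producer CRB offset and the tx context id.
+ */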
+static int
+nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
+{
+ nx_hostrq_tx_ctx_t *prq;
+ nx_hostrq_cds_ring_t *prq_cds;
+ nx_cardrsp_tx_ctx_t *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err = 0;
+ u64 offset, phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+
+ rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
+ rq_addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &rq_phys_addr);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
+ rsp_addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &rsp_phys_addr);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = (nx_hostrq_tx_ctx_t *)rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+
+ prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
+
+ offset = adapter->ctx_desc_phys_addr+sizeof(struct netxen_ring_ctx);
+ prq->cmd_cons_dma_addr = cpu_to_le64(offset);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr =
+ cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
+
+ prq_cds->ring_size = cpu_to_le32(adapter->max_tx_desc_count);
+
+ phys_addr = rq_phys_addr;
+ err = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ ((u32)phys_addr & 0xffffffff),
+ rq_size,
+ NX_CDRP_CMD_CREATE_TX_CTX);
+
+ if (err == NX_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ adapter->crb_addr_cmd_producer =
+ NETXEN_NIC_REG(temp - 0x200);
+#if 0
+ adapter->tx_state =
+ le32_to_cpu(prsp->host_ctx_state);
+#endif
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ printk(KERN_WARNING
+ "Failed to create tx ctx in firmware, err=%d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
+{
+ if (netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ adapter->tx_context_id,
+ NX_DESTROY_CTX_RESET,
+ 0,
+ NX_CDRP_CMD_DESTROY_TX_CTX)) {
+
+ printk(KERN_WARNING
+ "%s: Failed to destroy tx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
+static u64 ctx_addr_sig_regs[][3] = {
+ {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+ {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+ {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+ {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
+#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0])
+#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
+#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
+
+#define lower32(x) ((u32)((x) & 0xffffffff))
+#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
+
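+/* Per-port receive producer/status-consumer CRB registers used with
+ * pre-4.0 (legacy context) firmware. */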
+static struct netxen_recv_crb recv_crb_registers[] = {
+ /* Instance 0 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x100),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x110),
+ /* LRO */
+ NETXEN_NIC_REG(0x120)
+ },
+ /* crb_sts_consumer: */
+ NETXEN_NIC_REG(0x138),
+ },
+ /* Instance 1 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x144),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x154),
+ /* LRO */
+ NETXEN_NIC_REG(0x164)
+ },
+ /* crb_sts_consumer: */
+ NETXEN_NIC_REG(0x17c),
+ },
+ /* Instance 2 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x1d8),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x1f8),
+ /* LRO */
+ NETXEN_NIC_REG(0x208)
+ },
+ /* crb_sts_consumer: */
+ NETXEN_NIC_REG(0x220),
+ },
+ /* Instance 3 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x22c),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x23c),
+ /* LRO */
+ NETXEN_NIC_REG(0x24c)
+ },
+ /* crb_sts_consumer: */
+ NETXEN_NIC_REG(0x264),
+ },
+};
+
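+/*
+ * Legacy (pre-4.0 firmware) context setup: fill in the ring context
+ * descriptor with the ring addresses and sizes, then hand its physical
+ * address to the firmware through the per-function CRB address and
+ * signature registers.
+ */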
+static int
+netxen_init_old_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ int ctx, ring;
+ int func_id = adapter->portnum;
+
+ adapter->ctx_desc->cmd_ring_addr =
+ cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
+ adapter->ctx_desc->cmd_ring_size =
+ cpu_to_le32(adapter->max_tx_desc_count);
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ recv_ctx = &adapter->recv_ctx[ctx];
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
+ cpu_to_le64(rds_ring->phys_addr);
+ adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
+ cpu_to_le32(rds_ring->max_rx_desc_count);
+ }
+ adapter->ctx_desc->sts_ring_addr =
+ cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
+ adapter->ctx_desc->sts_ring_size =
+ cpu_to_le32(adapter->max_rx_desc_count);
+ }
+
+ adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id),
+ lower32(adapter->ctx_desc_phys_addr));
+ adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_HI(func_id),
+ upper32(adapter->ctx_desc_phys_addr));
+ adapter->pci_write_normalize(adapter, CRB_CTX_SIGNATURE_REG(func_id),
+ NETXEN_CTX_SIGNATURE | func_id);
+ return 0;
+}
+
+static uint32_t sw_int_mask[4] = {
+ CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
+ CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
+};
+
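+/*
+ * Allocate the DMA-coherent descriptor rings (tx, rx and status) and
+ * create the hardware contexts, either through CDRP firmware commands
+ * (firmware 4.x) or the legacy CRB handshake.
+ */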
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_hardware_context *hw = &adapter->ahw;
+ void *addr;
+ int err = 0;
+ int ctx, ring;
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+
+ err = netxen_receive_peg_ready(adapter);
+ if (err) {
+ printk(KERN_ERR "Rcv Peg initialization not complete:%x.\n",
+ state);
+ return err;
+ }
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &adapter->ctx_desc_phys_addr);
+
+ if (addr == NULL) {
+ DPRINTK(ERR, "failed to allocate hw context\n");
+ return -ENOMEM;
+ }
+ memset(addr, 0, sizeof(struct netxen_ring_ctx));
+ adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
+ adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum);
+ adapter->ctx_desc->cmd_consumer_offset =
+ cpu_to_le64(adapter->ctx_desc_phys_addr +
+ sizeof(struct netxen_ring_ctx));
+ adapter->cmd_consumer =
+ (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
+
+ /* cmd desc ring */
+ addr = pci_alloc_consistent(adapter->pdev,
+ sizeof(struct cmd_desc_type0) *
+ adapter->max_tx_desc_count,
+ &hw->cmd_desc_phys_addr);
+
+ if (addr == NULL) {
+ printk(KERN_ERR "%s failed to allocate tx desc ring\n",
+ netxen_nic_driver_name);
+ return -ENOMEM;
+ }
+
+ hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ recv_ctx = &adapter->recv_ctx[ctx];
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ /* rx desc ring */
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = pci_alloc_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE,
+ &rds_ring->phys_addr);
+ if (addr == NULL) {
+ printk(KERN_ERR "%s failed to allocate rx "
+ "desc ring[%d]\n",
+ netxen_nic_driver_name, ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = (struct rcv_desc *)addr;
+
+ if (adapter->fw_major < 4)
+ rds_ring->crb_rcv_producer =
+ recv_crb_registers[adapter->portnum].
+ crb_rcv_producer[ring];
+ }
+
+ /* status desc ring */
+ addr = pci_alloc_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE,
+ &recv_ctx->rcv_status_desc_phys_addr);
+ if (addr == NULL) {
+ printk(KERN_ERR "%s failed to allocate sts desc ring\n",
+ netxen_nic_driver_name);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
+
+ if (adapter->fw_major < 4)
+ recv_ctx->crb_sts_consumer =
+ recv_crb_registers[adapter->portnum].
+ crb_sts_consumer;
+ }
+
+ if (adapter->fw_major >= 4) {
+ adapter->intr_scheme = INTR_SCHEME_PERPORT;
+ adapter->msi_mode = MSI_MODE_MULTIFUNC;
+
+ err = nx_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = nx_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ } else {
+
+ adapter->intr_scheme = adapter->pci_read_normalize(adapter,
+ CRB_NIC_CAPABILITIES_FW);
+ adapter->msi_mode = adapter->pci_read_normalize(adapter,
+ CRB_NIC_MSI_MODE_FW);
+ adapter->crb_intr_mask = sw_int_mask[adapter->portnum];
+
+ err = netxen_init_old_ctx(adapter);
+ if (err) {
+ netxen_free_hw_resources(adapter);
+ return err;
+ }
+
+ }
+
+ return 0;
+
+err_out_free:
+ netxen_free_hw_resources(adapter);
+ return err;
+}
+
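+/*
+ * Destroy the firmware contexts (4.x firmware only) and free all DMA
+ * descriptor rings.
+ */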
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ int ctx, ring;
+
+ if (adapter->fw_major >= 4) {
+ nx_fw_cmd_destroy_tx_ctx(adapter);
+ nx_fw_cmd_destroy_rx_ctx(adapter);
+ }
+
+ if (adapter->ctx_desc != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(struct netxen_ring_ctx) +
+ sizeof(uint32_t),
+ adapter->ctx_desc,
+ adapter->ctx_desc_phys_addr);
+ adapter->ctx_desc = NULL;
+ }
+
+ if (adapter->ahw.cmd_desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(struct cmd_desc_type0) *
+ adapter->max_tx_desc_count,
+ adapter->ahw.cmd_desc_head,
+ adapter->ahw.cmd_desc_phys_addr);
+ adapter->ahw.cmd_desc_head = NULL;
+ }
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+ recv_ctx = &adapter->recv_ctx[ctx];
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE,
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ if (recv_ctx->rcv_status_desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE,
+ recv_ctx->rcv_status_desc_head,
+ recv_ctx->rcv_status_desc_phys_addr);
+ recv_ctx->rcv_status_desc_head = NULL;
+ }
+ }
+}
+
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 723487bf200c..48ee06b6f4e9 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -93,17 +93,21 @@ static void
netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct netxen_adapter *adapter = netdev_priv(dev);
+ unsigned long flags;
u32 fw_major = 0;
u32 fw_minor = 0;
u32 fw_build = 0;
strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
- fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_FW_VERSION_MAJOR));
- fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_FW_VERSION_MINOR));
- fw_build = readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB));
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+ fw_major = adapter->pci_read_normalize(adapter,
+ NETXEN_FW_VERSION_MAJOR);
+ fw_minor = adapter->pci_read_normalize(adapter,
+ NETXEN_FW_VERSION_MINOR);
+ fw_build = adapter->pci_read_normalize(adapter,
+ NETXEN_FW_VERSION_SUB);
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
@@ -159,9 +163,16 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
switch ((netxen_brdtype_t) boardinfo->board_type) {
case NETXEN_BRDTYPE_P2_SB35_4G:
case NETXEN_BRDTYPE_P2_SB31_2G:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+
ecmd->supported |= SUPPORTED_Autoneg;
ecmd->advertising |= ADVERTISED_Autoneg;
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
@@ -171,12 +182,17 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
break;
case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_HMEZ:
ecmd->supported |= SUPPORTED_MII;
ecmd->advertising |= ADVERTISED_MII;
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
break;
case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
@@ -349,19 +365,18 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct netxen_adapter *adapter = netdev_priv(dev);
__u32 mode, *regs_buff = p;
- void __iomem *addr;
int i, window;
memset(p, 0, NETXEN_NIC_REGS_LEN);
regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
(adapter->pdev)->device;
/* which mode */
- NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_MODE, &regs_buff[0]);
+ adapter->hw_read_wx(adapter, NETXEN_NIU_MODE, &regs_buff[0], 4);
mode = regs_buff[0];
/* Common registers to all the modes */
- NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER,
- &regs_buff[2]);
+ adapter->hw_read_wx(adapter,
+ NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER, &regs_buff[2], 4);
/* GB/XGB Mode */
mode = (mode / 2) - 1;
window = 0;
@@ -372,9 +387,9 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
window = adapter->physical_port *
NETXEN_NIC_PORT_WINDOW;
- NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode].
- reg[i - 3] + window,
- &regs_buff[i]);
+ adapter->hw_read_wx(adapter,
+ niu_registers[mode].reg[i - 3] + window,
+ &regs_buff[i], 4);
}
}
@@ -398,7 +413,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
return !val;
}
} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
- val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+ val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
return (val == XG_LINK_UP) ? 0 : 1;
}
return -EIO;
@@ -427,6 +442,7 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return 0;
}
+#if 0
static int
netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 * bytes)
@@ -447,7 +463,6 @@ netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
}
printk(KERN_INFO "%s: flash unlocked. \n",
netxen_nic_driver_name);
- last_schedule_time = jiffies;
ret = netxen_flash_erase_secondary(adapter);
if (ret != FLASH_SUCCESS) {
printk(KERN_ERR "%s: Flash erase failed.\n",
@@ -497,6 +512,7 @@ netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len);
}
+#endif /* 0 */
static void
netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
@@ -508,9 +524,9 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
ring->rx_jumbo_pending = 0;
for (i = 0; i < MAX_RCV_CTX; ++i) {
ring->rx_pending += adapter->recv_ctx[i].
- rcv_desc[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
+ rds_rings[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
ring->rx_jumbo_pending += adapter->recv_ctx[i].
- rcv_desc[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
+ rds_rings[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
}
ring->tx_pending = adapter->max_tx_desc_count;
@@ -655,7 +671,7 @@ static int netxen_nic_reg_test(struct net_device *dev)
data_written = (u32)0xa5a5a5a5;
netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written);
- data_read = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_SCRATCHPAD_TEST));
+ data_read = adapter->pci_read_normalize(adapter, CRB_SCRATCHPAD_TEST);
if (data_written != data_read)
return 1;
@@ -736,6 +752,117 @@ static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data)
return 0;
}
+static u32 netxen_nic_get_tso(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
+
+ return (dev->features & NETIF_F_TSO) != 0;
+}
+
+static int netxen_nic_set_tso(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ dev->features |= NETIF_F_TSO;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ dev->features |= NETIF_F_TSO6;
+ } else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
+
+/*
+ * Set the interrupt coalescing parameters. Only the "normal" mode is
+ * currently supported. If rx_coalesce_usecs == 0 or
+ * rx_max_coalesced_frames == 0, the firmware coalescing parameters
+ * are reset to their defaults.
+ */
+static int netxen_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return Error if unsupported values or
+ * unsupported parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ netxen_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int netxen_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
+
struct ethtool_ops netxen_nic_ethtool_ops = {
.get_settings = netxen_nic_get_settings,
.set_settings = netxen_nic_set_settings,
@@ -745,17 +872,22 @@ struct ethtool_ops netxen_nic_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_eeprom_len = netxen_nic_get_eeprom_len,
.get_eeprom = netxen_nic_get_eeprom,
+#if 0
.set_eeprom = netxen_nic_set_eeprom,
+#endif
.get_ringparam = netxen_nic_get_ringparam,
.get_pauseparam = netxen_nic_get_pauseparam,
.set_pauseparam = netxen_nic_set_pauseparam,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
+ .get_tso = netxen_nic_get_tso,
+ .set_tso = netxen_nic_set_tso,
.self_test = netxen_nic_diag_test,
.get_strings = netxen_nic_get_strings,
.get_ethtool_stats = netxen_nic_get_ethtool_stats,
.get_sset_count = netxen_get_sset_count,
.get_rx_csum = netxen_nic_get_rx_csum,
.set_rx_csum = netxen_nic_set_rx_csum,
+ .get_coalesce = netxen_get_intr_coalesce,
+ .set_coalesce = netxen_set_intr_coalesce,
};
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 24d027e29c45..3ce13e451aac 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -126,7 +126,8 @@ enum {
NETXEN_HW_PEGR0_CRB_AGT_ADR,
NETXEN_HW_PEGR1_CRB_AGT_ADR,
NETXEN_HW_PEGR2_CRB_AGT_ADR,
- NETXEN_HW_PEGR3_CRB_AGT_ADR
+ NETXEN_HW_PEGR3_CRB_AGT_ADR,
+ NETXEN_HW_PEGN4_CRB_AGT_ADR
};
/* Hub 5 */
@@ -316,6 +317,8 @@ enum {
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR)
#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \
@@ -435,6 +438,7 @@ enum {
#define NETXEN_CRB_ROMUSB \
NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
@@ -446,6 +450,7 @@ enum {
#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
+#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN)
#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD
@@ -461,11 +466,20 @@ enum {
#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
#define NETXEN_PCI_MAPSIZE 128
#define NETXEN_PCI_DDR_NET (0x00000000UL)
#define NETXEN_PCI_QDR_NET (0x04000000UL)
#define NETXEN_PCI_DIRECT_CRB (0x04400000UL)
+#define NETXEN_PCI_CAMQM (0x04800000UL)
#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL)
#define NETXEN_PCI_OCM0 (0x05000000UL)
#define NETXEN_PCI_OCM0_MAX (0x050fffffUL)
@@ -474,6 +488,13 @@ enum {
#define NETXEN_PCI_CRBSPACE (0x06000000UL)
#define NETXEN_PCI_128MB_SIZE (0x08000000UL)
#define NETXEN_PCI_32MB_SIZE (0x02000000UL)
+#define NETXEN_PCI_2MB_SIZE (0x00200000UL)
+
+#define NETXEN_PCI_MN_2M (0)
+#define NETXEN_PCI_MS_2M (0x80000)
+#define NETXEN_PCI_OCM0_2M (0x000c0000UL)
+#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL)
#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
@@ -484,7 +505,14 @@ enum {
#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL)
#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL)
#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL)
-#define NETXEN_ADDR_QDR_NET_MAX (0x00000003003fffffULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define NETXEN_MIU_CONTROL (0x000)
+#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL)
/* 200ms delay in each loop */
#define NETXEN_NIU_PHY_WAITLEN 200000
@@ -550,6 +578,9 @@ enum {
#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018)
#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c)
+#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080)
+#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100)
+
#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \
(NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \
@@ -630,16 +661,76 @@ enum {
#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054)
#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058)
+/* P3 802.3ap */
+#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000)
+#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000)
+#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000)
+#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_CONTROL (0x000)
+#define MIU_TEST_AGT_CTRL (0x090)
+#define MIU_TEST_AGT_ADDR_LO (0x094)
+#define MIU_TEST_AGT_ADDR_HI (0x098)
+#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
+#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
+#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_START 1
+#define MIU_TA_CTL_ENABLE 2
+#define MIU_TA_CTL_WRITE 4
+#define MIU_TA_CTL_BUSY 8
+
+#define SIU_TEST_AGT_CTRL (0x060)
+#define SIU_TEST_AGT_ADDR_LO (0x064)
+#define SIU_TEST_AGT_ADDR_HI (0x078)
+#define SIU_TEST_AGT_WRDATA_LO (0x068)
+#define SIU_TEST_AGT_WRDATA_HI (0x06c)
+#define SIU_TEST_AGT_WRDATA(i) (0x068+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x070)
+#define SIU_TEST_AGT_RDDATA_HI (0x074)
+#define SIU_TEST_AGT_RDDATA(i) (0x070+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
/* XG Link status */
#define XG_LINK_UP 0x10
#define XG_LINK_DOWN 0x20
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158))
#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100))
+#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124))
#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120))
@@ -654,30 +745,71 @@ enum {
#define PCIX_INT_VECTOR (0x10100)
#define PCIX_INT_MASK (0x10104)
-#define PCIX_MN_WINDOW_F0 (0x10200)
-#define PCIX_MN_WINDOW(_f) (PCIX_MN_WINDOW_F0 + (0x20 * (_f)))
-#define PCIX_MS_WINDOW (0x10204)
-#define PCIX_SN_WINDOW_F0 (0x10208)
-#define PCIX_SN_WINDOW(_f) (PCIX_SN_WINDOW_F0 + (0x20 * (_f)))
#define PCIX_CRB_WINDOW (0x10210)
#define PCIX_CRB_WINDOW_F0 (0x10210)
#define PCIX_CRB_WINDOW_F1 (0x10230)
#define PCIX_CRB_WINDOW_F2 (0x10250)
#define PCIX_CRB_WINDOW_F3 (0x10270)
+#define PCIX_CRB_WINDOW_F4 (0x102ac)
+#define PCIX_CRB_WINDOW_F5 (0x102bc)
+#define PCIX_CRB_WINDOW_F6 (0x102cc)
+#define PCIX_CRB_WINDOW_F7 (0x102dc)
+#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_MN_WINDOW (0x10200)
+#define PCIX_MN_WINDOW_F0 (0x10200)
+#define PCIX_MN_WINDOW_F1 (0x10220)
+#define PCIX_MN_WINDOW_F2 (0x10240)
+#define PCIX_MN_WINDOW_F3 (0x10260)
+#define PCIX_MN_WINDOW_F4 (0x102a0)
+#define PCIX_MN_WINDOW_F5 (0x102b0)
+#define PCIX_MN_WINDOW_F6 (0x102c0)
+#define PCIX_MN_WINDOW_F7 (0x102d0)
+#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_SN_WINDOW (0x10208)
+#define PCIX_SN_WINDOW_F0 (0x10208)
+#define PCIX_SN_WINDOW_F1 (0x10228)
+#define PCIX_SN_WINDOW_F2 (0x10248)
+#define PCIX_SN_WINDOW_F3 (0x10268)
+#define PCIX_SN_WINDOW_F4 (0x102a8)
+#define PCIX_SN_WINDOW_F5 (0x102b8)
+#define PCIX_SN_WINDOW_F6 (0x102c8)
+#define PCIX_SN_WINDOW_F7 (0x102d8)
+#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
#define PCIX_TARGET_MASK (0x10128)
-#define PCIX_TARGET_STATUS_F1 (0x10160)
-#define PCIX_TARGET_MASK_F1 (0x10170)
-#define PCIX_TARGET_STATUS_F2 (0x10164)
-#define PCIX_TARGET_MASK_F2 (0x10174)
-#define PCIX_TARGET_STATUS_F3 (0x10168)
-#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
#define PCIX_MSI_F0 (0x13000)
#define PCIX_MSI_F1 (0x13004)
#define PCIX_MSI_F2 (0x13008)
#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
#define PCIX_MSI_F(i) (0x13000+((i)*4))
#define PCIX_PS_MEM_SPACE (0x90000)
@@ -695,11 +827,102 @@ enum {
#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */
#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */
-
+#define PCIE_SEM5_LOCK (0x1c028) /* API lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */
+#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */
+#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crb win unlock */
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
#define PCIE_MAX_MASTER_SPLIT (0x14048)
+#define NETXEN_PORT_MODE_NONE 0
+#define NETXEN_PORT_MODE_XG 1
+#define NETXEN_PORT_MODE_GB 2
+#define NETXEN_PORT_MODE_802_3_AP 3
+#define NETXEN_PORT_MODE_AUTO_NEG 4
+#define NETXEN_PORT_MODE_AUTO_NEG_1G 5
+#define NETXEN_PORT_MODE_AUTO_NEG_XG 6
+#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24))
+#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198))
+
#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
+#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct netxen_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define NX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
#endif /* __NETXEN_NIC_HDR_H_ */
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index c43d06b8de9b..96a3bc6426e2 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -38,242 +38,262 @@
#include <net/ip.h>
-struct netxen_recv_crb recv_crb_registers[] = {
- /*
- * Instance 0.
- */
- {
- /* rcv_desc_crb: */
- {
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x100),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x104),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x108),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x10c),
-
- },
- /* Jumbo frames */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x110),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x114),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x118),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x11c),
- },
- /* LRO */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x120),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x124),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x128),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x12c),
- }
- },
- /* crb_rcvstatus_ring: */
- NETXEN_NIC_REG(0x130),
- /* crb_rcv_status_producer: */
- NETXEN_NIC_REG(0x134),
- /* crb_rcv_status_consumer: */
- NETXEN_NIC_REG(0x138),
- /* crb_rcvpeg_state: */
- NETXEN_NIC_REG(0x13c),
- /* crb_status_ring_size */
- NETXEN_NIC_REG(0x140),
-
- },
- /*
- * Instance 1,
- */
- {
- /* rcv_desc_crb: */
- {
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x144),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x148),
- /* crb_globalrcv_ring: */
- NETXEN_NIC_REG(0x14c),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x150),
-
- },
- /* Jumbo frames */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x154),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x158),
- /* crb_globalrcv_ring: */
- NETXEN_NIC_REG(0x15c),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x160),
- },
- /* LRO */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x164),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x168),
- /* crb_globalrcv_ring: */
- NETXEN_NIC_REG(0x16c),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x170),
- }
-
- },
- /* crb_rcvstatus_ring: */
- NETXEN_NIC_REG(0x174),
- /* crb_rcv_status_producer: */
- NETXEN_NIC_REG(0x178),
- /* crb_rcv_status_consumer: */
- NETXEN_NIC_REG(0x17c),
- /* crb_rcvpeg_state: */
- NETXEN_NIC_REG(0x180),
- /* crb_status_ring_size */
- NETXEN_NIC_REG(0x184),
- },
- /*
- * Instance 2,
- */
- {
- {
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x1d8),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x1dc),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x1f0),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x1f4),
- },
- /* Jumbo frames */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x1f8),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x1fc),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x200),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x204),
- },
- /* LRO */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x208),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x20c),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x210),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x214),
- }
- },
- /* crb_rcvstatus_ring: */
- NETXEN_NIC_REG(0x218),
- /* crb_rcv_status_producer: */
- NETXEN_NIC_REG(0x21c),
- /* crb_rcv_status_consumer: */
- NETXEN_NIC_REG(0x220),
- /* crb_rcvpeg_state: */
- NETXEN_NIC_REG(0x224),
- /* crb_status_ring_size */
- NETXEN_NIC_REG(0x228),
- },
- /*
- * Instance 3,
- */
- {
- {
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x22c),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x230),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x234),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x238),
- },
- /* Jumbo frames */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x23c),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x240),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x244),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x248),
- },
- /* LRO */
- {
- /* crb_rcv_producer_offset: */
- NETXEN_NIC_REG(0x24c),
- /* crb_rcv_consumer_offset: */
- NETXEN_NIC_REG(0x250),
- /* crb_gloablrcv_ring: */
- NETXEN_NIC_REG(0x254),
- /* crb_rcv_ring_size */
- NETXEN_NIC_REG(0x258),
- }
- },
- /* crb_rcvstatus_ring: */
- NETXEN_NIC_REG(0x25c),
- /* crb_rcv_status_producer: */
- NETXEN_NIC_REG(0x260),
- /* crb_rcv_status_consumer: */
- NETXEN_NIC_REG(0x264),
- /* crb_rcvpeg_state: */
- NETXEN_NIC_REG(0x268),
- /* crb_status_ring_size */
- NETXEN_NIC_REG(0x26c),
- },
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+static crb_128M_2M_block_map_t crb_128M_2M_map[64] = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
};
-static u64 ctx_addr_sig_regs[][3] = {
- {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
- {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
- {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
- {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned crb_hub_agt[64] =
+{
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SRE,
+ NETXEN_HW_CRB_HUB_AGT_ADR_NIU,
+ NETXEN_HW_CRB_HUB_AGT_ADR_QMN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGND,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGSI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9,
+ NETXEN_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SMB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
};
-#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0])
-#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
-#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
-
/* PCI Windowing for DDR regions. */
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) <= (high)) && ((addr) >= (low)))
-#define NETXEN_FLASH_BASE (NETXEN_BOOTLD_START)
-#define NETXEN_PHANTOM_MEM_BASE (NETXEN_FLASH_BASE)
#define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE
#define NETXEN_MIN_MTU 64
#define NETXEN_ETH_FCS_SIZE 4
#define NETXEN_ENET_HEADER_SIZE 14
-#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
+#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
#define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4)
#define NETXEN_NIU_HDRSIZE (0x1 << 6)
#define NETXEN_NIU_TLRSIZE (0x1 << 5)
-#define lower32(x) ((u32)((x) & 0xffffffff))
-#define upper32(x) \
- ((u32)(((unsigned long long)(x) >> 32) & 0xffffffff))
-
#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
#define NETXEN_NIC_EPG_PAUSE_ADDR1 0x2200010000c28001ULL
@@ -281,10 +301,6 @@ static u64 ctx_addr_sig_regs[][3] = {
#define NETXEN_NIC_WINDOW_MARGIN 0x100000
-static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter,
- unsigned long long addr);
-void netxen_free_hw_resources(struct netxen_adapter *adapter);
-
int netxen_nic_set_mac(struct net_device *netdev, void *p)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -296,266 +312,370 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- DPRINTK(INFO, "valid ether addr\n");
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- if (adapter->macaddr_set)
- adapter->macaddr_set(adapter, addr->sa_data);
+ /* For P3, MAC addr is not set in NIU */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ if (adapter->macaddr_set)
+ adapter->macaddr_set(adapter, addr->sa_data);
return 0;
}
-/*
- * netxen_nic_set_multi - Multicast
- */
-void netxen_nic_set_multi(struct net_device *netdev)
+#define NETXEN_UNICAST_ADDR(port, index) \
+ (NETXEN_UNICAST_ADDR_BASE + ((port) * 32) + ((index) * 8))
+#define NETXEN_MCAST_ADDR(port, index) \
+ (NETXEN_MULTICAST_ADDR_BASE + ((port) * 0x80) + ((index) * 8))
+#define MAC_HI(addr) \
+ (((addr)[2] << 16) | ((addr)[1] << 8) | ((addr)[0]))
+#define MAC_LO(addr) \
+ (((addr)[5] << 16) | ((addr)[4] << 8) | ((addr)[3]))
+
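+/*
+ * Enable the per-port MAC address filter and seed it with the
+ * broadcast and station addresses.
+ */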
+static int
+netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->netdev->dev_addr;
+
+ if (adapter->mc_enabled)
+ return 0;
+
+ adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+ val |= (1UL << (28+port));
+ adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+
+ /* add broadcast addr to filter */
+ val = 0xffffff;
+ netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ /* add station addr to filter */
+ val = MAC_HI(addr);
+ netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
+ val = MAC_LO(addr);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_UNICAST_ADDR(port, 1)+4, val);
+
+ adapter->mc_enabled = 1;
+ return 0;
+}
+
+static int
+netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->netdev->dev_addr;
+
+ if (!adapter->mc_enabled)
+ return 0;
+
+ adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+ val &= ~(1UL << (28+port));
+ adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+
+ val = MAC_HI(addr);
+ netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ val = MAC_LO(addr);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
+
+ adapter->mc_enabled = 0;
+ return 0;
+}
+
+static int
+netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
+ int index, u8 *addr)
+{
+ u32 hi = 0, lo = 0;
+ u16 port = adapter->physical_port;
+
+ lo = MAC_LO(addr);
+ hi = MAC_HI(addr);
+
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_MCAST_ADDR(port, index), hi);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_MCAST_ADDR(port, index)+4, lo);
+
+ return 0;
+}
+
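+/*
+ * Receive filter setup for P2 chips: select promiscuous, non-promiscuous
+ * or allmulti NIU mode and program the hardware multicast filter table
+ * from the netdev multicast list.
+ */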
+void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct dev_mc_list *mc_ptr;
+ u8 null_addr[6];
+ int index = 0;
+
+ memset(null_addr, 0, 6);
- mc_ptr = netdev->mc_list;
if (netdev->flags & IFF_PROMISC) {
- if (adapter->set_promisc)
- adapter->set_promisc(adapter,
- NETXEN_NIU_PROMISC_MODE);
- } else {
- if (adapter->unset_promisc)
- adapter->unset_promisc(adapter,
- NETXEN_NIU_NON_PROMISC_MODE);
+
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_PROMISC_MODE);
+
+ /* Full promiscuous mode */
+ netxen_nic_disable_mcast_filter(adapter);
+
+ return;
+ }
+
+ if (netdev->mc_count == 0) {
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_NON_PROMISC_MODE);
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
}
+
+ adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev->mc_count > adapter->max_mc_count) {
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
+ }
+
+ netxen_nic_enable_mcast_filter(adapter);
+
+ for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++)
+ netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr);
+
+ if (index != netdev->mc_count)
+ printk(KERN_WARNING "%s: %s multicast address count mismatch\n",
+ netxen_nic_driver_name, netdev->name);
+
+ /* Clear out remaining addresses */
+ for (; index < adapter->max_mc_count; index++)
+ netxen_nic_set_mcast_addr(adapter, index, null_addr);
}
-/*
- * netxen_nic_change_mtu - Change the Maximum Transfer Unit
- * @returns 0 on success, negative on failure
- */
-int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
+ u8 *addr, nx_mac_list_t **add_list, nx_mac_list_t **del_list)
{
- struct netxen_adapter *adapter = netdev_priv(netdev);
- int eff_mtu = mtu + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE;
+ nx_mac_list_t *cur, *prev;
+
+ /* if in del_list, move it to adapter->mac_list */
+ for (cur = *del_list, prev = NULL; cur;) {
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ if (prev == NULL)
+ *del_list = cur->next;
+ else
+ prev->next = cur->next;
+ cur->next = adapter->mac_list;
+ adapter->mac_list = cur;
+ return 0;
+ }
+ prev = cur;
+ cur = cur->next;
+ }
+
+ /* make sure to add each mac address only once */
+ for (cur = adapter->mac_list; cur; cur = cur->next) {
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ return 0;
+ }
+ /* not in del_list, create new entry and add to add_list */
+ cur = kmalloc(sizeof(*cur), in_atomic()? GFP_ATOMIC : GFP_KERNEL);
+ if (cur == NULL) {
+ printk(KERN_ERR "%s: cannot allocate memory. MAC filtering may"
+ "not work properly from now.\n", __func__);
+ return -1;
+ }
- if ((eff_mtu > NETXEN_MAX_MTU) || (eff_mtu < NETXEN_MIN_MTU)) {
- printk(KERN_ERR "%s: %s %d is not supported.\n",
- netxen_nic_driver_name, netdev->name, mtu);
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ cur->next = *add_list;
+ *add_list = cur;
+ return 0;
+}
+
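+/*
+ * Post one or more prebuilt command descriptors on the tx ring at the
+ * current producer index, then advance the producer and notify the
+ * hardware. Used for control requests (MAC filter updates, interrupt
+ * coalescing) rather than regular transmit traffic.
+ */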
+static int
+netxen_send_cmd_descs(struct netxen_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_elements)
+{
+ uint32_t i, producer;
+ struct netxen_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+
+ if (nr_elements > MAX_PENDING_DESC_BLOCK_SIZE || nr_elements == 0) {
+ printk(KERN_WARNING "%s: Too many command descriptors in a "
+ "request\n", __func__);
return -EINVAL;
}
- if (adapter->set_mtu)
- adapter->set_mtu(adapter, mtu);
- netdev->mtu = mtu;
+ i = 0;
+
+ producer = adapter->cmd_producer;
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &adapter->cmd_buf_arr[producer];
+ pbuf->mss = 0;
+ pbuf->total_length = 0;
+ pbuf->skb = NULL;
+ pbuf->cmd = 0;
+ pbuf->frag_count = 0;
+ pbuf->port = 0;
+
+ /* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */
+ memcpy(&adapter->ahw.cmd_desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer,
+ adapter->max_tx_desc_count);
+ i++;
+
+ } while (i != nr_elements);
+
+ adapter->cmd_producer = producer;
+
+ /* write producer index to start the xmit */
+
+ netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
return 0;
}
-/*
- * check if the firmware has been downloaded and ready to run and
- * setup the address for the descriptors in the adapter
- */
-int netxen_nic_hw_resources(struct netxen_adapter *adapter)
+#define NIC_REQUEST 0x14
+#define NETXEN_MAC_EVENT 0x1
+
+static int nx_p3_sre_macaddr_change(struct net_device *dev,
+ u8 *addr, unsigned op)
{
- struct netxen_hardware_context *hw = &adapter->ahw;
- u32 state = 0;
- void *addr;
- int loops = 0, err = 0;
- int ctx, ring;
- struct netxen_recv_context *recv_ctx;
- struct netxen_rcv_desc_ctx *rcv_desc;
- int func_id = adapter->portnum;
-
- DPRINTK(INFO, "crb_base: %lx %x", NETXEN_PCI_CRBSPACE,
- PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCI_CRBSPACE));
- DPRINTK(INFO, "cam base: %lx %x", NETXEN_CRB_CAM,
- pci_base_offset(adapter, NETXEN_CRB_CAM));
- DPRINTK(INFO, "cam RAM: %lx %x", NETXEN_CAM_RAM_BASE,
- pci_base_offset(adapter, NETXEN_CAM_RAM_BASE));
-
-
- for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
- DPRINTK(INFO, "Command Peg ready..waiting for rcv peg\n");
- loops = 0;
- state = 0;
- /* Window 1 call */
- state = readl(NETXEN_CRB_NORMALIZE(adapter,
- recv_crb_registers[ctx].
- crb_rcvpeg_state));
- while (state != PHAN_PEG_RCV_INITIALIZED && loops < 20) {
- msleep(1);
- /* Window 1 call */
- state = readl(NETXEN_CRB_NORMALIZE(adapter,
- recv_crb_registers
- [ctx].
- crb_rcvpeg_state));
- loops++;
- }
- if (loops >= 20) {
- printk(KERN_ERR "Rcv Peg initialization not complete:"
- "%x.\n", state);
- err = -EIO;
- return err;
- }
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ nx_nic_req_t req;
+ nx_mac_req_t mac_req;
+ int rv;
+
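+ /*
+ * Build a NIC request: the request type lives in bits 23+ of qhdr,
+ * the MAC-event sub-command and owning port go in req_hdr, and the
+ * add/delete operation plus address are packed into words[0]. The
+ * request is posted as a single command descriptor on the tx ring.
+ */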
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr |= (NIC_REQUEST << 23);
+ req.req_hdr |= NETXEN_MAC_EVENT;
+ req.req_hdr |= ((u64)adapter->portnum << 16);
+ mac_req.op = op;
+ memcpy(&mac_req.mac_addr, addr, 6);
+ req.words[0] = cpu_to_le64(*(u64 *)&mac_req);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send mac update\n");
+ return rv;
}
- adapter->intr_scheme = readl(
- NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_CAPABILITIES_FW));
- adapter->msi_mode = readl(
- NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_MSI_MODE_FW));
-
- addr = netxen_alloc(adapter->ahw.pdev,
- sizeof(struct netxen_ring_ctx) +
- sizeof(uint32_t),
- (dma_addr_t *) & adapter->ctx_desc_phys_addr,
- &adapter->ctx_desc_pdev);
-
- if (addr == NULL) {
- DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
- err = -ENOMEM;
- return err;
- }
- memset(addr, 0, sizeof(struct netxen_ring_ctx));
- adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
- adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum);
- adapter->ctx_desc->cmd_consumer_offset =
- cpu_to_le64(adapter->ctx_desc_phys_addr +
- sizeof(struct netxen_ring_ctx));
- adapter->cmd_consumer = (__le32 *) (((char *)addr) +
- sizeof(struct netxen_ring_ctx));
-
- addr = netxen_alloc(adapter->ahw.pdev,
- sizeof(struct cmd_desc_type0) *
- adapter->max_tx_desc_count,
- (dma_addr_t *) & hw->cmd_desc_phys_addr,
- &adapter->ahw.cmd_desc_pdev);
-
- if (addr == NULL) {
- DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
- netxen_free_hw_resources(adapter);
- return -ENOMEM;
- }
-
- adapter->ctx_desc->cmd_ring_addr =
- cpu_to_le64(hw->cmd_desc_phys_addr);
- adapter->ctx_desc->cmd_ring_size =
- cpu_to_le32(adapter->max_tx_desc_count);
-
- hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
-
- for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
- recv_ctx = &adapter->recv_ctx[ctx];
-
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
- rcv_desc = &recv_ctx->rcv_desc[ring];
- addr = netxen_alloc(adapter->ahw.pdev,
- RCV_DESC_RINGSIZE,
- &rcv_desc->phys_addr,
- &rcv_desc->phys_pdev);
- if (addr == NULL) {
- DPRINTK(ERR, "bad return from "
- "pci_alloc_consistent\n");
- netxen_free_hw_resources(adapter);
- err = -ENOMEM;
- return err;
- }
- rcv_desc->desc_head = (struct rcv_desc *)addr;
- adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
- cpu_to_le64(rcv_desc->phys_addr);
- adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
- cpu_to_le32(rcv_desc->max_rx_desc_count);
- }
- addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE,
- &recv_ctx->rcv_status_desc_phys_addr,
- &recv_ctx->rcv_status_desc_pdev);
- if (addr == NULL) {
- DPRINTK(ERR, "bad return from"
- " pci_alloc_consistent\n");
- netxen_free_hw_resources(adapter);
- err = -ENOMEM;
- return err;
- }
- recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
- adapter->ctx_desc->sts_ring_addr =
- cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
- adapter->ctx_desc->sts_ring_size =
- cpu_to_le32(adapter->max_rx_desc_count);
-
- }
- /* Window = 1 */
-
- writel(lower32(adapter->ctx_desc_phys_addr),
- NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_LO(func_id)));
- writel(upper32(adapter->ctx_desc_phys_addr),
- NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_HI(func_id)));
- writel(NETXEN_CTX_SIGNATURE | func_id,
- NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_SIGNATURE_REG(func_id)));
- return err;
+ return 0;
}
-void netxen_free_hw_resources(struct netxen_adapter *adapter)
+void netxen_p3_nic_set_multi(struct net_device *netdev)
{
- struct netxen_recv_context *recv_ctx;
- struct netxen_rcv_desc_ctx *rcv_desc;
- int ctx, ring;
-
- if (adapter->ctx_desc != NULL) {
- pci_free_consistent(adapter->ctx_desc_pdev,
- sizeof(struct netxen_ring_ctx) +
- sizeof(uint32_t),
- adapter->ctx_desc,
- adapter->ctx_desc_phys_addr);
- adapter->ctx_desc = NULL;
- }
-
- if (adapter->ahw.cmd_desc_head != NULL) {
- pci_free_consistent(adapter->ahw.cmd_desc_pdev,
- sizeof(struct cmd_desc_type0) *
- adapter->max_tx_desc_count,
- adapter->ahw.cmd_desc_head,
- adapter->ahw.cmd_desc_phys_addr);
- adapter->ahw.cmd_desc_head = NULL;
- }
-
- for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
- recv_ctx = &adapter->recv_ctx[ctx];
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
- rcv_desc = &recv_ctx->rcv_desc[ring];
-
- if (rcv_desc->desc_head != NULL) {
- pci_free_consistent(rcv_desc->phys_pdev,
- RCV_DESC_RINGSIZE,
- rcv_desc->desc_head,
- rcv_desc->phys_addr);
- rcv_desc->desc_head = NULL;
- }
- }
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ nx_mac_list_t *cur, *next, *del_list, *add_list = NULL;
+ struct dev_mc_list *mc_ptr;
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
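+ /*
+ * Diff the requested list against what is already programmed: every
+ * address currently on adapter->mac_list starts out on del_list,
+ * addresses that are still wanted are moved back, and new ones are
+ * collected on add_list. del_list entries are then deleted from the
+ * firmware filter and add_list entries are added and retained.
+ */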
+ adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE);
- if (recv_ctx->rcv_status_desc_head != NULL) {
- pci_free_consistent(recv_ctx->rcv_status_desc_pdev,
- STATUS_DESC_RINGSIZE,
- recv_ctx->rcv_status_desc_head,
- recv_ctx->
- rcv_status_desc_phys_addr);
- recv_ctx->rcv_status_desc_head = NULL;
+ /*
+ * Programming MAC addresses automatically enables L2 filtering.
+ * HW will replace the timestamp with the L2 conid when L2 filtering
+ * is enabled. This causes a problem for LSA. Do not enable L2
+ * filtering until that problem is fixed.
+ */
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev->mc_count > adapter->max_mc_count))
+ return;
+
+ del_list = adapter->mac_list;
+ adapter->mac_list = NULL;
+
+ nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list);
+ if (netdev->mc_count > 0) {
+ nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
+ for (mc_ptr = netdev->mc_list; mc_ptr;
+ mc_ptr = mc_ptr->next) {
+ nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr,
+ &add_list, &del_list);
}
}
+ for (cur = del_list; cur;) {
+ nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL);
+ next = cur->next;
+ kfree(cur);
+ cur = next;
+ }
+ for (cur = add_list; cur;) {
+ nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_ADD);
+ next = cur->next;
+ cur->next = adapter->mac_list;
+ adapter->mac_list = cur;
+ cur = next;
+ }
}
-void netxen_tso_check(struct netxen_adapter *adapter,
- struct cmd_desc_type0 *desc, struct sk_buff *skb)
+#define NETXEN_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
{
- if (desc->mss) {
- desc->total_hdr_length = (sizeof(struct ethhdr) +
- ip_hdrlen(skb) + tcp_hdrlen(skb));
- netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
- netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
- } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
- netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
- } else {
- return;
- }
+ nx_nic_req_t req;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr |= (NIC_REQUEST << 23);
+ req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE;
+ req.req_hdr |= ((u64)adapter->portnum << 16);
+
+ memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "interrupt coalescing parameters\n");
}
- desc->tcp_hdr_offset = skb_transport_offset(skb);
- desc->ip_hdr_offset = skb_network_offset(skb);
+
+ return rv;
+}
+
+/*
+ * netxen_nic_change_mtu - Change the Maximum Transfer Unit
+ * @returns 0 on success, negative on failure
+ */
+
+#define MTU_FUDGE_FACTOR 100
+
+int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ int max_mtu;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ max_mtu = P3_MAX_MTU;
+ else
+ max_mtu = P2_MAX_MTU;
+
+ if (mtu > max_mtu) {
+ printk(KERN_ERR "%s: mtu > %d bytes unsupported\n",
+ netdev->name, max_mtu);
+ return -EINVAL;
+ }
+
+ if (adapter->set_mtu)
+ adapter->set_mtu(adapter, mtu);
+ netdev->mtu = mtu;
+
+ mtu += MTU_FUDGE_FACTOR;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ nx_fw_cmd_set_mtu(adapter, mtu);
+ else if (adapter->set_mtu)
+ adapter->set_mtu(adapter, mtu);
+
+ return 0;
}
int netxen_is_flash_supported(struct netxen_adapter *adapter)
@@ -632,41 +752,49 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[])
return 0;
}
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+
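+/*
+ * Serialize CRB window moves across PCI functions with hardware
+ * semaphore 7: spin reading PCIE_SEM7_LOCK until it returns 1, record
+ * the owning port in NETXEN_CRB_WIN_LOCK_ID, and release by reading
+ * PCIE_SEM7_UNLOCK.
+ */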
+static int crb_win_lock(struct netxen_adapter *adapter)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore7 from the PCI HW block */
+ adapter->hw_read_wx(adapter,
+ NETXEN_PCIE_REG(PCIE_SEM7_LOCK), &done, 4);
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+ timeout++;
+ udelay(1);
+ }
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_CRB_WIN_LOCK_ID, adapter->portnum);
+ return 0;
+}
+
+static void crb_win_unlock(struct netxen_adapter *adapter)
+{
+ int val;
+
+ adapter->hw_read_wx(adapter,
+ NETXEN_PCIE_REG(PCIE_SEM7_UNLOCK), &val, 4);
+}
+
/*
* Changes the CRB window to the specified window.
*/
-void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw)
+void
+netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, u32 wndw)
{
void __iomem *offset;
u32 tmp;
int count = 0;
+ uint8_t func = adapter->ahw.pci_func;
if (adapter->curr_window == wndw)
return;
- switch(adapter->ahw.pci_func) {
- case 0:
- offset = PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW));
- break;
- case 1:
- offset = PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW_F1));
- break;
- case 2:
- offset = PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW_F2));
- break;
- case 3:
- offset = PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW_F3));
- break;
- default:
- printk(KERN_INFO "Changing the window for PCI function "
- "%d\n", adapter->ahw.pci_func);
- offset = PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW));
- break;
- }
/*
* Move the CRB window.
* We need to write to the "direct access" region of PCI
@@ -675,6 +803,8 @@ void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw)
* register address is received by PCI. The direct region bypasses
* the CRB bus.
*/
+ offset = PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
if (wndw & 0x1)
wndw = NETXEN_WINDOW_ONE;
@@ -685,7 +815,7 @@ void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw)
while ((tmp = readl(offset)) != wndw) {
printk(KERN_WARNING "%s: %s WARNING: CRB window value not "
"registered properly: 0x%08x.\n",
- netxen_nic_driver_name, __FUNCTION__, tmp);
+ netxen_nic_driver_name, __func__, tmp);
mdelay(1);
if (count >= 10)
break;
@@ -698,51 +828,119 @@ void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw)
adapter->curr_window = 0;
}
+/*
+ * Return -1 if off is not valid.
+ * Return 1 if window access is needed; 'off' is set to the offset
+ * from CRB space in the 128M pci map.
+ * Return 0 if no window access is needed; 'off' is set to the 2M addr.
+ * In: 'off' is the offset from base in the 128M pci map.
+ */
+static int
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
+ ulong *off, int len)
+{
+ unsigned long end = *off + len;
+ crb_128M_2M_sub_block_map_t *m;
+
+
+ if (*off >= NETXEN_CRB_MAX)
+ return -1;
+
+ if (*off >= NETXEN_PCI_CAMQM && (end <= NETXEN_PCI_CAMQM_2M_END)) {
+ *off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE +
+ (ulong)adapter->ahw.pci_base0;
+ return 0;
+ }
+
+ if (*off < NETXEN_PCI_CRBSPACE)
+ return -1;
+
+ *off -= NETXEN_PCI_CRBSPACE;
+ end = *off + len;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+
+ if (m->valid && (m->start_128M <= *off) && (m->end_128M >= end)) {
+ *off = *off + m->start_2M - m->start_128M +
+ (ulong)adapter->ahw.pci_base0;
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off)
+{
+ u32 win_read;
+
+ adapter->crb_win = CRB_HI(*off);
+ writel(adapter->crb_win, (void *)(CRB_WINDOW_2M +
+ adapter->ahw.pci_base0));
+ /*
+ * Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = readl((void *)(CRB_WINDOW_2M + adapter->ahw.pci_base0));
+ if (win_read != adapter->crb_win) {
+ printk(KERN_ERR "%s: Written crbwin (0x%x) != "
+ "Read crbwin (0x%x), off=0x%lx\n",
+ __func__, adapter->crb_win, win_read, *off);
+ }
+ *off = (*off & MASK(16)) + CRB_INDIRECT_2M +
+ (ulong)adapter->ahw.pci_base0;
+}
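+
+/*
+ * Callers of the two helpers above (see netxen_nic_hw_write_wx_2M()
+ * and netxen_nic_hw_read_wx_2M() below) first translate the offset
+ * with netxen_nic_pci_get_crb_addr_2M(); a direct mapping is
+ * dereferenced as-is, otherwise they take adapter_lock plus the
+ * crb_win_lock semaphore, move the window with
+ * netxen_nic_pci_set_crbwindow_2M() and access the indirect region.
+ */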
+
int netxen_load_firmware(struct netxen_adapter *adapter)
{
int i;
u32 data, size = 0;
- u32 flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE;
- u64 off;
- void __iomem *addr;
+ u32 flashaddr = NETXEN_BOOTLD_START, memaddr = NETXEN_BOOTLD_START;
- size = NETXEN_FIRMWARE_LEN;
- writel(1, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST));
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START)/4;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ adapter->pci_write_normalize(adapter,
+ NETXEN_ROMUSB_GLB_CAS_RST, 1);
for (i = 0; i < size; i++) {
- int retries = 10;
if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0)
return -EIO;
- off = netxen_nic_pci_set_window(adapter, memaddr);
- addr = pci_base_offset(adapter, off);
- writel(data, addr);
- do {
- if (readl(addr) == data)
- break;
- msleep(100);
- writel(data, addr);
- } while (--retries);
- if (!retries) {
- printk(KERN_ERR "%s: firmware load aborted, write failed at 0x%x\n",
- netxen_nic_driver_name, memaddr);
- return -EIO;
- }
+ adapter->pci_mem_write(adapter, memaddr, &data, 4);
flashaddr += 4;
memaddr += 4;
+ cond_resched();
+ }
+ msleep(1);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ adapter->pci_write_normalize(adapter,
+ NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
+ else {
+ adapter->pci_write_normalize(adapter,
+ NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
+ adapter->pci_write_normalize(adapter,
+ NETXEN_ROMUSB_GLB_CAS_RST, 0);
}
- udelay(100);
- /* make sure Casper is powered on */
- writel(0x3fff,
- NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL));
- writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST));
return 0;
}
int
-netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
- int len)
+netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len)
{
void __iomem *addr;
@@ -750,7 +948,7 @@ netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
addr = NETXEN_CRB_NORMALIZE(adapter, off);
} else { /* Window 0 */
addr = pci_base_offset(adapter, off);
- netxen_nic_pci_change_crbwindow(adapter, 0);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 0);
}
DPRINTK(INFO, "writing to base %lx offset %llx addr %p"
@@ -758,7 +956,7 @@ netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
pci_base(adapter, off), off, addr,
*(unsigned long long *)data, len);
if (!addr) {
- netxen_nic_pci_change_crbwindow(adapter, 1);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 1);
return 1;
}
@@ -785,14 +983,14 @@ netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
break;
}
if (!ADDR_IN_WINDOW1(off))
- netxen_nic_pci_change_crbwindow(adapter, 1);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 1);
return 0;
}
int
-netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
- int len)
+netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len)
{
void __iomem *addr;
@@ -800,13 +998,13 @@ netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
addr = NETXEN_CRB_NORMALIZE(adapter, off);
} else { /* Window 0 */
addr = pci_base_offset(adapter, off);
- netxen_nic_pci_change_crbwindow(adapter, 0);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 0);
}
DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n",
pci_base(adapter, off), off, addr);
if (!addr) {
- netxen_nic_pci_change_crbwindow(adapter, 1);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 1);
return 1;
}
switch (len) {
@@ -830,81 +1028,195 @@ netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
DPRINTK(INFO, "read %lx\n", *(unsigned long *)data);
if (!ADDR_IN_WINDOW1(off))
- netxen_nic_pci_change_crbwindow(adapter, 1);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 1);
return 0;
}
-void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val)
-{ /* Only for window 1 */
- void __iomem *addr;
+int
+netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len)
+{
+ unsigned long flags = 0;
+ int rv;
+
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len);
+
+ if (rv == -1) {
+ printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
+ __func__, off);
+ dump_stack();
+ return -1;
+ }
+
+ if (rv == 1) {
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, &off);
+ }
- addr = NETXEN_CRB_NORMALIZE(adapter, off);
- DPRINTK(INFO, "writing to base %lx offset %llx addr %p data %x\n",
- pci_base(adapter, off), off, addr, val);
- writel(val, addr);
+ DPRINTK(1, INFO, "write data %lx to offset %llx, len=%d\n",
+ *(unsigned long *)data, off, len);
+ switch (len) {
+ case 1:
+ writeb(*(uint8_t *)data, (void *)off);
+ break;
+ case 2:
+ writew(*(uint16_t *)data, (void *)off);
+ break;
+ case 4:
+ writel(*(uint32_t *)data, (void *)off);
+ break;
+ case 8:
+ writeq(*(uint64_t *)data, (void *)off);
+ break;
+ default:
+ DPRINTK(1, INFO,
+ "writing data %lx to offset %llx, num words=%d\n",
+ *(unsigned long *)data, off, (len>>3));
+ break;
+ }
+ if (rv == 1) {
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ }
+
+ return 0;
}
-int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off)
-{ /* Only for window 1 */
- void __iomem *addr;
- int val;
+int
+netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter,
+ ulong off, void *data, int len)
+{
+ unsigned long flags = 0;
+ int rv;
- addr = NETXEN_CRB_NORMALIZE(adapter, off);
- DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n",
- pci_base(adapter, off), off, addr);
- val = readl(addr);
- writel(val, addr);
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len);
+
+ if (rv == -1) {
+ printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
+ __func__, off);
+ dump_stack();
+ return -1;
+ }
+
+ if (rv == 1) {
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, &off);
+ }
+
+ DPRINTK(1, INFO, "read from offset %lx, len=%d\n", off, len);
+
+ switch (len) {
+ case 1:
+ *(uint8_t *)data = readb((void *)off);
+ break;
+ case 2:
+ *(uint16_t *)data = readw((void *)off);
+ break;
+ case 4:
+ *(uint32_t *)data = readl((void *)off);
+ break;
+ case 8:
+ *(uint64_t *)data = readq((void *)off);
+ break;
+ default:
+ break;
+ }
+ DPRINTK(1, INFO, "read %lx\n", *(unsigned long *)data);
+
+ if (rv == 1) {
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ }
+
+ return 0;
+}
+
+void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val)
+{
+ adapter->hw_write_wx(adapter, off, &val, 4);
+}
+
+int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off)
+{
+ int val;
+ adapter->hw_read_wx(adapter, off, &val, 4);
return val;
}
/* Change the window to 0, write and change back to window 1. */
void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value)
{
- void __iomem *addr;
-
- netxen_nic_pci_change_crbwindow(adapter, 0);
- addr = pci_base_offset(adapter, index);
- writel(value, addr);
- netxen_nic_pci_change_crbwindow(adapter, 1);
+ adapter->hw_write_wx(adapter, index, &value, 4);
}
/* Change the window to 0, read and change back to window 1. */
-void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value)
+void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value)
{
- void __iomem *addr;
+ adapter->hw_read_wx(adapter, index, value, 4);
+}
- addr = pci_base_offset(adapter, index);
+void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value)
+{
+ adapter->hw_write_wx(adapter, index, &value, 4);
+}
+
+void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value)
+{
+ adapter->hw_read_wx(adapter, index, value, 4);
+}
+
+/*
+ * check memory access boundary.
+ * used by test agent. support ddr access only for now
+ */
+static unsigned long
+netxen_nic_pci_mem_bound_check(struct netxen_adapter *adapter,
+ unsigned long long addr, int size)
+{
+ if (!ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX) ||
+ !ADDR_IN_RANGE(addr+size-1,
+ NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX) ||
+ ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
+ return 0;
+ }
- netxen_nic_pci_change_crbwindow(adapter, 0);
- *value = readl(addr);
- netxen_nic_pci_change_crbwindow(adapter, 1);
+ return 1;
}
static int netxen_pci_set_window_warning_count;
-static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter,
- unsigned long long addr)
+unsigned long
+netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
+ unsigned long long addr)
{
- static int ddr_mn_window = -1;
- static int qdr_sn_window = -1;
+ void __iomem *offset;
int window;
+ unsigned long long qdr_max;
+ uint8_t func = adapter->ahw.pci_func;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ qdr_max = NETXEN_ADDR_QDR_NET_MAX_P2;
+ } else {
+ qdr_max = NETXEN_ADDR_QDR_NET_MAX_P3;
+ }
if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
/* DDR network side */
addr -= NETXEN_ADDR_DDR_NET;
window = (addr >> 25) & 0x3ff;
- if (ddr_mn_window != window) {
- ddr_mn_window = window;
- writel(window, PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG
- (PCIX_MN_WINDOW(adapter->ahw.pci_func))));
+ if (adapter->ahw.ddr_mn_window != window) {
+ adapter->ahw.ddr_mn_window = window;
+ offset = PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG(PCIE_MN_WINDOW_REG(func)));
+ writel(window, offset);
/* MUST make sure window is set before we forge on... */
- readl(PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG
- (PCIX_MN_WINDOW(adapter->ahw.pci_func))));
+ readl(offset);
}
addr -= (window * NETXEN_WINDOW_ONE);
addr += NETXEN_PCI_DDR_NET;
@@ -914,22 +1226,17 @@ static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter,
} else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
addr -= NETXEN_ADDR_OCM1;
addr += NETXEN_PCI_OCM1;
- } else
- if (ADDR_IN_RANGE
- (addr, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX)) {
+ } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_QDR_NET, qdr_max)) {
/* QDR network side */
addr -= NETXEN_ADDR_QDR_NET;
window = (addr >> 22) & 0x3f;
- if (qdr_sn_window != window) {
- qdr_sn_window = window;
- writel((window << 22),
- PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG
- (PCIX_SN_WINDOW(adapter->ahw.pci_func))));
+ if (adapter->ahw.qdr_sn_window != window) {
+ adapter->ahw.qdr_sn_window = window;
+ offset = PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG(PCIE_SN_WINDOW_REG(func)));
+ writel((window << 22), offset);
/* MUST make sure window is set before we forge on... */
- readl(PCI_OFFSET_SECOND_RANGE(adapter,
- NETXEN_PCIX_PH_REG
- (PCIX_SN_WINDOW(adapter->ahw.pci_func))));
+ readl(offset);
}
addr -= (window * 0x400000);
addr += NETXEN_PCI_QDR_NET;
@@ -943,11 +1250,711 @@ static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter,
printk("%s: Warning:netxen_nic_pci_set_window()"
" Unknown address range!\n",
netxen_nic_driver_name);
+ addr = -1UL;
+ }
+ return addr;
+}
+
+/*
+ * Note : only 32-bit writes!
+ */
+int netxen_nic_pci_write_immediate_128M(struct netxen_adapter *adapter,
+ u64 off, u32 data)
+{
+ writel(data, (void __iomem *)(PCI_OFFSET_SECOND_RANGE(adapter, off)));
+ return 0;
+}
+
+u32 netxen_nic_pci_read_immediate_128M(struct netxen_adapter *adapter, u64 off)
+{
+ return readl((void __iomem *)(pci_base_offset(adapter, off)));
+}
+
+void netxen_nic_pci_write_normalize_128M(struct netxen_adapter *adapter,
+ u64 off, u32 data)
+{
+ writel(data, NETXEN_CRB_NORMALIZE(adapter, off));
+}
+
+u32 netxen_nic_pci_read_normalize_128M(struct netxen_adapter *adapter, u64 off)
+{
+ return readl(NETXEN_CRB_NORMALIZE(adapter, off));
+}
+
+unsigned long
+netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
+ unsigned long long addr)
+{
+ int window;
+ u32 win_read;
+
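+ /*
+ * Map an agent address onto the 2M BAR: choose the DDR (MN), OCM or
+ * QDR (MS) window that covers it, program the window register over
+ * the CRB, read it back to verify the write took effect, and return
+ * the offset to use within the mapped region; unknown ranges yield
+ * -1UL.
+ */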
+ if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ window = MN_WIN(addr);
+ adapter->ahw.ddr_mn_window = window;
+ adapter->hw_write_wx(adapter,
+ adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
+ &window, 4);
+ adapter->hw_read_wx(adapter,
+ adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
+ &win_read, 4);
+ if ((win_read << 17) != window) {
+ printk(KERN_INFO "Written MNwin (0x%x) != "
+ "Read MNwin (0x%x)\n", window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_DDR_NET;
+ } else if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ if ((addr & 0x00ff800) == 0xff800) {
+ printk("%s: QM access not handled.\n", __func__);
+ addr = -1UL;
+ }
+
+ window = OCM_WIN(addr);
+ adapter->ahw.ddr_mn_window = window;
+ adapter->hw_write_wx(adapter,
+ adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
+ &window, 4);
+ adapter->hw_read_wx(adapter,
+ adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
+ &win_read, 4);
+ if ((win_read >> 7) != window) {
+ printk(KERN_INFO "%s: Written OCMwin (0x%x) != "
+ "Read OCMwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_OCM0_2M;
+
+ } else if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) {
+ /* QDR network side */
+ window = MS_WIN(addr);
+ adapter->ahw.qdr_sn_window = window;
+ adapter->hw_write_wx(adapter,
+ adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE,
+ &window, 4);
+ adapter->hw_read_wx(adapter,
+ adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE,
+ &win_read, 4);
+ if (win_read != window) {
+ printk(KERN_INFO "%s: Written MSwin (0x%x) != "
+ "Read MSwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_QDR_NET;
+ } else {
+ /*
+ * peg gdb frequently accesses memory that doesn't exist;
+ * this limits the chatter so debugging isn't slowed down.
+ */
+ if ((netxen_pci_set_window_warning_count++ < 8)
+ || (netxen_pci_set_window_warning_count%64 == 0)) {
+ printk("%s: Warning:%s Unknown address range!\n",
+ __func__, netxen_nic_driver_name);
+ }
+ addr = -1UL;
}
return addr;
}
+static int netxen_nic_pci_is_same_window(struct netxen_adapter *adapter,
+ unsigned long long addr)
+{
+ int window;
+ unsigned long long qdr_max;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ qdr_max = NETXEN_ADDR_QDR_NET_MAX_P2;
+ else
+ qdr_max = NETXEN_ADDR_QDR_NET_MAX_P3;
+
+ if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ BUG(); /* MN access can not come here */
+ } else if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ return 1;
+ } else if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ return 1;
+ } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_QDR_NET, qdr_max)) {
+ /* QDR network side */
+ window = ((addr - NETXEN_ADDR_QDR_NET) >> 22) & 0x3f;
+ if (adapter->ahw.qdr_sn_window == window)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void *addr;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
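+ /*
+ * Direct (non test-agent) access: translate the address through the
+ * PCI window and, if the translated offset is not covered by the
+ * regions mapped at probe time, temporarily ioremap() the page (or
+ * two pages when the access straddles a page boundary).
+ */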
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+
+ /*
+ * Do not access if the address is unknown or the access would
+ * straddle hw windows.
+ */
+ start = adapter->pci_set_window(adapter, off);
+ if ((start == -1UL) ||
+ (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ printk(KERN_ERR "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", netxen_nic_driver_name, off);
+ return -1;
+ }
+
+ addr = (void *)(pci_base_offset(adapter, start));
+ if (!addr) {
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ mem_base = pci_resource_start(adapter->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ *(uint8_t *)data = 0;
+ return -1;
+ }
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = readb(addr);
+ break;
+ case 2:
+ *(uint16_t *)data = readw(addr);
+ break;
+ case 4:
+ *(uint32_t *)data = readl(addr);
+ break;
+ case 8:
+ *(uint64_t *)data = readq(addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ DPRINTK(1, INFO, "read %llx\n", *(unsigned long long *)data);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off,
+ void *data, int size)
+{
+ unsigned long flags;
+ void *addr;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+
+ /*
+ * Do not access if the address is unknown or the access would
+ * straddle hw windows.
+ */
+ start = adapter->pci_set_window(adapter, off);
+ if ((start == -1UL) ||
+ (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ printk(KERN_ERR "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", netxen_nic_driver_name, off);
+ return -1;
+ }
+
+ addr = (void *)(pci_base_offset(adapter, start));
+ if (!addr) {
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ mem_base = pci_resource_start(adapter->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == NULL)
+ return -1;
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+ }
+
+ switch (size) {
+ case 1:
+ writeb(*(uint8_t *)data, addr);
+ break;
+ case 2:
+ writew(*(uint16_t *)data, addr);
+ break;
+ case 4:
+ writel(*(uint32_t *)data, addr);
+ break;
+ case 8:
+ writeq(*(uint64_t *)data, addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ DPRINTK(1, INFO, "writing data %llx to offset %llx\n",
+ *(unsigned long long *)data, start);
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+#define MAX_CTL_CHECK 1000
+
+int
+netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size)
+{
+ unsigned long flags, mem_crb;
+ int i, j, ret = 0, loop, sz[2], off0;
+ uint32_t temp;
+ uint64_t off8, tmpw, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0)
+ return netxen_nic_pci_mem_write_direct(adapter,
+ off, data, size);
+
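+ /*
+ * The MIU test agent moves whole 8-byte words: split the request
+ * into the aligned base (off8), the byte offset within that word
+ * (off0) and chunk sizes sz[0]/sz[1]. For example a 4-byte write at
+ * off0 == 6 becomes sz[0] = 2, sz[1] = 2 and loop == 2. Partially
+ * written words are read back first so only the addressed bytes
+ * change (read-modify-write).
+ */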
+ off8 = off & 0xfffffff8;
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+ loop = ((off0 + size - 1) >> 3) + 1;
+ mem_crb = (unsigned long)pci_base_offset(adapter, NETXEN_CRB_DDR_NET);
+
+ if ((size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (adapter->pci_mem_read(adapter,
+ off8 + (i << 3), &word[i], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((uint8_t *)data);
+ break;
+ case 2:
+ tmpw = *((uint16_t *)data);
+ break;
+ case 4:
+ tmpw = *((uint32_t *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((uint64_t *)data);
+ break;
+ }
+ word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[0] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+
+ for (i = 0; i < loop; i++) {
+ writel((uint32_t)(off8 + (i << 3)),
+ (void *)(mem_crb+MIU_TEST_AGT_ADDR_LO));
+ writel(0,
+ (void *)(mem_crb+MIU_TEST_AGT_ADDR_HI));
+ writel(word[i] & 0xffffffff,
+ (void *)(mem_crb+MIU_TEST_AGT_WRDATA_LO));
+ writel((word[i] >> 32) & 0xffffffff,
+ (void *)(mem_crb+MIU_TEST_AGT_WRDATA_HI));
+ writel(MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
+ (void *)(mem_crb+MIU_TEST_AGT_CTRL));
+ writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
+ (void *)(mem_crb+MIU_TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(
+ (void *)(mem_crb+MIU_TEST_AGT_CTRL));
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk("%s: %s Fail to write through agent\n",
+ __func__, netxen_nic_driver_name);
+ ret = -1;
+ break;
+ }
+ }
+
+ netxen_nic_pci_change_crbwindow_128M(adapter, 1);
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ return ret;
+}
+
+int
+netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size)
+{
+ unsigned long flags, mem_crb;
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ uint32_t temp;
+ uint64_t off8, val, word[2] = {0, 0};
+
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0)
+ return netxen_nic_pci_mem_read_direct(adapter, off, data, size);
+
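+ /*
+ * Reads mirror the write path: fetch the covering 8-byte word(s)
+ * through the test agent into word[0]/word[1], then shift and mask
+ * out the sz[0]/sz[1] bytes starting at off0[0] to rebuild the
+ * requested value.
+ */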
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ off0[1] = 0;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ sz[1] = size - sz[0];
+ loop = ((off0[0] + size - 1) >> 3) + 1;
+ mem_crb = (unsigned long)pci_base_offset(adapter, NETXEN_CRB_DDR_NET);
+
+ write_lock_irqsave(&adapter->adapter_lock, flags);
+ netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+
+ for (i = 0; i < loop; i++) {
+ writel((uint32_t)(off8 + (i << 3)),
+ (void *)(mem_crb+MIU_TEST_AGT_ADDR_LO));
+ writel(0,
+ (void *)(mem_crb+MIU_TEST_AGT_ADDR_HI));
+ writel(MIU_TA_CTL_ENABLE,
+ (void *)(mem_crb+MIU_TEST_AGT_CTRL));
+ writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE,
+ (void *)(mem_crb+MIU_TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(
+ (void *)(mem_crb+MIU_TEST_AGT_CTRL));
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk(KERN_ERR "%s: %s Fail to read through agent\n",
+ __func__, netxen_nic_driver_name);
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ word[i] |= ((uint64_t) readl(
+ (void *)(mem_crb +
+ MIU_TEST_AGT_RDDATA(k))) << (32*k));
+ }
+ }
+
+ netxen_nic_pci_change_crbwindow_128M(adapter, 1);
+ write_unlock_irqrestore(&adapter->adapter_lock, flags);
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if (sz[0] == 8) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = val;
+ break;
+ case 2:
+ *(uint16_t *)data = val;
+ break;
+ case 4:
+ *(uint32_t *)data = val;
+ break;
+ case 8:
+ *(uint64_t *)data = val;
+ break;
+ }
+ DPRINTK(1, INFO, "read %llx\n", *(unsigned long long *)data);
+ return 0;
+}
+
+int
+netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ uint32_t temp;
+ uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= NETXEN_ADDR_QDR_NET && off <= NETXEN_ADDR_QDR_NET_MAX_P3)
+ mem_crb = NETXEN_CRB_QDR_NET;
+ else {
+ mem_crb = NETXEN_CRB_DDR_NET;
+ if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0)
+ return netxen_nic_pci_mem_write_direct(adapter,
+ off, data, size);
+ }
+
+ off8 = off & 0xfffffff8;
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+ loop = ((off0 + size - 1) >> 3) + 1;
+
+ if ((size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (adapter->pci_mem_read(adapter, off8 + (i << 3),
+ &word[i], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((uint8_t *)data);
+ break;
+ case 2:
+ tmpw = *((uint16_t *)data);
+ break;
+ case 4:
+ tmpw = *((uint32_t *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((uint64_t *)data);
+ break;
+ }
+
+ word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[0] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+
+ /*
+ * don't lock here - write_wx takes the lock on each access
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ adapter->hw_write_wx(adapter,
+ mem_crb+MIU_TEST_AGT_ADDR_LO, &temp, 4);
+ temp = 0;
+ adapter->hw_write_wx(adapter,
+ mem_crb+MIU_TEST_AGT_ADDR_HI, &temp, 4);
+ temp = word[i] & 0xffffffff;
+ adapter->hw_write_wx(adapter,
+ mem_crb+MIU_TEST_AGT_WRDATA_LO, &temp, 4);
+ temp = (word[i] >> 32) & 0xffffffff;
+ adapter->hw_write_wx(adapter,
+ mem_crb+MIU_TEST_AGT_WRDATA_HI, &temp, 4);
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ adapter->hw_write_wx(adapter,
+ mem_crb+MIU_TEST_AGT_CTRL, &temp, 4);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ adapter->hw_write_wx(adapter,
+ mem_crb+MIU_TEST_AGT_CTRL, &temp, 4);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ adapter->hw_read_wx(adapter,
+ mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk(KERN_ERR "%s: Fail to write through agent\n",
+ netxen_nic_driver_name);
+ ret = -1;
+ break;
+ }
+ }
+
+ /*
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
+ * write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ */
+ return ret;
+}
+
+int
+netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
+ u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ uint32_t temp;
+ uint64_t off8, val, mem_crb, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+
+ if (off >= NETXEN_ADDR_QDR_NET && off <= NETXEN_ADDR_QDR_NET_MAX_P3)
+ mem_crb = NETXEN_CRB_QDR_NET;
+ else {
+ mem_crb = NETXEN_CRB_DDR_NET;
+ if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0)
+ return netxen_nic_pci_mem_read_direct(adapter,
+ off, data, size);
+ }
+
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ off0[1] = 0;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ sz[1] = size - sz[0];
+ loop = ((off0[0] + size - 1) >> 3) + 1;
+
+ /*
+ * don't lock here - write_wx takes the lock on each access
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ adapter->hw_write_wx(adapter,
+ mem_crb + MIU_TEST_AGT_ADDR_LO, &temp, 4);
+ temp = 0;
+ adapter->hw_write_wx(adapter,
+ mem_crb + MIU_TEST_AGT_ADDR_HI, &temp, 4);
+ temp = MIU_TA_CTL_ENABLE;
+ adapter->hw_write_wx(adapter,
+ mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ adapter->hw_write_wx(adapter,
+ mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ adapter->hw_read_wx(adapter,
+ mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk(KERN_ERR "%s: Fail to read through agent\n",
+ netxen_nic_driver_name);
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ adapter->hw_read_wx(adapter,
+ mem_crb + MIU_TEST_AGT_RDDATA(k), &temp, 4);
+ word[i] |= ((uint64_t)temp << (32 * k));
+ }
+ }
+
+ /*
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
+ * write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ */
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if (sz[0] == 8) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = val;
+ break;
+ case 2:
+ *(uint16_t *)data = val;
+ break;
+ case 4:
+ *(uint32_t *)data = val;
+ break;
+ case 8:
+ *(uint64_t *)data = val;
+ break;
+ }
+ DPRINTK(1, INFO, "read %llx\n", *(unsigned long long *)data);
+ return 0;
+}
+
+/*
+ * Note : only 32-bit writes!
+ */
+int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter,
+ u64 off, u32 data)
+{
+ adapter->hw_write_wx(adapter, off, &data, 4);
+
+ return 0;
+}
+
+u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off)
+{
+ u32 temp;
+ adapter->hw_read_wx(adapter, off, &temp, 4);
+ return temp;
+}
+
+void netxen_nic_pci_write_normalize_2M(struct netxen_adapter *adapter,
+ u64 off, u32 data)
+{
+ adapter->hw_write_wx(adapter, off, &data, 4);
+}
+
+u32 netxen_nic_pci_read_normalize_2M(struct netxen_adapter *adapter, u64 off)
+{
+ u32 temp;
+ adapter->hw_read_wx(adapter, off, &temp, 4);
+ return temp;
+}
+
#if 0
int
netxen_nic_erase_pxe(struct netxen_adapter *adapter)
@@ -1003,12 +2010,25 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+
adapter->ahw.board_type = NETXEN_NIC_XGBE;
break;
case NETXEN_BRDTYPE_P1_BD:
case NETXEN_BRDTYPE_P1_SB:
case NETXEN_BRDTYPE_P1_SMAX:
case NETXEN_BRDTYPE_P1_SOCK:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+
adapter->ahw.board_type = NETXEN_NIC_GBE;
break;
default:
@@ -1042,25 +2062,11 @@ int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
return 0;
}
-void netxen_nic_init_niu_gb(struct netxen_adapter *adapter)
-{
- netxen_niu_gbe_init_port(adapter, adapter->physical_port);
-}
-
void
-netxen_crb_writelit_adapter(struct netxen_adapter *adapter, unsigned long off,
- int data)
+netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
+ unsigned long off, int data)
{
- void __iomem *addr;
-
- if (ADDR_IN_WINDOW1(off)) {
- writel(data, NETXEN_CRB_NORMALIZE(adapter, off));
- } else {
- netxen_nic_pci_change_crbwindow(adapter, 0);
- addr = pci_base_offset(adapter, off);
- writel(data, addr);
- netxen_nic_pci_change_crbwindow(adapter, 1);
- }
+ adapter->hw_write_wx(adapter, off, &data, 4);
}
void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
@@ -1147,12 +2153,11 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
addr += sizeof(u32);
}
- fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_FW_VERSION_MAJOR));
- fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_FW_VERSION_MINOR));
- fw_build =
- readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB));
+ adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MAJOR, &fw_major, 4);
+ adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MINOR, &fw_minor, 4);
+ adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_SUB, &fw_build, 4);
+
+ adapter->fw_major = fw_major;
if (adapter->portnum == 0) {
get_brd_name_by_type(board_info->board_type, brd_name);
@@ -1163,28 +2168,13 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
fw_minor, fw_build);
}
- if (fw_major != _NETXEN_NIC_LINUX_MAJOR) {
- adapter->driver_mismatch = 1;
- }
- if (fw_minor != _NETXEN_NIC_LINUX_MINOR &&
- fw_minor != (_NETXEN_NIC_LINUX_MINOR + 1)) {
+ if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) <
+ NETXEN_VERSION_CODE(3, 4, 216)) {
adapter->driver_mismatch = 1;
- }
- if (adapter->driver_mismatch) {
- printk(KERN_ERR "%s: driver and firmware version mismatch\n",
- adapter->netdev->name);
+ printk(KERN_ERR "%s: firmware version %d.%d.%d unsupported\n",
+ netxen_nic_driver_name,
+ fw_major, fw_minor, fw_build);
return;
}
-
- switch (adapter->ahw.board_type) {
- case NETXEN_NIC_GBE:
- dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
- adapter->netdev->name);
- break;
- case NETXEN_NIC_XGBE:
- dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
- adapter->netdev->name);
- break;
- }
}
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index a3ea1dd98c41..b8e0030f03d7 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -82,19 +82,9 @@ struct netxen_adapter;
#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
-#define NETXEN_NIC_LOCKED_READ_REG(X, Y) \
- addr = pci_base_offset(adapter, X); \
- *(u32 *)Y = readl((void __iomem*) addr);
-
struct netxen_port;
void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
void netxen_nic_flash_print(struct netxen_adapter *adapter);
-int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off,
- void *data, int len);
-void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
- unsigned long off, int data);
-int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off,
- void *data, int len);
typedef u8 netxen_ethernet_macaddr_t[6];
@@ -432,7 +422,8 @@ typedef enum {
/* Promiscous mode options (GbE mode only) */
typedef enum {
NETXEN_NIU_PROMISC_MODE = 0,
- NETXEN_NIU_NON_PROMISC_MODE
+ NETXEN_NIU_NON_PROMISC_MODE,
+ NETXEN_NIU_ALLMULTI_MODE
} netxen_niu_prom_mode_t;
/*
@@ -478,42 +469,6 @@ typedef enum {
#define netxen_xg_soft_reset(config_word) \
((config_word) |= 1 << 4)
-/*
- * MAC Control Register
- *
- * Bit 0-1 : id_pool0
- * Bit 2 : enable_xtnd0
- * Bit 4-5 : id_pool1
- * Bit 6 : enable_xtnd1
- * Bit 8-9 : id_pool2
- * Bit 10 : enable_xtnd2
- * Bit 12-13 : id_pool3
- * Bit 14 : enable_xtnd3
- * Bit 24-25 : mode_select
- * Bit 28-31 : enable_pool
- */
-
-#define netxen_nic_mcr_set_id_pool0(config, val) \
- ((config) |= ((val) &0x03))
-#define netxen_nic_mcr_set_enable_xtnd0(config) \
- ((config) |= 1 << 3)
-#define netxen_nic_mcr_set_id_pool1(config, val) \
- ((config) |= (((val) & 0x03) << 4))
-#define netxen_nic_mcr_set_enable_xtnd1(config) \
- ((config) |= 1 << 6)
-#define netxen_nic_mcr_set_id_pool2(config, val) \
- ((config) |= (((val) & 0x03) << 8))
-#define netxen_nic_mcr_set_enable_xtnd2(config) \
- ((config) |= 1 << 10)
-#define netxen_nic_mcr_set_id_pool3(config, val) \
- ((config) |= (((val) & 0x03) << 12))
-#define netxen_nic_mcr_set_enable_xtnd3(config) \
- ((config) |= 1 << 14)
-#define netxen_nic_mcr_set_mode_select(config, val) \
- ((config) |= (((val) & 0x03) << 24))
-#define netxen_nic_mcr_set_enable_pool(config, val) \
- ((config) |= (((val) & 0x0f) << 28))
-
/* Set promiscuous mode for a GbE interface */
int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
netxen_niu_prom_mode_t mode);
@@ -538,4 +493,15 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter);
int netxen_niu_disable_xg_port(struct netxen_adapter *adapter);
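+/*
+ * One entry of the table that maps CRB sub-blocks of the legacy 128M
+ * BAR layout onto the 2M BAR: when 'valid' is set, offsets in
+ * [start_128M, end_128M] are reachable directly at start_2M without
+ * moving the CRB window. crb_128M_2M_block_map_t groups 16 such
+ * sub-blocks per CRB block.
+ */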
+typedef struct {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+} crb_128M_2M_sub_block_map_t;
+
+typedef struct {
+ crb_128M_2M_sub_block_map_t sub_block[16];
+} crb_128M_2M_block_map_t;
+
#endif /* __NETXEN_NIC_HW_H_ */
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 70d1b22ced22..01ab31b34a85 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -42,8 +42,6 @@ struct crb_addr_pair {
u32 data;
};
-unsigned long last_schedule_time;
-
#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)
@@ -117,6 +115,8 @@ static void crb_addr_transform_setup(void)
crb_addr_transform(C2C1);
crb_addr_transform(C2C0);
crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
}
int netxen_init_firmware(struct netxen_adapter *adapter)
@@ -124,15 +124,15 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
u32 state = 0, loops = 0, err = 0;
/* Window 1 call */
- state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+ state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
if (state == PHAN_INITIALIZE_ACK)
return 0;
while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
- udelay(100);
+ msleep(1);
/* Window 1 call */
- state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+ state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
loops++;
}
@@ -143,64 +143,193 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
return err;
}
/* Window 1 call */
- writel(INTR_SCHEME_PERPORT,
- NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_CAPABILITIES_HOST));
- writel(MSI_MODE_MULTIFUNC,
- NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_MSI_MODE_HOST));
- writel(MPORT_MULTI_FUNCTION_MODE,
- NETXEN_CRB_NORMALIZE(adapter, CRB_MPORT_MODE));
- writel(PHAN_INITIALIZE_ACK,
- NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+ adapter->pci_write_normalize(adapter,
+ CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ adapter->pci_write_normalize(adapter,
+ CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+ adapter->pci_write_normalize(adapter,
+ CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ adapter->pci_write_normalize(adapter,
+ CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
}
-#define NETXEN_ADDR_LIMIT 0xffffffffULL
+void netxen_release_rx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int i, ctxid, ring;
+
+ for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
+ recv_ctx = &adapter->recv_ctx[ctxid];
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == NETXEN_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+ }
+}
-void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
- struct pci_dev **used_dev)
+void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
- void *addr;
+ struct netxen_cmd_buffer *cmd_buf;
+ struct netxen_skb_frag *buffrag;
+ int i, j;
+
+ cmd_buf = adapter->cmd_buf_arr;
+ for (i = 0; i < adapter->max_tx_desc_count; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ /* Free the skb we received in netxen_nic_xmit_frame */
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
- addr = pci_alloc_consistent(pdev, sz, ptr);
- if ((unsigned long long)(*ptr) < NETXEN_ADDR_LIMIT) {
- *used_dev = pdev;
- return addr;
+void netxen_free_sw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ int ctx, ring;
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
+ recv_ctx = &adapter->recv_ctx[ctx];
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ if (rds_ring->rx_buf_arr) {
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ }
}
- pci_free_consistent(pdev, sz, addr, *ptr);
- addr = pci_alloc_consistent(NULL, sz, ptr);
- *used_dev = NULL;
- return addr;
+ if (adapter->cmd_buf_arr)
+ vfree(adapter->cmd_buf_arr);
+ return;
}
-void netxen_initialize_adapter_sw(struct netxen_adapter *adapter)
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
- int ctxid, ring;
- u32 i;
- u32 num_rx_bufs = 0;
- struct netxen_rcv_desc_ctx *rcv_desc;
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int ctx, ring, i, num_rx_bufs;
- DPRINTK(INFO, "initializing some queues: %p\n", adapter);
- for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
- struct netxen_rx_buffer *rx_buf;
- rcv_desc = &adapter->recv_ctx[ctxid].rcv_desc[ring];
- rcv_desc->begin_alloc = 0;
- rx_buf = rcv_desc->rx_buf_arr;
- num_rx_bufs = rcv_desc->max_rx_desc_count;
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
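+ /*
+ * Allocate host-side software state: the tx command buffer array
+ * plus, for each receive context, one rx buffer array per rds ring
+ * (normal, jumbo, LRO), sized according to ring type and chip
+ * revision.
+ */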
+ cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
+ if (cmd_buf_arr == NULL) {
+ printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
+ netdev->name);
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_RINGSIZE);
+ adapter->cmd_buf_arr = cmd_buf_arr;
+
+ for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
+ recv_ctx = &adapter->recv_ctx[ctx];
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (RCV_DESC_TYPE(ring)) {
+ case RCV_DESC_NORMAL:
+ rds_ring->max_rx_desc_count =
+ adapter->max_rx_desc_count;
+ rds_ring->flags = RCV_DESC_NORMAL;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ rds_ring->dma_size = RX_DMA_MAP_LEN;
+ rds_ring->skb_size =
+ MAX_RX_BUFFER_LENGTH;
+ }
+ break;
+
+ case RCV_DESC_JUMBO:
+ rds_ring->max_rx_desc_count =
+ adapter->max_jumbo_rx_desc_count;
+ rds_ring->flags = RCV_DESC_JUMBO;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ rds_ring->dma_size =
+ NX_P3_RX_JUMBO_BUF_MAX_LEN;
+ else
+ rds_ring->dma_size =
+ NX_P2_RX_JUMBO_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->max_rx_desc_count =
+ adapter->max_lro_rx_desc_count;
+ rds_ring->flags = RCV_DESC_LRO;
+ rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
+ rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
+ vmalloc(RCV_BUFFSIZE);
+ if (rds_ring->rx_buf_arr == NULL) {
+ printk(KERN_ERR "%s: Failed to allocate "
+ "rx buffer ring %d\n",
+ netdev->name, ring);
+ /* free whatever was already allocated */
+ goto err_out;
+ }
+ memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ rds_ring->begin_alloc = 0;
/*
* Now go through all of them, set reference handles
* and put them in the queues.
*/
+ num_rx_bufs = rds_ring->max_rx_desc_count;
+ rx_buf = rds_ring->rx_buf_arr;
for (i = 0; i < num_rx_bufs; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
rx_buf->ref_handle = i;
rx_buf->state = NETXEN_BUFFER_FREE;
- DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:"
- "%p\n", ctxid, i, rx_buf);
rx_buf++;
}
}
}
+
+ return 0;
+
+err_out:
+ netxen_free_sw_resources(adapter);
+ return -ENOMEM;
}
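
netxen_alloc_sw_resources() follows the usual allocate-or-unwind idiom: any allocation failure jumps to the single err_out label, and netxen_free_sw_resources() tolerates a partially built adapter because rings that never got an rx_buf_arr are simply left NULL. A self-contained sketch of that shape, with plain calloc/free standing in for vmalloc/vfree:

/* Illustrative allocate-or-unwind idiom; not the driver's structures. */
#include <stdlib.h>

struct rings { void *tx; void *rx[3]; };

static void free_rings(struct rings *r)
{
	int i;

	for (i = 0; i < 3; i++) {
		free(r->rx[i]);		/* free(NULL) is a no-op */
		r->rx[i] = NULL;
	}
	free(r->tx);
	r->tx = NULL;
}

static int alloc_rings(struct rings *r, size_t tx_sz, size_t rx_sz)
{
	int i;

	r->tx = calloc(1, tx_sz);
	if (!r->tx)
		return -1;

	for (i = 0; i < 3; i++) {
		r->rx[i] = calloc(1, rx_sz);
		if (!r->rx[i])
			goto err_out;	/* free whatever was allocated */
	}
	return 0;

err_out:
	free_rings(r);
	return -1;
}

int main(void)
{
	struct rings r = {0};

	if (alloc_rings(&r, 4096, 65536))
		return 1;
	free_rings(&r);
	return 0;
}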
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
@@ -211,14 +340,12 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
netxen_niu_gbe_enable_phy_interrupts;
adapter->disable_phy_interrupts =
netxen_niu_gbe_disable_phy_interrupts;
- adapter->handle_phy_intr = netxen_nic_gbe_handle_phy_intr;
adapter->macaddr_set = netxen_niu_macaddr_set;
adapter->set_mtu = netxen_nic_set_mtu_gb;
adapter->set_promisc = netxen_niu_set_promiscuous_mode;
- adapter->unset_promisc = netxen_niu_set_promiscuous_mode;
adapter->phy_read = netxen_niu_gbe_phy_read;
adapter->phy_write = netxen_niu_gbe_phy_write;
- adapter->init_niu = netxen_nic_init_niu_gb;
+ adapter->init_port = netxen_niu_gbe_init_port;
adapter->stop_port = netxen_niu_disable_gbe_port;
break;
@@ -227,12 +354,10 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
netxen_niu_xgbe_enable_phy_interrupts;
adapter->disable_phy_interrupts =
netxen_niu_xgbe_disable_phy_interrupts;
- adapter->handle_phy_intr = netxen_nic_xgbe_handle_phy_intr;
adapter->macaddr_set = netxen_niu_xg_macaddr_set;
adapter->set_mtu = netxen_nic_set_mtu_xgb;
adapter->init_port = netxen_niu_xg_init_port;
adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
- adapter->unset_promisc = netxen_niu_xg_set_promiscuous_mode;
adapter->stop_port = netxen_niu_disable_xg_port;
break;
@@ -270,7 +395,9 @@ static u32 netxen_decode_crb_addr(u32 addr)
static long rom_max_timeout = 100;
static long rom_lock_timeout = 10000;
+#if 0
static long rom_write_timeout = 700;
+#endif
static int rom_lock(struct netxen_adapter *adapter)
{
@@ -319,6 +446,7 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
return 0;
}
+#if 0
static int netxen_rom_wren(struct netxen_adapter *adapter)
{
/* Set write enable latch in ROM status register */
@@ -348,6 +476,7 @@ static int netxen_do_rom_rdsr(struct netxen_adapter *adapter)
}
return netxen_rdcrbreg(adapter, NETXEN_ROMUSB_ROM_RDATA);
}
+#endif
static void netxen_rom_unlock(struct netxen_adapter *adapter)
{
@@ -358,6 +487,7 @@ static void netxen_rom_unlock(struct netxen_adapter *adapter)
}
+#if 0
static int netxen_rom_wip_poll(struct netxen_adapter *adapter)
{
long timeout = 0;
@@ -393,6 +523,7 @@ static int do_rom_fast_write(struct netxen_adapter *adapter, int addr,
return netxen_rom_wip_poll(adapter);
}
+#endif
static int do_rom_fast_read(struct netxen_adapter *adapter,
int addr, int *valp)
@@ -475,7 +606,6 @@ int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
netxen_rom_unlock(adapter);
return ret;
}
-#endif /* 0 */
static int do_rom_fast_write_words(struct netxen_adapter *adapter,
int addr, u8 *bytes, size_t size)
@@ -740,28 +870,25 @@ int netxen_flash_unlock(struct netxen_adapter *adapter)
return ret;
}
+#endif /* 0 */
#define NETXEN_BOARDTYPE 0x4008
#define NETXEN_BOARDNUM 0x400c
#define NETXEN_CHIPNUM 0x4010
-#define NETXEN_ROMBUS_RESET 0xFFFFFFFF
-#define NETXEN_ROM_FIRST_BARRIER 0x800000000ULL
-#define NETXEN_ROM_FOUND_INIT 0x400
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
{
int addr, val;
- int n, i;
- int init_delay = 0;
+ int i, init_delay = 0;
struct crb_addr_pair *buf;
+ unsigned offset, n;
u32 off;
/* resetall */
netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
- NETXEN_ROMBUS_RESET);
+ 0xffffffff);
if (verbose) {
- int val;
if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
printk("P2 ROM board type: 0x%08x\n", val);
else
@@ -776,117 +903,141 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
printk("Could not read chip number\n");
}
- if (netxen_rom_fast_read(adapter, 0, &n) == 0
- && (n & NETXEN_ROM_FIRST_BARRIER)) {
- n &= ~NETXEN_ROM_ROUNDUP;
- if (n < NETXEN_ROM_FOUND_INIT) {
- if (verbose)
- printk("%s: %d CRB init values found"
- " in ROM.\n", netxen_nic_driver_name, n);
- } else {
- printk("%s:n=0x%x Error! NetXen card flash not"
- " initialized.\n", __FUNCTION__, n);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ (n != 0xcafecafeUL) ||
+ netxen_rom_fast_read(adapter, 4, &n) != 0) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
return -EIO;
}
- buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
- if (buf == NULL) {
- printk("%s: netxen_pinit_from_rom: Unable to calloc "
- "memory.\n", netxen_nic_driver_name);
- return -ENOMEM;
- }
- for (i = 0; i < n; i++) {
- if (netxen_rom_fast_read(adapter, 8 * i + 4, &val) != 0
- || netxen_rom_fast_read(adapter, 8 * i + 8,
- &addr) != 0)
- return -EIO;
-
- buf[i].addr = addr;
- buf[i].data = val;
-
- if (verbose)
- printk("%s: PCI: 0x%08x == 0x%08x\n",
- netxen_nic_driver_name, (unsigned int)
- netxen_decode_crb_addr(addr), val);
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+ } else {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ !(n & 0x80000000)) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
+ return -EIO;
}
- for (i = 0; i < n; i++) {
+ offset = 1;
+ n &= ~0x80000000;
+ }
+
+ if (n < 1024) {
+ if (verbose)
+ printk(KERN_DEBUG "%s: %d CRB init values found"
+ " in ROM.\n", netxen_nic_driver_name, n);
+ } else {
+ printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
+ " initialized.\n", __func__, n);
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
+ netxen_nic_driver_name);
+ return -ENOMEM;
+ }
+ for (i = 0; i < n; i++) {
+ if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0)
+ return -EIO;
+
+ buf[i].addr = addr;
+ buf[i].data = val;
- off = netxen_decode_crb_addr(buf[i].addr);
- if (off == NETXEN_ADDR_ERROR) {
- printk(KERN_ERR"CRB init value out of range %x\n",
+ if (verbose)
+ printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
+ netxen_nic_driver_name,
+ (u32)netxen_decode_crb_addr(addr), val);
+ }
+ for (i = 0; i < n; i++) {
+
+ off = netxen_decode_crb_addr(buf[i].addr);
+ if (off == NETXEN_ADDR_ERROR) {
+ printk(KERN_ERR"CRB init value out of range %x\n",
buf[i].addr);
+ continue;
+ }
+ off += NETXEN_PCI_CRBSPACE;
+ /* skipping cold reboot MAGIC */
+ if (off == NETXEN_CAM_RAM(0x1fc))
+ continue;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
continue;
- }
- off += NETXEN_PCI_CRBSPACE;
- /* skipping cold reboot MAGIC */
- if (off == NETXEN_CAM_RAM(0x1fc))
+ if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
+ buf[i].data = 0x1020;
+ /* skip the function enable register */
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
continue;
+ if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
+ continue;
+ }
- /* After writing this register, HW needs time for CRB */
- /* to quiet down (else crb_window returns 0xffffffff) */
- if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
- init_delay = 1;
+ if (off == NETXEN_ADDR_ERROR) {
+ printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
+ netxen_nic_driver_name, buf[i].addr);
+ continue;
+ }
+
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
+ init_delay = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/* hold xdma in reset also */
buf[i].data = NETXEN_NIC_XDMA_RESET;
}
+ }
- if (ADDR_IN_WINDOW1(off)) {
- writel(buf[i].data,
- NETXEN_CRB_NORMALIZE(adapter, off));
- } else {
- netxen_nic_pci_change_crbwindow(adapter, 0);
- writel(buf[i].data,
- pci_base_offset(adapter, off));
+ adapter->hw_write_wx(adapter, off, &buf[i].data, 4);
- netxen_nic_pci_change_crbwindow(adapter, 1);
- }
- if (init_delay == 1) {
- msleep(1000);
- init_delay = 0;
- }
- msleep(1);
+ if (init_delay == 1) {
+ msleep(1000);
+ init_delay = 0;
}
- kfree(buf);
+ msleep(1);
+ }
+ kfree(buf);
- /* disable_peg_cache_all */
+ /* disable_peg_cache_all */
- /* unreset_net_cache */
- netxen_nic_hw_read_wx(adapter, NETXEN_ROMUSB_GLB_SW_RESET, &val,
- 4);
- netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
- (val & 0xffffff0f));
- /* p2dn replyCount */
- netxen_crb_writelit_adapter(adapter,
- NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
- /* disable_peg_cache 0 */
+ /* unreset_net_cache */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ adapter->hw_read_wx(adapter,
+ NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
netxen_crb_writelit_adapter(adapter,
- NETXEN_CRB_PEG_NET_D + 0x4c, 8);
- /* disable_peg_cache 1 */
- netxen_crb_writelit_adapter(adapter,
- NETXEN_CRB_PEG_NET_I + 0x4c, 8);
-
- /* peg_clr_all */
-
- /* peg_clr 0 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8,
- 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc,
- 0);
- /* peg_clr 1 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8,
- 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc,
- 0);
- /* peg_clr 2 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8,
- 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc,
- 0);
- /* peg_clr 3 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8,
- 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc,
- 0);
+ NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
}
+
+ /* p2dn replyCount */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+ /* disable_peg_cache 1 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+
+ /* peg_clr 0 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
+ /* peg_clr 1 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
+ /* peg_clr 2 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
+ /* peg_clr 3 */
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
return 0;
}
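
The rework above teaches netxen_pinit_from_rom() about two flash layouts: P3 parts carry a 0xcafecafe signature in word 0 and pack the entry count and the word offset of the first entry into the high and low halves of word 1, while P2 parts keep the count in word 0 with bit 31 set and start the entries one word in. Either way each entry is a value word followed by an address word. A small self-contained decode of just the header (ROM contents modelled as an array here; the driver reads them through netxen_rom_fast_read()):

/* Illustrative decode of the crb_init header handled above. */
#include <stdint.h>
#include <stdio.h>

struct crb_init_hdr {
	unsigned int count;	/* number of (value, address) pairs */
	unsigned int offset;	/* first pair starts at byte 4 * offset */
};

static int decode_crb_init(const uint32_t *rom, int is_p3,
			   struct crb_init_hdr *hdr)
{
	if (is_p3) {
		if (rom[0] != 0xcafecafeUL)
			return -1;		/* bad signature */
		hdr->offset = rom[1] & 0xffffU;
		hdr->count  = (rom[1] >> 16) & 0xffffU;
	} else {
		if (!(rom[0] & 0x80000000U))
			return -1;		/* flash not initialized */
		hdr->offset = 1;
		hdr->count  = rom[0] & ~0x80000000U;
	}
	return (hdr->count < 1024) ? 0 : -1;	/* sanity limit from the driver */
}

int main(void)
{
	const uint32_t p3_rom[] = { 0xcafecafeUL, (3u << 16) | 2u };
	struct crb_init_hdr hdr;

	if (decode_crb_init(p3_rom, 1, &hdr) == 0)
		printf("%u entries starting at byte %u\n",
		       hdr.count, 4 * hdr.offset);
	return 0;
}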
@@ -897,12 +1048,12 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
uint32_t lo;
adapter->dummy_dma.addr =
- pci_alloc_consistent(adapter->ahw.pdev,
+ pci_alloc_consistent(adapter->pdev,
NETXEN_HOST_DUMMY_DMA_SIZE,
&adapter->dummy_dma.phys_addr);
if (adapter->dummy_dma.addr == NULL) {
printk("%s: ERROR: Could not allocate dummy DMA memory\n",
- __FUNCTION__);
+ __func__);
return -ENOMEM;
}
@@ -910,8 +1061,13 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
hi = (addr >> 32) & 0xffffffff;
lo = addr & 0xffffffff;
- writel(hi, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI));
- writel(lo, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO));
+ adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
+ adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ uint32_t temp = 0;
+ adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
+ }
return 0;
}
@@ -931,7 +1087,7 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter)
} while (--i);
if (i) {
- pci_free_consistent(adapter->ahw.pdev,
+ pci_free_consistent(adapter->pdev,
NETXEN_HOST_DUMMY_DMA_SIZE,
adapter->dummy_dma.addr,
adapter->dummy_dma.phys_addr);
@@ -946,22 +1102,24 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter)
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
u32 val = 0;
- int retries = 30;
+ int retries = 60;
if (!pegtune_val) {
do {
- val = readl(NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMDPEG_STATE));
- pegtune_val = readl(NETXEN_CRB_NORMALIZE
- (adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
+ val = adapter->pci_read_normalize(adapter,
+ CRB_CMDPEG_STATE);
if (val == PHAN_INITIALIZE_COMPLETE ||
val == PHAN_INITIALIZE_ACK)
return 0;
- msleep(1000);
+ msleep(500);
+
} while (--retries);
+
if (!retries) {
+ pegtune_val = adapter->pci_read_normalize(adapter,
+ NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
printk(KERN_WARNING "netxen_phantom_init: init failed, "
"pegtune_val=%x\n", pegtune_val);
return -1;
@@ -971,58 +1129,61 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
return 0;
}
-static int netxen_nic_check_temp(struct netxen_adapter *adapter)
+int netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- uint32_t temp, temp_state, temp_val;
- int rv = 0;
-
- temp = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_TEMP_STATE));
-
- temp_state = nx_get_temp_state(temp);
- temp_val = nx_get_temp_val(temp);
-
- if (temp_state == NX_TEMP_PANIC) {
- printk(KERN_ALERT
- "%s: Device temperature %d degrees C exceeds"
- " maximum allowed. Hardware has been shut down.\n",
- netxen_nic_driver_name, temp_val);
-
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- rv = 1;
- } else if (temp_state == NX_TEMP_WARN) {
- if (adapter->temp == NX_TEMP_NORMAL) {
- printk(KERN_ALERT
- "%s: Device temperature %d degrees C "
- "exceeds operating range."
- " Immediate action needed.\n",
- netxen_nic_driver_name, temp_val);
- }
- } else {
- if (adapter->temp == NX_TEMP_WARN) {
- printk(KERN_INFO
- "%s: Device temperature is now %d degrees C"
- " in normal range.\n", netxen_nic_driver_name,
- temp_val);
- }
+ u32 val = 0;
+ int retries = 2000;
+
+ do {
+ val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+ if (!retries) {
+ printk(KERN_ERR "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
}
- adapter->temp = temp_state;
- return rv;
+
+ return 0;
}
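
Both netxen_phantom_init() and the new netxen_receive_peg_ready() follow the same bounded-poll shape: read a CRB state register, return as soon as it reports ready (phantom_init accepts either of two completion codes), sleep between attempts, and give up once a fixed retry budget (60 tries of 500 ms for the command peg, 2000 tries of 10 ms for the receive peg) is exhausted. A generic userspace sketch of that pattern, with a fake read callback standing in for pci_read_normalize():

/* Illustrative bounded poll; read_state() stands in for the adapter's
 * pci_read_normalize() accessor, and 0xff01 is a made-up "ready" code.
 */
#include <stdio.h>
#include <unistd.h>

static int poll_until(unsigned int (*read_state)(void *), void *ctx,
		      unsigned int done_val, int retries,
		      unsigned int delay_ms)
{
	unsigned int val = 0;

	do {
		val = read_state(ctx);
		if (val == done_val)
			return 0;
		usleep(delay_ms * 1000);
	} while (--retries);

	fprintf(stderr, "init not complete, state: 0x%x\n", val);
	return -1;
}

static unsigned int fake_read(void *ctx)
{
	/* pretend hardware: report ready once the countdown reaches zero */
	return --*(unsigned int *)ctx ? 0 : 0xff01;
}

int main(void)
{
	unsigned int countdown = 5;

	return poll_until(fake_read, &countdown, 0xff01, 10, 1) ? 1 : 0;
}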
-void netxen_watchdog_task(struct work_struct *work)
+static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
- struct netxen_adapter *adapter =
- container_of(work, struct netxen_adapter, watchdog_task);
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
- if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
- return;
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
- if (adapter->handle_phy_intr)
- adapter->handle_phy_intr(adapter);
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
- mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+ if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->dev = adapter->netdev;
+
+ buffer->skb = NULL;
+
+no_skb:
+ buffer->state = NETXEN_BUFFER_FREE;
+ buffer->lro_current_frags = 0;
+ buffer->lro_expected_frags = 0;
+ list_add_tail(&buffer->list, &rds_ring->free_list);
+ return skb;
}
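
netxen_process_rxbuf() centralizes the unmap-and-recycle step and also decides the checksum state handed to the stack: the skb is marked as already verified only when rx_csum offload is enabled and the status descriptor reported STATUS_CKSUM_OK; in every other case the stack must checksum it itself. That decision, stated on its own as a tiny standalone helper:

/* Illustrative restatement of the rx checksum decision above. */
enum rx_csum_result { RX_CSUM_NONE, RX_CSUM_UNNECESSARY };

static enum rx_csum_result rx_csum_state(int rx_csum_enabled, int hw_cksum_ok)
{
	return (rx_csum_enabled && hw_cksum_ok) ?
		RX_CSUM_UNNECESSARY : RX_CSUM_NONE;
}

int main(void)
{
	return rx_csum_state(1, 1) == RX_CSUM_UNNECESSARY ? 0 : 1;
}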
/*
@@ -1031,9 +1192,8 @@ void netxen_watchdog_task(struct work_struct *work)
* invoke the routine to send more rx buffers to the Phantom...
*/
static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
- struct status_desc *desc)
+ struct status_desc *desc, struct status_desc *frag_desc)
{
- struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
u64 sts_data = le64_to_cpu(desc->status_desc_data);
int index = netxen_get_sts_refhandle(sts_data);
@@ -1042,8 +1202,8 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
struct sk_buff *skb;
u32 length = netxen_get_sts_totallength(sts_data);
u32 desc_ctx;
- struct netxen_rcv_desc_ctx *rcv_desc;
- int ret;
+ u16 pkt_offset = 0, cksum;
+ struct nx_host_rds_ring *rds_ring;
desc_ctx = netxen_get_sts_type(sts_data);
if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
@@ -1052,13 +1212,13 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
return;
}
- rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
- if (unlikely(index > rcv_desc->max_rx_desc_count)) {
+ rds_ring = &recv_ctx->rds_rings[desc_ctx];
+ if (unlikely(index > rds_ring->max_rx_desc_count)) {
DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
- index, rcv_desc->max_rx_desc_count);
+ index, rds_ring->max_rx_desc_count);
return;
}
- buffer = &rcv_desc->rx_buf_arr[index];
+ buffer = &rds_ring->rx_buf_arr[index];
if (desc_ctx == RCV_DESC_LRO_CTXID) {
buffer->lro_current_frags++;
if (netxen_get_sts_desc_lro_last_frag(desc)) {
@@ -1079,43 +1239,52 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
}
}
- pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
- PCI_DMA_FROMDEVICE);
+ cksum = netxen_get_sts_status(sts_data);
- skb = (struct sk_buff *)buffer->skb;
-
- if (likely(adapter->rx_csum &&
- netxen_get_sts_status(sts_data) == STATUS_CKSUM_OK)) {
- adapter->stats.csummed++;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else
- skb->ip_summed = CHECKSUM_NONE;
+ skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return;
- skb->dev = netdev;
if (desc_ctx == RCV_DESC_LRO_CTXID) {
/* True length was only available on the last pkt */
skb_put(skb, buffer->lro_length);
} else {
- skb_put(skb, length);
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ pkt_offset = netxen_get_sts_pkt_offset(sts_data);
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
}
skb->protocol = eth_type_trans(skb, netdev);
- ret = netif_receive_skb(skb);
- netdev->last_rx = jiffies;
-
- rcv_desc->rcv_pending--;
-
/*
- * We just consumed one buffer so post a buffer.
+ * rx buffer chaining is disabled, walk and free
+ * any spurious rx buffer chain.
*/
- buffer->skb = NULL;
- buffer->state = NETXEN_BUFFER_FREE;
- buffer->lro_current_frags = 0;
- buffer->lro_expected_frags = 0;
+ if (frag_desc) {
+ u16 i, nr_frags = desc->nr_frags;
+
+ dev_kfree_skb_any(skb);
+ for (i = 0; i < nr_frags; i++) {
+ index = frag_desc->frag_handles[i];
+ skb = netxen_process_rxbuf(adapter,
+ rds_ring, index, cksum);
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ adapter->stats.rxdropped++;
+ } else {
- adapter->stats.no_rcv++;
- adapter->stats.rxbytes += length;
+ netif_receive_skb(skb);
+ netdev->last_rx = jiffies;
+
+ adapter->stats.no_rcv++;
+ adapter->stats.rxbytes += length;
+ }
}
/* Process Receive status ring */
@@ -1123,10 +1292,11 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
{
struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
- struct status_desc *desc; /* used to read status desc here */
+ struct status_desc *desc, *frag_desc;
u32 consumer = recv_ctx->status_rx_consumer;
- u32 producer = 0;
int count = 0, ring;
+ u64 sts_data;
+ u16 opcode;
while (count < max) {
desc = &desc_head[consumer];
@@ -1135,24 +1305,38 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
netxen_get_sts_owner(desc));
break;
}
- netxen_process_rcv(adapter, ctxid, desc);
+
+ sts_data = le64_to_cpu(desc->status_desc_data);
+ opcode = netxen_get_sts_opcode(sts_data);
+ frag_desc = NULL;
+ if (opcode == NETXEN_NIC_RXPKT_DESC) {
+ if (desc->nr_frags) {
+ consumer = get_next_index(consumer,
+ adapter->max_rx_desc_count);
+ frag_desc = &desc_head[consumer];
+ netxen_set_sts_owner(frag_desc,
+ STATUS_OWNER_PHANTOM);
+ }
+ }
+
+ netxen_process_rcv(adapter, ctxid, desc, frag_desc);
+
netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
- consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
+
+ consumer = get_next_index(consumer,
+ adapter->max_rx_desc_count);
count++;
}
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
+ for (ring = 0; ring < adapter->max_rds_rings; ring++)
netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
/* update the consumer index in phantom */
if (count) {
recv_ctx->status_rx_consumer = consumer;
- recv_ctx->status_rx_producer = producer;
/* Window = 1 */
- writel(consumer,
- NETXEN_CRB_NORMALIZE(adapter,
- recv_crb_registers[adapter->portnum].
- crb_rcv_status_consumer));
+ adapter->pci_write_normalize(adapter,
+ recv_ctx->crb_sts_consumer, consumer);
}
return count;
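
The status-ring walk now steps the consumer with get_next_index() instead of the open-coded '(consumer + 1) & (max - 1)', and the producer updates elsewhere in this patch still write '(producer - 1) & (max - 1)', so the descriptor counts are assumed to be powers of two. One equivalent way to express the wrap (a local stand-in, not necessarily how the driver's helper is written):

/* Illustrative power-of-two ring wrap. */
#include <assert.h>
#include <stdio.h>

static unsigned int next_ring_index(unsigned int index, unsigned int size)
{
	assert(size && (size & (size - 1)) == 0);	/* power of two */
	return (index + 1) & (size - 1);
}

int main(void)
{
	unsigned int i, idx = 0;

	for (i = 0; i < 10; i++)
		idx = next_ring_index(idx, 8);
	printf("index after 10 steps on an 8-entry ring: %u\n", idx); /* 2 */
	return 0;
}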
@@ -1231,10 +1415,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
*/
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
{
- struct pci_dev *pdev = adapter->ahw.pdev;
+ struct pci_dev *pdev = adapter->pdev;
struct sk_buff *skb;
struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
- struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+ struct nx_host_rds_ring *rds_ring = NULL;
uint producer;
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
@@ -1242,41 +1426,36 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
int index = 0;
netxen_ctx_msg msg = 0;
dma_addr_t dma;
+ struct list_head *head;
- rcv_desc = &recv_ctx->rcv_desc[ringid];
+ rds_ring = &recv_ctx->rds_rings[ringid];
+
+ producer = rds_ring->producer;
+ index = rds_ring->begin_alloc;
+ head = &rds_ring->free_list;
- producer = rcv_desc->producer;
- index = rcv_desc->begin_alloc;
- buffer = &rcv_desc->rx_buf_arr[index];
/* We can start writing rx descriptors into the phantom memory. */
- while (buffer->state == NETXEN_BUFFER_FREE) {
- skb = dev_alloc_skb(rcv_desc->skb_size);
+ while (!list_empty(head)) {
+
+ skb = dev_alloc_skb(rds_ring->skb_size);
if (unlikely(!skb)) {
- /*
- * TODO
- * We need to schedule the posting of buffers to the pegs.
- */
- rcv_desc->begin_alloc = index;
- DPRINTK(ERR, "netxen_post_rx_buffers: "
- " allocated only %d buffers\n", count);
+ rds_ring->begin_alloc = index;
break;
}
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+ list_del(&buffer->list);
+
count++; /* now there should be no failure */
- pdesc = &rcv_desc->desc_head[producer];
+ pdesc = &rds_ring->desc_head[producer];
-#if defined(XGB_DEBUG)
- *(unsigned long *)(skb->head) = 0xc0debabe;
- if (skb_is_nonlinear(skb)) {
- printk("Allocated SKB @%p is nonlinear\n");
- }
-#endif
- skb_reserve(skb, 2);
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
/* This will be setup when we receive the
* buffer after it has been filled FSL TBD TBD
* skb->dev = netdev;
*/
- dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
+ dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
pdesc->addr_buffer = cpu_to_le64(dma);
buffer->skb = skb;
@@ -1284,112 +1463,101 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
buffer->dma = dma;
/* make a rcv descriptor */
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
- pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
DPRINTK(INFO, "done writing descripter\n");
producer =
- get_next_index(producer, rcv_desc->max_rx_desc_count);
- index = get_next_index(index, rcv_desc->max_rx_desc_count);
- buffer = &rcv_desc->rx_buf_arr[index];
+ get_next_index(producer, rds_ring->max_rx_desc_count);
+ index = get_next_index(index, rds_ring->max_rx_desc_count);
}
/* if we did allocate buffers, then write the count to Phantom */
if (count) {
- rcv_desc->begin_alloc = index;
- rcv_desc->rcv_pending += count;
- rcv_desc->producer = producer;
+ rds_ring->begin_alloc = index;
+ rds_ring->producer = producer;
/* Window = 1 */
- writel((producer - 1) &
- (rcv_desc->max_rx_desc_count - 1),
- NETXEN_CRB_NORMALIZE(adapter,
- recv_crb_registers[
- adapter->portnum].
- rcv_desc_crb[ringid].
- crb_rcv_producer_offset));
+ adapter->pci_write_normalize(adapter,
+ rds_ring->crb_rcv_producer,
+ (producer-1) & (rds_ring->max_rx_desc_count-1));
+
+ if (adapter->fw_major < 4) {
/*
* Write a doorbell msg to tell phanmon of change in
* receive ring producer
+ * Only for firmware version < 4.0.0
*/
netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
netxen_set_msg_privid(msg);
netxen_set_msg_count(msg,
((producer -
- 1) & (rcv_desc->
+ 1) & (rds_ring->
max_rx_desc_count - 1)));
netxen_set_msg_ctxid(msg, adapter->portnum);
netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
writel(msg,
DB_NORMALIZE(adapter,
NETXEN_RCV_PRODUCER_OFFSET));
+ }
}
}
static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
uint32_t ctx, uint32_t ringid)
{
- struct pci_dev *pdev = adapter->ahw.pdev;
+ struct pci_dev *pdev = adapter->pdev;
struct sk_buff *skb;
struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
- struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+ struct nx_host_rds_ring *rds_ring = NULL;
u32 producer;
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int count = 0;
int index = 0;
+ struct list_head *head;
- rcv_desc = &recv_ctx->rcv_desc[ringid];
+ rds_ring = &recv_ctx->rds_rings[ringid];
- producer = rcv_desc->producer;
- index = rcv_desc->begin_alloc;
- buffer = &rcv_desc->rx_buf_arr[index];
+ producer = rds_ring->producer;
+ index = rds_ring->begin_alloc;
+ head = &rds_ring->free_list;
/* We can start writing rx descriptors into the phantom memory. */
- while (buffer->state == NETXEN_BUFFER_FREE) {
- skb = dev_alloc_skb(rcv_desc->skb_size);
+ while (!list_empty(head)) {
+
+ skb = dev_alloc_skb(rds_ring->skb_size);
if (unlikely(!skb)) {
- /*
- * We need to schedule the posting of buffers to the pegs.
- */
- rcv_desc->begin_alloc = index;
- DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
- " allocated only %d buffers\n", count);
+ rds_ring->begin_alloc = index;
break;
}
+
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+ list_del(&buffer->list);
+
count++; /* now there should be no failure */
- pdesc = &rcv_desc->desc_head[producer];
- skb_reserve(skb, 2);
- /*
- * This will be setup when we receive the
- * buffer after it has been filled
- * skb->dev = netdev;
- */
+ pdesc = &rds_ring->desc_head[producer];
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
buffer->skb = skb;
buffer->state = NETXEN_BUFFER_BUSY;
buffer->dma = pci_map_single(pdev, skb->data,
- rcv_desc->dma_size,
+ rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
/* make a rcv descriptor */
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
- pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
- DPRINTK(INFO, "done writing descripter\n");
producer =
- get_next_index(producer, rcv_desc->max_rx_desc_count);
- index = get_next_index(index, rcv_desc->max_rx_desc_count);
- buffer = &rcv_desc->rx_buf_arr[index];
+ get_next_index(producer, rds_ring->max_rx_desc_count);
+ index = get_next_index(index, rds_ring->max_rx_desc_count);
+ buffer = &rds_ring->rx_buf_arr[index];
}
/* if we did allocate buffers, then write the count to Phantom */
if (count) {
- rcv_desc->begin_alloc = index;
- rcv_desc->rcv_pending += count;
- rcv_desc->producer = producer;
+ rds_ring->begin_alloc = index;
+ rds_ring->producer = producer;
/* Window = 1 */
- writel((producer - 1) &
- (rcv_desc->max_rx_desc_count - 1),
- NETXEN_CRB_NORMALIZE(adapter,
- recv_crb_registers[
- adapter->portnum].
- rcv_desc_crb[ringid].
- crb_rcv_producer_offset));
+ adapter->pci_write_normalize(adapter,
+ rds_ring->crb_rcv_producer,
+ (producer-1) & (rds_ring->max_rx_desc_count-1));
wmb();
}
}
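
With the free-list rework, both refill paths above only arm ring entries taken off rds_ring->free_list, and entries only return to that list when netxen_process_rxbuf() recycles them (or when the rings are first populated in netxen_alloc_sw_resources()), so a slot can never be re-armed while its previous skb is still in flight. A minimal model of that handshake (an index stack here, where the driver keeps a FIFO list_head):

/* Illustrative recycle/refill handshake; LIFO order differs from the
 * driver's FIFO list but the ownership rule is the same.
 */
#include <stdio.h>

#define RING_SIZE 8

struct free_list { int idx[RING_SIZE]; int top; };

static void recycle(struct free_list *fl, int index)
{
	/* buffer "index" has been unmapped and its skb handed up */
	fl->idx[fl->top++] = index;
}

static int refill_one(struct free_list *fl)
{
	if (fl->top == 0)
		return -1;		/* nothing to post right now */
	return fl->idx[--fl->top];	/* index to re-arm with a fresh skb */
}

int main(void)
{
	struct free_list fl = { .top = 0 };

	recycle(&fl, 3);
	recycle(&fl, 5);
	printf("repost %d\n", refill_one(&fl));
	printf("repost %d\n", refill_one(&fl));
	return 0;
}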
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c
deleted file mode 100644
index 96cec41f9019..000000000000
--- a/drivers/net/netxen/netxen_nic_isr.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright (C) 2003 - 2006 NetXen, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
- *
- * Contact Information:
- * info@netxen.com
- * NetXen,
- * 3965 Freedom Circle, Fourth floor,
- * Santa Clara, CA 95054
- */
-
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-
-#include "netxen_nic.h"
-#include "netxen_nic_hw.h"
-#include "netxen_nic_phan_reg.h"
-
-/*
- * netxen_nic_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- */
-struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
-{
- struct netxen_adapter *adapter = netdev_priv(netdev);
- struct net_device_stats *stats = &adapter->net_stats;
-
- memset(stats, 0, sizeof(*stats));
-
- /* total packets received */
- stats->rx_packets = adapter->stats.no_rcv;
- /* total packets transmitted */
- stats->tx_packets = adapter->stats.xmitedframes +
- adapter->stats.xmitfinished;
- /* total bytes received */
- stats->rx_bytes = adapter->stats.rxbytes;
- /* total bytes transmitted */
- stats->tx_bytes = adapter->stats.txbytes;
- /* bad packets received */
- stats->rx_errors = adapter->stats.rcvdbadskb;
- /* packet transmit problems */
- stats->tx_errors = adapter->stats.nocmddescriptor;
- /* no space in linux buffers */
- stats->rx_dropped = adapter->stats.rxdropped;
- /* no space available in linux */
- stats->tx_dropped = adapter->stats.txdropped;
-
- return stats;
-}
-
-static void netxen_indicate_link_status(struct netxen_adapter *adapter,
- u32 link)
-{
- struct net_device *netdev = adapter->netdev;
-
- if (link)
- netif_carrier_on(netdev);
- else
- netif_carrier_off(netdev);
-}
-
-#if 0
-void netxen_handle_port_int(struct netxen_adapter *adapter, u32 enable)
-{
- __u32 int_src;
-
- /* This should clear the interrupt source */
- if (adapter->phy_read)
- adapter->phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
- &int_src);
- if (int_src == 0) {
- DPRINTK(INFO, "No phy interrupts for port #%d\n", portno);
- return;
- }
- if (adapter->disable_phy_interrupts)
- adapter->disable_phy_interrupts(adapter);
-
- if (netxen_get_phy_int_jabber(int_src))
- DPRINTK(INFO, "Jabber interrupt \n");
-
- if (netxen_get_phy_int_polarity_changed(int_src))
- DPRINTK(INFO, "POLARITY CHANGED int \n");
-
- if (netxen_get_phy_int_energy_detect(int_src))
- DPRINTK(INFO, "ENERGY DETECT INT \n");
-
- if (netxen_get_phy_int_downshift(int_src))
- DPRINTK(INFO, "DOWNSHIFT INT \n");
- /* write it down later.. */
- if ((netxen_get_phy_int_speed_changed(int_src))
- || (netxen_get_phy_int_link_status_changed(int_src))) {
- __u32 status;
-
- DPRINTK(INFO, "SPEED CHANGED OR LINK STATUS CHANGED \n");
-
- if (adapter->phy_read
- && adapter->phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status) == 0) {
- if (netxen_get_phy_int_link_status_changed(int_src)) {
- if (netxen_get_phy_link(status)) {
- printk(KERN_INFO "%s: %s Link UP\n",
- netxen_nic_driver_name,
- adapter->netdev->name);
-
- } else {
- printk(KERN_INFO "%s: %s Link DOWN\n",
- netxen_nic_driver_name,
- adapter->netdev->name);
- }
- netxen_indicate_link_status(adapter,
- netxen_get_phy_link
- (status));
- }
- }
- }
- if (adapter->enable_phy_interrupts)
- adapter->enable_phy_interrupts(adapter);
-}
-#endif /* 0 */
-
-static void netxen_nic_isr_other(struct netxen_adapter *adapter)
-{
- int portno = adapter->portnum;
- u32 val, linkup, qg_linksup;
-
- /* verify the offset */
- val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
- val = val >> adapter->physical_port;
- if (val == adapter->ahw.qg_linksup)
- return;
-
- qg_linksup = adapter->ahw.qg_linksup;
- adapter->ahw.qg_linksup = val;
- DPRINTK(INFO, "link update 0x%08x\n", val);
-
- linkup = val & 1;
-
- if (linkup != (qg_linksup & 1)) {
- printk(KERN_INFO "%s: %s PORT %d link %s\n",
- adapter->netdev->name,
- netxen_nic_driver_name, portno,
- ((linkup == 0) ? "down" : "up"));
- netxen_indicate_link_status(adapter, linkup);
- if (linkup)
- netxen_nic_set_link_parameters(adapter);
-
- }
-}
-
-void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
-{
- netxen_nic_isr_other(adapter);
-}
-
-#if 0
-int netxen_nic_link_ok(struct netxen_adapter *adapter)
-{
- switch (adapter->ahw.board_type) {
- case NETXEN_NIC_GBE:
- return ((adapter->ahw.qg_linksup) & 1);
-
- case NETXEN_NIC_XGBE:
- return ((adapter->ahw.xg_linkup) & 1);
-
- default:
- printk(KERN_ERR"%s: Function: %s, Unknown board type\n",
- netxen_nic_driver_name, __FUNCTION__);
- break;
- }
-
- return 0;
-}
-#endif /* 0 */
-
-void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- u32 val;
-
- /* WINDOW = 1 */
- val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
- val >>= (adapter->physical_port * 8);
- val &= 0xff;
-
- if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) {
- printk(KERN_INFO "%s: %s NIC Link is down\n",
- netxen_nic_driver_name, netdev->name);
- adapter->ahw.xg_linkup = 0;
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- }
- } else if (adapter->ahw.xg_linkup == 0 && val == XG_LINK_UP) {
- printk(KERN_INFO "%s: %s NIC Link is up\n",
- netxen_nic_driver_name, netdev->name);
- adapter->ahw.xg_linkup = 1;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
- }
-}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 63cd67b931e7..91d209a8f6cb 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -49,13 +49,18 @@ char netxen_nic_driver_name[] = "netxen_nic";
static char netxen_nic_driver_string[] = "NetXen Network Driver version "
NETXEN_NIC_LINUX_VERSIONID;
-#define NETXEN_NETDEV_WEIGHT 120
-#define NETXEN_ADAPTER_UP_MAGIC 777
-#define NETXEN_NIC_PEG_TUNE 0
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
/* Local functions to NetXen NIC driver */
static int __devinit netxen_nic_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent);
+ const struct pci_device_id *ent);
static void __devexit netxen_nic_remove(struct pci_dev *pdev);
static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
@@ -83,6 +88,7 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
ENTRY(0x0005),
ENTRY(0x0024),
ENTRY(0x0025),
+ ENTRY(0x0100),
{0,}
};
@@ -108,95 +114,61 @@ static struct workqueue_struct *netxen_workq;
static void netxen_watchdog(unsigned long);
-static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
- uint32_t crb_producer)
+static uint32_t crb_cmd_producer[4] = {
+ CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
+ CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
+};
+
+void
+netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ uint32_t crb_producer)
{
- switch (adapter->portnum) {
- case 0:
- writel(crb_producer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_PRODUCER_OFFSET));
- return;
- case 1:
- writel(crb_producer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_PRODUCER_OFFSET_1));
- return;
- case 2:
- writel(crb_producer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_PRODUCER_OFFSET_2));
- return;
- case 3:
- writel(crb_producer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_PRODUCER_OFFSET_3));
- return;
- default:
- printk(KERN_WARNING "We tried to update "
- "CRB_CMD_PRODUCER_OFFSET for invalid "
- "PCI function id %d\n",
- adapter->portnum);
- return;
- }
+ adapter->pci_write_normalize(adapter,
+ adapter->crb_addr_cmd_producer, crb_producer);
}
-static void netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
- u32 crb_consumer)
+static uint32_t crb_cmd_consumer[4] = {
+ CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
+ CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
+};
+
+static inline void
+netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
+ u32 crb_consumer)
{
- switch (adapter->portnum) {
- case 0:
- writel(crb_consumer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_CONSUMER_OFFSET));
- return;
- case 1:
- writel(crb_consumer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_CONSUMER_OFFSET_1));
- return;
- case 2:
- writel(crb_consumer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_CONSUMER_OFFSET_2));
- return;
- case 3:
- writel(crb_consumer, NETXEN_CRB_NORMALIZE
- (adapter, CRB_CMD_CONSUMER_OFFSET_3));
- return;
- default:
- printk(KERN_WARNING "We tried to update "
- "CRB_CMD_PRODUCER_OFFSET for invalid "
- "PCI function id %d\n",
- adapter->portnum);
- return;
- }
+ adapter->pci_write_normalize(adapter,
+ adapter->crb_addr_cmd_consumer, crb_consumer);
}
-#define ADAPTER_LIST_SIZE 12
-
-static uint32_t msi_tgt_status[4] = {
+static uint32_t msi_tgt_status[8] = {
ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
- ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
-static uint32_t sw_int_mask[4] = {
- CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
- CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
-};
+static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
static void netxen_nic_disable_int(struct netxen_adapter *adapter)
{
u32 mask = 0x7ff;
int retries = 32;
- int port = adapter->portnum;
int pci_fn = adapter->ahw.pci_func;
if (adapter->msi_mode != MSI_MODE_MULTIFUNC)
- writel(0x0, NETXEN_CRB_NORMALIZE(adapter, sw_int_mask[port]));
+ adapter->pci_write_normalize(adapter,
+ adapter->crb_intr_mask, 0);
if (adapter->intr_scheme != -1 &&
adapter->intr_scheme != INTR_SCHEME_PERPORT)
- writel(mask,PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK));
+ adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask);
- if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+ if (!NETXEN_IS_MSI_FAMILY(adapter)) {
do {
- writel(0xffffffff,
- PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_TARGET_STATUS));
- mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
+ adapter->pci_write_immediate(adapter,
+ ISR_INT_TARGET_STATUS, 0xffffffff);
+ mask = adapter->pci_read_immediate(adapter,
+ ISR_INT_VECTOR);
if (!(mask & 0x80))
break;
udelay(10);
@@ -208,8 +180,8 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
}
} else {
if (adapter->msi_mode == MSI_MODE_MULTIFUNC) {
- writel(0xffffffff, PCI_OFFSET_SECOND_RANGE(adapter,
- msi_tgt_status[pci_fn]));
+ adapter->pci_write_immediate(adapter,
+ msi_tgt_status[pci_fn], 0xffffffff);
}
}
}
@@ -217,7 +189,6 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
static void netxen_nic_enable_int(struct netxen_adapter *adapter)
{
u32 mask;
- int port = adapter->portnum;
DPRINTK(1, INFO, "Entered ISR Enable \n");
@@ -235,24 +206,299 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
break;
}
- writel(mask, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK));
+ adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask);
}
- writel(0x1, NETXEN_CRB_NORMALIZE(adapter, sw_int_mask[port]));
+ adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1);
- if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+ if (!NETXEN_IS_MSI_FAMILY(adapter)) {
mask = 0xbff;
if (adapter->intr_scheme != -1 &&
adapter->intr_scheme != INTR_SCHEME_PERPORT) {
- writel(0X0, NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
+ adapter->pci_write_normalize(adapter,
+ CRB_INT_VECTOR, 0);
}
- writel(mask,
- PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_TARGET_MASK));
+ adapter->pci_write_immediate(adapter,
+ ISR_INT_TARGET_MASK, mask);
}
DPRINTK(1, INFO, "Done with enable Int\n");
}
+static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+ uint64_t mask;
+
+#ifdef CONFIG_IA64
+ adapter->dma_mask = DMA_32BIT_MASK;
+#else
+ if (revision_id >= NX_P3_B0) {
+ /* should go to DMA_64BIT_MASK */
+ adapter->dma_mask = DMA_39BIT_MASK;
+ mask = DMA_39BIT_MASK;
+ } else if (revision_id == NX_P3_A2) {
+ adapter->dma_mask = DMA_39BIT_MASK;
+ mask = DMA_39BIT_MASK;
+ } else if (revision_id == NX_P2_C1) {
+ adapter->dma_mask = DMA_35BIT_MASK;
+ mask = DMA_35BIT_MASK;
+ } else {
+ adapter->dma_mask = DMA_32BIT_MASK;
+ mask = DMA_32BIT_MASK;
+ goto set_32_bit_mask;
+ }
+
+ /*
+ * Consistent DMA mask is set to 32 bit because it cannot be set to
+ * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
+ * come off this pool.
+ */
+ if (pci_set_dma_mask(pdev, mask) == 0 &&
+ pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+#endif /* CONFIG_IA64 */
+
+set_32_bit_mask:
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ DPRINTK(ERR, "No usable DMA configuration, aborting:%d\n", err);
+ return err;
+ }
+
+ adapter->pci_using_dac = 0;
+ return 0;
+}
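+
The new nx_set_dma_mask() picks the streaming DMA mask from the chip revision (39-bit for P3 A2 and anything at or above P3 B0, 35-bit for P2 C1, 32-bit otherwise or on IA64) while the consistent mask always stays at 32 bits, since only the descriptor rings come from the coherent pool. The same revision-to-width mapping as a standalone helper (the revision checks are passed in as flags because the NX_P3_B0 / NX_P3_A2 / NX_P2_C1 codes live in the driver headers):

/* Illustrative restatement of the mask choice in nx_set_dma_mask(). */
#include <stdio.h>

static int dma_mask_bits(int is_ia64, int p3_b0_or_later, int is_p3_a2,
			 int is_p2_c1)
{
	if (is_ia64)
		return 32;		/* CONFIG_IA64 path */
	if (p3_b0_or_later || is_p3_a2)
		return 39;		/* DMA_39BIT_MASK */
	if (is_p2_c1)
		return 35;		/* DMA_35BIT_MASK */
	return 32;			/* DMA_32BIT_MASK */
}

int main(void)
{
	printf("P2 C1: %d-bit streaming mask, consistent mask stays 32-bit\n",
	       dma_mask_bits(0, 0, 0, 1));
	return 0;
}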
+
+static void netxen_check_options(struct netxen_adapter *adapter)
+{
+ switch (adapter->ahw.boardcfg.board_type) {
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
+ break;
+
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ adapter->msix_supported = 0;
+ adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
+ break;
+
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ case NETXEN_BRDTYPE_P2_SB31_2G:
+ adapter->msix_supported = 0;
+ adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
+ break;
+
+ default:
+ adapter->msix_supported = 0;
+ adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
+
+ printk(KERN_WARNING "Unknown board type(0x%x)\n",
+ adapter->ahw.boardcfg.board_type);
+ break;
+ }
+
+ adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS_HOST;
+ adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
+ adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
+
+ adapter->max_possible_rss_rings = 1;
+ return;
+}
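+
netxen_check_options() reduces each board type to two knobs: whether MSI-X is allowed (only the first group of P3 boards, and only if the use_msi_x setting is left on) and whether the receive ring uses the 10G or the 1G descriptor count; the tx, jumbo, and LRO counts and the single RSS ring are fixed. The same policy expressed as a lookup table rather than a switch (the board identifiers here are local stand-ins for the NETXEN_BRDTYPE_* values in the driver headers):

/* Illustrative table-driven restatement of the switch above. */
#include <stdio.h>

enum board_class { BOARD_P3_10G, BOARD_P2_10G, BOARD_QUAD_GIG, BOARD_UNKNOWN };

struct board_caps { int msix_ok; int use_10g_ring; };

static const struct board_caps caps_tbl[] = {
	[BOARD_P3_10G]   = { 1, 1 },	/* P3 10G boards: MSI-X capable */
	[BOARD_P2_10G]   = { 0, 1 },	/* P2 10G boards: 10G ring, no MSI-X */
	[BOARD_QUAD_GIG] = { 0, 0 },	/* 1G boards */
	[BOARD_UNKNOWN]  = { 0, 0 },	/* fall back to the 1G defaults */
};

int main(void)
{
	const struct board_caps *c = &caps_tbl[BOARD_P3_10G];

	printf("msix_supported=%d, 10G rx ring=%d\n",
	       c->msix_ok, c->use_10g_ring);
	return 0;
}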
+
+static int
+netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
+{
+ int ret = 0;
+
+ if (first_boot == 0x55555555) {
+ /* This is the first boot after power up */
+
+ /* PCI bus master workaround */
+ adapter->hw_read_wx(adapter,
+ NETXEN_PCIE_REG(0x4), &first_boot, 4);
+ if (!(first_boot & 0x4)) {
+ first_boot |= 0x4;
+ adapter->hw_write_wx(adapter,
+ NETXEN_PCIE_REG(0x4), &first_boot, 4);
+ adapter->hw_read_wx(adapter,
+ NETXEN_PCIE_REG(0x4), &first_boot, 4);
+ }
+
+ /* This is the first boot after power up */
+ adapter->hw_read_wx(adapter,
+ NETXEN_ROMUSB_GLB_SW_RESET, &first_boot, 4);
+ if (first_boot != 0x80000f) {
+ /* clear the register for future unloads/loads */
+ adapter->pci_write_normalize(adapter,
+ NETXEN_CAM_RAM(0x1fc), 0);
+ ret = -1;
+ }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /* Start P2 boot loader */
+ adapter->pci_write_normalize(adapter,
+ NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+ adapter->pci_write_normalize(adapter,
+ NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1);
+ }
+ }
+ return ret;
+}
+
+static void netxen_set_port_mode(struct netxen_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.boardcfg.board_type;
+ if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
+ (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ data = NETXEN_PORT_MODE_802_3_AP;
+ adapter->hw_write_wx(adapter,
+ NETXEN_PORT_MODE_ADDR, &data, 4);
+ } else if (port_mode == NETXEN_PORT_MODE_XG) {
+ data = NETXEN_PORT_MODE_XG;
+ adapter->hw_write_wx(adapter,
+ NETXEN_PORT_MODE_ADDR, &data, 4);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_1G;
+ adapter->hw_write_wx(adapter,
+ NETXEN_PORT_MODE_ADDR, &data, 4);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_XG;
+ adapter->hw_write_wx(adapter,
+ NETXEN_PORT_MODE_ADDR, &data, 4);
+ } else {
+ data = NETXEN_PORT_MODE_AUTO_NEG;
+ adapter->hw_write_wx(adapter,
+ NETXEN_PORT_MODE_ADDR, &data, 4);
+ }
+
+ if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != NETXEN_PORT_MODE_XG) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+ }
+ adapter->hw_write_wx(adapter, NETXEN_WOL_PORT_MODE,
+ &wol_port_mode, 4);
+ }
+}
+
+#define PCI_CAP_ID_GEN 0x10
+
+static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
+{
+ u32 pdevfuncsave;
+ u32 c8c9value = 0;
+ u32 chicken = 0;
+ u32 control = 0;
+ int i, pos;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(0x1166, 0x0140, NULL);
+ if (pdev) {
+ pci_dev_put(pdev);
+ adapter->hw_read_wx(adapter,
+ NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4);
+ chicken |= 0x4000;
+ adapter->hw_write_wx(adapter,
+ NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4);
+ }
+
+ pdev = adapter->pdev;
+
+ adapter->hw_read_wx(adapter,
+ NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
+ /* clear chicken3.25:24 */
+ chicken &= 0xFCFFFFFF;
+ /*
+ * if gen1 and B0, set F1020 - if gen 2, do nothing
+ * if gen2 set to F1000
+ */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
+ if (pos == 0xC0) {
+ pci_read_config_dword(pdev, pos + 0x10, &control);
+ if ((control & 0x000F0000) != 0x00020000) {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ }
+ printk(KERN_INFO "%s Gen2 strapping detected\n",
+ netxen_nic_driver_name);
+ c8c9value = 0xF1000;
+ } else {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ printk(KERN_INFO "%s Gen1 strapping detected\n",
+ netxen_nic_driver_name);
+ if (adapter->ahw.revision_id == NX_P3_B0)
+ c8c9value = 0xF1020;
+ else
+ c8c9value = 0;
+
+ }
+ adapter->hw_write_wx(adapter,
+ NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
+
+ if (!c8c9value)
+ return;
+
+ pdevfuncsave = pdev->devfn;
+ if (pdevfuncsave & 0x07)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_write_config_dword(pdev, pos + 8, c8c9value);
+ pdev->devfn++;
+ }
+ pdev->devfn = pdevfuncsave;
+}
+
+static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+ int pos;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_dword(pdev, pos, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pos, control);
+ }
+}
+
+static void netxen_init_msix_entries(struct netxen_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < MSIX_ENTRIES_PER_ADAPTER; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
/*
* netxen_nic_probe()
*
@@ -278,28 +524,28 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u8 __iomem *db_ptr = NULL;
- unsigned long mem_base, mem_len, db_base, db_len;
- int pci_using_dac, i = 0, err;
- int ring;
- struct netxen_recv_context *recv_ctx = NULL;
- struct netxen_rcv_desc_ctx *rcv_desc = NULL;
- struct netxen_cmd_buffer *cmd_buf_arr = NULL;
+ unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0;
+ int i = 0, err;
+ int first_driver, first_boot;
__le64 mac_addr[FLASH_NUM_PORTS + 1];
- int valid_mac = 0;
u32 val;
int pci_func_id = PCI_FUNC(pdev->devfn);
DECLARE_MAC_BUF(mac);
+ struct netxen_legacy_intr_set *legacy_intrp;
+ uint8_t revision_id;
if (pci_func_id == 0)
- printk(KERN_INFO "%s \n", netxen_nic_driver_string);
+ printk(KERN_INFO "%s\n", netxen_nic_driver_string);
if (pdev->class != 0x020000) {
printk(KERN_DEBUG "NetXen function %d, class %x will not "
"be enabled.\n",pci_func_id, pdev->class);
return -ENODEV;
}
+
if ((err = pci_enable_device(pdev)))
return err;
+
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
err = -ENODEV;
goto err_out_disable_pdev;
@@ -309,18 +555,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
pci_set_master(pdev);
- if (pdev->revision == NX_P2_C1 &&
- (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
- (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
- pci_using_dac = 1;
- } else {
- if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
- goto err_out_free_res;
-
- pci_using_dac = 0;
- }
-
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
if(!netdev) {
@@ -333,13 +567,35 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev->priv;
-
- adapter->ahw.pdev = pdev;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
adapter->ahw.pci_func = pci_func_id;
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ err = nx_set_dma_mask(adapter, revision_id);
+ if (err)
+ goto err_out_free_netdev;
+
+ rwlock_init(&adapter->adapter_lock);
+ adapter->ahw.qdr_sn_window = -1;
+ adapter->ahw.ddr_mn_window = -1;
+
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
+ pci_len0 = 0;
+
+ adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
+ adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
+ adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
+ adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
+ adapter->pci_read_normalize = netxen_nic_pci_read_normalize_128M;
+ adapter->pci_write_normalize = netxen_nic_pci_write_normalize_128M;
+ adapter->pci_set_window = netxen_nic_pci_set_window_128M;
+ adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
+ adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
/* 128 Meg of memory */
if (mem_len == NETXEN_PCI_128MB_SIZE) {
@@ -356,27 +612,48 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
first_page_group_start = 0;
first_page_group_end = 0;
+ } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
+ adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
+ adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
+ adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
+ adapter->pci_write_immediate =
+ netxen_nic_pci_write_immediate_2M;
+ adapter->pci_read_normalize = netxen_nic_pci_read_normalize_2M;
+ adapter->pci_write_normalize =
+ netxen_nic_pci_write_normalize_2M;
+ adapter->pci_set_window = netxen_nic_pci_set_window_2M;
+ adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
+ adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
+
+ mem_ptr0 = ioremap(mem_base, mem_len);
+ pci_len0 = mem_len;
+ first_page_group_start = 0;
+ first_page_group_end = 0;
+
+ adapter->ahw.ddr_mn_window = 0;
+ adapter->ahw.qdr_sn_window = 0;
+
+ adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
+ (pci_func_id * 0x20);
+ adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
+ if (pci_func_id < 4)
+ adapter->ahw.ms_win_crb += (pci_func_id * 0x20);
+ else
+ adapter->ahw.ms_win_crb +=
+ 0xA0 + ((pci_func_id - 4) * 0x10);
} else {
err = -EIO;
goto err_out_free_netdev;
}
- if ((!mem_ptr0 && mem_len == NETXEN_PCI_128MB_SIZE) ||
- !mem_ptr1 || !mem_ptr2) {
- DPRINTK(ERR,
- "Cannot remap adapter memory aborting.:"
- "0 -> %p, 1 -> %p, 2 -> %p\n",
- mem_ptr0, mem_ptr1, mem_ptr2);
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
- err = -EIO;
- goto err_out_iounmap;
- }
db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
db_len = pci_resource_len(pdev, 4);
if (db_len == 0) {
printk(KERN_ERR "%s: doorbell is disabled\n",
- netxen_nic_driver_name);
+ netxen_nic_driver_name);
err = -EIO;
goto err_out_iounmap;
}
@@ -386,13 +663,14 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
if (!db_ptr) {
printk(KERN_ERR "%s: Failed to allocate doorbell map.",
- netxen_nic_driver_name);
+ netxen_nic_driver_name);
err = -EIO;
goto err_out_iounmap;
}
DPRINTK(INFO, "doorbell ioremaped at %p\n", db_ptr);
adapter->ahw.pci_base0 = mem_ptr0;
+ adapter->ahw.pci_len0 = pci_len0;
adapter->ahw.first_page_group_start = first_page_group_start;
adapter->ahw.first_page_group_end = first_page_group_end;
adapter->ahw.pci_base1 = mem_ptr1;
@@ -400,11 +678,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ahw.db_base = db_ptr;
adapter->ahw.db_len = db_len;
- adapter->netdev = netdev;
- adapter->pdev = pdev;
-
netif_napi_add(netdev, &adapter->napi,
- netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+ netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+
+ if (revision_id >= NX_P3_B0)
+ legacy_intrp = &legacy_intr[pci_func_id];
+ else
+ legacy_intrp = &legacy_intr[0];
+
+ adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
+ adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
+ adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
/* this will be read from FW later */
adapter->intr_scheme = -1;
@@ -414,12 +699,23 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->portnum = pci_func_id;
adapter->status &= ~NETXEN_NETDEV_STATUS;
adapter->rx_csum = 1;
+ adapter->mc_enabled = 0;
+ if (NX_IS_REVISION_P3(revision_id)) {
+ adapter->max_mc_count = 38;
+ adapter->max_rds_rings = 2;
+ } else {
+ adapter->max_mc_count = 16;
+ adapter->max_rds_rings = 3;
+ }
netdev->open = netxen_nic_open;
netdev->stop = netxen_nic_close;
netdev->hard_start_xmit = netxen_nic_xmit_frame;
netdev->get_stats = netxen_nic_get_stats;
- netdev->set_multicast_list = netxen_nic_set_multi;
+ if (NX_IS_REVISION_P3(revision_id))
+ netdev->set_multicast_list = netxen_p3_nic_set_multi;
+ else
+ netdev->set_multicast_list = netxen_p2_nic_set_multi;
netdev->set_mac_address = netxen_nic_set_mac;
netdev->change_mtu = netxen_nic_change_mtu;
netdev->tx_timeout = netxen_tx_timeout;
@@ -435,18 +731,14 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features = NETIF_F_SG;
netdev->features |= NETIF_F_IP_CSUM;
netdev->features |= NETIF_F_TSO;
+ if (NX_IS_REVISION_P3(revision_id)) {
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO6;
+ }
- if (pci_using_dac)
+ if (adapter->pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- if (pci_enable_msi(pdev))
- adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
- else
- adapter->flags |= NETXEN_NIC_MSI_ENABLED;
-
- netdev->irq = pdev->irq;
- INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
-
/*
* Set the CRB window to invalid. If any register in window 0 is
* accessed it should set the window to 0 and then reset it to 1.
@@ -455,87 +747,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (netxen_nic_get_board_info(adapter) != 0) {
printk("%s: Error getting board config info.\n",
- netxen_nic_driver_name);
+ netxen_nic_driver_name);
err = -EIO;
goto err_out_iounmap;
}
- /*
- * Adapter in our case is quad port so initialize it before
- * initializing the ports
- */
-
netxen_initialize_adapter_ops(adapter);
- adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS_HOST;
- if ((adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB35_4G) ||
- (adapter->ahw.boardcfg.board_type ==
- NETXEN_BRDTYPE_P2_SB31_2G))
- adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
- else
- adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
- adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
- adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
-
- cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
- if (cmd_buf_arr == NULL) {
- printk(KERN_ERR
- "%s: Could not allocate cmd_buf_arr memory:%d\n",
- netxen_nic_driver_name, (int)TX_RINGSIZE);
- err = -ENOMEM;
- goto err_out_free_adapter;
- }
- memset(cmd_buf_arr, 0, TX_RINGSIZE);
- adapter->cmd_buf_arr = cmd_buf_arr;
-
- for (i = 0; i < MAX_RCV_CTX; ++i) {
- recv_ctx = &adapter->recv_ctx[i];
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
- rcv_desc = &recv_ctx->rcv_desc[ring];
- switch (RCV_DESC_TYPE(ring)) {
- case RCV_DESC_NORMAL:
- rcv_desc->max_rx_desc_count =
- adapter->max_rx_desc_count;
- rcv_desc->flags = RCV_DESC_NORMAL;
- rcv_desc->dma_size = RX_DMA_MAP_LEN;
- rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
- break;
-
- case RCV_DESC_JUMBO:
- rcv_desc->max_rx_desc_count =
- adapter->max_jumbo_rx_desc_count;
- rcv_desc->flags = RCV_DESC_JUMBO;
- rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
- rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
- break;
-
- case RCV_RING_LRO:
- rcv_desc->max_rx_desc_count =
- adapter->max_lro_rx_desc_count;
- rcv_desc->flags = RCV_DESC_LRO;
- rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
- rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
- break;
-
- }
- rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
- vmalloc(RCV_BUFFSIZE);
-
- if (rcv_desc->rx_buf_arr == NULL) {
- printk(KERN_ERR "%s: Could not allocate "
- "rcv_desc->rx_buf_arr memory:%d\n",
- netxen_nic_driver_name,
- (int)RCV_BUFFSIZE);
- err = -ENOMEM;
- goto err_out_free_rx_buffer;
- }
- memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
- }
-
- }
-
- netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */
-
/* Mezz cards have PCI function 0,2,3 enabled */
switch (adapter->ahw.boardcfg.board_type) {
case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
@@ -547,90 +765,71 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- init_timer(&adapter->watchdog_timer);
- adapter->ahw.xg_linkup = 0;
- adapter->watchdog_timer.function = &netxen_watchdog;
- adapter->watchdog_timer.data = (unsigned long)adapter;
- INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
- adapter->ahw.pdev = pdev;
- adapter->ahw.revision_id = pdev->revision;
-
- /* make sure Window == 1 */
- netxen_nic_pci_change_crbwindow(adapter, 1);
+ /*
+ * This call will setup various max rx/tx counts.
+ * It must be done before any buffer/ring allocations.
+ */
+ netxen_check_options(adapter);
+ first_driver = 0;
+ if (NX_IS_REVISION_P3(revision_id)) {
+ if (adapter->ahw.pci_func == 0)
+ first_driver = 1;
+ } else {
+ if (adapter->portnum == 0)
+ first_driver = 1;
+ }
+ adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum];
+ adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum];
netxen_nic_update_cmd_producer(adapter, 0);
netxen_nic_update_cmd_consumer(adapter, 0);
- writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
- if (netxen_is_flash_supported(adapter) == 0 &&
- netxen_get_flash_mac_addr(adapter, mac_addr) == 0)
- valid_mac = 1;
- else
- valid_mac = 0;
-
- if (valid_mac) {
- unsigned char *p = (unsigned char *)&mac_addr[adapter->portnum];
- netdev->dev_addr[0] = *(p + 5);
- netdev->dev_addr[1] = *(p + 4);
- netdev->dev_addr[2] = *(p + 3);
- netdev->dev_addr[3] = *(p + 2);
- netdev->dev_addr[4] = *(p + 1);
- netdev->dev_addr[5] = *(p + 0);
+ if (first_driver) {
+ first_boot = adapter->pci_read_normalize(adapter,
+ NETXEN_CAM_RAM(0x1fc));
- memcpy(netdev->perm_addr, netdev->dev_addr,
- netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
- printk(KERN_ERR "%s: Bad MAC address %s.\n",
- netxen_nic_driver_name,
- print_mac(mac, netdev->dev_addr));
- } else {
- if (adapter->macaddr_set)
- adapter->macaddr_set(adapter,
- netdev->dev_addr);
+ err = netxen_check_hw_init(adapter, first_boot);
+ if (err) {
+ printk(KERN_ERR "%s: error in HW init sequence\n",
+ netxen_nic_driver_name);
+ goto err_out_iounmap;
}
- }
- if (adapter->portnum == 0) {
- err = netxen_initialize_adapter_offload(adapter);
- if (err)
- goto err_out_free_rx_buffer;
- val = readl(NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_CAM_RAM(0x1fc)));
- if (val == 0x55555555) {
- /* This is the first boot after power up */
- netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(0x4), &val);
- if (!(val & 0x4)) {
- val |= 0x4;
- netxen_nic_write_w0(adapter, NETXEN_PCIE_REG(0x4), val);
- netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(0x4), &val);
- if (!(val & 0x4))
- printk(KERN_ERR "%s: failed to set MSI bit in PCI-e reg\n",
- netxen_nic_driver_name);
- }
- val = readl(NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_ROMUSB_GLB_SW_RESET));
- printk(KERN_INFO"NetXen: read 0x%08x for reset reg.\n",val);
- if (val != 0x80000f) {
- /* clear the register for future unloads/loads */
- writel(0, NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_CAM_RAM(0x1fc)));
- printk(KERN_ERR "ERROR in NetXen HW init sequence.\n");
- err = -ENODEV;
- goto err_out_free_dev;
- }
- } else {
- writel(0, NETXEN_CRB_NORMALIZE(adapter,
- CRB_CMDPEG_STATE));
+ if (NX_IS_REVISION_P3(revision_id))
+ netxen_set_port_mode(adapter);
+
+ if (first_boot != 0x55555555) {
+ adapter->pci_write_normalize(adapter,
+ CRB_CMDPEG_STATE, 0);
netxen_pinit_from_rom(adapter, 0);
msleep(1);
netxen_load_firmware(adapter);
- netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
}
- /* clear the register for future unloads/loads */
- writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc)));
- dev_info(&pdev->dev, "cmdpeg state: 0x%0x\n",
- readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)));
+ if (NX_IS_REVISION_P3(revision_id))
+ netxen_pcie_strap_init(adapter);
+
+ if (NX_IS_REVISION_P2(revision_id)) {
+
+ /* Initialize multicast addr pool owners */
+ val = 0x7654;
+ if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
+ val |= 0x0f000000;
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ }
+
+ if ((first_boot == 0x55555555) &&
+ (NX_IS_REVISION_P2(revision_id))) {
+ /* Unlock the HW, prompting the boot sequence */
+ adapter->pci_write_normalize(adapter,
+ NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1);
+ }
+
+ err = netxen_initialize_adapter_offload(adapter);
+ if (err)
+ goto err_out_iounmap;
/*
* Tell the hardware our version number.
@@ -638,24 +837,101 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i = (_NETXEN_NIC_LINUX_MAJOR << 16)
| ((_NETXEN_NIC_LINUX_MINOR << 8))
| (_NETXEN_NIC_LINUX_SUBVERSION);
- writel(i, NETXEN_CRB_NORMALIZE(adapter, CRB_DRIVER_VERSION));
+ adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, i);
- /* Unlock the HW, prompting the boot sequence */
- writel(1,
- NETXEN_CRB_NORMALIZE(adapter,
- NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
/* Handshake with the card before we register the devices. */
netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+
+ } /* first_driver */
+
+ netxen_nic_flash_print(adapter);
+
+ if (NX_IS_REVISION_P3(revision_id)) {
+ adapter->hw_read_wx(adapter,
+ NETXEN_MIU_MN_CONTROL, &val, 4);
+ adapter->ahw.cut_through = (val & 0x4) ? 1 : 0;
+ dev_info(&pdev->dev, "firmware running in %s mode\n",
+ adapter->ahw.cut_through ? "cut through" : "legacy");
}
/*
* See if the firmware gave us a virtual-physical port mapping.
*/
adapter->physical_port = adapter->portnum;
- i = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_V2P(adapter->portnum)));
+ i = adapter->pci_read_normalize(adapter, CRB_V2P(adapter->portnum));
if (i != 0x55555555)
adapter->physical_port = i;
+ adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+
+ netxen_set_msix_bit(pdev, 0);
+
+ if (NX_IS_REVISION_P3(revision_id)) {
+ if ((mem_len != NETXEN_PCI_128MB_SIZE) &&
+ mem_len != NETXEN_PCI_2MB_SIZE)
+ adapter->msix_supported = 0;
+ }
+
+ if (adapter->msix_supported) {
+
+ netxen_init_msix_entries(adapter);
+
+ if (pci_enable_msix(pdev, adapter->msix_entries,
+ MSIX_ENTRIES_PER_ADAPTER))
+ goto request_msi;
+
+ adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
+ netxen_set_msix_bit(pdev, 1);
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+
+ } else {
+request_msi:
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ } else
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ }
+
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ netdev->irq = adapter->msix_entries[0].vector;
+ else
+ netdev->irq = pdev->irq;
+
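Interrupt setup above falls back in order: MSI-X when the hardware and BAR size allow it, then MSI when the module parameter permits, then legacy INTx. A minimal sketch of that ladder with hypothetical flag names (illustrative only, not part of the patch):

	/* Illustrative sketch of the MSI-X -> MSI -> INTx fallback ladder. */
	if (msix_ok && !pci_enable_msix(pdev, entries, nvec))
		flags |= USE_MSIX;		/* hypothetical flag */
	else if (use_msi && !pci_enable_msi(pdev))
		flags |= USE_MSI;		/* hypothetical flag */
	/* else: stay on legacy INTx; pdev->irq is already valid */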
+ err = netxen_receive_peg_ready(adapter);
+ if (err)
+ goto err_out_disable_msi;
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->ahw.linkup = 0;
+ adapter->watchdog_timer.function = &netxen_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+ INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
+ INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
+
+ if (netxen_is_flash_supported(adapter) == 0 &&
+ netxen_get_flash_mac_addr(adapter, mac_addr) == 0) {
+ unsigned char *p;
+
+ p = (unsigned char *)&mac_addr[adapter->portnum];
+ netdev->dev_addr[0] = *(p + 5);
+ netdev->dev_addr[1] = *(p + 4);
+ netdev->dev_addr[2] = *(p + 3);
+ netdev->dev_addr[3] = *(p + 2);
+ netdev->dev_addr[4] = *(p + 1);
+ netdev->dev_addr[5] = *(p + 0);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr,
+ netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ printk(KERN_ERR "%s: Bad MAC address %s.\n",
+ netxen_nic_driver_name,
+ print_mac(mac, netdev->dev_addr));
+ } else {
+ adapter->macaddr_set(adapter, netdev->dev_addr);
+ }
+ }
+
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -664,41 +940,37 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
" aborting\n", netxen_nic_driver_name,
adapter->portnum);
err = -EIO;
- goto err_out_free_dev;
+ goto err_out_disable_msi;
}
- netxen_nic_flash_print(adapter);
pci_set_drvdata(pdev, adapter);
- return 0;
-
-err_out_free_dev:
- if (adapter->portnum == 0)
- netxen_free_adapter_offload(adapter);
-
-err_out_free_rx_buffer:
- for (i = 0; i < MAX_RCV_CTX; ++i) {
- recv_ctx = &adapter->recv_ctx[i];
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
- rcv_desc = &recv_ctx->rcv_desc[ring];
- if (rcv_desc->rx_buf_arr != NULL) {
- vfree(rcv_desc->rx_buf_arr);
- rcv_desc->rx_buf_arr = NULL;
- }
- }
+ switch (adapter->ahw.board_type) {
+ case NETXEN_NIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case NETXEN_NIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
}
- vfree(cmd_buf_arr);
-err_out_free_adapter:
+ return 0;
+
+err_out_disable_msi:
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ pci_disable_msix(pdev);
if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
+ if (first_driver)
+ netxen_free_adapter_offload(adapter);
+err_out_iounmap:
if (db_ptr)
iounmap(db_ptr);
-err_out_iounmap:
if (mem_ptr0)
iounmap(mem_ptr0);
if (mem_ptr1)
@@ -713,6 +985,7 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
}
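The probe error path now unwinds in strict reverse order of setup (interrupts, firmware offload area, mappings, regions, device) and clears the PCI driver data only on the final disable step. A minimal sketch of the goto-unwind idiom, with hypothetical helper names (illustrative only):

	/* Illustrative sketch of reverse-order unwinding in a probe routine. */
	err = example_map_bars(pdev);		/* hypothetical helper */
	if (err)
		goto err_out_free_res;
	err = example_setup_irqs(pdev);		/* hypothetical helper */
	if (err)
		goto err_out_unmap;
	return 0;

err_out_unmap:
	example_unmap_bars(pdev);
err_out_free_res:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;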
@@ -721,11 +994,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
{
struct netxen_adapter *adapter;
struct net_device *netdev;
- struct netxen_rx_buffer *buffer;
- struct netxen_recv_context *recv_ctx;
- struct netxen_rcv_desc_ctx *rcv_desc;
- int i, ctxid, ring;
- static int init_firmware_done = 0;
adapter = pci_get_drvdata(pdev);
if (adapter == NULL)
@@ -736,36 +1004,18 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
- init_firmware_done++;
netxen_free_hw_resources(adapter);
+ netxen_free_sw_resources(adapter);
}
- for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
- recv_ctx = &adapter->recv_ctx[ctxid];
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
- rcv_desc = &recv_ctx->rcv_desc[ring];
- for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
- buffer = &(rcv_desc->rx_buf_arr[i]);
- if (buffer->state == NETXEN_BUFFER_FREE)
- continue;
- pci_unmap_single(pdev, buffer->dma,
- rcv_desc->dma_size,
- PCI_DMA_FROMDEVICE);
- if (buffer->skb != NULL)
- dev_kfree_skb_any(buffer->skb);
- }
- vfree(rcv_desc->rx_buf_arr);
- }
- }
-
- vfree(adapter->cmd_buf_arr);
-
if (adapter->portnum == 0)
netxen_free_adapter_offload(adapter);
if (adapter->irq)
free_irq(adapter->irq, adapter);
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ pci_disable_msix(pdev);
if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
pci_disable_msi(pdev);
@@ -803,51 +1053,69 @@ static int netxen_nic_open(struct net_device *netdev)
return -EIO;
}
- /* setup all the resources for the Phantom... */
- /* this include the descriptors for rcv, tx, and status */
- netxen_nic_clear_stats(adapter);
- err = netxen_nic_hw_resources(adapter);
+ err = netxen_alloc_sw_resources(adapter);
if (err) {
- printk(KERN_ERR "Error in setting hw resources:%d\n",
- err);
+ printk(KERN_ERR "%s: Error in setting sw resources\n",
+ netdev->name);
return err;
}
+
+ netxen_nic_clear_stats(adapter);
+
+ err = netxen_alloc_hw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: Error in setting hw resources\n",
+ netdev->name);
+ goto err_out_free_sw;
+ }
+
+ if (adapter->fw_major < 4) {
+ adapter->crb_addr_cmd_producer =
+ crb_cmd_producer[adapter->portnum];
+ adapter->crb_addr_cmd_consumer =
+ crb_cmd_consumer[adapter->portnum];
+ }
+
+ netxen_nic_update_cmd_producer(adapter, 0);
+ netxen_nic_update_cmd_consumer(adapter, 0);
+
for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
- for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
+ for (ring = 0; ring < adapter->max_rds_rings; ring++)
netxen_post_rx_buffers(adapter, ctx, ring);
}
- adapter->irq = adapter->ahw.pdev->irq;
- if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ if (NETXEN_IS_MSI_FAMILY(adapter))
handler = netxen_msi_intr;
else {
flags |= IRQF_SHARED;
handler = netxen_intr;
}
+ adapter->irq = netdev->irq;
err = request_irq(adapter->irq, handler,
flags, netdev->name, adapter);
if (err) {
printk(KERN_ERR "request_irq failed with: %d\n", err);
- netxen_free_hw_resources(adapter);
- return err;
+ goto err_out_free_hw;
}
adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
}
+
/* Done here again so that we restore the value even if
* the phantom sw overwrote it */
- if (adapter->init_port
- && adapter->init_port(adapter, adapter->portnum) != 0) {
+ err = adapter->init_port(adapter, adapter->physical_port);
+ if (err) {
printk(KERN_ERR "%s: Failed to initialize port %d\n",
netxen_nic_driver_name, adapter->portnum);
- return -EIO;
+ goto err_out_free_irq;
}
- if (adapter->macaddr_set)
- adapter->macaddr_set(adapter, netdev->dev_addr);
+ adapter->macaddr_set(adapter, netdev->dev_addr);
netxen_nic_set_link_parameters(adapter);
- netxen_nic_set_multi(netdev);
- if (adapter->set_mtu)
+ netdev->set_multicast_list(netdev);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ nx_fw_cmd_set_mtu(adapter, netdev->mtu);
+ else
adapter->set_mtu(adapter, netdev->mtu);
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -858,6 +1126,14 @@ static int netxen_nic_open(struct net_device *netdev)
netif_start_queue(netdev);
return 0;
+
+err_out_free_irq:
+ free_irq(adapter->irq, adapter);
+err_out_free_hw:
+ netxen_free_hw_resources(adapter);
+err_out_free_sw:
+ netxen_free_sw_resources(adapter);
+ return err;
}
/*
@@ -866,9 +1142,6 @@ static int netxen_nic_open(struct net_device *netdev)
static int netxen_nic_close(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- int i, j;
- struct netxen_cmd_buffer *cmd_buff;
- struct netxen_skb_frag *buffrag;
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -879,30 +1152,8 @@ static int netxen_nic_close(struct net_device *netdev)
netxen_nic_disable_int(adapter);
- cmd_buff = adapter->cmd_buf_arr;
- for (i = 0; i < adapter->max_tx_desc_count; i++) {
- buffrag = cmd_buff->frag_array;
- if (buffrag->dma) {
- pci_unmap_single(adapter->pdev, buffrag->dma,
- buffrag->length, PCI_DMA_TODEVICE);
- buffrag->dma = 0ULL;
- }
- for (j = 0; j < cmd_buff->frag_count; j++) {
- buffrag++;
- if (buffrag->dma) {
- pci_unmap_page(adapter->pdev, buffrag->dma,
- buffrag->length,
- PCI_DMA_TODEVICE);
- buffrag->dma = 0ULL;
- }
- }
- /* Free the skb we received in netxen_nic_xmit_frame */
- if (cmd_buff->skb) {
- dev_kfree_skb_any(cmd_buff->skb);
- cmd_buff->skb = NULL;
- }
- cmd_buff++;
- }
+ netxen_release_tx_buffers(adapter);
+
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
FLUSH_SCHEDULED_WORK();
del_timer_sync(&adapter->watchdog_timer);
@@ -911,6 +1162,31 @@ static int netxen_nic_close(struct net_device *netdev)
return 0;
}
+void netxen_tso_check(struct netxen_adapter *adapter,
+ struct cmd_desc_type0 *desc, struct sk_buff *skb)
+{
+ if (desc->mss) {
+ desc->total_hdr_length = (sizeof(struct ethhdr) +
+ ip_hdrlen(skb) + tcp_hdrlen(skb));
+
+ if ((NX_IS_REVISION_P3(adapter->ahw.revision_id)) &&
+ (skb->protocol == htons(ETH_P_IPV6)))
+ netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO6);
+ else
+ netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
+ else
+ return;
+ }
+ desc->tcp_hdr_offset = skb_transport_offset(skb);
+ desc->ip_hdr_offset = skb_network_offset(skb);
+}
+
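A minimal sketch of how a transmit path could use the helper above, assuming a hypothetical command ring and producer index (the ring/hwdesc names are not from this patch):

	/* Illustrative sketch: fill offload fields for one outgoing skb. */
	struct cmd_desc_type0 *hwdesc = &ring[producer];	/* hypothetical ring */
	memset(hwdesc, 0, sizeof(*hwdesc));
	if (skb_is_gso(skb))
		hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	netxen_tso_check(adapter, hwdesc, skb);	/* picks LSO/LSO6 or TCP/UDP csum opcode */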
static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -932,7 +1208,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* There are 4 fragments per descriptor */
no_of_desc = (frag_count + 3) >> 2;
- if (netdev->features & NETIF_F_TSO) {
+ if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
if (skb_shinfo(skb)->gso_size > 0) {
no_of_desc++;
@@ -959,7 +1235,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
/* Take skb->data itself */
pbuf = &adapter->cmd_buf_arr[producer];
- if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) {
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
pbuf->mss = skb_shinfo(skb)->gso_size;
hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
} else {
@@ -1086,6 +1363,89 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ uint32_t temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = adapter->pci_read_normalize(adapter, CRB_TEMP_STATE);
+
+ temp_state = nx_get_temp_state(temp);
+ temp_val = nx_get_temp_val(temp);
+
+ if (temp_state == NX_TEMP_PANIC) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ netxen_nic_driver_name, temp_val);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ rv = 1;
+ } else if (temp_state == NX_TEMP_WARN) {
+ if (adapter->temp == NX_TEMP_NORMAL) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ netxen_nic_driver_name, temp_val);
+ }
+ } else {
+ if (adapter->temp == NX_TEMP_WARN) {
+ printk(KERN_INFO
+ "%s: Device temperature is now %d degrees C"
+ " in normal range.\n", netxen_nic_driver_name,
+ temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
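The value read from CRB_TEMP_STATE packs the temperature reading and the state into one 32-bit word; the nx_get_temp_* helpers in netxen_nic_phan_reg.h unpack it. A small worked example, assuming the nx_encode_temp layout from that header:

	/* Illustrative sketch: 47 degrees C in the warning state. */
	u32 temp  = nx_encode_temp(47, NX_TEMP_WARN);	/* value in high 16 bits, state in low 16 */
	u32 state = nx_get_temp_state(temp);		/* NX_TEMP_WARN */
	u32 val   = nx_get_temp_val(temp);		/* 47 */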
+static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 val, port, linkup;
+
+ port = adapter->physical_port;
+
+ if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+ val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
+ linkup = (val >> port) & 1;
+ } else {
+ if (adapter->fw_major < 4) {
+ val = adapter->pci_read_normalize(adapter,
+ CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
+ linkup = (val == XG_LINK_UP);
+ } else {
+ val = adapter->pci_read_normalize(adapter,
+ CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ linkup = (val == XG_LINK_UP_P3);
+ }
+ }
+
+ if (adapter->ahw.linkup && !linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is down\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else if (!adapter->ahw.linkup && linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is up\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
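The link-status word is laid out differently per board and firmware: GbE boards keep one link bit per port in CRB_XG_STATE, pre-4.x firmware keeps one byte per XG port there, and newer firmware reports XG links through CRB_XG_STATE_P3. A minimal sketch of the GbE extraction only, with a hypothetical status word:

	/* Illustrative sketch: GbE link bits, one per physical port. */
	u32 state  = 0x5;			/* hypothetical: ports 0 and 2 are up */
	u32 port   = 2;
	u32 linkup = (state >> port) & 1;	/* 1 == link up for this port */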
static void netxen_watchdog(unsigned long v)
{
struct netxen_adapter *adapter = (struct netxen_adapter *)v;
@@ -1093,6 +1453,19 @@ static void netxen_watchdog(unsigned long v)
SCHEDULE_WORK(&adapter->watchdog_task);
}
+void netxen_watchdog_task(struct work_struct *work)
+{
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, watchdog_task);
+
+ if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
+ return;
+
+ netxen_nic_handle_phy_intr(adapter);
+
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
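The watchdog is split into a timer and a work item: the timer callback only schedules the work, and the work handler does the slow checks in process context before re-arming the timer for two seconds later. A minimal sketch of the same pattern with hypothetical names:

	/* Illustrative sketch of the timer + deferred-work watchdog pattern. */
	struct example_adapter {
		struct timer_list watchdog_timer;
		struct work_struct watchdog_task;
	};

	static void example_watchdog(unsigned long data)
	{
		struct example_adapter *ad = (struct example_adapter *)data;

		schedule_work(&ad->watchdog_task);	/* keep the timer path cheap */
	}

	static void example_watchdog_task(struct work_struct *work)
	{
		struct example_adapter *ad =
			container_of(work, struct example_adapter, watchdog_task);

		/* ... temperature and link checks go here ... */
		mod_timer(&ad->watchdog_timer, jiffies + 2 * HZ);
	}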
static void netxen_tx_timeout(struct net_device *netdev)
{
struct netxen_adapter *adapter = (struct netxen_adapter *)
@@ -1118,6 +1491,38 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
}
+/*
+ * netxen_nic_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ */
+struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &adapter->net_stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ /* total packets received */
+ stats->rx_packets = adapter->stats.no_rcv;
+ /* total packets transmitted */
+ stats->tx_packets = adapter->stats.xmitedframes +
+ adapter->stats.xmitfinished;
+ /* total bytes received */
+ stats->rx_bytes = adapter->stats.rxbytes;
+ /* total bytes transmitted */
+ stats->tx_bytes = adapter->stats.txbytes;
+ /* bad packets received */
+ stats->rx_errors = adapter->stats.rcvdbadskb;
+ /* packet transmit problems */
+ stats->tx_errors = adapter->stats.nocmddescriptor;
+ /* no space in linux buffers */
+ stats->rx_dropped = adapter->stats.rxdropped;
+ /* no space available in linux */
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
static inline void
netxen_handle_int(struct netxen_adapter *adapter)
{
@@ -1125,20 +1530,20 @@ netxen_handle_int(struct netxen_adapter *adapter)
napi_schedule(&adapter->napi);
}
-irqreturn_t netxen_intr(int irq, void *data)
+static irqreturn_t netxen_intr(int irq, void *data)
{
struct netxen_adapter *adapter = data;
u32 our_int = 0;
- our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
+ our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
/* not our interrupt */
if ((our_int & (0x80 << adapter->portnum)) == 0)
return IRQ_NONE;
if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
/* claim interrupt */
- writel(our_int & ~((u32)(0x80 << adapter->portnum)),
- NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
+ adapter->pci_write_normalize(adapter, CRB_INT_VECTOR,
+ our_int & ~((u32)(0x80 << adapter->portnum)));
}
netxen_handle_int(adapter);
@@ -1146,7 +1551,7 @@ irqreturn_t netxen_intr(int irq, void *data)
return IRQ_HANDLED;
}
-irqreturn_t netxen_msi_intr(int irq, void *data)
+static irqreturn_t netxen_msi_intr(int irq, void *data)
{
struct netxen_adapter *adapter = data;
@@ -1220,10 +1625,6 @@ module_init(netxen_init_module);
static void __exit netxen_exit_module(void)
{
- /*
- * Wait for some time to allow the dma to drain, if any.
- */
- msleep(100);
pci_unregister_driver(&netxen_driver);
destroy_workqueue(netxen_workq);
}
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index a3bc7cc67a6f..4cb8f4a1cf4b 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -46,9 +46,8 @@ static int phy_lock(struct netxen_adapter *adapter)
int done = 0, timeout = 0;
while (!done) {
- done =
- readl(pci_base_offset
- (adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK)));
+ done = netxen_nic_reg_read(adapter,
+ NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
if (done == 1)
break;
if (timeout >= phy_lock_timeout) {
@@ -63,14 +62,14 @@ static int phy_lock(struct netxen_adapter *adapter)
}
}
- writel(PHY_LOCK_DRIVER,
- NETXEN_CRB_NORMALIZE(adapter, NETXEN_PHY_LOCK_ID));
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_PHY_LOCK_ID, PHY_LOCK_DRIVER);
return 0;
}
static int phy_unlock(struct netxen_adapter *adapter)
{
- readl(pci_base_offset(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)));
+ adapter->pci_read_immediate(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK));
return 0;
}
@@ -109,7 +108,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
* so it cannot be in reset
*/
- if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
if (netxen_gb_get_soft_reset(mac_cfg0)) {
@@ -119,7 +118,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
netxen_gb_rx_reset_pb(temp);
netxen_gb_tx_reset_mac(temp);
netxen_gb_rx_reset_mac(temp);
- if (netxen_nic_hw_write_wx(adapter,
+ if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&temp, 4))
return -EIO;
@@ -129,22 +128,22 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
address = 0;
netxen_gb_mii_mgmt_reg_addr(address, reg);
netxen_gb_mii_mgmt_phy_addr(address, phy);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
&address, 4))
return -EIO;
command = 0; /* turn off any prior activity */
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
&command, 4))
return -EIO;
/* send read command */
netxen_gb_mii_mgmt_set_read_cycle(command);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
&command, 4))
return -EIO;
status = 0;
do {
- if (netxen_nic_hw_read_wx(adapter,
+ if (adapter->hw_read_wx(adapter,
NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
&status, 4))
return -EIO;
@@ -154,7 +153,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
&& (timeout++ < NETXEN_NIU_PHY_WAITMAX));
if (timeout < NETXEN_NIU_PHY_WAITMAX) {
- if (netxen_nic_hw_read_wx(adapter,
+ if (adapter->hw_read_wx(adapter,
NETXEN_NIU_GB_MII_MGMT_STATUS(0),
readval, 4))
return -EIO;
@@ -163,7 +162,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
result = -1;
if (restore)
- if (netxen_nic_hw_write_wx(adapter,
+ if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
@@ -201,7 +200,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
* cannot be in reset
*/
- if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
+ if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
if (netxen_gb_get_soft_reset(mac_cfg0)) {
@@ -212,7 +211,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
netxen_gb_tx_reset_mac(temp);
netxen_gb_rx_reset_mac(temp);
- if (netxen_nic_hw_write_wx(adapter,
+ if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&temp, 4))
return -EIO;
@@ -220,24 +219,24 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
}
command = 0; /* turn off any prior activity */
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
&command, 4))
return -EIO;
address = 0;
netxen_gb_mii_mgmt_reg_addr(address, reg);
netxen_gb_mii_mgmt_phy_addr(address, phy);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
&address, 4))
return -EIO;
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0),
&val, 4))
return -EIO;
status = 0;
do {
- if (netxen_nic_hw_read_wx(adapter,
+ if (adapter->hw_read_wx(adapter,
NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
&status, 4))
return -EIO;
@@ -252,7 +251,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
/* restore the state of port 0 MAC in case we tampered with it */
if (restore)
- if (netxen_nic_hw_write_wx(adapter,
+ if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
@@ -401,14 +400,16 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
{
int result = 0;
__u32 status;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
if (adapter->disable_phy_interrupts)
adapter->disable_phy_interrupts(adapter);
mdelay(2);
- if (0 ==
- netxen_niu_gbe_phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status)) {
+ if (0 == netxen_niu_gbe_phy_read(adapter,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status)) {
if (netxen_get_phy_link(status)) {
if (netxen_get_phy_speed(status) == 2) {
netxen_niu_gbe_set_gmii_mode(adapter, port, 1);
@@ -456,12 +457,12 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
{
- u32 portnum = adapter->physical_port;
-
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_XGE_CONFIG_1+(0x10000*portnum), 0x1447);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_XGE_CONFIG_0+(0x10000*portnum), 0x5);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
+ netxen_crb_writelit_adapter(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
+ }
return 0;
}
@@ -581,10 +582,10 @@ static int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
if ((phy < 0) || (phy > 3))
return -EINVAL;
- if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy),
+ if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy),
&stationhigh, 4))
return -EIO;
- if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
+ if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
&stationlow, 4))
return -EIO;
((__le32 *)val)[1] = cpu_to_le32(stationhigh);
@@ -613,14 +614,14 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
temp[0] = temp[1] = 0;
memcpy(temp + 2, addr, 2);
val = le32_to_cpu(*(__le32 *)temp);
- if (netxen_nic_hw_write_wx
- (adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4))
+ if (adapter->hw_write_wx(adapter,
+ NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4))
return -EIO;
memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32));
val = le32_to_cpu(*(__le32 *)temp);
- if (netxen_nic_hw_write_wx
- (adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4))
+ if (adapter->hw_write_wx(adapter,
+ NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4))
return -2;
netxen_niu_macaddr_get(adapter,
@@ -654,7 +655,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
mac_cfg0 = 0;
netxen_gb_soft_reset(mac_cfg0);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
mac_cfg0 = 0;
@@ -666,7 +667,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
netxen_gb_tx_reset_mac(mac_cfg0);
netxen_gb_rx_reset_mac(mac_cfg0);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
mac_cfg1 = 0;
@@ -679,7 +680,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
if (mode == NETXEN_NIU_10_100_MB) {
netxen_gb_set_intfmode(mac_cfg1, 1);
- if (netxen_nic_hw_write_wx(adapter,
+ if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_1(port),
&mac_cfg1, 4))
return -EIO;
@@ -692,7 +693,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
} else if (mode == NETXEN_NIU_1000_MB) {
netxen_gb_set_intfmode(mac_cfg1, 2);
- if (netxen_nic_hw_write_wx(adapter,
+ if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_1(port),
&mac_cfg1, 4))
return -EIO;
@@ -704,7 +705,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
}
mii_cfg = 0;
netxen_gb_set_mii_mgmt_clockselect(mii_cfg, 7);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port),
&mii_cfg, 4))
return -EIO;
mac_cfg0 = 0;
@@ -713,7 +714,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
netxen_gb_unset_rx_flowctl(mac_cfg0);
netxen_gb_unset_tx_flowctl(mac_cfg0);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
return 0;
@@ -730,7 +731,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
return -EINVAL;
mac_cfg0 = 0;
netxen_gb_soft_reset(mac_cfg0);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
return 0;
@@ -746,7 +747,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
return -EINVAL;
mac_cfg = 0;
- if (netxen_nic_hw_write_wx(adapter,
+ if (adapter->hw_write_wx(adapter,
NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), &mac_cfg, 4))
return -EIO;
return 0;
@@ -763,7 +764,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
return -EINVAL;
/* save previous contents */
- if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
+ if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
&reg, 4))
return -EIO;
if (mode == NETXEN_NIU_PROMISC_MODE) {
@@ -801,7 +802,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
return -EIO;
}
}
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
&reg, 4))
return -EIO;
return 0;
@@ -826,13 +827,13 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
case 0:
memcpy(temp + 2, addr, 2);
val = le32_to_cpu(*(__le32 *)temp);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
&val, 4))
return -EIO;
memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
val = le32_to_cpu(*(__le32 *)temp);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
&val, 4))
return -EIO;
break;
@@ -840,13 +841,13 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
case 1:
memcpy(temp + 2, addr, 2);
val = le32_to_cpu(*(__le32 *)temp);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1,
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1,
&val, 4))
return -EIO;
memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
val = le32_to_cpu(*(__le32 *)temp);
- if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI,
+ if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI,
&val, 4))
return -EIO;
break;
@@ -877,10 +878,10 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter,
if (phy != 0)
return -EINVAL;
- if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+ if (adapter->hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
&stationhigh, 4))
return -EIO;
- if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
+ if (adapter->hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
&stationlow, 4))
return -EIO;
((__le32 *)val)[1] = cpu_to_le32(stationhigh);
@@ -901,7 +902,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
if (port > NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
- if (netxen_nic_hw_read_wx(adapter,
+ if (adapter->hw_read_wx(adapter,
NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), &reg, 4))
return -EIO;
if (mode == NETXEN_NIU_PROMISC_MODE)
@@ -909,6 +910,11 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
else
reg = (reg & ~0x2000UL);
+ if (mode == NETXEN_NIU_ALLMULTI_MODE)
+ reg = (reg | 0x1000UL);
+ else
+ reg = (reg & ~0x1000UL);
+
netxen_crb_writelit_adapter(adapter,
NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index a566b50f36f5..3bfa51b62a4f 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -42,8 +42,11 @@
#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c)
#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) /* C0 EPG BUG */
#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14)
-#define CRB_HOST_CMD_ADDR_HI NETXEN_NIC_REG(0x18) /* host add:cmd ring */
-#define CRB_HOST_CMD_ADDR_LO NETXEN_NIC_REG(0x1c)
+#define NX_CDRP_CRB_OFFSET NETXEN_NIC_REG(0x18)
+#define NX_ARG1_CRB_OFFSET NETXEN_NIC_REG(0x1c)
+#define NX_ARG2_CRB_OFFSET NETXEN_NIC_REG(0x20)
+#define NX_ARG3_CRB_OFFSET NETXEN_NIC_REG(0x24)
+#define NX_SIGN_CRB_OFFSET NETXEN_NIC_REG(0x28)
#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20) /* 4 regs for perf */
#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24)
#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28)
@@ -73,8 +76,8 @@
#define CRB_RX_LRO_MID_TIMER NETXEN_NIC_REG(0x88)
#define CRB_DMA_MAX_RCV_BUFS NETXEN_NIC_REG(0x8c)
#define CRB_MAX_DMA_ENTRIES NETXEN_NIC_REG(0x90)
-#define CRB_XG_STATE NETXEN_NIC_REG(0x94) /* XG Link status */
-#define CRB_AGENT_GO NETXEN_NIC_REG(0x98) /* NIC pkt gen agent */
+#define CRB_XG_STATE NETXEN_NIC_REG(0x94) /* XG Link status */
+#define CRB_XG_STATE_P3 NETXEN_NIC_REG(0x98) /* XG PF Link status */
#define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0x9c)
#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0)
#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4)
@@ -97,7 +100,9 @@
#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0)
#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4)
#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
+#define CRB_HOST_DUMMY_BUF NETXEN_NIC_REG(0xfc)
+#define CRB_RCVPEG_STATE NETXEN_NIC_REG(0x13c)
#define CRB_CMD_PRODUCER_OFFSET_1 NETXEN_NIC_REG(0x1ac)
#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0)
#define CRB_CMD_PRODUCER_OFFSET_2 NETXEN_NIC_REG(0x1b8)
@@ -147,29 +152,15 @@
#define nx_get_temp_state(x) ((x) & 0xffff)
#define nx_encode_temp(val, state) (((val) << 16) | (state))
-/* CRB registers per Rcv Descriptor ring */
-struct netxen_rcv_desc_crb {
- u32 crb_rcv_producer_offset __attribute__ ((aligned(512)));
- u32 crb_rcv_consumer_offset;
- u32 crb_globalrcv_ring;
- u32 crb_rcv_ring_size;
-};
-
/*
* CRB registers used by the receive peg logic.
*/
struct netxen_recv_crb {
- struct netxen_rcv_desc_crb rcv_desc_crb[NUM_RCV_DESC_RINGS];
- u32 crb_rcvstatus_ring;
- u32 crb_rcv_status_producer;
- u32 crb_rcv_status_consumer;
- u32 crb_rcvpeg_state;
- u32 crb_status_ring_size;
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer;
};
-extern struct netxen_recv_crb recv_crb_registers[];
-
/*
* Temperature control.
*/
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 32a8503a7acd..4aa547947040 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -158,11 +158,10 @@ static int m88e1111_config_init(struct phy_device *phydev)
{
int err;
int temp;
- int mode;
/* Enable Fiber/Copper auto selection */
temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO;
phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
temp = phy_read(phydev, MII_BMCR);
@@ -198,9 +197,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
temp &= ~(MII_M1111_HWCFG_MODE_MASK);
- mode = phy_read(phydev, MII_M1111_PHY_EXT_CR);
-
- if (mode & MII_M1111_HWCFG_FIBER_COPPER_RES)
+ if (temp & MII_M1111_HWCFG_FIBER_COPPER_RES)
temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII;
else
temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6b1d7a8edf15..739b3ab7bccc 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -866,7 +866,8 @@ static int __init ppp_init(void)
err = PTR_ERR(ppp_class);
goto out_chrdev;
}
- device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), "ppp");
+ device_create_drvdata(ppp_class, NULL, MKDEV(PPP_MAJOR, 0),
+ NULL, "ppp");
}
out:
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 504a48ff73c8..6531ff565c54 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -50,8 +50,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.16"
-#define DRV_RELDATE "10Nov2007"
+#define DRV_VERSION "0.18"
+#define DRV_RELDATE "13Jul2008"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -91,6 +91,14 @@
#define MISR 0x3C /* Status register */
#define MIER 0x40 /* INT enable register */
#define MSK_INT 0x0000 /* Mask off interrupts */
+#define RX_FINISH 0x0001 /* RX finished */
+#define RX_NO_DESC 0x0002 /* No RX descriptor available */
+#define RX_FIFO_FULL 0x0004 /* RX FIFO full */
+#define RX_EARLY 0x0008 /* RX early */
+#define TX_FINISH 0x0010 /* TX finished */
+#define TX_EARLY 0x0080 /* TX early */
+#define EVENT_OVRFL 0x0100 /* Event counter overflow */
+#define LINK_CHANGED 0x0200 /* PHY link changed */
#define ME_CISR 0x44 /* Event counter INT status */
#define ME_CIER 0x48 /* Event counter INT enable */
#define MR_CNT 0x50 /* Successfully received packet counter */
@@ -130,6 +138,21 @@
#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
#define MCAST_MAX 4 /* Max number multicast addresses to filter */
+/* Descriptor status */
+#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
+#define DSC_RX_OK 0x4000 /* RX was successful */
+#define DSC_RX_ERR 0x0800 /* RX PHY error */
+#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
+#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
+#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
+#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */
+#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
+#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
+#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
+#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */
+#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
+#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
+
/* PHY settings */
#define ICPLUS_PHY_ID 0x0243
@@ -139,10 +162,10 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
-#define RX_INT 0x0001
-#define TX_INT 0x0010
-#define RX_NO_DESC_INT 0x0002
-#define INT_MASK (RX_INT | TX_INT)
+/* RX and TX interrupts that we handle */
+#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
+#define TX_INTS (TX_FINISH)
+#define INT_MASK (RX_INTS | TX_INTS)
struct r6040_descriptor {
u16 status, len; /* 0-3 */
@@ -167,7 +190,7 @@ struct r6040_private {
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
- u16 tx_free_desc, rx_free_desc, phy_addr, phy_mode;
+ u16 tx_free_desc, phy_addr, phy_mode;
u16 mcr0, mcr1;
u16 switch_sig;
struct net_device *dev;
@@ -183,7 +206,7 @@ static char version[] __devinitdata = KERN_INFO DRV_NAME
static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
/* Read a word data from PHY Chip */
-static int phy_read(void __iomem *ioaddr, int phy_addr, int reg)
+static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
int limit = 2048;
u16 cmd;
@@ -200,7 +223,7 @@ static int phy_read(void __iomem *ioaddr, int phy_addr, int reg)
}
/* Write a word data from PHY Chip */
-static void phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
+static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
{
int limit = 2048;
u16 cmd;
@@ -216,20 +239,20 @@ static void phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
}
}
-static int mdio_read(struct net_device *dev, int mii_id, int reg)
+static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- return (phy_read(ioaddr, lp->phy_addr, reg));
+ return (r6040_phy_read(ioaddr, lp->phy_addr, reg));
}
-static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
+static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- phy_write(ioaddr, lp->phy_addr, reg, val);
+ r6040_phy_write(ioaddr, lp->phy_addr, reg, val);
}
static void r6040_free_txbufs(struct net_device *dev)
@@ -283,58 +306,101 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
desc->vndescp = desc_ring;
}
-/* Allocate skb buffer for rx descriptor */
-static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
+static void r6040_init_txbufs(struct net_device *dev)
{
- struct r6040_descriptor *descptr;
- void __iomem *ioaddr = lp->base;
+ struct r6040_private *lp = netdev_priv(dev);
- descptr = lp->rx_insert_ptr;
- while (lp->rx_free_desc < RX_DCNT) {
- descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
+ lp->tx_free_desc = TX_DCNT;
- if (!descptr->skb_ptr)
- break;
- descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
- descptr->skb_ptr->data,
- MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
- descptr->status = 0x8000;
- descptr = descptr->vndescp;
- lp->rx_free_desc++;
- /* Trigger RX DMA */
- iowrite16(lp->mcr0 | 0x0002, ioaddr);
- }
- lp->rx_insert_ptr = descptr;
+ lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
+ r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
}
-static void r6040_alloc_txbufs(struct net_device *dev)
+static int r6040_alloc_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
- void __iomem *ioaddr = lp->base;
+ struct r6040_descriptor *desc;
+ struct sk_buff *skb;
+ int rc;
- lp->tx_free_desc = TX_DCNT;
+ lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
+ r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
- lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
- r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
+ /* Allocate skbs for the rx descriptors */
+ desc = lp->rx_ring;
+ do {
+ skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
+ if (!skb) {
+ printk(KERN_ERR "%s: failed to alloc skb for rx\n", dev->name);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ desc->skb_ptr = skb;
+ desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
+ desc->skb_ptr->data,
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+ desc->status = DSC_OWNER_MAC;
+ desc = desc->vndescp;
+ } while (desc != lp->rx_ring);
- iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
- iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
+ return 0;
+
+err_exit:
+ /* Deallocate all previously allocated skbs */
+ r6040_free_rxbufs(dev);
+ return rc;
}
-static void r6040_alloc_rxbufs(struct net_device *dev)
+static void r6040_init_mac_regs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
+ int limit = 2048;
+ u16 cmd;
- lp->rx_free_desc = 0;
+ /* Mask Off Interrupt */
+ iowrite16(MSK_INT, ioaddr + MIER);
- lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
- r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
+ /* Reset RDC MAC */
+ iowrite16(MAC_RST, ioaddr + MCR1);
+ while (limit--) {
+ cmd = ioread16(ioaddr + MCR1);
+ if (cmd & 0x1)
+ break;
+ }
+ /* Reset internal state machine */
+ iowrite16(2, ioaddr + MAC_SM);
+ iowrite16(0, ioaddr + MAC_SM);
+ udelay(5000);
- rx_buf_alloc(lp, dev);
+ /* MAC Bus Control Register */
+ iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
+
+ /* Buffer Size Register */
+ iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
+
+ /* Write TX ring start address */
+ iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
+ iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
+ /* Write RX ring start address */
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
+
+ /* Set interrupt waiting time and packet numbers */
+ iowrite16(0, ioaddr + MT_ICR);
+ iowrite16(0, ioaddr + MR_ICR);
+
+ /* Enable interrupts */
+ iowrite16(INT_MASK, ioaddr + MIER);
+
+ /* Enable TX and RX */
+ iowrite16(lp->mcr0 | 0x0002, ioaddr);
+
+ /* Let TX poll the descriptors
+ * we may got called by r6040_tx_timeout which has left
+ * some unsent tx buffers */
+ iowrite16(0x01, ioaddr + MTPR);
}
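The TX and RX ring base addresses are 32-bit DMA addresses programmed through two 16-bit registers each (SA0 takes the low half, SA1 the high half). A short illustration with a hypothetical address value:

	/* Illustrative sketch: split a 32-bit ring address across SA0/SA1. */
	u32 ring_dma = 0x12345678;			/* hypothetical DMA address */
	iowrite16(ring_dma & 0xffff, ioaddr + MTD_SA0);	/* low 16 bits  */
	iowrite16(ring_dma >> 16, ioaddr + MTD_SA1);	/* high 16 bits */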
static void r6040_tx_timeout(struct net_device *dev)
@@ -342,27 +408,16 @@ static void r6040_tx_timeout(struct net_device *dev)
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status "
- "%4.4x\n",
+ printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x "
+ "status %4.4x, PHY status %4.4x\n",
dev->name, ioread16(ioaddr + MIER),
- mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
-
- disable_irq(dev->irq);
- napi_disable(&priv->napi);
- spin_lock(&priv->lock);
- /* Clear all descriptors */
- r6040_free_txbufs(dev);
- r6040_free_rxbufs(dev);
- r6040_alloc_txbufs(dev);
- r6040_alloc_rxbufs(dev);
-
- /* Reset MAC */
- iowrite16(MAC_RST, ioaddr + MCR1);
- spin_unlock(&priv->lock);
- enable_irq(dev->irq);
+ ioread16(ioaddr + MISR),
+ r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
dev->stats.tx_errors++;
- netif_wake_queue(dev);
+
+ /* Reset MAC and re-init all registers */
+ r6040_init_mac_regs(dev);
}
static struct net_device_stats *r6040_get_stats(struct net_device *dev)
@@ -424,6 +479,7 @@ static int r6040_close(struct net_device *dev)
del_timer_sync(&lp->timer);
spin_lock_irq(&lp->lock);
+ napi_disable(&lp->napi);
netif_stop_queue(dev);
r6040_down(dev);
spin_unlock_irq(&lp->lock);
@@ -432,23 +488,23 @@ static int r6040_close(struct net_device *dev)
}
/* Status of PHY CHIP */
-static int phy_mode_chk(struct net_device *dev)
+static int r6040_phy_mode_chk(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int phy_dat;
/* PHY Link Status Check */
- phy_dat = phy_read(ioaddr, lp->phy_addr, 1);
+ phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
if (!(phy_dat & 0x4))
phy_dat = 0x8000; /* Link Failed, full duplex */
/* PHY Chip Auto-Negotiation Status */
- phy_dat = phy_read(ioaddr, lp->phy_addr, 1);
+ phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
if (phy_dat & 0x0020) {
/* Auto Negotiation Mode */
- phy_dat = phy_read(ioaddr, lp->phy_addr, 5);
- phy_dat &= phy_read(ioaddr, lp->phy_addr, 4);
+ phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5);
+ phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4);
if (phy_dat & 0x140)
/* Force full duplex */
phy_dat = 0x8000;
@@ -456,7 +512,7 @@ static int phy_mode_chk(struct net_device *dev)
phy_dat = 0;
} else {
/* Force Mode */
- phy_dat = phy_read(ioaddr, lp->phy_addr, 0);
+ phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0);
if (phy_dat & 0x100)
phy_dat = 0x8000;
else
@@ -468,12 +524,12 @@ static int phy_mode_chk(struct net_device *dev)
static void r6040_set_carrier(struct mii_if_info *mii)
{
- if (phy_mode_chk(mii->dev)) {
+ if (r6040_phy_mode_chk(mii->dev)) {
/* autoneg is off: Link is always assumed to be up */
if (!netif_carrier_ok(mii->dev))
netif_carrier_on(mii->dev);
} else
- phy_mode_chk(mii->dev);
+ r6040_phy_mode_chk(mii->dev);
}
static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -494,73 +550,72 @@ static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int r6040_rx(struct net_device *dev, int limit)
{
struct r6040_private *priv = netdev_priv(dev);
- int count;
- void __iomem *ioaddr = priv->base;
+ struct r6040_descriptor *descptr = priv->rx_remove_ptr;
+ struct sk_buff *skb_ptr, *new_skb;
+ int count = 0;
u16 err;
- for (count = 0; count < limit; ++count) {
- struct r6040_descriptor *descptr = priv->rx_remove_ptr;
- struct sk_buff *skb_ptr;
-
- /* Disable RX interrupt */
- iowrite16(ioread16(ioaddr + MIER) & (~RX_INT), ioaddr + MIER);
- descptr = priv->rx_remove_ptr;
-
- /* Check for errors */
- err = ioread16(ioaddr + MLSR);
- if (err & 0x0400)
- dev->stats.rx_errors++;
- /* RX FIFO over-run */
- if (err & 0x8000)
- dev->stats.rx_fifo_errors++;
- /* RX descriptor unavailable */
- if (err & 0x0080)
- dev->stats.rx_frame_errors++;
- /* Received packet with length over buffer lenght */
- if (err & 0x0020)
- dev->stats.rx_over_errors++;
- /* Received packet with too long or short */
- if (err & (0x0010 | 0x0008))
- dev->stats.rx_length_errors++;
- /* Received packet with CRC errors */
- if (err & 0x0004) {
- spin_lock(&priv->lock);
- dev->stats.rx_crc_errors++;
- spin_unlock(&priv->lock);
- }
-
- while (priv->rx_free_desc) {
- /* No RX packet */
- if (descptr->status & 0x8000)
- break;
- skb_ptr = descptr->skb_ptr;
- if (!skb_ptr) {
- printk(KERN_ERR "%s: Inconsistent RX"
- "descriptor chain\n",
- dev->name);
- break;
+ /* Limit not reached and the descriptor belongs to the CPU */
+ while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
+ /* Read the descriptor status */
+ err = descptr->status;
+ /* Global error status set */
+ if (err & DSC_RX_ERR) {
+ /* RX dribble */
+ if (err & DSC_RX_ERR_DRI)
+ dev->stats.rx_frame_errors++;
+ /* Buffer length exceeded */
+ if (err & DSC_RX_ERR_BUF)
+ dev->stats.rx_length_errors++;
+ /* Packet too long */
+ if (err & DSC_RX_ERR_LONG)
+ dev->stats.rx_length_errors++;
+ /* Packet < 64 bytes */
+ if (err & DSC_RX_ERR_RUNT)
+ dev->stats.rx_length_errors++;
+ /* CRC error */
+ if (err & DSC_RX_ERR_CRC) {
+ spin_lock(&priv->lock);
+ dev->stats.rx_crc_errors++;
+ spin_unlock(&priv->lock);
}
- descptr->skb_ptr = NULL;
- skb_ptr->dev = priv->dev;
- /* Do not count the CRC */
- skb_put(skb_ptr, descptr->len - 4);
- pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
- MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
- /* Send to upper layer */
- netif_receive_skb(skb_ptr);
- dev->last_rx = jiffies;
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += descptr->len;
- /* To next descriptor */
- descptr = descptr->vndescp;
- priv->rx_free_desc--;
+ goto next_descr;
+ }
+
+ /* Packet successfully received */
+ new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
+ if (!new_skb) {
+ dev->stats.rx_dropped++;
+ goto next_descr;
}
- priv->rx_remove_ptr = descptr;
+ skb_ptr = descptr->skb_ptr;
+ skb_ptr->dev = priv->dev;
+
+ /* Do not count the CRC */
+ skb_put(skb_ptr, descptr->len - 4);
+ pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
+
+ /* Send to upper layer */
+ netif_receive_skb(skb_ptr);
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += descptr->len - 4;
+
+ /* put new skb into descriptor */
+ descptr->skb_ptr = new_skb;
+ descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
+ descptr->skb_ptr->data,
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+
+next_descr:
+ /* put the descriptor back to the MAC */
+ descptr->status = DSC_OWNER_MAC;
+ descptr = descptr->vndescp;
+ count++;
}
- /* Allocate new RX buffer */
- if (priv->rx_free_desc < RX_DCNT)
- rx_buf_alloc(priv, priv->dev);
+ priv->rx_remove_ptr = descptr;
return count;
}
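
[Editor's note] The rewritten receive path above is the usual ownership-bit ring: the CPU only touches descriptors whose DSC_OWNER_MAC flag has been cleared by the hardware, swaps in a fresh buffer, and hands the slot back by setting the flag again, even when the frame is dropped (the next_descr label), so the ring never runs dry. A minimal user-space model of that loop follows; my_desc, MY_OWNER_MAC and the rest of the names are invented, and the buffer/skb handling is reduced to a printf().

/*
 * User-space model of the ownership-bit RX ring.
 */
#include <stdio.h>
#include <stdint.h>

#define MY_OWNER_MAC 0x8000
#define RING_SIZE    4

struct my_desc {
	uint16_t status;		/* owner bit plus error flags */
	int len;			/* received length */
	struct my_desc *next;		/* the vndescp link of the real ring */
};

static int my_rx(struct my_desc *cur, int limit)
{
	int count = 0;

	/* only descriptors the MAC has released belong to us */
	while (count < limit && !(cur->status & MY_OWNER_MAC)) {
		printf("consumed a %d byte frame\n", cur->len);
		/* ...swap in a fresh buffer here... */
		cur->status = MY_OWNER_MAC;	/* give the slot back */
		cur = cur->next;
		count++;
	}
	return count;
}

int main(void)
{
	struct my_desc ring[RING_SIZE];
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i].status = i < 2 ? 0 : MY_OWNER_MAC;	/* two frames ready */
		ring[i].len = 60 + i;
		ring[i].next = &ring[(i + 1) % RING_SIZE];
	}
	printf("handled %d frames\n", my_rx(&ring[0], 64));
	return 0;
}
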
@@ -584,7 +639,7 @@ static void r6040_tx(struct net_device *dev)
if (err & (0x2000 | 0x4000))
dev->stats.tx_carrier_errors++;
- if (descptr->status & 0x8000)
+ if (descptr->status & DSC_OWNER_MAC)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
@@ -616,7 +671,7 @@ static int r6040_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
netif_rx_complete(dev, napi);
/* Enable RX interrupt */
- iowrite16(ioread16(ioaddr + MIER) | RX_INT, ioaddr + MIER);
+ iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
}
return work_done;
}
@@ -638,13 +693,22 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
return IRQ_NONE;
/* RX interrupt request */
- if (status & 0x01) {
+ if (status & RX_INTS) {
+ if (status & RX_NO_DESC) {
+ /* RX descriptor unavailable */
+ dev->stats.rx_dropped++;
+ dev->stats.rx_missed_errors++;
+ }
+ if (status & RX_FIFO_FULL)
+ dev->stats.rx_fifo_errors++;
+
+ /* Mask off RX interrupt */
+ iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER);
netif_rx_schedule(dev, &lp->napi);
- iowrite16(TX_INT, ioaddr + MIER);
}
/* TX interrupt request */
- if (status & 0x10)
+ if (status & TX_INTS)
r6040_tx(dev);
return IRQ_HANDLED;
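
[Editor's note] Taken together, the poll and interrupt hunks above form the standard NAPI handshake: the hard interrupt masks the RX sources and schedules the poller, and the poller re-enables them only once it has drained the ring within its budget. A condensed sketch of that pattern follows; it uses the two-argument netif_rx_schedule()/netif_rx_complete() calls this patch itself uses, while MY_ISR, MY_MIER, MY_RX_INTS and the my_* names are hypothetical, and the fragment is not buildable outside a driver.

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_ISR     0x04		/* hypothetical status register offset */
#define MY_MIER    0x08		/* hypothetical interrupt enable offset */
#define MY_RX_INTS 0x0007	/* hypothetical RX interrupt bits */

struct my_priv {
	void __iomem *base;
	struct napi_struct napi;
	struct net_device *dev;
};

/* drains up to @budget packets from the RX ring (see r6040_rx above) */
static int my_clean_rx(struct net_device *dev, int budget);

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 status = ioread16(ioaddr + MY_ISR);

	if (!status)
		return IRQ_NONE;

	if (status & MY_RX_INTS) {
		/* no more RX interrupts until the poll loop has drained the ring */
		iowrite16(ioread16(ioaddr + MY_MIER) & ~MY_RX_INTS, ioaddr + MY_MIER);
		netif_rx_schedule(dev, &lp->napi);
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *lp = container_of(napi, struct my_priv, napi);
	struct net_device *dev = lp->dev;
	int work_done = my_clean_rx(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);
		/* ring drained: let the MAC interrupt us again */
		iowrite16(ioread16(lp->base + MY_MIER) | MY_RX_INTS,
			  lp->base + MY_MIER);
	}
	return work_done;
}
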
@@ -660,52 +724,48 @@ static void r6040_poll_controller(struct net_device *dev)
#endif
/* Init RDC MAC */
-static void r6040_up(struct net_device *dev)
+static int r6040_up(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
+ int ret;
/* Initialise and alloc RX/TX buffers */
- r6040_alloc_txbufs(dev);
- r6040_alloc_rxbufs(dev);
+ r6040_init_txbufs(dev);
+ ret = r6040_alloc_rxbufs(dev);
+ if (ret)
+ return ret;
- /* Buffer Size Register */
- iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
/* Read the PHY ID */
- lp->switch_sig = phy_read(ioaddr, 0, 2);
+ lp->switch_sig = r6040_phy_read(ioaddr, 0, 2);
if (lp->switch_sig == ICPLUS_PHY_ID) {
- phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
+ r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
lp->phy_mode = 0x8000;
} else {
/* PHY Mode Check */
- phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
- phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
+ r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
+ r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
if (PHY_MODE == 0x3100)
- lp->phy_mode = phy_mode_chk(dev);
+ lp->phy_mode = r6040_phy_mode_chk(dev);
else
lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
}
- /* MAC Bus Control Register */
- iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
- /* MAC TX/RX Enable */
+ /* Set duplex mode */
lp->mcr0 |= lp->phy_mode;
- iowrite16(lp->mcr0, ioaddr);
-
- /* set interrupt waiting time and packet numbers */
- iowrite16(0x0F06, ioaddr + MT_ICR);
- iowrite16(0x0F06, ioaddr + MR_ICR);
/* improve performance (by RDC guys) */
- phy_write(ioaddr, 30, 17, (phy_read(ioaddr, 30, 17) | 0x4000));
- phy_write(ioaddr, 30, 17, ~((~phy_read(ioaddr, 30, 17)) | 0x2000));
- phy_write(ioaddr, 0, 19, 0x0000);
- phy_write(ioaddr, 0, 30, 0x01F0);
+ r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
+ r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
+ r6040_phy_write(ioaddr, 0, 19, 0x0000);
+ r6040_phy_write(ioaddr, 0, 30, 0x01F0);
- /* Interrupt Mask Register */
- iowrite16(INT_MASK, ioaddr + MIER);
+ /* Initialize all MAC registers */
+ r6040_init_mac_regs(dev);
+
+ return 0;
}
/*
@@ -721,7 +781,7 @@ static void r6040_timer(unsigned long data)
/* Polling PHY Chip Status */
if (PHY_MODE == 0x3100)
- phy_mode = phy_mode_chk(dev);
+ phy_mode = r6040_phy_mode_chk(dev);
else
phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
@@ -784,7 +844,14 @@ static int r6040_open(struct net_device *dev)
return -ENOMEM;
}
- r6040_up(dev);
+ ret = r6040_up(dev);
+ if (ret) {
+ pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
+ lp->tx_ring_dma);
+ pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
+ lp->rx_ring_dma);
+ return ret;
+ }
napi_enable(&lp->napi);
netif_start_queue(dev);
@@ -830,7 +897,7 @@ static int r6040_start_xmit(struct sk_buff *skb, struct net_device *dev)
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
- descptr->status = 0x8000;
+ descptr->status = DSC_OWNER_MAC;
/* Trigger the MAC to check the TX descriptor */
iowrite16(0x01, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
@@ -987,24 +1054,27 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err)
- return err;
+ goto err_out;
/* this should always be supported */
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses"
"not supported by the card\n");
- return -ENODEV;
+ goto err_out;
}
- if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses"
"not supported by the card\n");
- return -ENODEV;
+ goto err_out;
}
/* IO Size check */
if (pci_resource_len(pdev, 0) < io_size) {
- printk(KERN_ERR "Insufficient PCI resources, aborting\n");
- return -EIO;
+ printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
+ err = -EIO;
+ goto err_out;
}
pioaddr = pci_resource_start(pdev, 0); /* IO map base address */
@@ -1012,24 +1082,26 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
- printk(KERN_ERR "Failed to allocate etherdev\n");
- return -ENOMEM;
+ printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
+ err = -ENOMEM;
+ goto err_out;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
- lp->pdev = pdev;
- if (pci_request_regions(pdev, DRV_NAME)) {
+ err = pci_request_regions(pdev, DRV_NAME);
+
+ if (err) {
printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
- err = -ENODEV;
- goto err_out_disable;
+ goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
printk(KERN_ERR "ioremap failed for device %s\n",
pci_name(pdev));
- return -EIO;
+ err = -EIO;
+ goto err_out_free_res;
}
/* Init system & device */
@@ -1049,6 +1121,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Link new device into r6040_root_dev */
lp->pdev = pdev;
+ lp->dev = dev;
/* Init RDC private data */
lp->mcr0 = 0x1002;
@@ -1070,8 +1143,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
#endif
netif_napi_add(dev, &lp->napi, r6040_poll, 64);
lp->mii_if.dev = dev;
- lp->mii_if.mdio_read = mdio_read;
- lp->mii_if.mdio_write = mdio_write;
+ lp->mii_if.mdio_read = r6040_mdio_read;
+ lp->mii_if.mdio_write = r6040_mdio_write;
lp->mii_if.phy_id = lp->phy_addr;
lp->mii_if.phy_id_mask = 0x1f;
lp->mii_if.reg_num_mask = 0x1f;
@@ -1080,17 +1153,17 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
printk(KERN_ERR DRV_NAME ": Failed to register net device\n");
- goto err_out_res;
+ goto err_out_unmap;
}
return 0;
-err_out_res:
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
pci_release_regions(pdev);
-err_out_disable:
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+err_out_free_dev:
free_netdev(dev);
-
+err_out:
return err;
}
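
[Editor's note] The probe error handling above now uses one goto label per acquired resource and unwinds them in reverse order instead of returning early. A stand-alone illustration of the same pattern follows, with the PCI calls replaced by malloc() so the example actually compiles and runs; all names are invented.

#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_at_register)
{
	void *regions, *mapping;
	int err;

	regions = malloc(16);			/* stands in for pci_request_regions() */
	if (!regions) {
		err = -1;
		goto err_out;
	}

	mapping = malloc(16);			/* stands in for pci_iomap() */
	if (!mapping) {
		err = -2;
		goto err_out_free_regions;
	}

	if (fail_at_register) {			/* stands in for register_netdev() */
		err = -3;
		goto err_out_unmap;
	}

	printf("probe ok\n");
	free(mapping);				/* the real driver keeps these until remove() */
	free(regions);
	return 0;

err_out_unmap:
	free(mapping);
err_out_free_regions:
	free(regions);
err_out:
	printf("probe failed: %d\n", err);
	return err;
}

int main(void)
{
	probe(0);
	probe(1);
	return 0;
}
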
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index cfe8829ed31f..a3e3895e5032 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1418,8 +1418,10 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl_hw_phy_config(dev);
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
- RTL_W8(0x82, 0x01);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+ }
pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
@@ -3032,13 +3034,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
tmp = rtl8169_rx_config | rx_mode |
(RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_16) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
u32 data = mc_filter[0];
mc_filter[0] = swab32(mc_filter[1]);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7b2015f9e469..45c72eebb3a7 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -19,6 +19,7 @@
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
+#include <linux/topology.h>
#include "net_driver.h"
#include "gmii.h"
#include "ethtool.h"
@@ -832,7 +833,23 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
- efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
+ if (rss_cpus == 0) {
+ cpumask_t core_mask;
+ int cpu;
+
+ cpus_clear(core_mask);
+ efx->rss_queues = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpu_isset(cpu, core_mask)) {
+ ++efx->rss_queues;
+ cpus_or(core_mask, core_mask,
+ topology_core_siblings(cpu));
+ }
+ }
+ } else {
+ efx->rss_queues = rss_cpus;
+ }
+
efx->rss_queues = min(efx->rss_queues, max_channel + 1);
efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
@@ -1762,7 +1779,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
efx->reset_pending = method;
- queue_work(efx->workqueue, &efx->reset_work);
+ queue_work(efx->reset_workqueue, &efx->reset_work);
}
/**************************************************************************
@@ -1907,14 +1924,28 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
goto fail1;
}
+ efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!efx->reset_workqueue) {
+ rc = -ENOMEM;
+ goto fail2;
+ }
+
return 0;
+ fail2:
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+
fail1:
return rc;
}
static void efx_fini_struct(struct efx_nic *efx)
{
+ if (efx->reset_workqueue) {
+ destroy_workqueue(efx->reset_workqueue);
+ efx->reset_workqueue = NULL;
+ }
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -1977,7 +2008,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* scheduled from this point because efx_stop_all() has been
* called, we are no longer registered with driverlink, and
* the net_device's have been removed. */
- flush_workqueue(efx->workqueue);
+ flush_workqueue(efx->reset_workqueue);
efx_pci_remove_main(efx);
@@ -2098,7 +2129,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
* scheduled since efx_stop_all() has been called, and we
* have not and never have been registered with either
* the rtnetlink or driverlink layers. */
- cancel_work_sync(&efx->reset_work);
+ flush_workqueue(efx->reset_workqueue);
/* Retry if a recoverably reset event has been scheduled */
if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 630406e142e5..9138ee5b7b7b 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -223,13 +223,8 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
.getsda = falcon_getsda,
.getscl = falcon_getscl,
.udelay = 5,
- /*
- * This is the number of system clock ticks after which
- * i2c-algo-bit gives up waiting for SCL to become high.
- * It must be at least 2 since the first tick can happen
- * immediately after it starts waiting.
- */
- .timeout = 2,
+ /* Wait up to 50 ms for slave to let us pull SCL high */
+ .timeout = DIV_ROUND_UP(HZ, 20),
};
/**************************************************************************
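[Editor's note] For reference, DIV_ROUND_UP(HZ, 20) is ceil(HZ / 20) jiffies: 5 jiffies at HZ=100, 13 at HZ=250 and 50 at HZ=1000, so the wait is always at least 50 ms. The old fixed .timeout of 2 ticks was only 20 ms at HZ=100 and 2 ms at HZ=1000.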
@@ -2479,12 +2474,11 @@ int falcon_probe_nic(struct efx_nic *efx)
/* Initialise I2C adapter */
efx->i2c_adap.owner = THIS_MODULE;
- efx->i2c_adap.class = I2C_CLASS_HWMON;
nic_data->i2c_data = falcon_i2c_bit_operations;
nic_data->i2c_data.data = efx;
efx->i2c_adap.algo_data = &nic_data->i2c_data;
efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
- strcpy(efx->i2c_adap.name, "SFC4000 GPIO");
+ strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
rc = i2c_bit_add_bus(&efx->i2c_adap);
if (rc)
goto fail5;
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index d803b86c647c..219c74a772c3 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -616,7 +616,9 @@ union efx_multicast_hash {
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
- * @workqueue: Workqueue for resets, port reconfigures and the HW monitor
+ * @workqueue: Workqueue for port reconfigures and the HW monitor.
+ * Work items do not hold and must not acquire RTNL.
+ * @reset_workqueue: Workqueue for resets. Work item will acquire RTNL.
* @reset_work: Scheduled reset workitem
* @monitor_work: Hardware monitor workitem
* @membase_phys: Memory BAR value as physical address
@@ -684,6 +686,7 @@ struct efx_nic {
const struct efx_nic_type *type;
int legacy_irq;
struct workqueue_struct *workqueue;
+ struct workqueue_struct *reset_workqueue;
struct work_struct reset_work;
struct delayed_work monitor_work;
resource_size_t membase_phys;
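
[Editor's note] The kernel-doc above records the new split: items on @workqueue must never take RTNL, while @reset_workqueue exists so that reset_work may. The likely motivation is that callers already holding RTNL flush the main workqueue, which would deadlock if a work item on that queue were itself waiting for RTNL. A sketch of the arrangement follows; only the workqueue and RTNL calls are taken as given, every my_* name is invented.

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/errno.h>

struct my_nic {
	struct workqueue_struct *workqueue;		/* items must not take RTNL */
	struct workqueue_struct *reset_workqueue;	/* items may take RTNL */
	struct work_struct reset_work;
	struct work_struct monitor_work;
};

static void my_reset_work(struct work_struct *work)
{
	rtnl_lock();
	/* ...re-initialise the hardware... */
	rtnl_unlock();
}

static void my_monitor_work(struct work_struct *work)
{
	/* hardware monitor: must never take RTNL */
}

static int my_init_struct(struct my_nic *nic)
{
	nic->workqueue = create_singlethread_workqueue("my_main");
	if (!nic->workqueue)
		return -ENOMEM;
	nic->reset_workqueue = create_singlethread_workqueue("my_reset");
	if (!nic->reset_workqueue) {
		destroy_workqueue(nic->workqueue);
		return -ENOMEM;
	}
	INIT_WORK(&nic->reset_work, my_reset_work);
	INIT_WORK(&nic->monitor_work, my_monitor_work);
	return 0;
}

/* Called with RTNL held: flushing nic->workqueue is safe only because
 * none of its items take RTNL; reset_work lives on its own queue. */
static void my_flush_under_rtnl(struct my_nic *nic)
{
	flush_workqueue(nic->workqueue);
}
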
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index a4bc812aa999..c69ba1395fa9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -642,17 +642,12 @@ static void sh_eth_adjust_link(struct net_device *ndev)
| ECMR_DM, ioaddr + ECMR);
new_state = 1;
mdp->link = phydev->link;
- netif_tx_schedule_all(ndev);
- netif_carrier_on(ndev);
- netif_start_queue(ndev);
}
} else if (mdp->link) {
new_state = 1;
mdp->link = PHY_DOWN;
mdp->speed = 0;
mdp->duplex = -1;
- netif_stop_queue(ndev);
- netif_carrier_off(ndev);
}
if (new_state)
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 711e4a8948e0..5257cf464f1a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1829,9 +1829,6 @@ static int sky2_down(struct net_device *dev)
if (netif_msg_ifdown(sky2))
printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
- /* Stop more packets from being queued */
- netif_stop_queue(dev);
-
/* Disable port IRQ */
imask = sky2_read32(hw, B0_IMSK);
imask &= ~portirq_msk[port];
@@ -1887,8 +1884,6 @@ static int sky2_down(struct net_device *dev)
sky2_phy_power_down(hw, port);
- netif_carrier_off(dev);
-
/* turn off LED's */
sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index f2051b209da2..2040965d7724 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -308,7 +308,7 @@ static void smc_reset(struct net_device *dev)
* can't handle it then there will be no recovery except for
* a hard reset or power cycle
*/
- if (nowait)
+ if (lp->cfg.flags & SMC91X_NOWAIT)
cfg |= CONFIG_NO_WAIT;
/*
@@ -1939,8 +1939,11 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
if (retval)
goto err_out;
-#ifdef SMC_USE_PXA_DMA
- {
+#ifdef CONFIG_ARCH_PXA
+# ifdef SMC_USE_PXA_DMA
+ lp->cfg.flags |= SMC91X_USE_DMA;
+# endif
+ if (lp->cfg.flags & SMC91X_USE_DMA) {
int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
smc_pxa_dma_irq, NULL);
if (dma >= 0)
@@ -1980,7 +1983,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
}
err_out:
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
if (retval && dev->dma != (unsigned char)-1)
pxa_free_dma(dev->dma);
#endif
@@ -2050,9 +2053,11 @@ static int smc_enable_device(struct platform_device *pdev)
return 0;
}
-static int smc_request_attrib(struct platform_device *pdev)
+static int smc_request_attrib(struct platform_device *pdev,
+ struct net_device *ndev)
{
struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ struct smc_local *lp = netdev_priv(ndev);
if (!res)
return 0;
@@ -2063,9 +2068,11 @@ static int smc_request_attrib(struct platform_device *pdev)
return 0;
}
-static void smc_release_attrib(struct platform_device *pdev)
+static void smc_release_attrib(struct platform_device *pdev,
+ struct net_device *ndev)
{
struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ struct smc_local *lp = netdev_priv(ndev);
if (res)
release_mem_region(res->start, ATTRIB_SIZE);
@@ -2123,27 +2130,14 @@ static int smc_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
struct resource *res, *ires;
unsigned int __iomem *addr;
+ unsigned long irq_flags = SMC_IRQ_FLAGS;
int ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto out;
- }
-
-
- if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
- ret = -EBUSY;
- goto out;
- }
-
ndev = alloc_etherdev(sizeof(struct smc_local));
if (!ndev) {
printk("%s: could not allocate device.\n", CARDNAME);
ret = -ENOMEM;
- goto out_release_io;
+ goto out;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2152,37 +2146,47 @@ static int smc_drv_probe(struct platform_device *pdev)
*/
lp = netdev_priv(ndev);
- lp->cfg.irq_flags = SMC_IRQ_FLAGS;
-#ifdef SMC_DYNAMIC_BUS_CONFIG
- if (pd)
+ if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
- else {
- lp->cfg.flags = SMC91X_USE_8BIT;
- lp->cfg.flags |= SMC91X_USE_16BIT;
- lp->cfg.flags |= SMC91X_USE_32BIT;
+ lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+ } else {
+ lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
+ lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
+ lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
+ lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
}
- lp->cfg.flags &= ~(SMC_CAN_USE_8BIT ? 0 : SMC91X_USE_8BIT);
- lp->cfg.flags &= ~(SMC_CAN_USE_16BIT ? 0 : SMC91X_USE_16BIT);
- lp->cfg.flags &= ~(SMC_CAN_USE_32BIT ? 0 : SMC91X_USE_32BIT);
-#endif
-
ndev->dma = (unsigned char)-1;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+
+ if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
+ ret = -EBUSY;
+ goto out_free_netdev;
+ }
+
ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!ires) {
ret = -ENODEV;
- goto out_free_netdev;
+ goto out_release_io;
}
ndev->irq = ires->start;
- if (SMC_IRQ_FLAGS == -1)
- lp->cfg.irq_flags = ires->flags & IRQF_TRIGGER_MASK;
- ret = smc_request_attrib(pdev);
+ if (ires->flags & IRQF_TRIGGER_MASK)
+ irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+
+ ret = smc_request_attrib(pdev, ndev);
if (ret)
- goto out_free_netdev;
+ goto out_release_io;
#if defined(CONFIG_SA1100_ASSABET)
NCR_0 |= NCR_ENET_OSC_EN;
#endif
@@ -2197,7 +2201,7 @@ static int smc_drv_probe(struct platform_device *pdev)
goto out_release_attrib;
}
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
{
struct smc_local *lp = netdev_priv(ndev);
lp->device = &pdev->dev;
@@ -2205,7 +2209,7 @@ static int smc_drv_probe(struct platform_device *pdev)
}
#endif
- ret = smc_probe(ndev, addr, lp->cfg.irq_flags);
+ ret = smc_probe(ndev, addr, irq_flags);
if (ret != 0)
goto out_iounmap;
@@ -2217,11 +2221,11 @@ static int smc_drv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
iounmap(addr);
out_release_attrib:
- smc_release_attrib(pdev);
- out_free_netdev:
- free_netdev(ndev);
+ smc_release_attrib(pdev, ndev);
out_release_io:
release_mem_region(res->start, SMC_IO_EXTENT);
+ out_free_netdev:
+ free_netdev(ndev);
out:
printk("%s: not found (%d).\n", CARDNAME, ret);
@@ -2240,14 +2244,14 @@ static int smc_drv_remove(struct platform_device *pdev)
free_irq(ndev->irq, ndev);
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
if (ndev->dma != (unsigned char)-1)
pxa_free_dma(ndev->dma);
#endif
iounmap(lp->base);
smc_release_datacs(pdev,ndev);
- smc_release_attrib(pdev);
+ smc_release_attrib(pdev,ndev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
if (!res)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8606818653f8..22209b6f1405 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -40,23 +40,46 @@
* Define your architecture specific bus configuration parameters here.
*/
-#if defined(CONFIG_ARCH_LUBBOCK)
+#if defined(CONFIG_ARCH_LUBBOCK) ||\
+ defined(CONFIG_MACH_MAINSTONE) ||\
+ defined(CONFIG_MACH_ZYLONITE) ||\
+ defined(CONFIG_MACH_LITTLETON)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 0
+#include <asm/mach-types.h>
+
+/* Now the bus width is specified in the platform data,
+ * so pretend here to support all I/O access types.
+ */
+#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
+#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT 2
+#define SMC_IO_SHIFT (lp->io_shift)
+#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
#define SMC_IRQ_FLAGS (-1) /* from resource */
+/* We actually can't write halfwords properly if not word aligned */
+static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+{
+ if (machine_is_mainstone() && reg & 2) {
+ unsigned int v = val << 16;
+ v |= readl(ioaddr + (reg & ~2)) & 0xffff;
+ writel(v, ioaddr + (reg & ~2));
+ } else {
+ writew(val, ioaddr + reg);
+ }
+}
+
#elif defined(CONFIG_BLACKFIN)
#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
@@ -195,7 +218,6 @@
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#elif defined(CONFIG_ARCH_INNOKOM) || \
- defined(CONFIG_MACH_MAINSTONE) || \
defined(CONFIG_ARCH_PXA_IDP) || \
defined(CONFIG_ARCH_RAMSES) || \
defined(CONFIG_ARCH_PCM027)
@@ -229,22 +251,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
}
}
-#elif defined(CONFIG_MACH_ZYLONITE)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
#elif defined(CONFIG_ARCH_OMAP)
/* We can only do 16-bit reads and writes in the static memory space. */
@@ -454,7 +460,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
-#define SMC_DYNAMIC_BUS_CONFIG
#endif
@@ -493,7 +498,7 @@ struct smc_local {
spinlock_t lock;
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
/* DMA needs the physical address of the chip */
u_long physaddr;
struct device *device;
@@ -501,20 +506,17 @@ struct smc_local {
void __iomem *base;
void __iomem *datacs;
+ /* the low address lines on some platforms aren't connected... */
+ int io_shift;
+
struct smc91x_platdata cfg;
};
-#ifdef SMC_DYNAMIC_BUS_CONFIG
-#define SMC_8BIT(p) (((p)->cfg.flags & SMC91X_USE_8BIT) && SMC_CAN_USE_8BIT)
-#define SMC_16BIT(p) (((p)->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT)
-#define SMC_32BIT(p) (((p)->cfg.flags & SMC91X_USE_32BIT) && SMC_CAN_USE_32BIT)
-#else
-#define SMC_8BIT(p) SMC_CAN_USE_8BIT
-#define SMC_16BIT(p) SMC_CAN_USE_16BIT
-#define SMC_32BIT(p) SMC_CAN_USE_32BIT
-#endif
+#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
+#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
+#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
/*
* Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
* always happening in irq context so no need to worry about races. TX is
@@ -608,7 +610,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
{
DCSR(dma) = 0;
}
-#endif /* SMC_USE_PXA_DMA */
+#endif /* CONFIG_ARCH_PXA */
/*
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 41d3ac45685f..a645e5028c14 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -672,7 +672,6 @@ static void tc_handle_link_change(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
tc35815_set_multicast_list(dev);
#endif
- netif_tx_schedule_all(dev);
} else {
lp->speed = 0;
lp->duplex = -1;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index bc30c6e8fea2..617ef41bdfea 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -5514,22 +5514,6 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
netif_wake_queue(dev); /* Unlock the TX ring */
break;
- case DE4X5_SET_PROM: /* Set Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PR;
- outl(omr, DE4X5_OMR);
- dev->flags |= IFF_PROMISC;
- break;
-
- case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- omr = inl(DE4X5_OMR);
- omr &= ~OMR_PR;
- outl(omr, DE4X5_OMR);
- dev->flags &= ~IFF_PROMISC;
- break;
-
case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
if (!capable(CAP_NET_ADMIN)) return -EPERM;
printk("%s: Boo!\n", dev->name);
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index f5f33b3eb067..9f2877438fb0 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -1004,8 +1004,7 @@ struct de4x5_ioctl {
*/
#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
-#define DE4X5_SET_PROM 0x03 /* Set Promiscuous Mode */
-#define DE4X5_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */
#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a82b32b40131..e6bbc639c2d0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -900,7 +900,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
return -EINVAL;
rtnl_lock();
- ret = update_filter(&tun->txflt, (void *) __user arg);
+ ret = update_filter(&tun->txflt, (void __user *)arg);
rtnl_unlock();
return ret;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index a934428a5890..0e061dfea78d 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -50,10 +50,18 @@ static int is_activesync(struct usb_interface_descriptor *desc)
&& desc->bInterfaceProtocol == 1;
}
+static int is_wireless_rndis(struct usb_interface_descriptor *desc)
+{
+ return desc->bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER
+ && desc->bInterfaceSubClass == 1
+ && desc->bInterfaceProtocol == 3;
+}
+
#else
#define is_rndis(desc) 0
#define is_activesync(desc) 0
+#define is_wireless_rndis(desc) 0
#endif
@@ -110,7 +118,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* of cdc-acm, it'll fail RNDIS requests cleanly.
*/
rndis = is_rndis(&intf->cur_altsetting->desc)
- || is_activesync(&intf->cur_altsetting->desc);
+ || is_activesync(&intf->cur_altsetting->desc)
+ || is_wireless_rndis(&intf->cur_altsetting->desc);
memset(info, 0, sizeof *info);
info->control = intf;
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 61c98beb4d17..bcd858c567e0 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -576,6 +576,10 @@ static const struct usb_device_id products [] = {
/* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
.driver_info = (unsigned long) &rndis_info,
+}, {
+ /* RNDIS for tethering */
+ USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
+ .driver_info = (unsigned long) &rndis_info,
},
{ }, // END
};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c28d7cb2035b..0196a0df9021 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -19,6 +19,7 @@
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
@@ -54,9 +55,15 @@ struct virtnet_info
struct tasklet_struct tasklet;
bool free_in_tasklet;
+ /* I like... big packets and I cannot lie! */
+ bool big_packets;
+
/* Receive & send queues. */
struct sk_buff_head recv;
struct sk_buff_head send;
+
+ /* Chain pages by the private ptr. */
+ struct page *pages;
};
static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -69,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
}
+static void give_a_page(struct virtnet_info *vi, struct page *page)
+{
+ page->private = (unsigned long)vi->pages;
+ vi->pages = page;
+}
+
+static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
+{
+ struct page *p = vi->pages;
+
+ if (p)
+ vi->pages = (struct page *)p->private;
+ else
+ p = alloc_page(gfp_mask);
+ return p;
+}
+
static void skb_xmit_done(struct virtqueue *svq)
{
struct virtnet_info *vi = svq->vdev->priv;
@@ -88,6 +112,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
unsigned len)
{
struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+ int err;
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -95,10 +120,23 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
goto drop;
}
len -= sizeof(struct virtio_net_hdr);
- BUG_ON(len > MAX_PACKET_LEN);
- skb_trim(skb, len);
+ if (len <= MAX_PACKET_LEN) {
+ unsigned int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page);
+ skb->data_len = 0;
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+
+ err = pskb_trim(skb, len);
+ if (err) {
+ pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
+ dev->stats.rx_dropped++;
+ goto drop;
+ }
+ skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
@@ -160,7 +198,7 @@ static void try_fill_recv(struct virtnet_info *vi)
{
struct sk_buff *skb;
struct scatterlist sg[2+MAX_SKB_FRAGS];
- int num, err;
+ int num, err, i;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
for (;;) {
@@ -170,6 +208,24 @@ static void try_fill_recv(struct virtnet_info *vi)
skb_put(skb, MAX_PACKET_LEN);
vnet_hdr_to_sg(sg, skb);
+
+ if (vi->big_packets) {
+ for (i = 0; i < MAX_SKB_FRAGS; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ f->page = get_a_page(vi, GFP_ATOMIC);
+ if (!f->page)
+ break;
+
+ f->page_offset = 0;
+ f->size = PAGE_SIZE;
+
+ skb->data_len += PAGE_SIZE;
+ skb->len += PAGE_SIZE;
+
+ skb_shinfo(skb)->nr_frags++;
+ }
+ }
+
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
skb_queue_head(&vi->recv, skb);
@@ -335,16 +391,11 @@ again:
free_old_xmit_skbs(vi);
/* If we has a buffer left over from last time, send it now. */
- if (unlikely(vi->last_xmit_skb)) {
- if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
- /* Drop this skb: we only queue one. */
- vi->dev->stats.tx_dropped++;
- kfree_skb(skb);
- skb = NULL;
- goto stop_queue;
- }
- vi->last_xmit_skb = NULL;
- }
+ if (unlikely(vi->last_xmit_skb) &&
+ xmit_skb(vi, vi->last_xmit_skb) != 0)
+ goto stop_queue;
+
+ vi->last_xmit_skb = NULL;
/* Put new one in send queue and do transmit */
if (likely(skb)) {
@@ -370,6 +421,11 @@ stop_queue:
netif_start_queue(dev);
goto again;
}
+ if (skb) {
+ /* Drop this skb: we only queue one. */
+ vi->dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ }
goto done;
}
@@ -408,6 +464,22 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
+static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtio_device *vdev = vi->vdev;
+
+ if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
+ return -ENOSYS;
+
+ return ethtool_op_set_tx_hw_csum(dev, data);
+}
+
+static struct ethtool_ops virtnet_ethtool_ops = {
+ .set_tx_csum = virtnet_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+};
+
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
@@ -427,6 +499,7 @@ static int virtnet_probe(struct virtio_device *vdev)
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = virtnet_netpoll;
#endif
+ SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
@@ -462,11 +535,18 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->dev = dev;
vi->vdev = vdev;
vdev->priv = vi;
+ vi->pages = NULL;
/* If they give us a callback when all buffers are done, we don't need
* the timer. */
vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
+ /* If we can receive ANY GSO packets, we must allocate large ones. */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
+ || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
+ || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ vi->big_packets = true;
+
/* We expect two virtqueues, receive then send. */
vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
if (IS_ERR(vi->rvq)) {
@@ -541,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev)
vdev->config->del_vq(vi->svq);
vdev->config->del_vq(vi->rvq);
unregister_netdev(vi->dev);
+
+ while (vi->pages)
+ __free_pages(get_a_page(vi, GFP_KERNEL), 0);
+
free_netdev(vi->dev);
}
@@ -553,7 +637,9 @@ static unsigned int features[] = {
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
- VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
+ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+ VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
+ VIRTIO_F_NOTIFY_ON_EMPTY,
};
static struct virtio_driver virtio_net = {
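
[Editor's note] The virtio_net changes keep released receive pages on an intrusive free list threaded through page->private (give_a_page()/get_a_page()), so big-packet refills avoid the page allocator when recycled pages are available. A stand-alone model of the free-list idea follows; struct demo_page is a stripped-down stand-in for struct page and the rest of the names are simplified.

#include <stdio.h>
#include <stdlib.h>

struct demo_page {
	unsigned long private;		/* next pointer, as in struct page */
};

static struct demo_page *pool;

static void give_a_page(struct demo_page *p)
{
	p->private = (unsigned long)pool;
	pool = p;
}

static struct demo_page *get_a_page(void)
{
	struct demo_page *p = pool;

	if (p)
		pool = (struct demo_page *)p->private;
	else
		p = calloc(1, sizeof(*p));	/* fallback: alloc_page() */
	return p;
}

int main(void)
{
	struct demo_page *a = get_a_page();	/* pool empty: allocated */

	give_a_page(a);				/* recycled into the pool */
	printf("%s\n", get_a_page() == a ? "reused the cached page"
					 : "allocated a new page");
	free(a);
	return 0;
}
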
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 5827324e9d9f..f7d3349dc3ec 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -397,9 +397,9 @@ static int __init cosa_init(void)
err = PTR_ERR(cosa_class);
goto out_chrdev;
}
- for (i=0; i<nr_cards; i++) {
- device_create(cosa_class, NULL, MKDEV(cosa_major, i), "cosa%d", i);
- }
+ for (i = 0; i < nr_cards; i++)
+ device_create_drvdata(cosa_class, NULL, MKDEV(cosa_major, i),
+ NULL, "cosa%d", i);
err = 0;
goto out;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 13d5882f1f21..3153fe9d7ce0 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3101,6 +3101,7 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
* This is a natural nesting, which needs a split lock type.
*/
static struct lock_class_key hostap_netdev_xmit_lock_key;
+static struct lock_class_key hostap_netdev_addr_lock_key;
static void prism2_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
@@ -3112,6 +3113,8 @@ static void prism2_set_lockdep_class_one(struct net_device *dev,
static void prism2_set_lockdep_class(struct net_device *dev)
{
+ lockdep_set_class(&dev->addr_list_lock,
+ &hostap_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
}
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 6e704608947c..1acfbcd3703c 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4972,8 +4972,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
}
done:
if ((ipw_tx_queue_space(q) > q->low_mark) &&
- (qindex >= 0) &&
- (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
+ (qindex >= 0))
netif_wake_queue(priv->net_dev);
used = q->first_empty - q->last_used;
if (used < 0)
@@ -10154,14 +10153,8 @@ static void init_sys_config(struct ipw_sys_config *sys_config)
static int ipw_net_open(struct net_device *dev)
{
- struct ipw_priv *priv = ieee80211_priv(dev);
IPW_DEBUG_INFO("dev->open\n");
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
- if (!(priv->status & STATUS_RF_KILL_MASK) &&
- (priv->status & STATUS_ASSOCIATED))
- netif_start_queue(dev);
- mutex_unlock(&priv->mutex);
+ netif_start_queue(dev);
return 0;
}
@@ -10481,13 +10474,6 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
spin_lock_irqsave(&priv->lock, flags);
- if (!(priv->status & STATUS_ASSOCIATED)) {
- IPW_DEBUG_INFO("Tx attempt while not associated.\n");
- priv->ieee->stats.tx_carrier_errors++;
- netif_stop_queue(dev);
- goto fail_unlock;
- }
-
#ifdef CONFIG_IPW2200_PROMISCUOUS
if (rtap_iface && netif_running(priv->prom_net_dev))
ipw_handle_promiscuous_tx(priv, txb);
@@ -10499,10 +10485,6 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
-
- fail_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return 1;
}
static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
@@ -10703,13 +10685,6 @@ static void ipw_link_up(struct ipw_priv *priv)
priv->last_packet_time = 0;
netif_carrier_on(priv->net_dev);
- if (netif_queue_stopped(priv->net_dev)) {
- IPW_DEBUG_NOTIF("waking queue\n");
- netif_wake_queue(priv->net_dev);
- } else {
- IPW_DEBUG_NOTIF("starting queue\n");
- netif_start_queue(priv->net_dev);
- }
cancel_delayed_work(&priv->request_scan);
cancel_delayed_work(&priv->request_direct_scan);
@@ -10739,7 +10714,6 @@ static void ipw_link_down(struct ipw_priv *priv)
{
ipw_led_link_down(priv);
netif_carrier_off(priv->net_dev);
- netif_stop_queue(priv->net_dev);
notify_wx_assoc_event(priv);
/* Cancel any queued work ... */
@@ -11419,7 +11393,6 @@ static void ipw_down(struct ipw_priv *priv)
/* Clear all bits but the RF Kill */
priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
netif_carrier_off(priv->net_dev);
- netif_stop_queue(priv->net_dev);
ipw_stop_nic(priv);
@@ -11522,7 +11495,6 @@ static int ipw_prom_open(struct net_device *dev)
IPW_DEBUG_INFO("prom dev->open\n");
netif_carrier_off(dev);
- netif_stop_queue(dev);
if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
priv->sys_config.accept_all_data_frames = 1;
@@ -11558,7 +11530,6 @@ static int ipw_prom_stop(struct net_device *dev)
static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
IPW_DEBUG_INFO("prom dev->xmit\n");
- netif_stop_queue(dev);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 913dc9fe08f9..5816230d58f8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -364,8 +364,7 @@ static void mac80211_hwsim_free(void)
struct mac80211_hwsim_data *data;
data = hwsim_radios[i]->priv;
ieee80211_unregister_hw(hwsim_radios[i]);
- if (!IS_ERR(data->dev))
- device_unregister(data->dev);
+ device_unregister(data->dev);
ieee80211_free_hw(hwsim_radios[i]);
}
}
@@ -437,7 +436,7 @@ static int __init init_mac80211_hwsim(void)
"mac80211_hwsim: device_create_drvdata "
"failed (%ld)\n", PTR_ERR(data->dev));
err = -ENOMEM;
- goto failed;
+ goto failed_drvdata;
}
data->dev->driver = &mac80211_hwsim_driver;
@@ -461,7 +460,7 @@ static int __init init_mac80211_hwsim(void)
if (err < 0) {
printk(KERN_DEBUG "mac80211_hwsim: "
"ieee80211_register_hw failed (%d)\n", err);
- goto failed;
+ goto failed_hw;
}
printk(KERN_DEBUG "%s: hwaddr %s registered\n",
@@ -479,9 +478,9 @@ static int __init init_mac80211_hwsim(void)
rtnl_lock();
err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
- if (err < 0) {
+ if (err < 0)
goto failed_mon;
- }
+
err = register_netdevice(hwsim_mon);
if (err < 0)
@@ -494,7 +493,14 @@ static int __init init_mac80211_hwsim(void)
failed_mon:
rtnl_unlock();
free_netdev(hwsim_mon);
+ mac80211_hwsim_free();
+ return err;
+failed_hw:
+ device_unregister(data->dev);
+failed_drvdata:
+ ieee80211_free_hw(hw);
+ hwsim_radios[i] = 0;
failed:
mac80211_hwsim_free();
return err;