Diffstat (limited to 'drivers')
222 files changed, 5388 insertions, 6474 deletions
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index de4c8499cbac..288547a3c566 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = { /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0489, 0xE027) }, { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x04F2, 0xAFF1) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x0CF3, 0xE019) }, diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index e75f8ee2512c..086f0ec89580 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -111,6 +111,7 @@ struct btmrvl_private { /* Vendor specific Bluetooth commands */ #define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03 +#define BT_CMD_ROUTE_SCO_TO_HOST 0xFC1D #define BT_CMD_SET_BDADDR 0xFC22 #define BT_CMD_AUTO_SLEEP_MODE 0xFC23 #define BT_CMD_HOST_SLEEP_CONFIG 0xFC59 diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 413597789c61..de05deb444ce 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -230,6 +230,18 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd) } EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); +static int btmrvl_enable_sco_routing_to_host(struct btmrvl_private *priv) +{ + int ret; + u8 subcmd = 0; + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_ROUTE_SCO_TO_HOST, &subcmd, 1); + if (ret) + BT_ERR("BT_CMD_ROUTE_SCO_TO_HOST command failed: %#x", ret); + + return ret; +} + int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; @@ -558,6 +570,8 @@ static int btmrvl_setup(struct hci_dev *hdev) btmrvl_check_device_tree(priv); + btmrvl_enable_sco_routing_to_host(priv); + btmrvl_pscan_window_reporting(priv, 0x01); priv->btmrvl_dev.psmode = 1; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 8bfc4c2bba87..8c1bf6190533 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = { /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, @@ -339,16 +340,6 @@ struct btusb_data { int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); }; -static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout, - unsigned mode) -{ - might_sleep(); - if (!test_bit(bit, word)) - return 0; - return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout, - mode, timeout); -} - static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; @@ -2197,9 +2188,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * and thus just timeout if that happens and fail the setup * of this device. 
*/ - err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, - msecs_to_jiffies(5000), - TASK_INTERRUPTIBLE); + err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(5000)); if (err == 1) { BT_ERR("%s: Firmware loading interrupted", hdev->name); err = -EINTR; @@ -2250,9 +2241,9 @@ done: */ BT_INFO("%s: Waiting for device to boot", hdev->name); - err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, - msecs_to_jiffies(1000), - TASK_INTERRUPTIBLE); + err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(1000)); if (err == 1) { BT_ERR("%s: Device boot interrupted", hdev->name); @@ -2332,6 +2323,27 @@ static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) return 0; } +static int btusb_shutdown_intel(struct hci_dev *hdev) +{ + struct sk_buff *skb; + long ret; + + /* Some platforms have an issue with BT LED when the interface is + * down or BT radio is turned off, which takes 5 seconds to BT LED + * goes off. This command turns off the BT LED immediately. + */ + skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + BT_ERR("%s: turning off Intel device LED failed (%ld)", + hdev->name, ret); + return ret; + } + kfree_skb(skb); + + return 0; +} + static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, const bdaddr_t *bdaddr) { @@ -2355,6 +2367,23 @@ static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, return 0; } +static const struct { + u16 subver; + const char *name; +} bcm_subver_table[] = { + { 0x210b, "BCM43142A0" }, /* 001.001.011 */ + { 0x2112, "BCM4314A0" }, /* 001.001.018 */ + { 0x2118, "BCM20702A0" }, /* 001.001.024 */ + { 0x2126, "BCM4335A0" }, /* 001.001.038 */ + { 0x220e, "BCM20702A1" }, /* 001.002.014 */ + { 0x230f, "BCM4354A2" }, /* 001.003.015 */ + { 0x4106, "BCM4335B0" }, /* 002.001.006 */ + { 0x410e, "BCM20702B0" }, /* 002.001.014 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ + { 0x610c, "BCM4354" }, /* 003.001.012 */ + { } +}; + #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) static int btusb_setup_bcm_patchram(struct hci_dev *hdev) @@ -2367,29 +2396,20 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) size_t fw_size; const struct hci_command_hdr *cmd; const u8 *cmd_param; - u16 opcode; + u16 opcode, subver, rev; + const char *hw_name = NULL; struct sk_buff *skb; struct hci_rp_read_local_version *ver; struct hci_rp_read_bd_addr *bda; long ret; - - snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd", - udev->product ? 
udev->product : "BCM", - le16_to_cpu(udev->descriptor.idVendor), - le16_to_cpu(udev->descriptor.idProduct)); - - ret = request_firmware(&fw, fw_name, &hdev->dev); - if (ret < 0) { - BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name); - return 0; - } + int i; /* Reset */ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret); - goto done; + return ret; } kfree_skb(skb); @@ -2400,23 +2420,43 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) ret = PTR_ERR(skb); BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", hdev->name, ret); - goto done; + return ret; } if (skb->len != sizeof(*ver)) { BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", hdev->name); kfree_skb(skb); - ret = -EIO; - goto done; + return -EIO; } ver = (struct hci_rp_read_local_version *)skb->data; - BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x " - "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, - ver->lmp_ver, ver->lmp_subver); + rev = le16_to_cpu(ver->hci_rev); + subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); + for (i = 0; bcm_subver_table[i].name; i++) { + if (subver == bcm_subver_table[i].subver) { + hw_name = bcm_subver_table[i].name; + break; + } + } + + BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, + hw_name ? : "BCM", (subver & 0x7000) >> 13, + (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); + + snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd", + hw_name ? : "BCM", + le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct)); + + ret = request_firmware(&fw, fw_name, &hdev->dev); + if (ret < 0) { + BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name); + return 0; + } + /* Start Download */ skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { @@ -2494,11 +2534,14 @@ reset_fw: } ver = (struct hci_rp_read_local_version *)skb->data; - BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x " - "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, - ver->lmp_ver, ver->lmp_subver); + rev = le16_to_cpu(ver->hci_rev); + subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); + BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, + hw_name ? 
: "BCM", (subver & 0x7000) >> 13, + (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); + /* Read BD Address */ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, HCI_INIT_TIMEOUT); @@ -2709,6 +2752,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_INTEL) { hdev->setup = btusb_setup_intel; + hdev->shutdown = btusb_shutdown_intel; hdev->set_bdaddr = btusb_set_bdaddr_intel; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); } diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 2c68da1ceeee..f4ea80d602f7 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -237,18 +237,6 @@ static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, return -net->hard_header_len; } -static int fwnet_header_rebuild(struct sk_buff *skb) -{ - struct fwnet_header *h = (struct fwnet_header *)skb->data; - - if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) - return arp_find((unsigned char *)&h->h_dest, skb); - - dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n", - be16_to_cpu(h->h_proto)); - return 0; -} - static int fwnet_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type) { @@ -282,7 +270,6 @@ static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) static const struct header_ops fwnet_header_ops = { .create = fwnet_header_create, - .rebuild = fwnet_header_rebuild, .cache = fwnet_header_cache, .cache_update = fwnet_header_cache_update, .parse = fwnet_header_parse, diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 94affa5e6f28..546b7e81161d 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -1951,38 +1951,6 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev, return len; } -/* We don't need to send arp, because we have point-to-point connections. */ -static int -isdn_net_rebuild_header(struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - isdn_net_local *lp = netdev_priv(dev); - int ret = 0; - - if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { - struct ethhdr *eth = (struct ethhdr *) skb->data; - - /* - * Only ARP/IP is currently supported - */ - - if (eth->h_proto != htons(ETH_P_IP)) { - printk(KERN_WARNING - "isdn_net: %s don't know how to resolve type %d addresses?\n", - dev->name, (int) eth->h_proto); - memcpy(eth->h_source, dev->dev_addr, dev->addr_len); - return 0; - } - /* - * Try to get ARP to resolve the header. 
- */ -#ifdef CONFIG_INET - ret = arp_find(eth->h_dest, skb); -#endif - } - return ret; -} - static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type) { @@ -2005,7 +1973,6 @@ static void isdn_header_cache_update(struct hh_cache *hh, static const struct header_ops isdn_header_ops = { .create = isdn_net_header, - .rebuild = isdn_net_rebuild_header, .cache = isdn_header_cache, .cache_update = isdn_header_cache_update, }; diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 84b35925ee4d..8dc7290089bb 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -112,8 +112,8 @@ mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) } static int -mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct sk_buff *skb; struct sock *sk = sock->sk; @@ -173,8 +173,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, } static int -mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb; diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 686d3277dad1..4a77cb02dffc 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -1190,7 +1190,6 @@ static int dvb_net_stop(struct net_device *dev) static const struct header_ops dvb_header_ops = { .create = eth_header, .parse = eth_header_parse, - .rebuild = eth_rebuild_header, }; diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 09de683c167e..10f71c732b59 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -104,7 +104,6 @@ EXPORT_SYMBOL(arcnet_timeout); static int arcnet_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); -static int arcnet_rebuild_header(struct sk_buff *skb); static int go_tx(struct net_device *dev); static int debug = ARCNET_DEBUG; @@ -312,7 +311,6 @@ static int choose_mtu(void) static const struct header_ops arcnet_header_ops = { .create = arcnet_header, - .rebuild = arcnet_rebuild_header, }; static const struct net_device_ops arcnet_netdev_ops = { @@ -538,59 +536,6 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, return proto->build_header(skb, dev, type, _daddr); } - -/* - * Rebuild the ARCnet hard header. This is called after an ARP (or in the - * future other address resolution) has completed on this sk_buff. We now - * let ARP fill in the destination field. - */ -static int arcnet_rebuild_header(struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - struct arcnet_local *lp = netdev_priv(dev); - int status = 0; /* default is failure */ - unsigned short type; - uint8_t daddr=0; - struct ArcProto *proto; - /* - * XXX: Why not use skb->mac_len? - */ - if (skb->network_header - skb->mac_header != 2) { - BUGMSG(D_NORMAL, - "rebuild_header: shouldn't be here! (hdrsize=%d)\n", - (int)(skb->network_header - skb->mac_header)); - return 0; - } - type = *(uint16_t *) skb_pull(skb, 2); - BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type); - - if (type == ETH_P_IP) { -#ifdef CONFIG_INET - BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type); - status = arp_find(&daddr, skb) ? 
1 : 0; - BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n", - daddr, type); -#endif - } else { - BUGMSG(D_NORMAL, - "I don't understand ethernet protocol %Xh addresses!\n", type); - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - } - - /* if we couldn't resolve the address... give up. */ - if (!status) - return 0; - - /* add the _real_ header this time! */ - proto = arc_proto_map[lp->default_proto[daddr]]; - proto->build_header(skb, dev, type, daddr); - - return 1; /* success */ -} - - - /* Called by the kernel in order to transmit a packet. */ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index cfc4a9c1000a..f61b2870cddf 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -38,6 +38,7 @@ #define AD_STANDBY 0x2 #define AD_MAX_TX_IN_SECOND 3 #define AD_COLLECTOR_MAX_DELAY 0 +#define AD_MONITOR_CHURNED 0x1000 /* Timer definitions (43.4.4 in the 802.3ad standard) */ #define AD_FAST_PERIODIC_TIME 1 @@ -1013,16 +1014,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) /* check if state machine should change state */ /* first, check if port was reinitialized */ - if (port->sm_vars & AD_PORT_BEGIN) + if (port->sm_vars & AD_PORT_BEGIN) { port->sm_rx_state = AD_RX_INITIALIZE; + port->sm_vars |= AD_MONITOR_CHURNED; /* check if port is not enabled */ - else if (!(port->sm_vars & AD_PORT_BEGIN) + } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) port->sm_rx_state = AD_RX_PORT_DISABLED; /* check if new lacpdu arrived */ else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) { + if (port->sm_rx_state != AD_RX_CURRENT) + port->sm_vars |= AD_MONITOR_CHURNED; port->sm_rx_timer_counter = 0; port->sm_rx_state = AD_RX_CURRENT; } else { @@ -1100,9 +1104,11 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) */ port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION; port->sm_vars &= ~AD_PORT_MATCHED; + port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT; port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY; port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); port->actor_oper_port_state |= AD_STATE_EXPIRED; + port->sm_vars |= AD_MONITOR_CHURNED; break; case AD_RX_DEFAULTED: __update_default_selected(port); @@ -1132,6 +1138,45 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) } /** + * ad_churn_machine - handle port churn's state machine + * @port: the port we're looking at + * + */ +static void ad_churn_machine(struct port *port) +{ + if (port->sm_vars & AD_MONITOR_CHURNED) { + port->sm_vars &= ~AD_MONITOR_CHURNED; + port->sm_churn_actor_state = AD_CHURN_MONITOR; + port->sm_churn_partner_state = AD_CHURN_MONITOR; + port->sm_churn_actor_timer_counter = + __ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0); + port->sm_churn_partner_timer_counter = + __ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0); + return; + } + if (port->sm_churn_actor_timer_counter && + !(--port->sm_churn_actor_timer_counter) && + port->sm_churn_actor_state == AD_CHURN_MONITOR) { + if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) { + port->sm_churn_actor_state = AD_NO_CHURN; + } else { + port->churn_actor_count++; + port->sm_churn_actor_state = AD_CHURN; + } + } + if (port->sm_churn_partner_timer_counter && + 
!(--port->sm_churn_partner_timer_counter) && + port->sm_churn_partner_state == AD_CHURN_MONITOR) { + if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) { + port->sm_churn_partner_state = AD_NO_CHURN; + } else { + port->churn_partner_count++; + port->sm_churn_partner_state = AD_CHURN; + } + } +} + +/** * ad_tx_machine - handle a port's tx state machine * @port: the port we're looking at */ @@ -1745,6 +1790,13 @@ static void ad_initialize_port(struct port *port, int lacp_fast) port->next_port_in_aggregator = NULL; port->transaction_id = 0; + port->sm_churn_actor_timer_counter = 0; + port->sm_churn_actor_state = 0; + port->churn_actor_count = 0; + port->sm_churn_partner_timer_counter = 0; + port->sm_churn_partner_state = 0; + port->churn_partner_count = 0; + memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu)); } } @@ -2164,6 +2216,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) ad_port_selection_logic(port, &update_slave_arr); ad_mux_machine(port, &update_slave_arr); ad_tx_machine(port); + ad_churn_machine(port); /* turn off the BEGIN bit, since we already handled it */ if (port->sm_vars & AD_PORT_BEGIN) @@ -2485,6 +2538,9 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, if (skb->protocol != PKT_TYPE_LACPDU) return RX_HANDLER_ANOTHER; + if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr)) + return RX_HANDLER_ANOTHER; + lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); if (!lacpdu) return RX_HANDLER_ANOTHER; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b979c265fc51..675b082283d6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2900,6 +2900,8 @@ static int bond_slave_netdev_event(unsigned long event, if (old_duplex != slave->duplex) bond_3ad_adapter_duplex_changed(slave); } + /* Fallthrough */ + case NETDEV_DOWN: /* Refresh slave-array if applicable! * If the setup does not use miimon or arpmon (mode-specific!), * then these events will not cause the slave-array to be @@ -2911,10 +2913,6 @@ static int bond_slave_netdev_event(unsigned long event, if (bond_mode_uses_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); break; - case NETDEV_DOWN: - if (bond_mode_uses_xmit_hash(bond)) - bond_update_slave_arr(bond, NULL); - break; case NETDEV_CHANGEMTU: /* TODO: Should slaves be allowed to * independently alter their MTU? 
For diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 976f5ad2a0f2..62694cfc05b6 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -176,18 +176,51 @@ static void bond_info_show_slave(struct seq_file *seq, slave->link_failure_count); seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); + seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); if (BOND_MODE(bond) == BOND_MODE_8023AD) { - const struct aggregator *agg - = SLAVE_AD_INFO(slave)->port.aggregator; + const struct port *port = &SLAVE_AD_INFO(slave)->port; + const struct aggregator *agg = port->aggregator; - if (agg) + if (agg) { seq_printf(seq, "Aggregator ID: %d\n", agg->aggregator_identifier); - else + seq_printf(seq, "Actor Churn State: %s\n", + bond_3ad_churn_desc(port->sm_churn_actor_state)); + seq_printf(seq, "Partner Churn State: %s\n", + bond_3ad_churn_desc(port->sm_churn_partner_state)); + seq_printf(seq, "Actor Churned Count: %d\n", + port->churn_actor_count); + seq_printf(seq, "Partner Churned Count: %d\n", + port->churn_partner_count); + + seq_puts(seq, "details actor lacp pdu:\n"); + seq_printf(seq, " system priority: %d\n", + port->actor_system_priority); + seq_printf(seq, " port key: %d\n", + port->actor_oper_port_key); + seq_printf(seq, " port priority: %d\n", + port->actor_port_priority); + seq_printf(seq, " port number: %d\n", + port->actor_port_number); + seq_printf(seq, " port state: %d\n", + port->actor_oper_port_state); + + seq_puts(seq, "details partner lacp pdu:\n"); + seq_printf(seq, " system priority: %d\n", + port->partner_oper.system_priority); + seq_printf(seq, " oper key: %d\n", + port->partner_oper.key); + seq_printf(seq, " port priority: %d\n", + port->partner_oper.port_priority); + seq_printf(seq, " port number: %d\n", + port->partner_oper.port_number); + seq_printf(seq, " port state: %d\n", + port->partner_oper.port_state); + } else { seq_puts(seq, "Aggregator ID: N/A\n"); + } } - seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); } static int bond_info_seq_show(struct seq_file *seq, void *v) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 4daffb284931..cedb572bf25a 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -23,6 +23,7 @@ #include <linux/of_address.h> #include <net/dsa.h> #include <linux/ethtool.h> +#include <linux/if_bridge.h> #include "bcm_sf2.h" #include "bcm_sf2_regs.h" @@ -299,10 +300,14 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, if (port == 7) intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); - /* Set this port, and only this one to be in the default VLAN */ + /* Set this port, and only this one to be in the default VLAN, + * if member of a bridge, restore its membership prior to + * bringing down this port. + */ reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); reg &= ~PORT_VLAN_CTRL_MASK; reg |= (1 << port); + reg |= priv->port_sts[port].vlan_ctl_mask; core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); bcm_sf2_imp_vlan_setup(ds, cpu_port); @@ -400,6 +405,151 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, return 0; } +/* Fast-ageing of ARL entries for a given port, equivalent to an ARL + * flush for that port. 
+ */ +static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int timeout = 1000; + u32 reg; + + core_writel(priv, port, CORE_FAST_AGE_PORT); + + reg = core_readl(priv, CORE_FAST_AGE_CTRL); + reg |= EN_AGE_PORT | FAST_AGE_STR_DONE; + core_writel(priv, reg, CORE_FAST_AGE_CTRL); + + do { + reg = core_readl(priv, CORE_FAST_AGE_CTRL); + if (!(reg & FAST_AGE_STR_DONE)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, + u32 br_port_mask) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int i; + u32 reg, p_ctl; + + p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); + + for (i = 0; i < priv->hw_params.num_ports; i++) { + if (!((1 << i) & br_port_mask)) + continue; + + /* Add this local port to the remote port VLAN control + * membership and update the remote port bitmask + */ + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); + reg |= 1 << port; + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); + priv->port_sts[i].vlan_ctl_mask = reg; + + p_ctl |= 1 << i; + } + + /* Configure the local port VLAN control membership to include + * remote ports and update the local port bitmask + */ + core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); + priv->port_sts[port].vlan_ctl_mask = p_ctl; + + return 0; +} + +static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, + u32 br_port_mask) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int i; + u32 reg, p_ctl; + + p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); + + for (i = 0; i < priv->hw_params.num_ports; i++) { + /* Don't touch the remaining ports */ + if (!((1 << i) & br_port_mask)) + continue; + + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); + reg &= ~(1 << port); + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); + priv->port_sts[port].vlan_ctl_mask = reg; + + /* Prevent self removal to preserve isolation */ + if (port != i) + p_ctl &= ~(1 << i); + } + + core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); + priv->port_sts[port].vlan_ctl_mask = p_ctl; + + return 0; +} + +static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, + u8 state) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u8 hw_state, cur_hw_state; + int ret = 0; + u32 reg; + + reg = core_readl(priv, CORE_G_PCTL_PORT(port)); + cur_hw_state = reg >> G_MISTP_STATE_SHIFT; + + switch (state) { + case BR_STATE_DISABLED: + hw_state = G_MISTP_DIS_STATE; + break; + case BR_STATE_LISTENING: + hw_state = G_MISTP_LISTEN_STATE; + break; + case BR_STATE_LEARNING: + hw_state = G_MISTP_LEARN_STATE; + break; + case BR_STATE_FORWARDING: + hw_state = G_MISTP_FWD_STATE; + break; + case BR_STATE_BLOCKING: + hw_state = G_MISTP_BLOCK_STATE; + break; + default: + pr_err("%s: invalid STP state: %d\n", __func__, state); + return -EINVAL; + } + + /* Fast-age ARL entries if we are moving a port from Learning or + * Forwarding state to Disabled, Blocking or Listening state + */ + if (cur_hw_state != hw_state) { + if (cur_hw_state & 4 && !(hw_state & 4)) { + ret = bcm_sf2_sw_fast_age_port(ds, port); + if (ret) { + pr_err("%s: fast-ageing failed\n", __func__); + return ret; + } + } + } + + reg = core_readl(priv, CORE_G_PCTL_PORT(port)); + reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); + reg |= hw_state; + core_writel(priv, reg, CORE_G_PCTL_PORT(port)); + + return 0; +} + static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { 
struct bcm_sf2_priv *priv = dev_id; @@ -916,6 +1066,9 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { .port_disable = bcm_sf2_port_disable, .get_eee = bcm_sf2_sw_get_eee, .set_eee = bcm_sf2_sw_set_eee, + .port_join_bridge = bcm_sf2_sw_br_join, + .port_leave_bridge = bcm_sf2_sw_br_leave, + .port_stp_update = bcm_sf2_sw_br_set_stp_state, }; static int __init bcm_sf2_init(void) diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 7b7053d3c5fa..22e2ebf31333 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -46,6 +46,8 @@ struct bcm_sf2_port_status { unsigned int link; struct ethtool_eee eee; + + u32 vlan_ctl_mask; }; struct bcm_sf2_priv { diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index cabdfa5e217a..fa4e6e78c9ea 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -163,6 +163,21 @@ #define EN_CHIP_RST (1 << 6) #define EN_SW_RESET (1 << 4) +#define CORE_FAST_AGE_CTRL 0x00220 +#define EN_FAST_AGE_STATIC (1 << 0) +#define EN_AGE_DYNAMIC (1 << 1) +#define EN_AGE_PORT (1 << 2) +#define EN_AGE_VLAN (1 << 3) +#define EN_AGE_SPT (1 << 4) +#define EN_AGE_MCAST (1 << 5) +#define FAST_AGE_STR_DONE (1 << 7) + +#define CORE_FAST_AGE_PORT 0x00224 +#define AGE_PORT_MASK 0xf + +#define CORE_FAST_AGE_VID 0x00228 +#define AGE_VID_MASK 0x3fff + #define CORE_LNKSTS 0x00400 #define LNK_STS_MASK 0x1ff diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index aa33d16f2e22..9808c860a797 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -51,8 +51,11 @@ static int mv88e6171_switch_reset(struct dsa_switch *ds) /* Wait for transmit queues to drain. */ usleep_range(2000, 4000); - /* Reset the switch. */ - REG_WRITE(REG_GLOBAL, 0x04, 0xc400); + /* Reset the switch. Keep PPU active. The PPU needs to be + * active to support indirect phy register accesses through + * global registers 0x18 and 0x19. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc000); /* Wait up to one second for reset to complete. */ timeout = jiffies + 1 * HZ; @@ -83,11 +86,10 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) int ret; int i; - /* Disable the PHY polling unit (since there won't be any - * external PHYs to poll), don't discard packets with - * excessive collisions, and mask all interrupt sources. + /* Discard packets with excessive collisions, mask all + * interrupt sources, enable PPU. 
*/ - REG_WRITE(REG_GLOBAL, 0x04, 0x0000); + REG_WRITE(REG_GLOBAL, 0x04, 0x6000); /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message @@ -336,7 +338,7 @@ mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum) int ret; mutex_lock(&ps->phy_mutex); - ret = mv88e6xxx_phy_read(ds, addr, regnum); + ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum); mutex_unlock(&ps->phy_mutex); return ret; } @@ -350,7 +352,7 @@ mv88e6171_phy_write(struct dsa_switch *ds, int ret; mutex_lock(&ps->phy_mutex); - ret = mv88e6xxx_phy_write(ds, addr, regnum, val); + ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val); mutex_unlock(&ps->phy_mutex); return ret; } diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index e13adc7b3dda..1ebd8f96072a 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -22,59 +22,6 @@ #include <net/dsa.h> #include "mv88e6xxx.h" -static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) -{ - unsigned long timeout = jiffies + HZ / 10; - - while (time_before(jiffies, timeout)) { - int ret; - - ret = REG_READ(reg, offset); - if (!(ret & mask)) - return 0; - - usleep_range(1000, 2000); - } - return -ETIMEDOUT; -} - -static inline int mv88e6352_phy_wait(struct dsa_switch *ds) -{ - return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000); -} - -static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds) -{ - return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800); -} - -static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds) -{ - return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000); -} - -static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) -{ - int ret; - - REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); - - ret = mv88e6352_phy_wait(ds); - if (ret < 0) - return ret; - - return REG_READ(REG_GLOBAL2, 0x19); -} - -static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum, - u16 val) -{ - REG_WRITE(REG_GLOBAL2, 0x19, val); - REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); - - return mv88e6352_phy_wait(ds); -} - static char *mv88e6352_probe(struct device *host_dev, int sw_addr) { struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); @@ -346,12 +293,12 @@ static int mv88e6352_phy_page_read(struct dsa_switch *ds, int ret; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_write(ds, port, 0x16, page); + ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); if (ret < 0) goto error; - ret = __mv88e6352_phy_read(ds, port, reg); + ret = mv88e6xxx_phy_read_indirect(ds, port, reg); error: - __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); mutex_unlock(&ps->phy_mutex); return ret; } @@ -363,13 +310,13 @@ static int mv88e6352_phy_page_write(struct dsa_switch *ds, int ret; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_write(ds, port, 0x16, page); + ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); if (ret < 0) goto error; - ret = __mv88e6352_phy_write(ds, port, reg, val); + ret = mv88e6xxx_phy_write_indirect(ds, port, reg, val); error: - __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); mutex_unlock(&ps->phy_mutex); return ret; } @@ -482,7 +429,7 @@ mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum) return addr; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_read(ds, addr, regnum); + ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum); 
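/*
 * Minimal, self-contained sketch (illustration only, not part of the patch)
 * of the indirect SMI access pattern that the new mv88e6xxx_phy_read_indirect()
 * and mv88e6xxx_phy_write_indirect() helpers in the hunks above implement:
 * write an SMI command to Global2 register 0x18, poll the 0x8000 busy bit,
 * and move data through Global2 register 0x19.  The in-memory "switch" below
 * is a hypothetical stand-in for the real REG_READ()/REG_WRITE() accessors.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t g2_smi_cmd;            /* models Global2 offset 0x18 */
static uint16_t g2_smi_data;           /* models Global2 offset 0x19 */
static uint16_t fake_phy_regs[32][32]; /* [phy address][register] */

/* Hypothetical stand-in for REG_WRITE(REG_GLOBAL2, 0x18, cmd). */
static void g2_write_cmd(uint16_t cmd)
{
	int addr = (cmd >> 5) & 0x1f;
	int reg = cmd & 0x1f;

	g2_smi_cmd = cmd;                  /* busy bit 0x8000 set by the host */
	if ((cmd & 0xfc00) == 0x9800)      /* read opcode, as in the patch */
		g2_smi_data = fake_phy_regs[addr][reg];
	else if ((cmd & 0xfc00) == 0x9400) /* write opcode, as in the patch */
		fake_phy_regs[addr][reg] = g2_smi_data;
	g2_smi_cmd &= ~0x8000;             /* hardware clears busy when done */
}

static int phy_read_indirect(int addr, int regnum)
{
	g2_write_cmd(0x9800 | (addr << 5) | regnum);
	if (g2_smi_cmd & 0x8000)  /* the driver polls this for up to HZ/10 */
		return -1;
	return g2_smi_data;
}

static int phy_write_indirect(int addr, int regnum, uint16_t val)
{
	g2_smi_data = val;
	g2_write_cmd(0x9400 | (addr << 5) | regnum);
	return (g2_smi_cmd & 0x8000) ? -1 : 0;
}

int main(void)
{
	phy_write_indirect(3, 0x16, 0x0002);  /* e.g. select a PHY page */
	printf("phy 3 reg 0x16 = 0x%04x\n",
	       (unsigned int)phy_read_indirect(3, 0x16));
	return 0;
}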
mutex_unlock(&ps->phy_mutex); return ret; @@ -499,7 +446,7 @@ mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return addr; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_write(ds, addr, regnum, val); + ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val); mutex_unlock(&ps->phy_mutex); return ret; @@ -553,7 +500,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) if (ret < 0) goto error; - ret = mv88e6352_eeprom_busy_wait(ds); + ret = mv88e6xxx_eeprom_busy_wait(ds); if (ret < 0) goto error; @@ -576,7 +523,7 @@ static int mv88e6352_get_eeprom(struct dsa_switch *ds, eeprom->magic = 0xc3ec4951; - ret = mv88e6352_eeprom_load_wait(ds); + ret = mv88e6xxx_eeprom_load_wait(ds); if (ret < 0) return ret; @@ -657,7 +604,7 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, if (ret < 0) goto error; - ret = mv88e6352_eeprom_busy_wait(ds); + ret = mv88e6xxx_eeprom_busy_wait(ds); error: mutex_unlock(&ps->eeprom_mutex); return ret; @@ -681,7 +628,7 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds, len = eeprom->len; eeprom->len = 0; - ret = mv88e6352_eeprom_load_wait(ds); + ret = mv88e6xxx_eeprom_load_wait(ds); if (ret < 0) return ret; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 3e7e31a6abb7..a83ace0803e7 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -596,6 +596,59 @@ error: } #endif /* CONFIG_NET_DSA_HWMON */ +static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) +{ + unsigned long timeout = jiffies + HZ / 10; + + while (time_before(jiffies, timeout)) { + int ret; + + ret = REG_READ(reg, offset); + if (!(ret & mask)) + return 0; + + usleep_range(1000, 2000); + } + return -ETIMEDOUT; +} + +int mv88e6xxx_phy_wait(struct dsa_switch *ds) +{ + return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x18, 0x8000); +} + +int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) +{ + return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x0800); +} + +int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) +{ + return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x8000); +} + +int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum) +{ + int ret; + + REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); + + ret = mv88e6xxx_phy_wait(ds); + if (ret < 0) + return ret; + + return REG_READ(REG_GLOBAL2, 0x19); +} + +int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, + u16 val) +{ + REG_WRITE(REG_GLOBAL2, 0x19, val); + REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); + + return mv88e6xxx_phy_wait(ds); +} + static int __init mv88e6xxx_init(void) { #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 03e397efde36..72942271bb67 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -82,6 +82,12 @@ int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *_p); int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); +int mv88e6xxx_phy_wait(struct dsa_switch *ds); +int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds); +int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds); +int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum); +int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, + u16 val); extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver 
mv88e6123_61_65_switch_driver; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 6725dc00750b..fd9296a5014d 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -105,11 +105,11 @@ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) /* set MDIO address */ csrwr32((mii_id & 0x1f), priv->mac_dev, - tse_csroffs(mdio_phy0_addr)); + tse_csroffs(mdio_phy1_addr)); /* get the data */ return csrrd32(priv->mac_dev, - tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; + tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff; } static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, @@ -120,10 +120,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, /* set MDIO address */ csrwr32((mii_id & 0x1f), priv->mac_dev, - tse_csroffs(mdio_phy0_addr)); + tse_csroffs(mdio_phy1_addr)); /* write the data */ - csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); + csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4); return 0; } @@ -1098,8 +1098,12 @@ static int tse_open(struct net_device *dev) spin_lock(&priv->mac_cfg_lock); ret = reset_mac(priv); + /* Note that reset_mac will fail if the clocks are gated by the PHY + * due to the PHY being put into isolation or power down mode. + * This is not an error if reset fails due to no clock. + */ if (ret) - netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); ret = init_mac(priv); spin_unlock(&priv->mac_cfg_lock); @@ -1203,8 +1207,12 @@ static int tse_shutdown(struct net_device *dev) spin_lock(&priv->tx_lock); ret = reset_mac(priv); + /* Note that reset_mac will fail if the clocks are gated by the PHY + * due to the PHY being put into isolation or power down mode. + * This is not an error if reset fails due to no clock. + */ if (ret) - netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); priv->dmaops->reset_dma(priv); free_skbufs(dev); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e6561df1..8eb37e0194b5 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1708,7 +1708,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ if (!is_valid_ether_addr(dev->dev_addr)) - memset(dev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(dev->dev_addr); if (pcnet32_debug & NETIF_MSG_PROBE) { pr_cont(" %pM", dev->dev_addr); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 41a3c9804427..ee4fdfe65e9e 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -71,12 +71,12 @@ config BCMGENET Broadcom BCM7xxx Set Top Box family chipset. config BNX2 - tristate "QLogic NetXtremeII support" + tristate "QLogic bnx2 support" depends on PCI select CRC32 select FW_LOADER ---help--- - This driver supports QLogic NetXtremeII gigabit Ethernet cards. + This driver supports QLogic bnx2 gigabit Ethernet cards. To compile this driver as a module, choose M here: the module will be called bnx2. This is recommended. @@ -87,8 +87,8 @@ config CNIC select BNX2 select UIO ---help--- - This driver supports offload features of QLogic NetXtremeII - gigabit Ethernet cards. 
+ This driver supports offload features of QLogic bnx2 gigabit + Ethernet cards. To compile this driver as a module, choose M here: the module will be called cnic. This is recommended. diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 02bf0b86995b..2b66ef3d8217 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1,7 +1,7 @@ -/* bnx2.c: QLogic NX2 network driver. +/* bnx2.c: QLogic bnx2 network driver. * * Copyright (c) 2004-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,8 +58,8 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.2.5" -#define DRV_MODULE_RELDATE "December 20, 2013" +#define DRV_MODULE_VERSION "2.2.6" +#define DRV_MODULE_RELDATE "January 29, 2014" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" @@ -72,10 +72,10 @@ #define TX_TIMEOUT (5*HZ) static char version[] = - "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); -MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver"); +MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FW_MIPS_FILE_06); @@ -4984,8 +4984,6 @@ bnx2_init_chip(struct bnx2 *bp) bp->idle_chk_status_idx = 0xffff; - bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; - /* Set up how to generate a link change interrupt. */ BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); @@ -7710,17 +7708,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) return 0; } -static netdev_features_t -bnx2_fix_features(struct net_device *dev, netdev_features_t features) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) - features |= NETIF_F_HW_VLAN_CTAG_RX; - - return features; -} - static int bnx2_set_features(struct net_device *dev, netdev_features_t features) { @@ -8527,7 +8514,6 @@ static const struct net_device_ops bnx2_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnx2_change_mac_addr, .ndo_change_mtu = bnx2_change_mtu, - .ndo_fix_features = bnx2_fix_features, .ndo_set_features = bnx2_set_features, .ndo_tx_timeout = bnx2_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -8578,6 +8564,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= dev->hw_features; dev->priv_flags |= IFF_UNICAST_FLT; + if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + if ((rc = register_netdev(dev))) { dev_err(&pdev->dev, "Cannot register net device\n"); goto error; diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 28df35d35893..f92f76c44756 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -1,7 +1,7 @@ -/* bnx2.h: QLogic NX2 network driver. +/* bnx2.h: QLogic bnx2 network driver. 
* * Copyright (c) 2004-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h index 7db79c28b5ff..b0f2ccadaffd 100644 --- a/drivers/net/ethernet/broadcom/bnx2_fw.h +++ b/drivers/net/ethernet/broadcom/bnx2_fw.h @@ -1,7 +1,7 @@ -/* bnx2_fw.h: QLogic NX2 network driver. +/* bnx2_fw.h: QLogic bnx2 network driver. * * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ffe4e003e636..e3d853cab7c9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2446,7 +2446,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) } packet = skb_put(skb, pkt_size); memcpy(packet, bp->dev->dev_addr, ETH_ALEN); - memset(packet + ETH_ALEN, 0, ETH_ALEN); + eth_zero_addr(packet + ETH_ALEN); memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); for (i = ETH_HLEN; i < pkt_size; i++) packet[i] = (unsigned char) (i & 0xff); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..98dcb03fe1b8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11546,13 +11546,13 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) /* Disable iSCSI OOO if MAC configuration is invalid. */ if (!is_valid_ether_addr(iscsi_mac)) { bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; - memset(iscsi_mac, 0, ETH_ALEN); + eth_zero_addr(iscsi_mac); } /* Disable FCoE if MAC configuration is invalid. */ if (!is_valid_ether_addr(fip_mac)) { bp->flags |= NO_FCOE_FLAG; - memset(bp->fip_mac, 0, ETH_ALEN); + eth_zero_addr(bp->fip_mac); } } @@ -11563,7 +11563,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) int port = BP_PORT(bp); /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(bp->dev->dev_addr); if (BP_NOMCP(bp)) { BNX2X_ERROR("warning: random MAC workaround active\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e5aca2de1871..8638d6c97caa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2693,7 +2693,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); else /* function has not been loaded yet. Show mac as 0s */ - memset(&ivi->mac, 0, ETH_ALEN); + eth_zero_addr(ivi->mac); /* vlan */ if (bulletin->valid_bitmap & (1 << VLAN_VALID)) diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index f05fab65d78a..17c145fdf3ff 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,7 +1,7 @@ /* cnic.c: QLogic CNIC core network driver. 
* * Copyright (c) 2006-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,11 +58,11 @@ #define CNIC_MODULE_NAME "cnic" static char version[] = - "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; + "QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " "Chen (zongxi@broadcom.com"); -MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver"); +MODULE_DESCRIPTION("QLogic cnic Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(CNIC_MODULE_VERSION); diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8bb36c1c4d68..ef6125b0ee3e 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,7 +1,7 @@ -/* cnic_if.h: QLogic CNIC core network driver. +/* cnic_if.h: QLogic cnic core network driver. * * Copyright (c) 2006-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,8 +15,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.20" -#define CNIC_MODULE_RELDATE "March 14, 2014" +#define CNIC_MODULE_VERSION "2.5.21" +#define CNIC_MODULE_RELDATE "January 29, 2015" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 6befde61c203..84feb241d60b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -54,8 +54,8 @@ /* Default highest priority queue for multi queue support */ #define GENET_Q0_PRIORITY 0 -#define GENET_DEFAULT_BD_CNT \ - (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) #define RX_BUF_LENGTH 2048 #define SKB_ALIGNMENT 32 @@ -923,7 +923,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, tx_cb_ptr = ring->cbs; tx_cb_ptr += ring->write_ptr - ring->cb_ptr; - tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; + /* Advancing local write pointer */ if (ring->write_ptr == ring->end_ptr) ring->write_ptr = ring->cb_ptr; @@ -1710,17 +1710,14 @@ static int init_umac(struct bcmgenet_priv *priv) return 0; } -/* Initialize all house-keeping variables for a TX ring, along - * with corresponding hardware registers - */ +/* Initialize a Tx ring along with corresponding hardware registers */ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, unsigned int index, unsigned int size, - unsigned int write_ptr, unsigned int end_ptr) + unsigned int start_ptr, unsigned int end_ptr) { struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; u32 words_per_bd = WORDS_PER_BD(priv); u32 flow_period_val = 0; - unsigned int first_bd; spin_lock_init(&ring->lock); ring->priv = priv; @@ -1735,12 +1732,12 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, ring->int_enable = bcmgenet_tx_ring_int_enable; ring->int_disable = bcmgenet_tx_ring_int_disable; } - ring->cbs = priv->tx_cbs + write_ptr; 
+ ring->cbs = priv->tx_cbs + start_ptr; ring->size = size; ring->c_index = 0; ring->free_bds = size; - ring->write_ptr = write_ptr; - ring->cb_ptr = write_ptr; + ring->write_ptr = start_ptr; + ring->cb_ptr = start_ptr; ring->end_ptr = end_ptr - 1; ring->prod_index = 0; @@ -1754,19 +1751,16 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, /* Disable rate control for now */ bcmgenet_tdma_ring_writel(priv, index, flow_period_val, TDMA_FLOW_PERIOD); - /* Unclassified traffic goes to ring 16 */ bcmgenet_tdma_ring_writel(priv, index, ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); - first_bd = write_ptr; - /* Set start and end address, read and write pointers */ - bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, DMA_START_ADDR); - bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, TDMA_READ_PTR); - bcmgenet_tdma_ring_writel(priv, index, first_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, TDMA_WRITE_PTR); bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, DMA_END_ADDR); @@ -1825,78 +1819,73 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, return ret; } -/* init multi xmit queues, only available for GENET2+ - * the queue is partitioned as follows: +/* Initialize Tx queues * - * queue 0 - 3 is priority based, each one has 32 descriptors, + * Queues 0-3 are priority-based, each one has 32 descriptors, * with queue 0 being the highest priority queue. * - * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT - * descriptors: 256 - (number of tx queues * bds per queues) = 128 - * descriptors. + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 
* - * The transmit control block pool is then partitioned as following: - * - tx_cbs[0...127] are for queue 16 - * - tx_ring_cbs[0] points to tx_cbs[128..159] - * - tx_ring_cbs[1] points to tx_cbs[160..191] - * - tx_ring_cbs[2] points to tx_cbs[192..223] - * - tx_ring_cbs[3] points to tx_cbs[224..255] + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] */ -static void bcmgenet_init_multiq(struct net_device *dev) +static void bcmgenet_init_tx_queues(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - unsigned int i, dma_enable; - u32 reg, dma_ctrl, ring_cfg = 0; + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; u32 dma_priority[3] = {0, 0, 0}; - if (!netif_is_multiqueue(dev)) { - netdev_warn(dev, "called with non multi queue aware HW\n"); - return; - } - dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); dma_enable = dma_ctrl & DMA_EN; dma_ctrl &= ~DMA_EN; bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + dma_ctrl = 0; + ring_cfg = 0; + /* Enable strict priority arbiter mode */ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + /* Initialize Tx priority queues */ for (i = 0; i < priv->hw_params->tx_queues; i++) { - /* first 64 tx_cbs are reserved for default tx queue - * (ring 16) - */ - bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, - i * priv->hw_params->bds_cnt, - (i + 1) * priv->hw_params->bds_cnt); - - /* Configure ring as descriptor ring and setup priority */ - ring_cfg |= 1 << i; - dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); - + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); dma_priority[DMA_PRIO_REG_INDEX(i)] |= ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); } - /* Set ring 16 priority and program the hardware registers */ + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); - /* Enable rings */ - reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); - reg |= ring_cfg; - bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); - /* Configure ring as descriptor ring and re-enable DMA if enabled */ - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg |= dma_ctrl; + /* Enable Tx DMA */ if (dma_enable) - reg |= DMA_EN; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); } static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) @@ -1985,6 +1974,8 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) static int bcmgenet_init_dma(struct bcmgenet_priv *priv) { int ret; + unsigned int i; + struct enet_cb *cb; netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); @@ -2011,14 +2002,13 @@ 
static int bcmgenet_init_dma(struct bcmgenet_priv *priv) return -ENOMEM; } - /* initialize multi xmit queue */ - bcmgenet_init_multiq(priv->dev); + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } - /* initialize special ring 16 */ - bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, - priv->hw_params->tx_queues * - priv->hw_params->bds_cnt, - TOTAL_DESC); + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); return 0; } @@ -2499,8 +2489,8 @@ static const struct net_device_ops bcmgenet_netdev_ops = { static struct bcmgenet_hw_params bcmgenet_hw_params[] = { [GENET_V1] = { .tx_queues = 0, + .tx_bds_per_q = 0, .rx_queues = 0, - .bds_cnt = 0, .bp_in_en_shift = 16, .bp_in_mask = 0xffff, .hfb_filter_cnt = 16, @@ -2512,8 +2502,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V2] = { .tx_queues = 4, + .tx_bds_per_q = 32, .rx_queues = 4, - .bds_cnt = 32, .bp_in_en_shift = 16, .bp_in_mask = 0xffff, .hfb_filter_cnt = 16, @@ -2528,8 +2518,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V3] = { .tx_queues = 4, + .tx_bds_per_q = 32, .rx_queues = 4, - .bds_cnt = 32, .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, @@ -2544,8 +2534,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V4] = { .tx_queues = 4, + .tx_bds_per_q = 32, .rx_queues = 4, - .bds_cnt = 32, .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, @@ -2645,14 +2635,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) #endif pr_debug("Configuration for version: %d\n" - "TXq: %1d, RXq: %1d, BDs: %1d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d\n" "BP << en: %2d, BP msk: 0x%05x\n" "HFB count: %2d, QTAQ msk: 0x%05x\n" "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" "RDMA: 0x%05x, TDMA: 0x%05x\n" "Words/BD: %d\n", priv->version, - params->tx_queues, params->rx_queues, params->bds_cnt, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->bp_in_en_shift, params->bp_in_mask, params->hfb_filter_cnt, params->qtag_mask, params->tbuf_offset, params->hfb_offset, diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0d370d168aee..016bd12bf493 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -503,8 +503,8 @@ enum bcmgenet_version { */ struct bcmgenet_hw_params { u8 tx_queues; + u8 tx_bds_per_q; u8 rx_queues; - u8 bds_cnt; u8 bp_in_en_shift; u32 bp_in_mask; u8 hfb_filter_cnt; diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig index 264155778857..4e8c0b6c57d0 100644 --- a/drivers/net/ethernet/brocade/Kconfig +++ b/drivers/net/ethernet/brocade/Kconfig @@ -1,9 +1,9 @@ # -# Brocade device configuration +# QLogic BR-series device configuration # config NET_VENDOR_BROCADE - bool "Brocade devices" + bool "QLogic BR-series devices" default y depends on PCI ---help--- @@ -13,8 +13,8 @@ config NET_VENDOR_BROCADE Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about Brocade cards. If you say Y, you will be asked for - your specific card in the following questions. + the questions about QLogic BR-series cards. If you say Y, you will be + asked for your specific card in the following questions. 
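/*
 * Worked example (illustration only, not part of the patch): the Tx
 * descriptor split described in the bcmgenet_init_tx_queues() comment in the
 * bcmgenet hunks above -- tx_queues = 4 priority queues of tx_bds_per_q = 32
 * descriptors each, with default queue 16 taking the remainder.  TOTAL_DESC
 * = 256 is taken from the "256 - 4 * 32 = 128" arithmetic in that comment.
 */
#include <stdio.h>

#define TOTAL_DESC		256
#define TX_QUEUES		4
#define TX_BDS_PER_Q		32
#define GENET_Q16_TX_BD_CNT	(TOTAL_DESC - TX_QUEUES * TX_BDS_PER_Q)

int main(void)
{
	int i;

	/* Priority queues 0..3: tx_cbs[i * 32 .. (i + 1) * 32 - 1] */
	for (i = 0; i < TX_QUEUES; i++)
		printf("Tx queue %2d uses tx_cbs[%3d..%3d] (%d BDs)\n",
		       i, i * TX_BDS_PER_Q, (i + 1) * TX_BDS_PER_Q - 1,
		       TX_BDS_PER_Q);

	/* Default queue 16 gets everything that is left: 256 - 128 = 128 */
	printf("Tx queue 16 uses tx_cbs[%3d..%3d] (%d BDs)\n",
	       TX_QUEUES * TX_BDS_PER_Q, TOTAL_DESC - 1, GENET_Q16_TX_BD_CNT);
	return 0;
}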
if NET_VENDOR_BROCADE diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile index b58238d2df6a..fec10f9b4558 100644 --- a/drivers/net/ethernet/brocade/Makefile +++ b/drivers/net/ethernet/brocade/Makefile @@ -1,5 +1,5 @@ # -# Makefile for the Brocade device drivers. +# Makefile for the QLogic BR-series device drivers. # obj-$(CONFIG_BNA) += bna/ diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig index dc2eb526fbf7..fe01279a8843 100644 --- a/drivers/net/ethernet/brocade/bna/Kconfig +++ b/drivers/net/ethernet/brocade/bna/Kconfig @@ -1,17 +1,17 @@ # -# Brocade network device configuration +# QLogic BR-series network device configuration # config BNA - tristate "Brocade 1010/1020 10Gb Ethernet Driver support" + tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support" depends on PCI ---help--- - This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet - cards. + This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable + Ethernet cards. To compile this driver as a module, choose M here: the module will be called bna. - For general information and support, go to the Brocade support + For general information and support, go to the QLogic support website at: - <http://support.brocade.com> + <http://support.qlogic.com> diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile index 6027302ae73a..6e10b99733a2 100644 --- a/drivers/net/ethernet/brocade/bna/Makefile +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -1,5 +1,6 @@ # -# Copyright (c) 2005-2010 Brocade Communications Systems, Inc. +# Copyright (c) 2005-2014 Brocade Communications Systems, Inc. +# Copyright (c) 2014-2015 QLogic Corporation. # All rights reserved. # diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 550d2521ba76..cf9f3956f198 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_cee.h" diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h index 93fde633d6f3..d04eef5d5a77 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_CEE_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h index ad004a4c3897..af25d8e8fae0 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* BFA common services */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index b7d8127c198f..3bfd9da92630 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h index b39c5f23974b..63e300f5ba41 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_CNA_H__ #define __BFA_DEFS_CNA_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h index 7fb396fe679d..7a45cd0b594d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_MFG_COMM_H__ #define __BFA_DEFS_MFG_COMM_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h index ea9af9ae754d..a43b56002752 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_STATUS_H__ #define __BFA_DEFS_STATUS_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 354ae9792bad..f2d13238b02e 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_ioc.h" @@ -2763,7 +2764,7 @@ bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, list_add_tail(¬ify->qe, &ioc->notify_q); } -#define BFA_MFG_NAME "Brocade" +#define BFA_MFG_NAME "QLogic" static void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, struct bfa_adapter_attr *ad_attr) diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 20cff7df4b55..effb7156e7a4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_IOC_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index d639558455cb..66c8507d7717 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. 
+ * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_ioc.h" diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c index 55067d0d25cf..c07d5b9372f4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* MSGQ module source file. */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h index a6a565a366dc..66bc8b5acd57 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_MSGQ_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 8c563a77cdf6..f1e1129e6241 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFI_H__ #define __BFI_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h index 6704a4392973..bd605bee72ee 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. 
+ * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFI_CNA_H__ #define __BFI_CNA_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h index ae072dc5d238..bccca3bbadb8 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_enet.h +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* BNA Hardware and Firmware Interface */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h index c49fa312ddbd..2835b51eabec 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_reg.h +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,13 +11,14 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* - * bfi_reg.h ASIC register defines for all Brocade adapter ASICs + * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs */ #ifndef __BFI_REG_H__ @@ -221,7 +222,7 @@ enum { #define __PMM_1T_RESET_P 0x00000001 #define PMM_1T_RESET_REG_P1 0x00023c1c -/* Brocade 1860 Adapter specific defines */ +/* QLogic BR-series 1860 Adapter specific defines */ #define CT2_PCI_CPQ_BASE 0x00030000 #define CT2_PCI_APP_BASE 0x00030100 #define CT2_PCI_ETH_BASE 0x00030400 @@ -264,7 +265,7 @@ enum { #define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) /* - * Brocade 1860 adapter CPQ block registers + * QLogic BR-series 1860 adapter CPQ block registers */ #define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) #define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 1f512190d696..8ba72b1f36d9 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNA_H__ #define __BNA_H__ diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 903466ef41c0..deb8da6ab9cc 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bna.h" diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index 2702d02e98d9..c5feab130d6d 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* File for interrupt macros and functions */ diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 5fac411c52f4..8ab3a5f62706 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bna.h" #include "bfi.h" diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index 621547cd3504..d0a7a566f5d6 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. 
+ * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNA_TYPES_H__ #define __BNA_TYPES_H__ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7714d7790089..37072a83f9d6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/bitops.h> #include <linux/netdevice.h> @@ -3867,7 +3868,7 @@ bnad_module_init(void) { int err; - pr_info("Brocade 10G Ethernet driver - version: %s\n", + pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n", BNAD_VERSION); bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); @@ -3894,7 +3895,7 @@ module_exit(bnad_module_exit); MODULE_AUTHOR("Brocade"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver"); +MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); MODULE_VERSION(BNAD_VERSION); MODULE_FIRMWARE(CNA_FW_FILE_CT); MODULE_FIRMWARE(CNA_FW_FILE_CT2); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 2842c188e0da..7ead6c23edb6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNAD_H__ #define __BNAD_H__ @@ -71,7 +72,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.2.23.0" +#define BNAD_VERSION "3.2.25.1" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 619083a860a4..72c89550417c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/debugfs.h> diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index d26adac6ab99..12f344debd1c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "cna.h" diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index b3ff6d507951..28e7d0ffeab1 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2006-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2006-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __CNA_H__ @@ -37,8 +38,8 @@ extern char bfa_version[]; -#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin" -#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" +#define CNA_FW_FILE_CT "ctfw-3.2.5.1.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ #pragma pack(1) diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index 6f72771caea6..ebf462d8082f 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/firmware.h> #include "bnad.h" diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..1fe8b946243a 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -449,7 +449,7 @@ static void macb_update_stats(struct macb *bp) WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); for(; p < end; p++, reg++) - *p += __raw_readl(reg); + *p += readl_relaxed(reg); } static int macb_halt_tx(struct macb *bp) @@ -1578,6 +1578,7 @@ static u32 macb_dbw(struct macb *bp) static void macb_configure_dma(struct macb *bp) { u32 dmacfg; + u32 tmp, ncr; if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); @@ -1585,7 +1586,24 @@ static void macb_configure_dma(struct macb *bp) if (bp->dma_burst_length) dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); - dmacfg &= ~GEM_BIT(ENDIA); + dmacfg &= ~GEM_BIT(ENDIA_PKT); + + /* Find the CPU endianness by using the loopback bit of net_ctrl + * register. save it first. When the CPU is in big endian we + * need to program swaped mode for management descriptor access. + */ + ncr = macb_readl(bp, NCR); + __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); + tmp = __raw_readl(bp->regs + MACB_NCR); + + if (tmp == MACB_BIT(LLB)) + dmacfg &= ~GEM_BIT(ENDIA_DESC); + else + dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ + + /* Restore net_ctrl */ + macb_writel(bp, NCR, ncr); + if (bp->dev->features & NETIF_F_HW_CSUM) dmacfg |= GEM_BIT(TXCOEN); else @@ -1832,14 +1850,14 @@ static void gem_update_stats(struct macb *bp) for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { u32 offset = gem_statistics[i].offset; - u64 val = __raw_readl(bp->regs + offset); + u64 val = readl_relaxed(bp->regs + offset); bp->ethtool_stats[i] += val; *p += val; if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { /* Add GEM_OCTTXH, GEM_OCTRXH */ - val = __raw_readl(bp->regs + offset + 4); + val = readl_relaxed(bp->regs + offset + 4); bp->ethtool_stats[i] += ((u64)val) << 32; *(++p) += val; } @@ -2191,12 +2209,14 @@ static void macb_probe_queues(void __iomem *mem, *num_queues = 1; /* is it macb or gem ? */ - mid = __raw_readl(mem + MACB_MID); + mid = readl_relaxed(mem + MACB_MID); + if (MACB_BFEXT(IDNUM, mid) != 0x2) return; /* bit 0 is never set but queue 0 always exists */ - *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff; + *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; + *queue_mask |= 0x1; for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..83241c8ec5dc 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -229,8 +229,10 @@ /* Bitfields in DMACFG. 
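/*
 * Editor's sketch (not part of the patch): control flow of the CPU
 * endianness probe added to macb_configure_dma() in the macb.c hunk above.
 * The driver saves NCR, writes the loopback bit with __raw_writel(), reads
 * it back with __raw_readl() and restores NCR; since the raw accessors do
 * not byte-swap, on a big-endian CPU the write lands in a different bit
 * position than the controller's LLB bit, the read-back differs, and
 * GEM_BIT(ENDIA_DESC) is set so the GEM swaps descriptor words.
 * Everything below is a stand-in (fake register, accessors, bit value);
 * in this host-only model the read-back always matches, so the function
 * returns false -- the mismatch only shows up on real hardware.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_MACB_BIT_LLB       (1u << 1)       /* stand-in for MACB_BIT(LLB) */

static uint32_t fake_ncr;       /* stands in for the memory-mapped NCR */

static void raw_writel(uint32_t val, uint32_t *reg) { *reg = val; } /* no swap */
static uint32_t raw_readl(const uint32_t *reg) { return *reg; }     /* no swap */

/* Returns true when descriptor endianness swapping should be enabled. */
static bool need_desc_swap(void)
{
        uint32_t saved = raw_readl(&fake_ncr);
        bool swap;

        raw_writel(FAKE_MACB_BIT_LLB, &fake_ncr);
        swap = (raw_readl(&fake_ncr) != FAKE_MACB_BIT_LLB);

        raw_writel(saved, &fake_ncr);   /* restore NCR, as the driver does */
        return swap;
}

int main(void)
{
        printf("descriptor swap needed in this model: %s\n",
               need_desc_swap() ? "yes" : "no");
        return 0;
}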
*/ #define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */ #define GEM_FBLDO_SIZE 5 -#define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */ -#define GEM_ENDIA_SIZE 1 +#define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */ +#define GEM_ENDIA_DESC_SIZE 1 +#define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */ +#define GEM_ENDIA_PKT_SIZE 1 #define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */ #define GEM_RXBMS_SIZE 2 #define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */ @@ -423,17 +425,17 @@ /* Register access macros */ #define macb_readl(port,reg) \ - __raw_readl((port)->regs + MACB_##reg) + readl_relaxed((port)->regs + MACB_##reg) #define macb_writel(port,reg,value) \ - __raw_writel((value), (port)->regs + MACB_##reg) + writel_relaxed((value), (port)->regs + MACB_##reg) #define gem_readl(port, reg) \ - __raw_readl((port)->regs + GEM_##reg) + readl_relaxed((port)->regs + GEM_##reg) #define gem_writel(port, reg, value) \ - __raw_writel((value), (port)->regs + GEM_##reg) + writel_relaxed((value), (port)->regs + GEM_##reg) #define queue_readl(queue, reg) \ - __raw_readl((queue)->bp->regs + (queue)->reg) + readl_relaxed((queue)->bp->regs + (queue)->reg) #define queue_writel(queue, reg, value) \ - __raw_writel((value), (queue)->bp->regs + (queue)->reg) + writel_relaxed((value), (queue)->bp->regs + (queue)->reg) /* Conditional GEM/MACB macros. These perform the operation to the correct * register dependent on whether the device is a GEM or a MACB. For registers diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index a5179bfcdc2c..204bd182473b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -893,7 +893,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, } else { memset(pp, 0, sizeof(*pp)); if (vf == PORT_SELF_VF) - memset(netdev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(netdev->dev_addr); } } else { /* Set flag to indicate that the port assoc/disassoc @@ -903,14 +903,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ if (pp->request == PORT_REQUEST_DISASSOCIATE) { - memset(pp->mac_addr, 0, ETH_ALEN); + eth_zero_addr(pp->mac_addr); if (vf == PORT_SELF_VF) - memset(netdev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(netdev->dev_addr); } } if (vf == PORT_SELF_VF) - memset(pp->vf_mac, 0, ETH_ALEN); + eth_zero_addr(pp->vf_mac); return err; } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..fac806a15a61 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -238,10 +238,17 @@ struct be_tx_stats { struct u64_stats_sync sync_compl; }; +/* Structure to hold some data of interest obtained from a TX CQE */ +struct be_tx_compl_info { + u8 status; /* Completion status */ + u16 end_index; /* Completed TXQ Index */ +}; + struct be_tx_obj { u32 db_offset; struct be_queue_info q; struct be_queue_info cq; + struct be_tx_compl_info txcp; /* Remember the skbs that were transmitted */ struct sk_buff *sent_skb_list[TX_Q_LEN]; struct be_tx_stats stats; @@ -369,6 +376,7 @@ enum vf_state { #define BE_FLAGS_VXLAN_OFFLOADS BIT(8) #define BE_FLAGS_SETUP_DONE BIT(9) #define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) +#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11) #define BE_UC_PMAC_COUNT 30 #define 
BE_VF_UC_PMAC_COUNT 2 @@ -417,6 +425,39 @@ struct rss_info { u8 rss_hkey[RSS_HASH_KEY_LEN]; }; +/* Macros to read/write the 'features' word of be_wrb_params structure. + */ +#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT +#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT) + +#define BE_WRB_F_GET(word, name) \ + (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name)) + +#define BE_WRB_F_SET(word, name, val) \ + ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name))) + +/* Feature/offload bits */ +enum { + BE_WRB_F_CRC_BIT, /* Ethernet CRC */ + BE_WRB_F_IPCS_BIT, /* IP csum */ + BE_WRB_F_TCPCS_BIT, /* TCP csum */ + BE_WRB_F_UDPCS_BIT, /* UDP csum */ + BE_WRB_F_LSO_BIT, /* LSO */ + BE_WRB_F_LSO6_BIT, /* LSO6 */ + BE_WRB_F_VLAN_BIT, /* VLAN */ + BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */ +}; + +/* The structure below provides a HW-agnostic abstraction of WRB params + * retrieved from a TX skb. This is in turn passed to chip specific routines + * during transmit, to set the corresponding params in the WRB. + */ +struct be_wrb_params { + u32 features; /* Feature bits */ + u16 vlan_tag; /* VLAN tag */ + u16 lso_mss; /* MSS for LSO */ +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -461,7 +502,7 @@ struct be_adapter { struct delayed_work work; u16 work_counter; - struct delayed_work func_recovery_work; + struct delayed_work be_err_detection_work; u32 flags; u32 cmd_privileges; /* Ethtool knobs and info */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..be00695b3be7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -635,73 +635,16 @@ static int lancer_wait_ready(struct be_adapter *adapter) for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_RDY_MASK) - break; - - msleep(1000); - } - - if (i == SLIPORT_READY_TIMEOUT) - return sliport_status ? 
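/*
 * Editor's sketch (not part of the patch): compile-and-run exercise of the
 * BE_WRB_F_SET()/BE_WRB_F_GET() pattern added to be.h above, which packs
 * per-feature flag bits into the 'features' word of struct be_wrb_params.
 * The macro bodies follow the hunk; BIT_MASK() is a simplified stand-in for
 * the kernel macro, and only a subset of the feature bits is reproduced, so
 * the bit positions here do not match the driver's full enum.
 */
#include <assert.h>
#include <stdint.h>

#define BIT_MASK(nr)            (1u << (nr))            /* stand-in */
#define BE_WRB_F_BIT(name)      BE_WRB_F_##name##_BIT
#define BE_WRB_F_MASK(name)     BIT_MASK(BE_WRB_F_##name##_BIT)
#define BE_WRB_F_GET(word, name) \
        (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name))
#define BE_WRB_F_SET(word, name, val) \
        ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name)))

enum {
        BE_WRB_F_CRC_BIT,       /* Ethernet CRC */
        BE_WRB_F_IPCS_BIT,      /* IP csum */
        BE_WRB_F_TCPCS_BIT,     /* TCP csum */
        BE_WRB_F_VLAN_BIT       /* VLAN */
};

int main(void)
{
        uint32_t features = 0;

        /* Mark a packet as needing CRC and TCP checksum offload. */
        BE_WRB_F_SET(features, CRC, 1);
        BE_WRB_F_SET(features, TCPCS, 1);

        assert(BE_WRB_F_GET(features, CRC) == 1);
        assert(BE_WRB_F_GET(features, TCPCS) == 1);
        assert(BE_WRB_F_GET(features, VLAN) == 0);      /* never set */
        return 0;
}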
: -1; - - return 0; -} - -static bool lancer_provisioning_error(struct be_adapter *adapter) -{ - u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; - - sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); - if (sliport_status & SLIPORT_STATUS_ERR_MASK) { - sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); - sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); - - if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && - sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) - return true; - } - return false; -} - -int lancer_test_and_set_rdy_state(struct be_adapter *adapter) -{ - int status; - u32 sliport_status, err, reset_needed; - bool resource_error; + return 0; - resource_error = lancer_provisioning_error(adapter); - if (resource_error) - return -EAGAIN; + if (sliport_status & SLIPORT_STATUS_ERR_MASK && + !(sliport_status & SLIPORT_STATUS_RN_MASK)) + return -EIO; - status = lancer_wait_ready(adapter); - if (!status) { - sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); - err = sliport_status & SLIPORT_STATUS_ERR_MASK; - reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; - if (err && reset_needed) { - iowrite32(SLI_PORT_CONTROL_IP_MASK, - adapter->db + SLIPORT_CONTROL_OFFSET); - - /* check if adapter has corrected the error */ - status = lancer_wait_ready(adapter); - sliport_status = ioread32(adapter->db + - SLIPORT_STATUS_OFFSET); - sliport_status &= (SLIPORT_STATUS_ERR_MASK | - SLIPORT_STATUS_RN_MASK); - if (status || sliport_status) - status = -1; - } else if (err || reset_needed) { - status = -1; - } + msleep(1000); } - /* Stop error recovery if error is not recoverable. - * No resource error is temporary errors and will go away - * when PF provisions resources. - */ - resource_error = lancer_provisioning_error(adapter); - if (resource_error) - status = -EAGAIN; - return status; + return sliport_status ? 
: -1; } int be_fw_wait_ready(struct be_adapter *adapter) @@ -720,6 +663,10 @@ int be_fw_wait_ready(struct be_adapter *adapter) } do { + /* There's no means to poll POST state on BE2/3 VFs */ + if (BEx_chip(adapter) && be_virtfn(adapter)) + return 0; + stage = be_POST_stage_get(adapter); if (stage == POST_STAGE_ARMFW_RDY) return 0; @@ -734,7 +681,7 @@ int be_fw_wait_ready(struct be_adapter *adapter) err: dev_err(dev, "POST timeout; stage=%#x\n", stage); - return -1; + return -ETIMEDOUT; } static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) @@ -2126,16 +2073,12 @@ int be_cmd_reset_function(struct be_adapter *adapter) int status; if (lancer_chip(adapter)) { + iowrite32(SLI_PORT_CONTROL_IP_MASK, + adapter->db + SLIPORT_CONTROL_OFFSET); status = lancer_wait_ready(adapter); - if (!status) { - iowrite32(SLI_PORT_CONTROL_IP_MASK, - adapter->db + SLIPORT_CONTROL_OFFSET); - status = lancer_test_and_set_rdy_state(adapter); - } - if (status) { + if (status) dev_err(&adapter->pdev->dev, "Adapter in non recoverable error\n"); - } return status; } @@ -3133,7 +3076,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) int status; bool pmac_valid = false; - memset(mac, 0, ETH_ALEN); + eth_zero_addr(mac); if (BEx_chip(adapter)) { if (be_physfn(adapter)) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..7eccebc676e2 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -727,48 +727,86 @@ static u16 skb_ip_proto(struct sk_buff *skb) ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; } -static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, - struct sk_buff *skb, u32 wrb_cnt, u32 len, - bool skip_hw_vlan) +static inline bool be_is_txq_full(struct be_tx_obj *txo) { - u16 vlan_tag, proto; + return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len; +} - memset(hdr, 0, sizeof(*hdr)); +static inline bool be_can_txq_wake(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) < txo->q.len / 2; +} - SET_TX_WRB_HDR_BITS(crc, hdr, 1); +static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) > txo->pend_wrb_cnt; +} + +static void be_get_wrb_params_from_skb(struct be_adapter *adapter, + struct sk_buff *skb, + struct be_wrb_params *wrb_params) +{ + u16 proto; if (skb_is_gso(skb)) { - SET_TX_WRB_HDR_BITS(lso, hdr, 1); - SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); + BE_WRB_F_SET(wrb_params->features, LSO, 1); + wrb_params->lso_mss = skb_shinfo(skb)->gso_size; if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) - SET_TX_WRB_HDR_BITS(lso6, hdr, 1); + BE_WRB_F_SET(wrb_params->features, LSO6, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->encapsulation) { - SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, IPCS, 1); proto = skb_inner_ip_proto(skb); } else { proto = skb_ip_proto(skb); } if (proto == IPPROTO_TCP) - SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, TCPCS, 1); else if (proto == IPPROTO_UDP) - SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, UDPCS, 1); } if (skb_vlan_tag_present(skb)) { - SET_TX_WRB_HDR_BITS(vlan, hdr, 1); - vlan_tag = be_get_tx_vlan_tag(adapter, skb); - SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); + BE_WRB_F_SET(wrb_params->features, VLAN, 1); + wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb); } - SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); - 
SET_TX_WRB_HDR_BITS(len, hdr, len); + BE_WRB_F_SET(wrb_params->features, CRC, 1); +} + +static void wrb_fill_hdr(struct be_adapter *adapter, + struct be_eth_hdr_wrb *hdr, + struct be_wrb_params *wrb_params, + struct sk_buff *skb) +{ + memset(hdr, 0, sizeof(*hdr)); - /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0 - * When this hack is not needed, the evt bit is set while ringing DB + SET_TX_WRB_HDR_BITS(crc, hdr, + BE_WRB_F_GET(wrb_params->features, CRC)); + SET_TX_WRB_HDR_BITS(ipcs, hdr, + BE_WRB_F_GET(wrb_params->features, IPCS)); + SET_TX_WRB_HDR_BITS(tcpcs, hdr, + BE_WRB_F_GET(wrb_params->features, TCPCS)); + SET_TX_WRB_HDR_BITS(udpcs, hdr, + BE_WRB_F_GET(wrb_params->features, UDPCS)); + + SET_TX_WRB_HDR_BITS(lso, hdr, + BE_WRB_F_GET(wrb_params->features, LSO)); + SET_TX_WRB_HDR_BITS(lso6, hdr, + BE_WRB_F_GET(wrb_params->features, LSO6)); + SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss); + + /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this + * hack is not needed, the evt bit is set while ringing DB. */ - if (skip_hw_vlan) - SET_TX_WRB_HDR_BITS(event, hdr, 1); + SET_TX_WRB_HDR_BITS(event, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW)); + SET_TX_WRB_HDR_BITS(vlan, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN)); + SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag); + + SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb)); + SET_TX_WRB_HDR_BITS(len, hdr, skb->len); } static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, @@ -788,77 +826,124 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, } } -/* Returns the number of WRBs used up by the skb */ +/* Grab a WRB header for xmit */ +static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) +{ + u16 head = txo->q.head; + + queue_head_inc(&txo->q); + return head; +} + +/* Set up the WRB header for xmit */ +static void be_tx_setup_wrb_hdr(struct be_adapter *adapter, + struct be_tx_obj *txo, + struct be_wrb_params *wrb_params, + struct sk_buff *skb, u16 head) +{ + u32 num_frags = skb_wrb_cnt(skb); + struct be_queue_info *txq = &txo->q; + struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); + + wrb_fill_hdr(adapter, hdr, wrb_params, skb); + be_dws_cpu_to_le(hdr, sizeof(*hdr)); + + BUG_ON(txo->sent_skb_list[head]); + txo->sent_skb_list[head] = skb; + txo->last_req_hdr = head; + atomic_add(num_frags, &txq->used); + txo->last_req_wrb_cnt = num_frags; + txo->pend_wrb_cnt += num_frags; +} + +/* Setup a WRB fragment (buffer descriptor) for xmit */ +static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, + int len) +{ + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + wrb = queue_head_node(txq); + wrb_fill(wrb, busaddr, len); + queue_head_inc(txq); +} + +/* Bring the queue back to the state it was in before be_xmit_enqueue() routine + * was invoked. The producer index is restored to the previous packet and the + * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. 
+ */ +static void be_xmit_restore(struct be_adapter *adapter, + struct be_tx_obj *txo, u16 head, bool map_single, + u32 copied) +{ + struct device *dev; + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + dev = &adapter->pdev->dev; + txq->head = head; + + /* skip the first wrb (hdr); it's not mapped */ + queue_head_inc(txq); + while (copied) { + wrb = queue_head_node(txq); + unmap_tx_frag(dev, wrb, map_single); + map_single = false; + copied -= le32_to_cpu(wrb->frag_len); + queue_head_inc(txq); + } + + txq->head = head; +} + +/* Enqueue the given packet for transmit. This routine allocates WRBs for the + * packet, dma maps the packet buffers and sets up the WRBs. Returns the number + * of WRBs used up by the packet. + */ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, - struct sk_buff *skb, bool skip_hw_vlan) + struct sk_buff *skb, + struct be_wrb_params *wrb_params) { u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); struct device *dev = &adapter->pdev->dev; struct be_queue_info *txq = &txo->q; - struct be_eth_hdr_wrb *hdr; bool map_single = false; - struct be_eth_wrb *wrb; - dma_addr_t busaddr; u16 head = txq->head; + dma_addr_t busaddr; + int len; - hdr = queue_head_node(txq); - wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan); - be_dws_cpu_to_le(hdr, sizeof(*hdr)); - - queue_head_inc(txq); + head = be_tx_get_wrb_hdr(txo); if (skb->len > skb->data_len) { - int len = skb_headlen(skb); + len = skb_headlen(skb); busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; map_single = true; - wrb = queue_head_node(txq); - wrb_fill(wrb, busaddr, len); - queue_head_inc(txq); + be_tx_setup_wrb_frag(txo, busaddr, len); copied += len; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + len = skb_frag_size(frag); - busaddr = skb_frag_dma_map(dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); + busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; - wrb = queue_head_node(txq); - wrb_fill(wrb, busaddr, skb_frag_size(frag)); - queue_head_inc(txq); - copied += skb_frag_size(frag); + be_tx_setup_wrb_frag(txo, busaddr, len); + copied += len; } - BUG_ON(txo->sent_skb_list[head]); - txo->sent_skb_list[head] = skb; - txo->last_req_hdr = head; - atomic_add(wrb_cnt, &txq->used); - txo->last_req_wrb_cnt = wrb_cnt; - txo->pend_wrb_cnt += wrb_cnt; + be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head); be_tx_stats_update(txo, skb); return wrb_cnt; dma_err: - /* Bring the queue back to the state it was in before this - * routine was invoked. 
- */ - txq->head = head; - /* skip the first wrb (hdr); it's not mapped */ - queue_head_inc(txq); - while (copied) { - wrb = queue_head_node(txq); - unmap_tx_frag(dev, wrb, map_single); - map_single = false; - copied -= le32_to_cpu(wrb->frag_len); - adapter->drv_stats.dma_map_errors++; - queue_head_inc(txq); - } - txq->head = head; + adapter->drv_stats.dma_map_errors++; + be_xmit_restore(adapter, txo, head, map_single, copied); return 0; } @@ -869,7 +954,8 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter) static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params + *wrb_params) { u16 vlan_tag = 0; @@ -886,8 +972,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to * skip VLAN insertion */ - if (skip_hw_vlan) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } if (vlan_tag) { @@ -905,8 +990,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, vlan_tag); if (unlikely(!skb)) return skb; - if (skip_hw_vlan) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } return skb; @@ -946,7 +1030,8 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params + *wrb_params) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; unsigned int eth_hdr_len; @@ -970,7 +1055,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (be_pvid_tagging_enabled(adapter) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); /* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. @@ -978,7 +1063,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_vlan_tag_present(skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err; } @@ -1000,7 +1085,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err; } @@ -1014,7 +1099,7 @@ err: static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params *wrb_params) { /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or * less may cause a transmit stall on that port. 
So the work-around is @@ -1026,7 +1111,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, } if (BEx_chip(adapter) || lancer_chip(adapter)) { - skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); + skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params); if (!skb) return NULL; } @@ -1060,24 +1145,26 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo) static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) { - bool skip_hw_vlan = false, flush = !skb->xmit_more; struct be_adapter *adapter = netdev_priv(netdev); u16 q_idx = skb_get_queue_mapping(skb); struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; - struct be_queue_info *txq = &txo->q; + struct be_wrb_params wrb_params = { 0 }; + bool flush = !skb->xmit_more; u16 wrb_cnt; - skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); + skb = be_xmit_workarounds(adapter, skb, &wrb_params); if (unlikely(!skb)) goto drop; - wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan); + be_get_wrb_params_from_skb(adapter, skb, &wrb_params); + + wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); if (unlikely(!wrb_cnt)) { dev_kfree_skb_any(skb); goto drop; } - if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) { + if (be_is_txq_full(txo)) { netif_stop_subqueue(netdev, q_idx); tx_stats(txo)->tx_stops++; } @@ -1991,18 +2078,23 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) } } -static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) +static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo) { - struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); + struct be_queue_info *tx_cq = &txo->cq; + struct be_tx_compl_info *txcp = &txo->txcp; + struct be_eth_tx_compl *compl = queue_tail_node(tx_cq); - if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) + if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) return NULL; + /* Ensure load ordering of valid bit dword and other dwords below */ rmb(); - be_dws_le_to_cpu(txcp, sizeof(*txcp)); + be_dws_le_to_cpu(compl, sizeof(*compl)); - txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; + txcp->status = GET_TX_COMPL_BITS(status, compl); + txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl); + compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; queue_tail_inc(tx_cq); return txcp; } @@ -2123,9 +2215,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter) { u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; struct device *dev = &adapter->pdev->dev; - struct be_tx_obj *txo; + struct be_tx_compl_info *txcp; struct be_queue_info *txq; - struct be_eth_tx_compl *txcp; + struct be_tx_obj *txo; int i, pending_txqs; /* Stop polling for compls when HW has been silent for 10ms */ @@ -2136,10 +2228,10 @@ static void be_tx_compl_clean(struct be_adapter *adapter) cmpl = 0; num_wrbs = 0; txq = &txo->q; - while ((txcp = be_tx_compl_get(&txo->cq))) { - end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); - num_wrbs += be_tx_compl_process(adapter, txo, - end_idx); + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += + be_tx_compl_process(adapter, txo, + txcp->end_index); cmpl++; } if (cmpl) { @@ -2147,7 +2239,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter) atomic_sub(num_wrbs, &txq->used); timeo = 0; } - if (atomic_read(&txq->used) == txo->pend_wrb_cnt) + if (!be_is_tx_compl_pending(txo)) pending_txqs--; } @@ -2498,7 +2590,7 @@ loop_continue: return work_done; } -static inline void 
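/*
 * Editor's sketch (not part of the patch): userspace model of the consumer
 * pattern used by the reworked be_tx_compl_get() above. The driver first
 * tests the CQE 'valid' dword, then issues rmb() so the remaining dwords
 * are not read ahead of it, copies status/wrb_index into struct
 * be_tx_compl_info, and clears the valid bit. The C11 acquire load below
 * plays the role of the valid test plus rmb(); all types and names here are
 * stand-ins, not the driver's.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_tx_compl {
        _Atomic uint32_t valid;         /* written last by the "hardware" */
        uint8_t status;
        uint16_t wrb_index;
};

struct fake_tx_compl_info {
        uint8_t status;
        uint16_t end_index;
};

/* Returns false when no new completion is pending. */
static bool tx_compl_get(struct fake_tx_compl *cqe,
                         struct fake_tx_compl_info *txcp)
{
        /* Acquire load: the valid check is ordered before the loads below. */
        if (!atomic_load_explicit(&cqe->valid, memory_order_acquire))
                return false;

        txcp->status = cqe->status;
        txcp->end_index = cqe->wrb_index;

        /* Consume the entry so it is not processed twice. */
        atomic_store_explicit(&cqe->valid, 0, memory_order_release);
        return true;
}

int main(void)
{
        struct fake_tx_compl cqe = { 0 };
        struct fake_tx_compl_info info;

        if (!tx_compl_get(&cqe, &info))
                puts("no completion pending");

        /* "Hardware" publishes an entry: payload first, valid flag last. */
        cqe.status = 0;
        cqe.wrb_index = 42;
        atomic_store_explicit(&cqe.valid, 1, memory_order_release);

        if (tx_compl_get(&cqe, &info))
                printf("completed up to wrb index %d, status %d\n",
                       info.end_index, info.status);
        return 0;
}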
be_update_tx_err(struct be_tx_obj *txo, u32 status) +static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status) { switch (status) { case BE_TX_COMP_HDR_PARSE_ERR: @@ -2513,7 +2605,7 @@ static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) } } -static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) +static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status) { switch (status) { case LANCER_TX_COMP_LSO_ERR: @@ -2538,22 +2630,18 @@ static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, int idx) { - struct be_eth_tx_compl *txcp; int num_wrbs = 0, work_done = 0; - u32 compl_status; - u16 last_idx; + struct be_tx_compl_info *txcp; - while ((txcp = be_tx_compl_get(&txo->cq))) { - last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); - num_wrbs += be_tx_compl_process(adapter, txo, last_idx); + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index); work_done++; - compl_status = GET_TX_COMPL_BITS(status, txcp); - if (compl_status) { + if (txcp->status) { if (lancer_chip(adapter)) - lancer_update_tx_err(txo, compl_status); + lancer_update_tx_err(txo, txcp->status); else - be_update_tx_err(txo, compl_status); + be_update_tx_err(txo, txcp->status); } } @@ -2564,7 +2652,7 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, /* As Tx wrbs have been freed up, wake up netdev queue * if it was stopped due to lack of tx wrbs. */ if (__netif_subqueue_stopped(adapter->netdev, idx) && - atomic_read(&txo->q.used) < txo->q.len / 2) { + be_can_txq_wake(txo)) { netif_wake_subqueue(adapter->netdev, idx); } @@ -2756,12 +2844,12 @@ void be_detect_error(struct be_adapter *adapter) sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); adapter->hw_error = true; + error_detected = true; /* Do not log error messages if its a FW reset */ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && sliport_err2 == SLIPORT_ERROR_FW_RESET2) { dev_info(dev, "Firmware update in progress\n"); } else { - error_detected = true; dev_err(dev, "Error detected in the card\n"); dev_err(dev, "ERR: sliport status 0x%x\n", sliport_status); @@ -3130,7 +3218,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) int status = 0; u8 mac[ETH_ALEN]; - memset(mac, 0, ETH_ALEN); + eth_zero_addr(mac); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, @@ -3275,6 +3363,14 @@ static void be_cancel_worker(struct be_adapter *adapter) } } +static void be_cancel_err_detection(struct be_adapter *adapter) +{ + if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) { + cancel_delayed_work_sync(&adapter->be_err_detection_work); + adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED; + } +} + static void be_mac_clear(struct be_adapter *adapter) { if (adapter->pmac_id) { @@ -3683,13 +3779,25 @@ static void be_sriov_config(struct be_adapter *adapter) static int be_get_config(struct be_adapter *adapter) { + int status, level; u16 profile_id; - int status; + + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; status = be_cmd_query_fw_cfg(adapter); if (status) return status; + if (BEx_chip(adapter)) { + level = be_cmd_get_fw_log_level(adapter); + adapter->msg_enable = + level <= FW_LOG_LEVEL_DEFAULT ? 
NETIF_MSG_HW : 0; + } + + be_cmd_get_acpi_wol_cap(adapter); + be_cmd_query_port_name(adapter); if (be_physfn(adapter)) { @@ -3747,6 +3855,13 @@ static void be_schedule_worker(struct be_adapter *adapter) adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; } +static void be_schedule_err_detection(struct be_adapter *adapter) +{ + schedule_delayed_work(&adapter->be_err_detection_work, + msecs_to_jiffies(1000)); + adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED; +} + static int be_setup_queues(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3829,11 +3944,53 @@ static inline int fw_major_num(const char *fw_ver) return fw_major; } +/* If any VFs are already enabled don't FLR the PF */ +static bool be_reset_required(struct be_adapter *adapter) +{ + return pci_num_vf(adapter->pdev) ? false : true; +} + +/* Wait for the FW to be ready and perform the required initialization */ +static int be_func_init(struct be_adapter *adapter) +{ + int status; + + status = be_fw_wait_ready(adapter); + if (status) + return status; + + if (be_reset_required(adapter)) { + status = be_cmd_reset_function(adapter); + if (status) + return status; + + /* Wait for interrupts to quiesce after an FLR */ + msleep(100); + + /* We can clear all errors when function reset succeeds */ + be_clear_all_error(adapter); + } + + /* Tell FW we're ready to fire cmds */ + status = be_cmd_fw_init(adapter); + if (status) + return status; + + /* Allow interrupts for other ULPs running on NIC function */ + be_intr_set(adapter, true); + + return 0; +} + static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; int status; + status = be_func_init(adapter); + if (status) + return status; + be_setup_init(adapter); if (!lancer_chip(adapter)) @@ -3879,8 +4036,6 @@ static int be_setup(struct be_adapter *adapter) be_set_rx_mode(adapter->netdev); - be_cmd_get_acpi_wol_cap(adapter); - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) @@ -4790,6 +4945,142 @@ static void be_netdev_init(struct net_device *netdev) netdev->ethtool_ops = &be_ethtool_ops; } +static void be_cleanup(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) + be_close(netdev); + rtnl_unlock(); + + be_clear(adapter); +} + +static int be_resume(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + status = be_setup(adapter); + if (status) + return status; + + if (netif_running(netdev)) { + status = be_open(netdev); + if (status) + return status; + } + + netif_device_attach(netdev); + + return 0; +} + +static int be_err_recover(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + status = be_resume(adapter); + if (status) + goto err; + + dev_info(dev, "Adapter recovery successful\n"); + return 0; +err: + if (be_physfn(adapter)) + dev_err(dev, "Adapter recovery failed\n"); + else + dev_err(dev, "Re-trying adapter recovery\n"); + + return status; +} + +static void be_err_detection_task(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, + be_err_detection_work.work); + int status = 0; + + be_detect_error(adapter); + + if (adapter->hw_error) { + be_cleanup(adapter); + + /* As of now error recovery support is in Lancer only */ + if (lancer_chip(adapter)) + status = be_err_recover(adapter); + } + + /* Always attempt recovery on VFs */ + if (!status || 
be_virtfn(adapter)) + be_schedule_err_detection(adapter); +} + +static void be_log_sfp_info(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_query_sfp_info(adapter); + if (!status) { + dev_err(&adapter->pdev->dev, + "Unqualified SFP+ detected on %c from %s part no: %s", + adapter->port_name, adapter->phy.vendor_name, + adapter->phy.vendor_pn); + } + adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; +} + +static void be_worker(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); + struct be_rx_obj *rxo; + int i; + + /* when interrupts are not yet enabled, just reap any pending + * mcc completions + */ + if (!netif_running(adapter->netdev)) { + local_bh_disable(); + be_process_mcc(adapter); + local_bh_enable(); + goto reschedule; + } + + if (!adapter->stats_cmd_sent) { + if (lancer_chip(adapter)) + lancer_cmd_get_pport_stats(adapter, + &adapter->stats_cmd); + else + be_cmd_get_stats(adapter, &adapter->stats_cmd); + } + + if (be_physfn(adapter) && + MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) + be_cmd_get_die_temperature(adapter); + + for_all_rx_queues(adapter, rxo, i) { + /* Replenish RX-queues starved due to memory + * allocation failures. + */ + if (rxo->rx_post_starved) + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); + } + + be_eqd_update(adapter); + + if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) + be_log_sfp_info(adapter); + +reschedule: + adapter->work_counter++; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); +} + static void be_unmap_pci_bars(struct be_adapter *adapter) { if (adapter->csr) @@ -4821,6 +5112,12 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) static int be_map_pci_bars(struct be_adapter *adapter) { u8 __iomem *addr; + u32 sli_intf; + + pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); + adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> + SLI_INTF_FAMILY_SHIFT; + adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; if (BEx_chip(adapter) && be_physfn(adapter)) { adapter->csr = pci_iomap(adapter->pdev, 2, 0); @@ -4842,109 +5139,94 @@ pci_map_err: return -ENOMEM; } -static void be_ctrl_cleanup(struct be_adapter *adapter) +static void be_drv_cleanup(struct be_adapter *adapter) { struct be_dma_mem *mem = &adapter->mbox_mem_alloced; - - be_unmap_pci_bars(adapter); + struct device *dev = &adapter->pdev->dev; if (mem->va) - dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, - mem->dma); + dma_free_coherent(dev, mem->size, mem->va, mem->dma); mem = &adapter->rx_filter; if (mem->va) - dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, - mem->dma); + dma_free_coherent(dev, mem->size, mem->va, mem->dma); + + mem = &adapter->stats_cmd; + if (mem->va) + dma_free_coherent(dev, mem->size, mem->va, mem->dma); } -static int be_ctrl_init(struct be_adapter *adapter) +/* Allocate and initialize various fields in be_adapter struct */ +static int be_drv_init(struct be_adapter *adapter) { struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; struct be_dma_mem *rx_filter = &adapter->rx_filter; - u32 sli_intf; - int status; - - pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); - adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> - SLI_INTF_FAMILY_SHIFT; - adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 
1 : 0; - - status = be_map_pci_bars(adapter); - if (status) - goto done; + struct be_dma_mem *stats_cmd = &adapter->stats_cmd; + struct device *dev = &adapter->pdev->dev; + int status = 0; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev, - mbox_mem_alloc->size, + mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); - if (!mbox_mem_alloc->va) { - status = -ENOMEM; - goto unmap_pci_bars; - } + if (!mbox_mem_alloc->va) + return -ENOMEM; + mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); - rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev, - rx_filter->size, &rx_filter->dma, - GFP_KERNEL); + rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, + &rx_filter->dma, GFP_KERNEL); if (!rx_filter->va) { status = -ENOMEM; goto free_mbox; } + if (lancer_chip(adapter)) + stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats); + else if (BE2_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0); + else if (BE3_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); + else + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); + stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, + &stats_cmd->dma, GFP_KERNEL); + if (!stats_cmd->va) { + status = -ENOMEM; + goto free_rx_filter; + } + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); - init_completion(&adapter->et_cmd_compl); - pci_save_state(adapter->pdev); - return 0; - -free_mbox: - dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, - mbox_mem_alloc->va, mbox_mem_alloc->dma); -unmap_pci_bars: - be_unmap_pci_bars(adapter); - -done: - return status; -} - -static void be_stats_cleanup(struct be_adapter *adapter) -{ - struct be_dma_mem *cmd = &adapter->stats_cmd; + pci_save_state(adapter->pdev); - if (cmd->va) - dma_free_coherent(&adapter->pdev->dev, cmd->size, - cmd->va, cmd->dma); -} + INIT_DELAYED_WORK(&adapter->work, be_worker); + INIT_DELAYED_WORK(&adapter->be_err_detection_work, + be_err_detection_task); -static int be_stats_init(struct be_adapter *adapter) -{ - struct be_dma_mem *cmd = &adapter->stats_cmd; + adapter->rx_fc = true; + adapter->tx_fc = true; - if (lancer_chip(adapter)) - cmd->size = sizeof(struct lancer_cmd_req_pport_stats); - else if (BE2_chip(adapter)) - cmd->size = sizeof(struct be_cmd_req_get_stats_v0); - else if (BE3_chip(adapter)) - cmd->size = sizeof(struct be_cmd_req_get_stats_v1); - else - /* ALL non-BE ASICs */ - cmd->size = sizeof(struct be_cmd_req_get_stats_v2); + /* Must be a power of 2 or else MODULO will BUG_ON */ + adapter->be_get_temp_freq = 64; + adapter->cfg_num_qs = netif_get_num_default_rss_queues(); - cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, - GFP_KERNEL); - if (!cmd->va) - return -ENOMEM; return 0; + +free_rx_filter: + dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma); +free_mbox: + dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va, + mbox_mem_alloc->dma); + return status; } static void be_remove(struct pci_dev *pdev) @@ -4957,7 +5239,7 @@ static void be_remove(struct pci_dev *pdev) be_roce_dev_remove(adapter); be_intr_set(adapter, false); - 
cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); unregister_netdev(adapter->netdev); @@ -4966,9 +5248,8 @@ static void be_remove(struct pci_dev *pdev) /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); - be_stats_cleanup(adapter); - - be_ctrl_cleanup(adapter); + be_unmap_pci_bars(adapter); + be_drv_cleanup(adapter); pci_disable_pcie_error_reporting(pdev); @@ -4978,156 +5259,6 @@ static void be_remove(struct pci_dev *pdev) free_netdev(adapter->netdev); } -static int be_get_initial_config(struct be_adapter *adapter) -{ - int status, level; - - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; - - /* Must be a power of 2 or else MODULO will BUG_ON */ - adapter->be_get_temp_freq = 64; - - if (BEx_chip(adapter)) { - level = be_cmd_get_fw_log_level(adapter); - adapter->msg_enable = - level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; - } - - adapter->cfg_num_qs = netif_get_num_default_rss_queues(); - return 0; -} - -static int lancer_recover_func(struct be_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - int status; - - status = lancer_test_and_set_rdy_state(adapter); - if (status) - goto err; - - if (netif_running(adapter->netdev)) - be_close(adapter->netdev); - - be_clear(adapter); - - be_clear_all_error(adapter); - - status = be_setup(adapter); - if (status) - goto err; - - if (netif_running(adapter->netdev)) { - status = be_open(adapter->netdev); - if (status) - goto err; - } - - dev_err(dev, "Adapter recovery successful\n"); - return 0; -err: - if (status == -EAGAIN) - dev_err(dev, "Waiting for resource provisioning\n"); - else - dev_err(dev, "Adapter recovery failed\n"); - - return status; -} - -static void be_func_recovery_task(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, func_recovery_work.work); - int status = 0; - - be_detect_error(adapter); - - if (adapter->hw_error && lancer_chip(adapter)) { - rtnl_lock(); - netif_device_detach(adapter->netdev); - rtnl_unlock(); - - status = lancer_recover_func(adapter); - if (!status) - netif_device_attach(adapter->netdev); - } - - /* In Lancer, for all errors other than provisioning error (-EAGAIN), - * no need to attempt further recovery. 
- */ - if (!status || status == -EAGAIN) - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); -} - -static void be_log_sfp_info(struct be_adapter *adapter) -{ - int status; - - status = be_cmd_query_sfp_info(adapter); - if (!status) { - dev_err(&adapter->pdev->dev, - "Unqualified SFP+ detected on %c from %s part no: %s", - adapter->port_name, adapter->phy.vendor_name, - adapter->phy.vendor_pn); - } - adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; -} - -static void be_worker(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, work.work); - struct be_rx_obj *rxo; - int i; - - /* when interrupts are not yet enabled, just reap any pending - * mcc completions */ - if (!netif_running(adapter->netdev)) { - local_bh_disable(); - be_process_mcc(adapter); - local_bh_enable(); - goto reschedule; - } - - if (!adapter->stats_cmd_sent) { - if (lancer_chip(adapter)) - lancer_cmd_get_pport_stats(adapter, - &adapter->stats_cmd); - else - be_cmd_get_stats(adapter, &adapter->stats_cmd); - } - - if (be_physfn(adapter) && - MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) - be_cmd_get_die_temperature(adapter); - - for_all_rx_queues(adapter, rxo, i) { - /* Replenish RX-queues starved due to memory - * allocation failures. - */ - if (rxo->rx_post_starved) - be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); - } - - be_eqd_update(adapter); - - if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) - be_log_sfp_info(adapter); - -reschedule: - adapter->work_counter++; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); -} - -/* If any VFs are already enabled don't FLR the PF */ -static bool be_reset_required(struct be_adapter *adapter) -{ - return pci_num_vf(adapter->pdev) ? false : true; -} - static char *mc_name(struct be_adapter *adapter) { char *str = ""; /* default */ @@ -5226,50 +5357,17 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) if (!status) dev_info(&pdev->dev, "PCIe error reporting enabled\n"); - status = be_ctrl_init(adapter); + status = be_map_pci_bars(adapter); if (status) goto free_netdev; - /* sync up with fw's ready state */ - if (be_physfn(adapter)) { - status = be_fw_wait_ready(adapter); - if (status) - goto ctrl_clean; - } - - if (be_reset_required(adapter)) { - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; - - /* Wait for interrupts to quiesce after an FLR */ - msleep(100); - } - - /* Allow interrupts for other ULPs running on NIC function */ - be_intr_set(adapter, true); - - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); - if (status) - goto ctrl_clean; - - status = be_stats_init(adapter); - if (status) - goto ctrl_clean; - - status = be_get_initial_config(adapter); + status = be_drv_init(adapter); if (status) - goto stats_clean; - - INIT_DELAYED_WORK(&adapter->work, be_worker); - INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task); - adapter->rx_fc = true; - adapter->tx_fc = true; + goto unmap_bars; status = be_setup(adapter); if (status) - goto stats_clean; + goto drv_cleanup; be_netdev_init(netdev); status = register_netdev(netdev); @@ -5278,8 +5376,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) be_roce_dev_add(adapter); - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); + be_schedule_err_detection(adapter); dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), func_name(adapter), mc_name(adapter), 
adapter->port_name); @@ -5288,10 +5385,10 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) unsetup: be_clear(adapter); -stats_clean: - be_stats_cleanup(adapter); -ctrl_clean: - be_ctrl_cleanup(adapter); +drv_cleanup: + be_drv_cleanup(adapter); +unmap_bars: + be_unmap_pci_bars(adapter); free_netdev: free_netdev(netdev); rel_reg: @@ -5306,21 +5403,14 @@ do_none: static int be_suspend(struct pci_dev *pdev, pm_message_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; if (adapter->wol_en) be_setup_wol(adapter, true); be_intr_set(adapter, false); - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); - netif_device_detach(netdev); - if (netif_running(netdev)) { - rtnl_lock(); - be_close(netdev); - rtnl_unlock(); - } - be_clear(adapter); + be_cleanup(adapter); pci_save_state(pdev); pci_disable_device(pdev); @@ -5328,13 +5418,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int be_resume(struct pci_dev *pdev) +static int be_pci_resume(struct pci_dev *pdev) { - int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); + int status = 0; status = pci_enable_device(pdev); if (status) @@ -5343,30 +5430,11 @@ static int be_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - status = be_fw_wait_ready(adapter); - if (status) - return status; - - status = be_cmd_reset_function(adapter); + status = be_resume(adapter); if (status) return status; - be_intr_set(adapter, true); - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); - if (status) - return status; - - be_setup(adapter); - if (netif_running(netdev)) { - rtnl_lock(); - be_open(netdev); - rtnl_unlock(); - } - - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); - netif_device_attach(netdev); + be_schedule_err_detection(adapter); if (adapter->wol_en) be_setup_wol(adapter, false); @@ -5386,7 +5454,7 @@ static void be_shutdown(struct pci_dev *pdev) be_roce_dev_shutdown(adapter); cancel_delayed_work_sync(&adapter->work); - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); netif_device_detach(adapter->netdev); @@ -5399,22 +5467,15 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; dev_err(&adapter->pdev->dev, "EEH error detected\n"); if (!adapter->eeh_error) { adapter->eeh_error = true; - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); - rtnl_lock(); - netif_device_detach(netdev); - if (netif_running(netdev)) - be_close(netdev); - rtnl_unlock(); - - be_clear(adapter); + be_cleanup(adapter); } if (state == pci_channel_io_perm_failure) @@ -5465,40 +5526,16 @@ static void be_eeh_resume(struct pci_dev *pdev) { int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; dev_info(&adapter->pdev->dev, "EEH resume\n"); pci_save_state(pdev); - status = be_cmd_reset_function(adapter); + status = be_resume(adapter); if (status) goto err; - /* On some BE3 FW versions, after a HW reset, - * interrupts will remain disabled for each function. 
- * So, explicitly enable interrupts - */ - be_intr_set(adapter, true); - - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); - if (status) - goto err; - - status = be_setup(adapter); - if (status) - goto err; - - if (netif_running(netdev)) { - status = be_open(netdev); - if (status) - goto err; - } - - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); - netif_device_attach(netdev); + be_schedule_err_detection(adapter); return; err: dev_err(&adapter->pdev->dev, "EEH resume failed\n"); @@ -5516,7 +5553,7 @@ static struct pci_driver be_driver = { .probe = be_probe, .remove = be_remove, .suspend = be_suspend, - .resume = be_resume, + .resume = be_pci_resume, .shutdown = be_shutdown, .err_handler = &be_eeh_handlers }; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index f4ff465584a0..7216a5370a1f 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -303,6 +303,15 @@ config I40E_FCOE If unsure, say N. +config I40E_CONFIGFS_FS + bool "Config File System Support (configfs)" + default n + depends on I40E && CONFIGFS_FS && !(I40E=y && CONFIGFS_FS=m) + ---help--- + Provides support for the configfs file system for additional + driver configuration. Say Y here if you want to use the + configuration file system in the driver. + config I40EVF tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" depends on PCI_MSI diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index bb7ab3c321d6..0570c668ec3d 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -141,6 +141,7 @@ #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 9416e5a7e0c8..a69f09e37b58 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -132,6 +132,7 @@ enum e1000_boards { board_pchlan, board_pch2lan, board_pch_lpt, + board_pch_spt }; struct e1000_ps_page { @@ -501,6 +502,7 @@ extern const struct e1000_info e1000_ich10_info; extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_pch_spt_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 865ce45f9ec3..11f486e4ff7b 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: mask |= (1 << 18); break; default: break; } - if (mac->type == e1000_pch_lpt) + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < 
mac->rar_entry_count; i++) { - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || + (mac->type == e1000_pch_spt)) { /* Cannot test write-protected SHRAL[n] registers */ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) continue; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 72f5475c4b90..19e8c487db06 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -87,6 +87,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_I218_V2 0x15A1 #define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ #define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ #define E1000_REVISION_4 4 @@ -108,6 +112,7 @@ enum e1000_mac_type { e1000_pchlan, e1000_pch2lan, e1000_pch_lpt, + e1000_pch_spt, }; enum e1000_media_type { @@ -153,6 +158,7 @@ enum e1000_bus_width { e1000_bus_width_pcie_x1, e1000_bus_width_pcie_x2, e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, e1000_bus_width_32, e1000_bus_width_64, e1000_bus_width_reserved diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 48b74a549155..7523f510c7e4 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data); +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); @@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Unforce SMBus mode in PHY */ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; @@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) /* fall-through */ case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -590,35 +601,50 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; + u32 nvm_size; /* Can't read flash registers if the register set isn't mapped. 
*/ - if (!hw->flash_address) { - e_dbg("ERROR: Flash registers not mapped\n"); - return -E1000_ERR_CONFIG; - } - nvm->type = e1000_nvm_flash_sw; + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register + */ + if (hw->mac.type == e1000_pch_spt) { + nvm->flash_base_addr = 0; + nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } - gfpreg = er32flash(ICH_FLASH_GFPREG); + gfpreg = er32flash(ICH_FLASH_GFPREG); - /* sector_X_addr is a "sector"-aligned address (4096 bytes) - * Add 1 to sector_end_addr since this sector is included in - * the overall size. - */ - sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; - sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; - /* flash_base_addr is byte-aligned */ - nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; - /* find total size of the NVM, then cut in half since the total - * size represents two separate NVM banks. - */ - nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT); - nvm->flash_bank_size /= 2; - /* Adjust to word count */ - nvm->flash_bank_size /= sizeof(u16); + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; @@ -682,6 +708,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) mac->ops.rar_set = e1000_rar_set_pch2lan; /* fall-through */ case e1000_pch_lpt: + case e1000_pch_spt: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -699,7 +726,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = @@ -919,8 +946,9 @@ release: /* clear FEXTNVM6 bit 8 on link down or 10/100 */ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; - if (!link || ((status & E1000_STATUS_SPEED_100) && - (status & E1000_STATUS_FD))) + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) goto update_fextnvm6; ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); @@ -1100,6 +1128,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + /* Si workaround for ULP entry flow on i127/rev6 h/w. 
Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + if (ret_val) + goto release; + } + /* Force SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) @@ -1302,7 +1345,8 @@ out: static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; bool link; u16 phy_reg; @@ -1333,48 +1377,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * the IPG and reduce Rx latency in the PHY. */ if (((hw->mac.type == e1000_pch2lan) || - (hw->mac.type == e1000_pch_lpt)) && link) { + (hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && link) { u32 reg; reg = er32(STATUS); + tipg_reg = er32(TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { - u16 emi_addr; + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else { - reg = er32(TIPG); - reg &= ~E1000_TIPG_IPGT_MASK; - reg |= 0xFF; - ew32(TIPG, reg); + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } - /* Reduce Rx latency in analog PHY */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; + ew32(TIPG, tipg_reg); - if (hw->mac.type == e1000_pch2lan) - emi_addr = I82579_RX_CONFIG; - else - emi_addr = I217_RX_CONFIG; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; - ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); - hw->phy.ops.release(hw); + hw->phy.ops.release(hw); - if (ret_val) - return ret_val; - } + if (ret_val) + return ret_val; } /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; } - - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) */ @@ -1386,6 +1437,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; + /* FEXTNVM6 K1-off workaround */ + if (hw->mac.type == e1000_pch_spt) { + u32 pcieanacfg = er32(PCIEANACFG); + u32 fextnvm6 = er32(FEXTNVM6); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + ew32(FEXTNVM6, fextnvm6); + } + if (!link) return 0; /* No link detected */ @@ -1479,6 +1543,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: rc = e1000_init_phy_params_pchlan(hw); break; default: 
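
[Illustrative aside, not part of the patch.] The SPT branch added to e1000_init_nvm_params_ich8lan() above sizes the NVM from the STRAP register instead of GFPREG: the 5-bit field the code reads at bits 5:1 selects the part size in NVM_SIZE_MULTIPLIER (4096-byte) units, the two flash banks split that evenly, and the bank size is then converted to 16-bit words. A minimal user-space sketch of that arithmetic follows; the helper name spt_flash_bank_words() and the standalone framing are assumptions made for illustration, while the driver itself reads STRAP through er32() and stores the result in nvm->flash_bank_size.

/* Build: cc -o nvmsize nvmsize.c && ./nvmsize */
#include <stdint.h>
#include <stdio.h>

#define NVM_SIZE_MULTIPLIER 4096u	/* bytes per unit of the NVMS strap field */

/* Given a raw STRAP register value, return the per-bank NVM size in 16-bit
 * words, following the same steps as the SPT branch above: bits 5:1 hold the
 * size field, total size is (field + 1) * 4096 bytes, and the two banks
 * split it evenly before the byte count is converted to words. */
static uint32_t spt_flash_bank_words(uint32_t strap)
{
	uint32_t nvms = (strap >> 1) & 0x1F;
	uint32_t nvm_bytes = (nvms + 1) * NVM_SIZE_MULTIPLIER;
	uint32_t bank_bytes = nvm_bytes / 2;

	return bank_bytes / sizeof(uint16_t);
}

int main(void)
{
	/* Example: field = 31 (all ones) -> 128 KiB part -> 64 KiB per bank. */
	uint32_t strap = 31u << 1;

	printf("bank size = %u words\n",
	       (unsigned)spt_flash_bank_words(strap));	/* prints 32768 */
	return 0;
}
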
@@ -1929,6 +1994,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -2961,6 +3027,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) s32 ret_val; switch (hw->mac.type) { + /* In SPT, read from the CTRL_EXT reg instead of + * accessing the sector valid bits from the nvm + */ + case e1000_pch_spt: + *bank = er32(CTRL_EXT) + & E1000_CTRL_EXT_NVMVS; + if ((*bank == 0) || (*bank == 1)) { + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } else { + *bank = *bank - 2; + return 0; + } + break; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); @@ -3008,6 +3088,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) } /** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = + dev_spec->shadow_ram[offset + i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset + i].modified) || + !(dev_spec->shadow_ram[offset + i + 1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset + i].modified) + data[i] = + dev_spec->shadow_ram[offset + i].value; + else + data[i] = (u16)(dword & 0xFFFF); + if (dev_spec->shadow_ram[offset + i].modified) + data[i + 1] = + dev_spec->shadow_ram[offset + i + 1].value; + else + data[i + 1] = (u16)(dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** * e1000_read_nvm_ich8lan - Read word(s) from the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. 
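
[Illustrative aside, not part of the patch.] The e1000_read_nvm_spt() routine added above only ever touches the flash in 32-bit units, so each requested 16-bit NVM word is mapped onto the containing dword and the low or high half is selected (modified shadow-RAM entries, which this sketch omits, take precedence in the driver). A minimal user-space sketch of that pairing; fake_flash, read_dword() and read_word() are hypothetical stand-ins for the driver's flash accessors such as e1000_read_flash_dword_ich8lan().

/* Build: cc -o sptread sptread.c && ./sptread */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the flash: dword i packs word 2*i in bits 15:0 and word
 * 2*i + 1 in bits 31:16, the layout assumed when each dword read is split
 * into two 16-bit NVM words. */
static const uint32_t fake_flash[] = { 0x11110000u, 0x33332222u, 0x55554444u };

/* Hypothetical dword reader; word_offset must be even here, while the driver
 * converts word offsets to byte offsets before issuing the flash cycle. */
static uint32_t read_dword(uint16_t word_offset)
{
	return fake_flash[word_offset / 2];
}

/* Return the 16-bit NVM word at an arbitrary word offset by rounding the
 * offset down to the containing dword and picking the right half. */
static uint16_t read_word(uint16_t word_offset)
{
	uint32_t dword = read_dword(word_offset & ~1u);

	if (word_offset & 1)
		return (uint16_t)(dword >> 16);
	return (uint16_t)(dword & 0xFFFF);
}

int main(void)
{
	for (uint16_t w = 0; w < 6; w++)
		printf("word %u = 0x%04x\n",
		       (unsigned)w, (unsigned)read_word(w));
	return 0;
}
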
@@ -3090,8 +3263,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or @@ -3107,7 +3282,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { s32 i; @@ -3128,7 +3306,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); } else { e_dbg("Flash controller busy, cannot get access\n"); } @@ -3151,9 +3333,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ do { @@ -3170,6 +3359,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) } /** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + /* Must convert word offset into bytes. */ + offset <<= 1; + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** * e1000_read_flash_word_ich8lan - Read word from flash * @hw: pointer to the HW structure * @offset: offset to data location @@ -3201,7 +3407,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u16 word = 0; - ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + /* In SPT, only 32 bits access is supported, + * so this function should not be called. + */ + if (hw->mac.type == e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) return ret_val; @@ -3287,6 +3500,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** + * e1000_read_flash_data32_ich8lan - Read dword from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the dword to read. + * @data: Pointer to the dword to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. 
+ **/ + +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || + hw->mac.type != e1000_pch_spt) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + *data = er32flash(ICH_FLASH_FDATA0); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to write. @@ -3321,7 +3610,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, } /** - * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * e1000_update_nvm_checksum_spt - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, @@ -3331,13 +3620,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, * After a successful commit, the shadow ram is cleared and is ready for * future writes. 
**/ -static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; - u16 data; + u32 dword = 0; ret_val = e1000e_update_nvm_checksum_generic(hw); if (ret_val) @@ -3371,12 +3660,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; } - - for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { /* Determine whether to write the value stored * in the other NVM bank or a modified value stored * in the shadow RAM */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + + /* Write the data to the new bank. Offset in words */ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword */ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword */ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. 
*/ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { @@ -3498,6 +3950,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@ -3583,9 +4036,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (size < 1 || size > 2 || data > size * 0xff || - offset > ICH_FLASH_LINEAR_ADDR_MASK) - return -E1000_ERR_NVM; + if (hw->mac.type == e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); @@ -3596,12 +4053,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
*/ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); @@ -3640,6 +4110,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM +* @hw: pointer to the HW structure +* @offset: The offset (in bytes) of the dwords to read. +* @data: The 4 bytes to write to the NVM. +* +* Writes one/two/four bytes to the NVM using the flash access registers. +**/ +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ew32flash(ICH_FLASH_FDATA0, data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_flash_byte_ich8lan - Write a single byte to NVM * @hw: pointer to the HW structure * @offset: The index of the byte to read. @@ -3656,6 +4210,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM +* @hw: pointer to the HW structure +* @offset: The offset of the word to write. +* @dword: The dword to write to the NVM. +* +* Writes a single dword to the NVM using the flash access registers. +* Goes through a retry algorithm before giving up. 
+**/ +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + + /* Must convert word offset into bytes. */ + offset <<= 1; + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure * @offset: The offset of the byte to write. @@ -3759,9 +4347,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* Write the last 24 bits of an index within the * block into Flash Linear address field in Flash @@ -4180,7 +4777,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) ew32(RFCTL, reg); /* Enable ECC on Lynxpoint */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { reg = er32(PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; ew32(PBECCSTS, reg); @@ -4583,7 +5181,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || - (device_id == E1000_DEV_ID_PCH_I218_V3)) { + (device_id == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); @@ -5058,6 +5657,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = { .write = e1000_write_nvm_ich8lan, }; +static const struct e1000_nvm_operations spt_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .read = e1000_read_nvm_spt, + .update = e1000_update_nvm_checksum_spt, + .reload = e1000e_reload_nvm_generic, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + const struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL @@ -5166,3 +5776,23 @@ const struct e1000_info e1000_pch_lpt_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; + +const struct e1000_info e1000_pch_spt_info = { + .mac = e1000_pch_spt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9018, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h 
b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 8066a498eaac..770a573b9eea 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -95,9 +95,18 @@ #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1e8c40fd5c3d..6fa4fc05709e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pchlan] = &e1000_pch_info, [board_pch2lan] = &e1000_pch2_info, [board_pch_lpt] = &e1000_pch_lpt_info, + [board_pch_spt] = &e1000_pch_spt_info, }; struct e1000_reg_info { @@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); @@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TCTL, tctl); hw->mac.ops.config_collision_dist(hw); + + /* SPT Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + u32 reg_val; + + reg_val = er32(IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + ew32(IOSFPC, reg_val); + + reg_val = er32(TARC(0)); + reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + ew32(TARC(0), reg_val); + } } /** @@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) struct e1000_hw *hw = &adapter->hw; u32 incvalue, incperiod, shift; - /* Make sure clock is enabled on I217 before checking the frequency */ - if ((hw->mac.type == e1000_pch_lpt) && + /* Make sure clock is enabled on I217/I218/I219 before checking + * the frequency + */ + if (((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); @@ -3505,10 +3525,13 @@ s32 
e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - /* On I217, the clock frequency is 25MHz or 96MHz as - * indicated by the System Clock Frequency Indication + case e1000_pch_spt: + /* On I217, I218 and I219, the clock frequency is 25MHz + * or 96MHz as indicated by the System Clock Frequency + * Indication */ - if ((hw->mac.type != e1000_pch_lpt) || + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHz; @@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter) break; case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: fc->refresh_time = 0x0400; if (adapter->netdev->mtu <= ETH_DATA_LEN) { @@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); /* Correctable ECC Errors */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. @@ -7213,6 +7239,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 978ef9c4a043..1490f1e8d6aa 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -221,7 +221,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - if ((hw->mac.type != e1000_pch_lpt) || + case e1000_pch_spt: + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; break; diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index ea235bbe50d3..85eefc4832ba 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,6 +38,7 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ @@ -67,6 +68,7 @@ #define E1000_PBA 0x01000 /* Packet 
Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ @@ -121,6 +123,7 @@ (0x054E4 + ((_i - 16) * 8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 42eb4344a9dc..59edfd4446cd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -439,6 +439,7 @@ extern char fm10k_driver_name[]; extern const char fm10k_driver_version[]; int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); +__be16 fm10k_tx_encap_offload(struct sk_buff *skb); netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring); void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index bf19dccd4288..6cfae6ac04ea 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, /* Retrieve RX Owner Data */ id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); - /* Process RX Ring*/ + /* Process RX Ring */ do { rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), &q->rx_drops); @@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, * Function invalidates the index values for the queues so any updates that * may have happened are ignored and the base for the queue stats is reset. 
**/ - void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) { u32 i; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 651f53bc7376..33b6106c764b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1019,7 +1019,7 @@ static int fm10k_set_channels(struct net_device *dev, } static int fm10k_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct ethtool_ts_info *info) { struct fm10k_intfc *interface = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 060190864238..a02308f5048f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -275,7 +275,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) if (vf_idx >= iov_data->num_vfs) return FM10K_ERR_PARAM; - /* determine if an update has occured and if so notify the VF */ + /* determine if an update has occurred and if so notify the VF */ vf_info = &iov_data->vf_info[vf_idx]; if (vf_info->sw_vid != pvid) { vf_info->sw_vid = pvid; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 84ab9eea2768..c325bc0c8338 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -711,10 +711,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) return NULL; - /* verify protocol is transparent Ethernet bridging */ - if (nvgre_hdr->proto != htons(ETH_P_TEB)) - return NULL; - /* report start of ethernet header */ if (nvgre_hdr->flags & NVGRE_TNI) return (struct ethhdr *)(nvgre_hdr + 1); @@ -722,15 +718,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) return (struct ethhdr *)(&nvgre_hdr->tni); } -static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) +__be16 fm10k_tx_encap_offload(struct sk_buff *skb) { + u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; struct ethhdr *eth_hdr; - u8 l4_hdr = 0; -/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. 
*/ -#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164 - if (skb_inner_transport_header(skb) - skb_mac_header(skb) > - FM10K_MAX_ENCAP_TRANSPORT_OFFSET) + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) return 0; switch (vlan_get_protocol(skb)) { @@ -760,12 +754,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) switch (eth_hdr->h_proto) { case htons(ETH_P_IP): + inner_l4_hdr = inner_ip_hdr(skb)->protocol; + break; case htons(ETH_P_IPV6): + inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; break; default: return 0; } + switch (inner_l4_hdr) { + case IPPROTO_TCP: + inner_l4_hlen = inner_tcp_hdrlen(skb); + break; + case IPPROTO_UDP: + inner_l4_hlen = 8; + break; + default: + return 0; + } + + /* The hardware allows tunnel offloads only if the combined inner and + * outer header is 184 bytes or less + */ + if (skb_inner_transport_header(skb) + inner_l4_hlen - + skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) + return 0; + return eth_hdr->h_proto; } @@ -934,10 +949,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ smp_mb(); - /* We need to check again in a case another CPU has just - * made room available. */ + /* Check again in a case another CPU has just made room available */ if (likely(fm10k_desc_unused(tx_ring) < size)) return -EBUSY; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 9f5457c9e627..14ee696e9830 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) * @fifo: pointer to FIFO * @offset: offset to add to head * - * This function returns the indicies into the fifo based on head + offset + * This function returns the indices into the fifo based on head + offset **/ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) * @fifo: pointer to FIFO * @offset: offset to add to tail * - * This function returns the indicies into the fifo based on tail + offset + * This function returns the indices into the fifo based on tail + offset **/ static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -326,7 +326,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem * @mbx: pointer to mailbox * - * This function will take a seciton of the Rx FIFO and copy it into the + * This function will take a section of the Rx FIFO and copy it into the * mailbox memory. The offset in mbmem is based on the lower bits of the * tail and len determines the length to copy. @@ -418,7 +418,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw, * @hw: pointer to hardware structure * @mbx: pointer to mailbox * - * This function will take a seciton of the mailbox memory and copy it + * This function will take a section of the mailbox memory and copy it * into the Rx FIFO. The offset is based on the lower bits of the * head and len determines the length to copy. **/ @@ -464,7 +464,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, * @tail: tail index of message * * This function will first validate the tail index and size for the - * incoming message. 
It then updates the acknowlegment number and + * incoming message. It then updates the acknowledgment number and * copies the data into the FIFO. It will return the number of messages * dequeued on success and a negative value on error. **/ @@ -761,7 +761,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, err = fm10k_fifo_enqueue(&mbx->tx, msg); } - /* if we failed trhead the error */ + /* if we failed treat the error */ if (err) { mbx->timeout = 0; mbx->tx_busy++; @@ -815,7 +815,7 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) { u32 mbmem = mbx->mbmem_reg; - /* write new msg header to notify recepient of change */ + /* write new msg header to notify recipient of change */ fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); /* write mailbox to sent interrupt */ @@ -1251,7 +1251,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, /* we will need to pull all of the fields for verification */ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); - /* we only have lower 10 bits of error number os add upper bits */ + /* we only have lower 10 bits of error number so add upper bits */ err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); @@ -1548,7 +1548,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, mbx->timeout = 0; mbx->udelay = FM10K_MBX_INIT_DELAY; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; @@ -1627,7 +1627,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx) mbx->local = FM10K_SM_MBX_VERSION; mbx->remote = 0; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index cfde8bac1aeb..d5b303dad95e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) * fm10k_request_glort_range - Request GLORTs for use in configuring rules * @interface: board private structure * - * This function allocates a range of glorts for this inteface to use. + * This function allocates a range of glorts for this interface to use. 
**/ static void fm10k_request_glort_range(struct fm10k_intfc *interface) { @@ -781,7 +781,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) fm10k_mbx_lock(interface); - /* only need to update the VLAN if not in promiscous mode */ + /* only need to update the VLAN if not in promiscuous mode */ if (!(netdev->flags & IFF_PROMISC)) { err = hw->mac.ops.update_vlan(hw, vid, 0, set); if (err) @@ -970,7 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev) fm10k_mbx_lock(interface); - /* syncronize all of the addresses */ + /* synchronize all of the addresses */ if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) @@ -1051,7 +1051,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) vid, true, 0); } - /* syncronize all of the addresses */ + /* synchronize all of the addresses */ if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) @@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) } } +static netdev_features_t fm10k_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) + return features; + + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); +} + static const struct net_device_ops fm10k_netdev_ops = { .ndo_open = fm10k_open, .ndo_stop = fm10k_close, @@ -1372,6 +1382,7 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_do_ioctl = fm10k_ioctl, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, + .ndo_features_check = fm10k_features_check, }; #define DEFAULT_DEBUG_LEVEL_SHIFT 3 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4f5892cc32d7..8978d55a1c51 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -648,7 +648,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* Configure the Rx buffer size for one buff without split */ srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; - /* Configure the Rx ring to supress loopback packets */ + /* Configure the Rx ring to suppress loopback packets */ srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 7e4711958e46..159cd8463800 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) vid = (vid << 17) >> 17; /* verify the reserved 0 fields are 0 */ - if (len >= FM10K_VLAN_TABLE_VID_MAX || - vid >= FM10K_VLAN_TABLE_VID_MAX) + if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; /* Loop through the table updating all required VLANs */ @@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) } /** - * fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_xc_addr_pf - Update device addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -356,7 +355,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, } /** - * 
fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_uc_addr_pf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -454,7 +453,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) break; } - /* always reset VFITR2[0] to point to last enabled PF vector*/ + /* always reset VFITR2[0] to point to last enabled PF vector */ fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); /* reset ITR2[0] to point to last enabled PF vector */ @@ -812,7 +811,7 @@ static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) if (vf_idx >= hw->iov.num_vfs) return FM10K_ERR_PARAM; - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -951,7 +950,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, if (vf_info->mbx.ops.disconnect) vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -1035,7 +1034,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, ((u32)vf_info->mac[2]); } - /* map queue pairs back to VF from last to first*/ + /* map queue pairs back to VF from last to first */ for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); @@ -1141,7 +1140,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, * * This function is a default handler for MSI-X requests from the VF. The * assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1160,7 +1159,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, * * This function is a default handler for MAC/VLAN requests from the VF. * The assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. 
**/ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1404,7 +1403,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, &stats->vlan_drop); loopback_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_LOOPBACK_DROP, - &stats->loopback_drop); + &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); @@ -1573,7 +1572,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) s32 ret_val = 0; u32 dma_ctrl2; - /* verify the switch is ready for interraction */ + /* verify the switch is ready for interaction */ dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) goto out; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index fd0a05f011a8..9b29d7b0377a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) /** * fm10k_tlv_msg_test - Validate all results on test message receive * @hw: Pointer to hardware structure - * @results: Pointer array to attributes in the mesage + * @results: Pointer array to attributes in the message * @mbx: Pointer to mailbox information structure * * This function does a check to verify all attributes match what the test diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 7c6d9d5a8ae5..4af96686c584 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -356,6 +356,9 @@ struct fm10k_hw; #define FM10K_QUEUE_DISABLE_TIMEOUT 100 #define FM10K_RESET_TIMEOUT 150 +/* Maximum supported combined inner and outer header length for encapsulation */ +#define FM10K_TUNNEL_HEADER_LENGTH 184 + /* VF registers */ #define FM10K_VFCTRL 0x00000 #define FM10K_VFCTRL_RST 0x00000008 @@ -593,7 +596,7 @@ struct fm10k_vf_info { u16 sw_vid; /* Switch API assigned VLAN */ u16 pf_vid; /* PF assigned Default VLAN */ u8 mac[ETH_ALEN]; /* PF Default MAC address */ - u8 vsi; /* VSI idenfifier */ + u8 vsi; /* VSI identifier */ u8 vf_idx; /* which VF this is */ u8 vf_flags; /* flags indicating what modes * are supported for the port diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index f0aa0f97b4a9..17219678439a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) if (err) return err; - /* If permenant address is set then we need to restore it */ + /* If permanent address is set then we need to restore it */ if (is_valid_ether_addr(perm_addr)) { bal = (((u32)perm_addr[3]) << 24) | (((u32)perm_addr[4]) << 16) | @@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) * fm10k_reset_hw_vf - VF hardware reset * @hw: pointer to hardware structure * - * This function should return the hardare to a state similar to the + * This function should return the hardware to a state similar to the * one it is in after just being initialized. 
**/ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) @@ -252,7 +252,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) } /** - * fm10k_update_uc_addr_vf - Update device unicast address + * fm10k_update_uc_addr_vf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -282,7 +282,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -295,7 +295,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, } /** - * fm10k_update_mc_addr_vf - Update device multicast address + * fm10k_update_mc_addr_vf - Update device multicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -319,7 +319,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, if (!is_multicast_ether_addr(mac)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -515,7 +515,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) * @hw: pointer to the hardware structure * * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanosecods. In order to guarantee the value is accurate + * value measured in nanoseconds. In order to guarantee the value is accurate * we check the 32 most significant bits both before and after reading the * 32 least significant bits to verify they didn't change as we were reading * the registers. diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index c40581999121..023e452aff8c 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 - 2014 Intel Corporation. +# Copyright(c) 2013 - 2015 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -37,6 +37,7 @@ i40e-objs := i40e_main.o \ i40e_hmc.o \ i40e_lan_hmc.o \ i40e_nvm.o \ + i40e_configfs.o \ i40e_debugfs.o \ i40e_diag.o \ i40e_txrx.o \ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2b65cdcad6ba..c5137313b62a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,6 +36,7 @@ #include <linux/aer.h> #include <linux/netdevice.h> #include <linux/ioport.h> +#include <linux/iommu.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> @@ -49,6 +50,7 @@ #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> +#include <linux/if_bridge.h> #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> @@ -94,6 +96,9 @@ #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) +/* Ethtool Private Flags */ +#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) + #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 @@ -140,6 +145,7 @@ enum i40e_state_t { __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, __I40E_EMP_RESET_REQUESTED, + __I40E_EMP_RESET_INTR_RECEIVED, __I40E_FILTER_OVERFLOW_PROMISC, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, @@ -383,6 +389,9 @@ struct i40e_pf { bool ptp_tx; bool ptp_rx; u16 rss_table_size; + /* These are only valid in NPAR modes */ + u32 npar_max_bw; + u32 npar_min_bw; }; struct i40e_mac_filter { @@ -405,6 +414,7 @@ struct i40e_veb { u16 uplink_seid; u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; + u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ u16 flags; u16 bw_limit; u8 bw_max_quanta; @@ -461,6 +471,9 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; + u16 rss_table_size; + u16 rss_size; + u16 max_frame; u16 rx_hdr_len; u16 rx_buf_len; @@ -478,6 +491,7 @@ struct i40e_vsi { u16 base_queue; /* vsi's first queue in hw array */ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ @@ -504,6 +518,9 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + /* current rxnfc data */ + struct ethtool_rxnfc rxnfc; /* current rss hash opts */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -544,14 +561,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw) static char buf[32]; snprintf(buf, sizeof(buf), - "f%d.%d a%d.%d n%02x.%02x e%08x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + "f%d.%d.%05d a%d.%d n%x.%02x e%x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - hw->nvm.eetrack); + (hw->nvm.eetrack & 0xffffff)); return buf; } @@ -680,6 +697,7 @@ int i40e_vlan_rx_add_vid(struct net_device *netdev, int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid); #endif +int i40e_open(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); @@ -690,7 +708,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE -int i40e_open(struct net_device *netdev); int i40e_close(struct net_device *netdev); int i40e_setup_tc(struct net_device *netdev, u8 tc); void i40e_netpoll(struct 
net_device *netdev); @@ -712,6 +729,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); #ifdef CONFIG_I40E_DCB void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg); void i40e_dcbnl_set_all(struct i40e_vsi *vsi); void i40e_dcbnl_setup(struct i40e_vsi *vsi); @@ -727,4 +745,12 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); +#if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) +int i40e_configfs_init(void); +void i40e_configfs_exit(void); +#endif /* CONFIG_I40E_CONFIGFS_FS */ +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 77f6254a89ac..dc2ed359e945 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) ret_code = i40e_aq_get_firmware_version(hw, &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver, + &hw->aq.fw_build, &hw->aq.api_maj_ver, &hw->aq.api_min_ver, NULL); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index de17b6fbcc4e..28e519a50de4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6aea65dae5ed..1da7d05abd38 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -94,16 +94,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { memset(data, 0, sizeof(data)); @@ -116,15 +119,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if ((i % 16) == 15) { i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); + i - 15, le32_to_cpu(data[0]), + le32_to_cpu(data[1]), + le32_to_cpu(data[2]), + le32_to_cpu(data[3])); memset(data, 0, sizeof(data)); } } if ((i % 16) != 0) i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); + i - (i % 16), le32_to_cpu(data[0]), + le32_to_cpu(data[1]), + le32_to_cpu(data[2]), + le32_to_cpu(data[3])); } } @@ -1298,14 +1305,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; @@ -1453,35 +1460,6 @@ aq_get_link_info_exit: } /** - * i40e_update_link_info - * @hw: pointer to the hw struct - * @enable_lse: enable/disable LinkStatusEvent reporting - * - * Returns the link status of the adapter - **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) -{ - struct i40e_aq_get_phy_abilities_resp abilities; - i40e_status status; - - status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL); - if (status) - return status; - - status = i40e_aq_get_phy_capabilities(hw, false, false, - &abilities, NULL); - if (status) - return status; - - if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED) - hw->phy.link_info.an_enabled = true; - else - hw->phy.link_info.an_enabled = false; - - return status; -} - -/** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set @@ -1760,6 +1738,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version + * @fw_build: firmware build number * @api_major_version: 
major queue version * @api_minor_version: minor queue version * @cmd_details: pointer to command details structure or NULL @@ -1768,6 +1747,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, **/ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { @@ -1781,13 +1761,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { - if (fw_major_version != NULL) + if (fw_major_version) *fw_major_version = le16_to_cpu(resp->fw_major); - if (fw_minor_version != NULL) + if (fw_minor_version) *fw_minor_version = le16_to_cpu(resp->fw_minor); - if (api_major_version != NULL) + if (fw_build) + *fw_build = le32_to_cpu(resp->fw_build); + if (api_major_version) *api_major_version = le16_to_cpu(resp->api_major); - if (api_minor_version != NULL) + if (api_minor_version) *api_minor_version = le16_to_cpu(resp->api_minor); } @@ -1817,7 +1799,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI); + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; @@ -3377,6 +3359,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. 
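A minimal usage sketch for the single-dword case described above; the wrapper name is illustrative and not part of the patch, only the i40e_aq_alternate_read() signature taken from the change below is assumed:

/* Hypothetical helper: read one alternate-structure dword.  With a NULL
 * second value pointer the command still carries both addresses, but
 * only the first result is copied back, as the description above notes.
 */
static i40e_status i40e_read_one_alt_dword(struct i40e_hw *hw,
					   u32 reg_addr0, u32 *reg_val0)
{
	return i40e_aq_alternate_read(hw, reg_addr0, reg_val0, 0, NULL);
}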
+ * + **/ +i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = cpu_to_le32(reg_addr0); + cmd_resp->address1 = cpu_to_le32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (!status) { + *reg_val0 = le32_to_cpu(cmd_resp->data0); + + if (reg_val1) + *reg_val1 = le32_to_cpu(cmd_resp->data1); + } + + return status; +} + +/** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL @@ -3440,3 +3463,79 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) break; } } + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + i40e_status status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + if (bwd_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, + cmd_details); + + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_configfs.c b/drivers/net/ethernet/intel/i40e/i40e_configfs.c new file mode 100644 index 000000000000..d3cdfc24d5bf --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_configfs.c @@ -0,0 +1,354 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include <linux/configfs.h> +#include "i40e.h" + +#if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) + +/** + * configfs structure for i40e + * + * This file adds code for configfs support for the i40e driver. This sets + * up a filesystem under /sys/kernel/config in which configuration changes + * can be made for the driver's netdevs. + * + * The initialization in this code creates the "i40e" entry in the configfs + * system. After that, the user needs to use mkdir to create configurations + * for specific netdev ports; for example "mkdir eth3". This code will verify + * that such a netdev exists and that it is owned by i40e. + * + **/ + +struct i40e_cfgfs_vsi { + struct config_item item; + struct i40e_vsi *vsi; +}; + +static inline struct i40e_cfgfs_vsi *to_i40e_cfgfs_vsi(struct config_item *item) +{ + return item ? 
container_of(item, struct i40e_cfgfs_vsi, item) : NULL; +} + +static struct configfs_attribute i40e_cfgfs_vsi_attr_min_bw = { + .ca_owner = THIS_MODULE, + .ca_name = "min_bw", + .ca_mode = S_IRUGO | S_IWUSR, +}; + +static struct configfs_attribute i40e_cfgfs_vsi_attr_max_bw = { + .ca_owner = THIS_MODULE, + .ca_name = "max_bw", + .ca_mode = S_IRUGO | S_IWUSR, +}; + +static struct configfs_attribute i40e_cfgfs_vsi_attr_commit = { + .ca_owner = THIS_MODULE, + .ca_name = "commit", + .ca_mode = S_IRUGO | S_IWUSR, +}; + +static struct configfs_attribute i40e_cfgfs_vsi_attr_port_count = { + .ca_owner = THIS_MODULE, + .ca_name = "ports", + .ca_mode = S_IRUGO | S_IWUSR, +}; + +static struct configfs_attribute i40e_cfgfs_vsi_attr_part_count = { + .ca_owner = THIS_MODULE, + .ca_name = "partitions", + .ca_mode = S_IRUGO | S_IWUSR, +}; + +static struct configfs_attribute *i40e_cfgfs_vsi_attrs[] = { + &i40e_cfgfs_vsi_attr_min_bw, + &i40e_cfgfs_vsi_attr_max_bw, + &i40e_cfgfs_vsi_attr_commit, + &i40e_cfgfs_vsi_attr_port_count, + &i40e_cfgfs_vsi_attr_part_count, + NULL, +}; + +/** + * i40e_cfgfs_vsi_attr_show - Show a VSI's NPAR BW partition info + * @item: A pointer back to the configfs item created on driver load + * @attr: A pointer to this item's configuration attribute + * @page: A pointer to the output buffer + **/ +static ssize_t i40e_cfgfs_vsi_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item); + struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back; + ssize_t count; + + if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi]) + return 0; + + if (strncmp(attr->ca_name, "min_bw", 6) == 0) + count = sprintf(page, "%s %s %d%%\n", + i40e_cfgfs_vsi->vsi->netdev->name, + (pf->npar_min_bw & I40E_ALT_BW_RELATIVE_MASK) ? + "Relative Min BW" : "Absolute Min BW", + pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK); + else if (strncmp(attr->ca_name, "max_bw", 6) == 0) + count = sprintf(page, "%s %s %d%%\n", + i40e_cfgfs_vsi->vsi->netdev->name, + (pf->npar_max_bw & I40E_ALT_BW_RELATIVE_MASK) ? + "Relative Max BW" : "Absolute Max BW", + pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK); + else if (strncmp(attr->ca_name, "ports", 5) == 0) + count = sprintf(page, "%d\n", + pf->hw.num_ports); + else if (strncmp(attr->ca_name, "partitions", 10) == 0) + count = sprintf(page, "%d\n", + pf->hw.num_partitions); + else + return 0; + + return count; +} + +/** + * i40e_cfgfs_vsi_attr_store - Show a VSI's NPAR BW partition info + * @item: A pointer back to the configfs item created on driver load + * @attr: A pointer to this item's configuration attribute + * @page: A pointer to the user input buffer holding the user input values + **/ +static ssize_t i40e_cfgfs_vsi_attr_store(struct config_item *item, + struct configfs_attribute *attr, + const char *page, size_t count) +{ + struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item); + struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back; + char *p = (char *)page; + int rc; + unsigned long tmp; + + if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi]) + return 0; + + if (!p || (*p && (*p == '\n'))) + return -EINVAL; + + rc = kstrtoul(p, 10, &tmp); + if (rc) + return rc; + if (tmp > 100) + return -ERANGE; + + if (strncmp(attr->ca_name, "min_bw", 6) == 0) { + if (tmp > (pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK)) + return -ERANGE; + /* Preserve the valid and relative BW bits - the rest is + * don't care. 
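For context, a user-space sketch of the flow these store handlers implement; everything here is an assumption drawn from the comments and attribute names in this new file (configfs mounted at /sys/kernel/config, an i40e-owned netdev named eth3, and percentage strings as accepted above), not part of the patch:

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* write a short decimal string to one configfs attribute */
static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -errno;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -errno;
	}
	return close(fd);
}

int main(void)
{
	/* create the per-port item; the driver verifies the netdev exists
	 * and is owned by i40e before accepting the name
	 */
	if (mkdir("/sys/kernel/config/i40e/eth3", 0755) && errno != EEXIST)
		return 1;

	/* set max_bw first so the min_bw range check against it passes */
	write_attr("/sys/kernel/config/i40e/eth3/max_bw", "80");
	write_attr("/sys/kernel/config/i40e/eth3/min_bw", "25");

	/* writing 1 to "commit" asks the driver to persist the settings */
	return write_attr("/sys/kernel/config/i40e/eth3/commit", "1") ? 1 : 0;
}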
+ */ + pf->npar_min_bw &= (I40E_ALT_BW_RELATIVE_MASK | + I40E_ALT_BW_VALID_MASK); + pf->npar_min_bw |= (tmp & I40E_ALT_BW_VALUE_MASK); + i40e_set_npar_bw_setting(pf); + } else if (strncmp(attr->ca_name, "max_bw", 6) == 0) { + if (tmp < 1 || + tmp < (pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK)) + return -ERANGE; + /* Preserve the valid and relative BW bits - the rest is + * don't care. + */ + pf->npar_max_bw &= (I40E_ALT_BW_RELATIVE_MASK | + I40E_ALT_BW_VALID_MASK); + pf->npar_max_bw |= (tmp & I40E_ALT_BW_VALUE_MASK); + i40e_set_npar_bw_setting(pf); + } else if (strncmp(attr->ca_name, "commit", 6) == 0 && tmp == 1) { + if (i40e_commit_npar_bw_setting(pf)) + return -EIO; + } + + return count; +} + +/** + * i40e_cfgfs_vsi_release - Free up the configuration item memory + * @item: A pointer back to the configfs item created on driver load + **/ +static void i40e_cfgfs_vsi_release(struct config_item *item) +{ + kfree(to_i40e_cfgfs_vsi(item)); +} + +static struct configfs_item_operations i40e_cfgfs_vsi_item_ops = { + .release = i40e_cfgfs_vsi_release, + .show_attribute = i40e_cfgfs_vsi_attr_show, + .store_attribute = i40e_cfgfs_vsi_attr_store, +}; + +static struct config_item_type i40e_cfgfs_vsi_type = { + .ct_item_ops = &i40e_cfgfs_vsi_item_ops, + .ct_attrs = i40e_cfgfs_vsi_attrs, + .ct_owner = THIS_MODULE, +}; + +struct i40e_cfgfs_group { + struct config_group group; +}; + +/** + * to_i40e_cfgfs_group - Get the group pointer from the config item + * @item: A pointer back to the configfs item created on driver load + **/ +static inline struct i40e_cfgfs_group * +to_i40e_cfgfs_group(struct config_item *item) +{ + return item ? container_of(to_config_group(item), + struct i40e_cfgfs_group, group) : NULL; +} + +/** + * i40e_cfgfs_group_make_item - Create the configfs item with group container + * @group: A pointer to our configfs group + * @name: A pointer to the nume of the device we're looking for + **/ +static struct config_item * +i40e_cfgfs_group_make_item(struct config_group *group, const char *name) +{ + struct i40e_cfgfs_vsi *i40e_cfgfs_vsi; + struct net_device *netdev; + struct i40e_netdev_priv *np; + + read_lock(&dev_base_lock); + netdev = first_net_device(&init_net); + while (netdev) { + if (strncmp(netdev->name, name, sizeof(netdev->name)) == 0) + break; + netdev = next_net_device(netdev); + } + read_unlock(&dev_base_lock); + + if (!netdev) + return ERR_PTR(-ENODEV); + + /* is this netdev owned by i40e? */ + if (netdev->netdev_ops->ndo_open != i40e_open) + return ERR_PTR(-EACCES); + + i40e_cfgfs_vsi = kzalloc(sizeof(*i40e_cfgfs_vsi), GFP_KERNEL); + if (!i40e_cfgfs_vsi) + return ERR_PTR(-ENOMEM); + + np = netdev_priv(netdev); + i40e_cfgfs_vsi->vsi = np->vsi; + config_item_init_type_name(&i40e_cfgfs_vsi->item, name, + &i40e_cfgfs_vsi_type); + + return &i40e_cfgfs_vsi->item; +} + +static struct configfs_attribute i40e_cfgfs_group_attr_description = { + .ca_owner = THIS_MODULE, + .ca_name = "description", + .ca_mode = S_IRUGO, +}; + +static struct configfs_attribute *i40e_cfgfs_group_attrs[] = { + &i40e_cfgfs_group_attr_description, + NULL, +}; + +static ssize_t i40e_cfgfs_group_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + return sprintf(page, +"i40e\n" +"\n" +"This subsystem allows the modification of network port configurations.\n" +"To start, use the name of the network port to be configured in a 'mkdir'\n" +"command, e.g. 
'mkdir eth3'.\n"); +} + +static void i40e_cfgfs_group_release(struct config_item *item) +{ + kfree(to_i40e_cfgfs_group(item)); +} + +static struct configfs_item_operations i40e_cfgfs_group_item_ops = { + .release = i40e_cfgfs_group_release, + .show_attribute = i40e_cfgfs_group_attr_show, +}; + +/* Note that, since no extra work is required on ->drop_item(), + * no ->drop_item() is provided. + */ +static struct configfs_group_operations i40e_cfgfs_group_ops = { + .make_item = i40e_cfgfs_group_make_item, +}; + +static struct config_item_type i40e_cfgfs_group_type = { + .ct_item_ops = &i40e_cfgfs_group_item_ops, + .ct_group_ops = &i40e_cfgfs_group_ops, + .ct_attrs = i40e_cfgfs_group_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem i40e_cfgfs_group_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "i40e", + .ci_type = &i40e_cfgfs_group_type, + }, + }, +}; + +/** + * i40e_configfs_init - Initialize configfs support for our driver + **/ +int i40e_configfs_init(void) +{ + int ret; + struct configfs_subsystem *subsys; + + subsys = &i40e_cfgfs_group_subsys; + + config_group_init(&subsys->su_group); + mutex_init(&subsys->su_mutex); + ret = configfs_register_subsystem(subsys); + if (ret) { + pr_err("Error %d while registering configfs subsystem %s\n", + ret, subsys->su_group.cg_item.ci_namebuf); + return ret; + } + + return 0; +} + +/** + * i40e_configfs_init - Bail out - unregister configfs subsystem and release + **/ +void i40e_configfs_exit(void) +{ + configfs_unregister_subsystem(&i40e_cfgfs_group_subsys); +} +#endif /* IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index a11c70ca5a28..2f583554a260 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -269,22 +269,21 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, /** * i40e_dcbnl_flush_apps - Delete all removed APPs * @pf: the corresponding pf + * @old_cfg: old DCBX configuration data * @new_cfg: new DCBX configuration data * * Find and delete all APPs that are not present in the passed * DCB configuration **/ void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { struct i40e_dcb_app_priority_table app; - struct i40e_dcbx_config *dcbxcfg; - struct i40e_hw *hw = &pf->hw; int i; - dcbxcfg = &hw->local_dcbx_config; - for (i = 0; i < dcbxcfg->numapps; i++) { - app = dcbxcfg->app[i]; + for (i = 0; i < old_cfg->numapps; i++) { + app = old_cfg->app[i]; /* The APP is not available anymore delete it */ if (!i40e_dcbnl_find_app(new_cfg, &app)) i40e_dcbnl_del_app(pf, &app); @@ -306,9 +305,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return; - /* Do not setup DCB NL ops for MFP mode */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - dev->dcbnl_ops = &dcbnl_ops; + dev->dcbnl_ops = &dcbnl_ops; /* Set initial IEEE DCB settings */ i40e_dcbnl_set_all(vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c17ee77100d3..e802b6bc067d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -921,9 +921,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) return; } dev_info(&pf->pdev->dev, - "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n", + "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", veb->idx, 
veb->veb_idx, veb->stats_idx, veb->seid, - veb->uplink_seid); + veb->uplink_seid, + veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); i40e_dbg_dump_eth_stats(pf, &veb->stats); } @@ -1487,11 +1488,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); } - } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) { - i40e_pf_reset_stats(pf); - dev_info(&pf->pdev->dev, "pf clear stats called\n"); + } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { + if (pf->hw.partition_id == 1) { + i40e_pf_reset_stats(pf); + dev_info(&pf->pdev->dev, "port stats cleared\n"); + } else { + dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); + } } else { - dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n"); + dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); } } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { struct i40e_aq_desc *desc; @@ -1897,7 +1902,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " read <reg>\n"); dev_info(&pf->pdev->dev, " write <reg> <value>\n"); dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); - dev_info(&pf->pdev->dev, " clear_stats pf\n"); + dev_info(&pf->pdev->dev, " clear_stats port\n"); dev_info(&pf->pdev->dev, " pfr\n"); dev_info(&pf->pdev->dev, " corer\n"); dev_info(&pf->pdev->dev, " globr\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b8230dc205ec..7413b0e429c8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), I40E_PF_STAT("tx_errors", stats.eth.tx_errors), I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), - I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), I40E_PF_STAT("crc_errors", stats.crc_errors), I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), @@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) +static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { + "NPAR", +}; + +#define I40E_PRIV_FLAGS_STR_LEN \ + (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) + /** * i40e_partition_setting_complaint - generic complaint for MFP restriction * @pf: the PF struct @@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) } /** - * i40e_get_settings - Get Link Speed and Duplex settings + * i40e_get_settings_link_up - Get the Link settings for when link is up + * @hw: hw structure + * @ecmd: ethtool command to fill in * @netdev: network interface device structure - * @ecmd: ethtool command * - * Reports speed/duplex settings based on media_type **/ -static int i40e_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static void i40e_get_settings_link_up(struct i40e_hw *hw, + struct ethtool_cmd *ecmd, + struct net_device *netdev) { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_pf *pf = np->vsi->back; - struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; - bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; u32 link_speed = hw_link_info->link_speed; - /* hardware is either in 40G mode or 10G mode - * NOTE: this section initializes supported and advertising - */ - if (!link_up) { - /* link is down and the driver needs to fall back on - * device ID to determine what kinds of info to display, - * it's mostly a guess that may change when link is up - */ - switch (hw->device_id) { - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - /* pluggable QSFP */ - ecmd->supported = SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseLR4_Full; - ecmd->advertising = ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseLR4_Full; - break; - case I40E_DEV_ID_KX_B: - /* backplane 40G */ - ecmd->supported = SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_40000baseKR4_Full; - break; - case I40E_DEV_ID_KX_C: - /* backplane 10G */ - ecmd->supported = SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_10000baseKR_Full; - break; - case I40E_DEV_ID_10G_BASE_T: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - default: - /* all the rest are 10G/1G */ - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full; - break; - } - - /* skip phy_type use as it is zero when link is down */ - goto no_valid_phy_type; - } - + /* Initialize supported and advertised settings based on phy settings */ switch 
(hw_link_info->phy_type) { case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: @@ -304,6 +257,10 @@ static int i40e_get_settings(struct net_device *netdev, ecmd->advertising = ADVERTISED_Autoneg | ADVERTISED_40000baseCR4_Full; break; + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + ecmd->supported = SUPPORTED_40000baseCR4_Full; + break; case I40E_PHY_TYPE_40GBASE_KR4: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_40000baseKR4_Full; @@ -311,8 +268,6 @@ static int i40e_get_settings(struct net_device *netdev, ADVERTISED_40000baseKR4_Full; break; case I40E_PHY_TYPE_40GBASE_SR4: - case I40E_PHY_TYPE_XLPPI: - case I40E_PHY_TYPE_XLAUI: ecmd->supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: @@ -334,20 +289,40 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseKX_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseKX_Full; break; - case I40E_PHY_TYPE_10GBASE_CR1_CU: - case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_10GBASE_T: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; + ADVERTISED_10000baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: @@ -355,34 +330,14 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_SFPP_CU: ecmd->supported = SUPPORTED_10000baseT_Full; break; - case I40E_PHY_TYPE_1000BASE_KX: - case I40E_PHY_TYPE_1000BASE_T: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - case I40E_PHY_TYPE_100BASE_TX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; + if 
(hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; break; default: /* if we got here and link is up something bad is afoot */ @@ -390,8 +345,118 @@ static int i40e_get_settings(struct net_device *netdev, hw_link_info->phy_type); } -no_valid_phy_type: - /* this is if autoneg is enabled or disabled */ + /* Set speed and duplex */ + switch (link_speed) { + case I40E_LINK_SPEED_40GB: + /* need a SPEED_40000 in ethtool.h */ + ethtool_cmd_speed_set(ecmd, 40000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; +} + +/** + * i40e_get_settings_link_down - Get the Link settings for when link is down + * @hw: hw structure + * @ecmd: ethtool command to fill in + * + * Reports link settings that can be determined when link is down + **/ +static void i40e_get_settings_link_down(struct i40e_hw *hw, + struct ethtool_cmd *ecmd) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + + /* link is down and the driver needs to fall back on + * device ID to determine what kinds of info to display, + * it's mostly a guess that may change when link is up + */ + switch (hw->device_id) { + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + /* pluggable QSFP */ + ecmd->supported = SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseLR4_Full; + ecmd->advertising = ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseLR4_Full; + break; + case I40E_DEV_ID_KX_B: + /* backplane 40G */ + ecmd->supported = SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_40000baseKR4_Full; + break; + case I40E_DEV_ID_KX_C: + /* backplane 10G */ + ecmd->supported = SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_10000baseKR_Full; + break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + default: + /* all the rest are 10G/1G */ + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + } + + /* With no link speed and duplex are unknown */ + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; +} + +/** + * i40e_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type + **/ +static int i40e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = 
netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + + if (link_up) + i40e_get_settings_link_up(hw, ecmd, netdev); + else + i40e_get_settings_link_down(hw, ecmd); + + /* Now set the settings that don't rely on link being up/down */ + + /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -424,11 +489,13 @@ no_valid_phy_type: break; } + /* Set transceiver */ ecmd->transceiver = XCVR_EXTERNAL; + /* Set flow control settings */ ecmd->supported |= SUPPORTED_Pause; - switch (hw->fc.current_mode) { + switch (hw->fc.requested_mode) { case I40E_FC_FULL: ecmd->advertising |= ADVERTISED_Pause; break; @@ -445,30 +512,6 @@ no_valid_phy_type: break; } - if (link_up) { - switch (link_speed) { - case I40E_LINK_SPEED_40GB: - /* need a SPEED_40000 in ethtool.h */ - ethtool_cmd_speed_set(ecmd, 40000); - break; - case I40E_LINK_SPEED_10GB: - ethtool_cmd_speed_set(ecmd, SPEED_10000); - break; - case I40E_LINK_SPEED_1GB: - ethtool_cmd_speed_set(ecmd, SPEED_1000); - break; - case I40E_LINK_SPEED_100MB: - ethtool_cmd_speed_set(ecmd, SPEED_100); - break; - default: - break; - } - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } - return 0; } @@ -601,6 +644,8 @@ static int i40e_set_settings(struct net_device *netdev, config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; + /* save the requested speeds */ + hw->phy.link_info.requested_speeds = config.link_speed; /* set link and auto negotiation so changes take effect */ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* If link is up put link down */ @@ -621,7 +666,7 @@ static int i40e_set_settings(struct net_device *netdev, return -EAGAIN; } - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) netdev_info(netdev, "Updating link info failed with error %d\n", status); @@ -767,7 +812,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n", + netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", status, hw->aq.asq_last_status); err = -EAGAIN; } @@ -998,6 +1043,7 @@ static void i40e_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1185,6 +1231,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } else { return I40E_VSI_STATS_LEN(netdev); } + case ETH_SS_PRIV_FLAGS: + return I40E_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1358,6 +1406,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + default: + break; } } @@ -1599,6 +1656,8 @@ static int i40e_set_phys_id(struct net_device *netdev, case ETHTOOL_ID_INACTIVE: i40e_led_set(hw, pf->led_status, false); break; + default: + 
break; } return 0; @@ -1703,6 +1762,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { cmd->data = 0; + if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { + cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; + cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; + return 0; + } /* Report default options for RSS on i40e */ switch (cmd->flow_type) { case TCP_V4_FLOW: @@ -1974,6 +2038,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); + /* Save setting for future output/update */ + pf->vsi[pf->lan_vsi]->rxnfc = *nfc; + return 0; } @@ -2281,10 +2348,6 @@ static int i40e_set_channels(struct net_device *dev, /* update feature limits from largest to smallest supported values */ /* TODO: Flow director limit, DCB etc */ - /* cap RSS limit */ - if (count > pf->rss_size_max) - count = pf->rss_size_max; - /* use rss_reconfig to rebuild with new queue count and update traffic * class queue mapping */ @@ -2295,6 +2358,29 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; } +/** + * i40e_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +u32 i40e_get_priv_flags(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 ret_flags = 0; + + ret_flags |= pf->hw.func_caps.npar_enable ? + I40E_PRIV_FLAGS_NPAR_FLAG : 0; + + return ret_flags; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_settings = i40e_get_settings, .set_settings = i40e_set_settings, @@ -2326,6 +2412,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, .get_ts_info = i40e_get_ts_info, + .get_priv_flags = i40e_get_priv_flags, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 27c206e62da7..05d883e4d4ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -381,12 +381,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) ctxt->pf_num = hw->pf_id; ctxt->vf_num = 0; ctxt->uplink_seid = vsi->uplink_seid; - ctxt->connection_type = 0x1; + ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt->flags = I40E_AQ_VSI_TYPE_PF; /* FCoE VSI would need the following sections */ - info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID | - I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); /* FCoE VSI does not need these sections */ info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | @@ -395,7 +394,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) I40E_AQ_VSI_PROP_INGRESS_UP_VALID | I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); - info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + info->valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + info->switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } enabled_tc = i40e_get_fcoe_tc_map(pf); i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); @@ -1470,6 +1474,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_set_features = i40e_fcoe_set_features, }; +/* fcoe network device type */ +static struct device_type fcoe_netdev_type = { + .name = "fcoe", +}; + /** * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI * @vsi: pointer to the associated VSI struct @@ -1503,6 +1512,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); netdev->mtu = FCOE_MTU; SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); /* set different dev_port value 1 for FCoE netdev than the default * zero dev_port value for PF netdev, this helps biosdevname user * tool to differentiate them correctly while both attached to the diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index dadda3c5d658..56bdaff9f27e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 6 +#define DRV_VERSION_BUILD 10 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -919,11 +919,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); - i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), - pf->stat_offsets_loaded, - &osd->eth.tx_discards, - &nsd->eth.tx_discards); - i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, @@ -1576,6 +1571,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset; + if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else + vsi->num_queue_pairs = pf->num_lan_msix; + } /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { @@ -2596,7 +2597,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (ring_is_ps_enabled(ring)) { + i40e_alloc_rx_headers(ring); + i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); + } else { + i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); + } return 0; } @@ -3183,7 +3189,7 @@ static irqreturn_t i40e_intr(int irq, void *data) pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; - set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); } } @@ -4119,7 +4125,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else - enabled_tc = pf->hw.func_caps.enabled_tcmap; + return 1; /* Only TC0 */ /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); @@ -4169,11 +4175,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); - /* MPF enabled and iSCSI PF type */ + /* MFP enabled and iSCSI PF type */ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else - return pf->hw.func_caps.enabled_tcmap; + return i40e_pf_get_default_tc(pf); } /** @@ -4563,6 +4569,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err = 0; + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) + goto out; + /* Get the initial DCB configuration */ err = i40e_init_dcb(hw); if (!err) { @@ -4853,11 +4864,7 @@ exit: * * Returns 0 on success, negative value on failure **/ -#ifdef I40E_FCOE int i40e_open(struct net_device *netdev) -#else -static int i40e_open(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5055,24 +5062,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { - - /* Request a Firmware Reset - * - * Same as Global reset, plus restarting the - * embedded firmware engine. 
- */ - /* enable EMP Reset */ - val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); - val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; - wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); - - /* force the reset */ - val = rd32(&pf->hw, I40E_GLGEN_RTRIG); - val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; - wr32(&pf->hw, I40E_GLGEN_RTRIG, val); - i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset @@ -5195,7 +5184,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_aqc_lldp_get_mib *mib = (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; struct i40e_hw *hw = &pf->hw; - struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; struct i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; int ret = 0; @@ -5228,8 +5216,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); /* Store the old configuration */ - tmp_dcbx_cfg = *dcbx_cfg; + memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg)); + /* Reset the old DCBx configuration data */ + memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { @@ -5238,20 +5228,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, } /* No change detected in DCBX configs */ - if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { + if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, + sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } - need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, + &hw->local_dcbx_config); - i40e_dcbnl_flush_apps(pf, dcbx_cfg); + i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); if (!need_reconfig) goto exit; /* Enable DCB tagging only when more than one TC */ - if (i40e_dcb_get_num_tc(dcbx_cfg) > 1) + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; @@ -5919,6 +5911,26 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) } /** + * i40e_config_bridge_mode - Configure the HW bridge mode + * @veb: pointer to the bridge instance + * + * Configure the loop back mode for the LAN VSI that is downlink to the + * specified HW bridge instance. It is expected this function is called + * when a new HW bridge is instantiated. + **/ +static void i40e_config_bridge_mode(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + if (veb->bridge_mode & BRIDGE_MODE_VEPA) + i40e_disable_pf_switch_lb(pf); + else + i40e_enable_pf_switch_lb(pf); +} + +/** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * @@ -5964,8 +5976,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; - /* Enable LB mode for the main VSI now that it is on a VEB */ - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { @@ -6222,10 +6233,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } /* re-verify the eeprom if we just had an EMP reset */ - if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { - clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) i40e_verify_eeprom(pf); - } i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); @@ -6335,13 +6344,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } - msleep(75); - ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (ret) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); @@ -6728,6 +6738,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->idx = vsi_idx; vsi->rx_itr_setting = pf->rx_itr_default; vsi->tx_itr_setting = pf->tx_itr_default; + vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? + pf->rss_table_size : 64; vsi->netdev_registered = false; vsi->work_limit = I40E_DEFAULT_IRQ_WORK; INIT_LIST_HEAD(&vsi->mac_filter_list); @@ -6951,7 +6963,8 @@ static int i40e_init_msix(struct i40e_pf *pf) * If we can't get what we want, we'll simplify to nearly nothing * and try again. If that still fails, we punt. */ - pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); + pf->num_lan_msix = min_t(int, num_online_cpus(), + hw->func_caps.num_msix_vectors); pf->num_vmdq_msix = pf->num_vmdq_qps; other_vecs = 1; other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); @@ -7219,6 +7232,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) static int i40e_config_rss(struct i40e_pf *pf) { u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; u32 lut = 0; int i, j; @@ -7236,6 +7250,8 @@ static int i40e_config_rss(struct i40e_pf *pf) wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + /* Check capability and Set table size and register per hw expectation*/ reg_val = rd32(hw, I40E_PFQF_CTL_0); if (hw->func_caps.rss_table_size == 512) { @@ -7257,7 +7273,7 @@ static int i40e_config_rss(struct i40e_pf *pf) * If LAN VSI is the only consumer for RSS then this requirement * is not necessary. 
*/ - if (j == pf->rss_size) + if (j == vsi->rss_size) j = 0; /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (j & @@ -7281,15 +7297,19 @@ static int i40e_config_rss(struct i40e_pf *pf) **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + int new_rss_size; + if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; - queue_count = min_t(int, queue_count, pf->rss_size_max); + new_rss_size = min_t(int, queue_count, pf->rss_size_max); - if (queue_count != pf->rss_size) { + if (queue_count != vsi->num_queue_pairs) { + vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); - pf->rss_size = queue_count; + pf->rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); i40e_config_rss(pf); @@ -7299,6 +7319,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) } /** + * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) +{ + i40e_status status; + bool min_valid, max_valid; + u32 max_bw, min_bw; + + status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, + &min_valid, &max_valid); + + if (!status) { + if (min_valid) + pf->npar_min_bw = min_bw; + if (max_valid) + pf->npar_max_bw = max_bw; + } + + return status; +} + +/** + * i40e_set_npar_bw_setting - Set BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) +{ + struct i40e_aqc_configure_partition_bw_data bw_data; + i40e_status status; + + /* Set the valid bit for this pf */ + bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; + bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; + + /* Set the new bandwidths */ + status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); + + return status; +} + +/** + * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) +{ + /* Commit temporary BW setting to permanent NVM image */ + enum i40e_admin_queue_err last_aq_status; + i40e_status ret; + u16 nvm_word; + + if (pf->hw.partition_id != 1) { + dev_info(&pf->pdev->dev, + "Commit BW only works on partition 1! 
This is partition %d", + pf->hw.partition_id); + ret = I40E_NOT_SUPPORTED; + goto bw_commit_out; + } + + /* Acquire NVM for read access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for read access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Read word 0x10 of NVM - SW compatibility word 1 */ + ret = i40e_aq_read_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), &nvm_word, + false, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Wait a bit for NVM release to complete */ + msleep(50); + + /* Acquire NVM for write access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for write access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + /* Write it back out unchanged to initiate update NVM, + * which will force a write of the shadow (alt) RAM to + * the NVM - thus storing the bandwidth values permanently. + */ + ret = i40e_aq_update_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), + &nvm_word, true, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) + dev_info(&pf->pdev->dev, + "BW settings NOT SAVED, err %d aq_err %d\n", + ret, last_aq_status); +bw_commit_out: + + return ret; +} + +/** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * @@ -7324,8 +7466,12 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | - I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_RX_1BUF_ENABLED; + I40E_FLAG_MSIX_ENABLED; + + if (iommu_present(&pci_bus_type)) + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + else + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; /* Set default ITR */ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; @@ -7336,6 +7482,7 @@ static int i40e_sw_init(struct i40e_pf *pf) */ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; pf->rss_size = 1; + pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { @@ -7347,6 +7494,13 @@ static int i40e_sw_init(struct i40e_pf *pf) if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); + if (i40e_get_npar_bw_setting(pf)) + dev_warn(&pf->pdev->dev, + "Could not get NPAR bw settings\n"); + else + dev_info(&pf->pdev->dev, + "Min BW = %8.8x, Max BW = %8.8x\n", + pf->npar_min_bw, pf->npar_max_bw); } /* FW/NVM is not yet fixed in this regard */ @@ -7653,7 +7807,119 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -static const struct net_device_ops i40e_netdev_ops = { +#ifdef HAVE_BRIDGE_ATTRIBS +/** + * i40e_ndo_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * + * Inserts a new hardware bridge if not already created and + * 
enables the bridging mode requested (VEB or VEPA). If the + * hardware bridge has already been inserted and the request + * is to change the mode then that requires a PF reset to + * allow rebuild of the components with required hardware + * bridge mode enabled. + **/ +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + struct nlattr *attr, *br_spec; + int i, rem; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if ((mode != BRIDGE_MODE_VEPA) && + (mode != BRIDGE_MODE_VEB)) + return -EINVAL; + + /* Insert a new HW bridge */ + if (!veb) { + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + veb->bridge_mode = mode; + i40e_config_bridge_mode(veb); + } else { + /* No Bridge HW offload available */ + return -ENOENT; + } + break; + } else if (mode != veb->bridge_mode) { + /* Existing HW bridge but different mode needs reset */ + veb->bridge_mode = mode; + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + break; + } + } + + return 0; +} + +/** + * i40e_ndo_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq # + * @dev: the netdev being configured + * @filter_mask: unused + * + * Return the mode in which the hardware bridge is operating in + * i.e VEB or VEPA. 
+ **/ +#ifdef HAVE_BRIDGE_FILTER +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_BRIDGE_FILTER */ +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + int i; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for the PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + if (!veb) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ + +const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, .ndo_start_xmit = i40e_lan_xmit_frame, @@ -7687,6 +7953,10 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_getlink = i40e_ndo_bridge_getlink, + .ndo_bridge_setlink = i40e_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -7799,6 +8069,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi) } /** + * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB + * @vsi: the VSI being queried + * + * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode + **/ +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + struct i40e_pf *pf = vsi->back; + + /* Uplink is not a bridge so default to VEB */ + if (vsi->veb_idx == I40E_NO_VEB) + return 1; + + veb = pf->veb[vsi->veb_idx]; + /* Uplink is a bridge in VEPA mode */ + if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + return 0; + + /* Uplink is a bridge in VEB mode */ + return 1; +} + +/** * i40e_add_vsi - Add a VSI to the switch * @vsi: the VSI being configured * @@ -7883,12 +8177,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections |= + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id = + ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -7896,16 +8192,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. 
*/ - ctxt.info.switch_id = 0; - ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); @@ -7915,15 +8213,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. */ - ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; @@ -8281,7 +8582,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) @@ -8930,7 +9231,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -8938,7 +9239,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) I40E_AQ_AN_COMPLETED) ? true : false); /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -9008,7 +9309,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_DCB_CAPABLE; dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } - pf->num_lan_qps = pf->rss_size_max; + pf->num_lan_qps = max_t(int, pf->rss_size_max, + num_online_cpus()); + pf->num_lan_qps = min_t(int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp); + queues_left -= pf->num_lan_qps; } @@ -9106,8 +9411,10 @@ static void i40e_print_features(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); #endif - buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs); + buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? 
"PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) buf += sprintf(buf, "RSS "); @@ -9144,6 +9451,7 @@ static void i40e_print_features(struct i40e_pf *pf) **/ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; @@ -9409,13 +9717,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); - msleep(75); - err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (err) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. @@ -9499,6 +9808,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } + /* get the requested speeds from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); + if (err) + dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", + err); + pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* print a string summarizing features */ i40e_print_features(pf); @@ -9844,6 +10160,10 @@ static int __init i40e_init_module(void) pr_info("%s: %s - version %s\n", i40e_driver_name, i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + +#if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) + i40e_configfs_init(); +#endif /* CONFIG_I40E_CONFIGFS_FS */ i40e_dbg_init(); return pci_register_driver(&i40e_driver); } @@ -9859,5 +10179,8 @@ static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); i40e_dbg_exit(); +#if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) + i40e_configfs_exit(); +#endif /* CONFIG_I40E_CONFIGFS_FS */ } module_exit(i40e_exit_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 5defe0d63514..039018abad4a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) } /** - * i40e_read_nvm_word - Reads Shadow RAM + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 
**/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) +i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; @@ -200,6 +200,7 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, *data = (u16)((sr_reg & I40E_GLNVM_SRDATA_RDDATA_MASK) >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); + *data = le16_to_cpu(*data); } } if (ret_code) @@ -212,7 +213,21 @@ read_nvm_exit: } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + return i40e_read_nvm_word_srctl(hw, offset, data); +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read @@ -222,8 +237,8 @@ read_nvm_exit: * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { i40e_status ret_code = 0; u16 index, word; @@ -231,7 +246,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, /* Loop thru the selected region */ for (word = 0; word < *words; word++) { index = offset + word; - ret_code = i40e_read_nvm_word(hw, index, &data[word]); + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); if (ret_code) break; } @@ -243,6 +258,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} + +/** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. 
* @module_pointer: module pointer location in words from the NVM beginning @@ -302,11 +334,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { i40e_status ret_code = 0; + struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module = 0; - u16 word = 0; - u32 i = 0; + u16 *data; + u16 i = 0; + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; /* read pointer to VPD area */ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); @@ -317,7 +356,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, /* read pointer to PCIe Alt Auto-load module */ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, - &pcie_alt_module); + &pcie_alt_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -327,33 +366,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, * except the VPD and PCIe ALT Auto-load modules */ for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + /* Skip Checksum word */ if (i == I40E_SR_SW_CHECKSUM_WORD) - i++; + continue; /* Skip VPD module (convert byte size to word count) */ - if (i == (u32)vpd_module) { - i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; } /* Skip PCIe ALT module (convert byte size to word count) */ - if (i == (u32)pcie_alt_module) { - i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; } - ret_code = i40e_read_nvm_word(hw, (u16)i, &word); - if (ret_code) { - ret_code = I40E_ERR_NVM_CHECKSUM; - goto i40e_calc_nvm_checksum_exit; - } - checksum_local += word; + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; } *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 68e852a96680..fea0d37ecc72 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, @@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse); i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details); @@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); bool i40e_get_link_status(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, + bool *max_valid); +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); @@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw); i40e_status i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access); void i40e_release_nvm(struct i40e_hw *hw); -i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset, - u16 *data); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 65d3c8bb2d5b..522d6df51330 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) 
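/*
 * Illustrative sketch only, not part of the patch: how the SHIFT/MASK
 * pairs declared in i40e_register.h are typically consumed.  The field
 * extracted here is the RXQNUM field of the newly added
 * I40E_PRTDCB_RUPTQ register; rd32() and the struct i40e_hw pointer are
 * the accessors already used throughout this driver, and the helper
 * name below is hypothetical.  Assumes the driver's own headers.
 */
static u32 example_read_ruptq_rxqnum(struct i40e_hw *hw, int idx)
{
	u32 val = rd32(hw, I40E_PRTDCB_RUPTQ(idx));

	/* mask out the field, then shift it down to bit 0 */
	return (val & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	       I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
}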
@@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define 
I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 
#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index bbf1b1247ac4..d4b4aa7c204e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40e.h" #include "i40e_prototype.h" @@ -1031,6 +1032,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = 0; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -1089,6 +1106,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40e_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. 
*/ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1148,11 +1196,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_alloc_rx_buffers - Replace used receive buffers; packet split + * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1192,40 +1305,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
- */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -1279,10 +1360,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -1410,13 +1491,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -1432,25 +1513,51 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (budget <= 0) return 0; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
+ */ + rmb(); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); - goto next_desc; + I40E_RX_INCREMENT(rx_ring, i); + continue; } rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); - + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) + rx_ring->rx_stats.alloc_buff_failed++; + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -1465,40 +1572,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1520,22 +1617,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1544,7 +1635,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? 
*/ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1570,33 +1661,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) #ifdef I40E_FCOE if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { dev_kfree_skb_any(skb); - goto next_desc; + continue; } #endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers(rx_ring, cleaned_count); + i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + rmb(); + + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1604,10 +1811,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40e_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1628,6 +1832,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1647,8 +1852,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1838,6 +2049,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } + if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + goto out; + /* Insert 802.1p priority into VLAN header */ if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL)) { @@ -1858,6 +2072,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_HW_VLAN; } } + +out: *flags = tx_flags; return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index dff0baeb1ecc..4b0b8102cdc3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -152,6 +160,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -224,8 +233,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -281,7 +290,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index e9901ef06a63..90069396bb28 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -175,12 +175,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -1401,6 +1401,19 @@ struct i40e_lldp_variables { u16 crc8; }; +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 61dd1b187624..2d20af290fbf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. 
*/ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. - */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 40f042af4131..910c45e83fdd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -752,7 +752,7 @@ void i40e_enable_pf_switch_lb(struct i40e_pf *pf) * * disable switch loop back or die - no point in a return value **/ -static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; @@ -832,7 +832,6 @@ void i40e_free_vfs(struct i40e_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); } - i40e_disable_pf_switch_lb(pf); } else { dev_warn(&pf->pdev->dev, "unable to disable SR-IOV because VFs are assigned.\n"); @@ -891,7 +890,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) } pf->num_alloc_vfs = num_alloc_vfs; - i40e_enable_pf_switch_lb(pf); err_alloc: if (ret) i40e_free_vfs(pf); @@ -2427,7 +2425,8 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) ctxt.pf_num = pf->hw.pf_id; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); if (enable) - ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 9452f5247cff..ef777a62e393 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -127,5 +127,6 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); void i40e_enable_pf_switch_lb(struct i40e_pf *pf); +void i40e_disable_pf_switch_lb(struct i40e_pf *pf); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 60f04e96a80e..ef43d68f67b3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 28c40c57d4f5..50b0ee54fc06 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -94,16 +94,19 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { memset(data, 0, sizeof(data)); @@ -116,15 +119,19 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if ((i % 16) == 15) { i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); + i - 15, le32_to_cpu(data[0]), + le32_to_cpu(data[1]), + le32_to_cpu(data[2]), + le32_to_cpu(data[3])); memset(data, 0, sizeof(data)); } } if ((i % 16) != 0) i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); + i - (i % 16), le32_to_cpu(data[0]), + le32_to_cpu(data[1]), + le32_to_cpu(data[2]), + le32_to_cpu(data[3])); } } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c1f6a59bfea0..3cc737629bf7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) 
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define 
I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 708891571dae..fe13ad2def46 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40evf.h" #include "i40e_prototype.h" @@ -529,6 +530,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = 0; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -587,6 +604,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40evf_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. 
*/ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -646,11 +694,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split + * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -690,40 +803,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
- */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -777,10 +858,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -906,13 +987,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -925,20 +1006,46 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u8 rx_ptype; u64 qword; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
+ */ + rmb(); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); - + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) + rx_ring->rx_stats.alloc_buff_failed++; + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -953,40 +1060,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1008,22 +1105,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1032,7 +1123,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? */ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1048,30 +1139,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + rmb(); + + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1079,10 +1274,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1103,6 +1295,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1122,8 +1315,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c950a038237c..1e49bb1fbac1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -151,6 +159,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -223,8 +232,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -278,7 +287,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3d0fdaab5cc8..a2693865594a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -175,12 +175,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index e0c8208138f4..59f62f0e65dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. */ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. 
- */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 69b97bac182c..681a5d4b4f6a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -180,7 +180,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) } /** - * i40evf_get_msglevel - Set debug message level + * i40evf_set_msglevel - Set debug message level * @netdev: network interface device structure * @data: message level * @@ -191,6 +191,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + if (I40E_DEBUG_USER & data) + adapter->hw.debug_mask = data; adapter->msg_enable = data; } @@ -640,12 +642,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; + if (indir) { + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); + indir[j++] = hlut_val & 0xff; + indir[j++] = (hlut_val >> 8) & 0xff; + indir[j++] = (hlut_val >> 16) & 0xff; + indir[j++] = (hlut_val >> 24) & 0xff; + } } return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8d8c201c63c1..a95135846ea9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.0" +#define DRV_VERSION "1.2.4" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -524,7 +524,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) int err; snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx"); + sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, &i40evf_msix_aq, 0, adapter->misc_vector_name, netdev); @@ -761,13 +762,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr) { struct i40evf_mac_filter *f; + int count = 50; if (!macaddr) return NULL; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) + return NULL; + } f = i40evf_find_filter(adapter, macaddr); if (!f) { @@ -828,6 +833,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct i40evf_mac_filter *f, *ftmp; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; + int count = 50; /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { @@ -838,8 +844,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev) } while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) { + dev_err(&adapter->pdev->dev, + "Failed to get lock in %s\n", __func__); + return; + } + } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { bool found = false; @@ -920,7 +932,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, ring->count); + i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -959,6 +971,7 @@ void i40evf_down(struct i40evf_adapter *adapter) usleep_range(500, 1000); i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); /* remove all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -985,8 +998,6 @@ void i40evf_down(struct i40evf_adapter *adapter) netif_tx_stop_all_queues(netdev); - i40evf_napi_disable_all(adapter); - msleep(20); netif_carrier_off(netdev); @@ -1481,9 +1492,11 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_adapter *adapter = container_of(work, struct i40evf_adapter, reset_task); + struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; - int i = 0, err; + struct i40evf_mac_filter *f; uint32_t rstat_val; + int i = 0, err; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) @@ -1528,7 +1541,11 @@ static void i40evf_reset_task(struct work_struct *work) if (netif_running(adapter->netdev)) { set_bit(__I40E_DOWN, &adapter->vsi.state); - i40evf_down(adapter); + i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); 
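/* A hedged sketch (not part of the patch): the bounded busy-wait that
 * i40evf_add_filter() and i40evf_set_rx_mode() adopt above can be read as
 * the helper below.  The helper name is hypothetical; it only illustrates
 * giving up after roughly 50 us instead of spinning on the critical-section
 * bit forever.
 */
static bool i40evf_try_crit_lock(struct i40evf_adapter *adapter)
{
	int count = 50;

	/* test_and_set_bit() returns the previous bit value, so a non-zero
	 * result means another task already holds the critical section.
	 */
	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return false;	/* caller decides how to back off */
	}
	return true;
}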
i40evf_free_traffic_irqs(adapter); i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); @@ -1560,22 +1577,37 @@ static void i40evf_reset_task(struct work_struct *work) continue_reset: adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - i40evf_down(adapter); + i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); + + netif_tx_disable(netdev); + + netif_tx_stop_all_queues(netdev); + + netif_carrier_off(netdev); adapter->state = __I40EVF_RESETTING; /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) - dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n"); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; err = i40evf_init_adminq(hw); if (err) - dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", + err); - adapter->aq_pending = 0; - adapter->aq_required = 0; i40evf_map_queues(adapter); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->add = true; + } + /* re-add all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->add = true; + } + adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); mod_timer(&adapter->watchdog_timer, jiffies + 2); diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index d9fa999b1685..ae3f28332fa0 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
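One note on the i40evf_napi_poll change earlier in this patch before the igbvf portion: the Rx clean routines now return a packet count instead of a bool, and the poll loop infers completion from that count. A condensed sketch of the accounting, using the names from the hunk:

i40e_for_each_ring(ring, q_vector->rx) {
	int cleaned = ring_is_ps_enabled(ring) ?
		      i40e_clean_rx_irq_ps(ring, budget_per_ring) :
		      i40e_clean_rx_irq_1buf(ring, budget_per_ring);

	/* cleaned < budget_per_ring  -> the ring ran dry, it is done;
	 * cleaned == budget_per_ring -> assume more packets are waiting,
	 * so NAPI is asked to poll again (worst case: one harmless extra
	 * pass when the last packet exactly filled the budget).
	 */
	clean_complete &= (cleaned != budget_per_ring);
}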
@@ -29,94 +28,93 @@ #define _E1000_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 /* IVAR valid bit */ -#define E1000_IVAR_VALID 0x80 +#define E1000_IVAR_VALID 0x80 /* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ - -#define E1000_RXDEXT_STATERR_LB 0x00040000 -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) /* Device Control */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ /* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define 
E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 /* Transmit Descriptor bit definitions */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */ -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 /* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ /* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_MBX 15 +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 /* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 -#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 /* Additional Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ /* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 2178f87e9f61..c6996feb1cb4 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
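E1000_RXDEXT_ERR_FRAME_ERR_MASK, re-grouped a little earlier in defines.h, bundles the link-level error bits so the Rx cleanup path can test them in one go. A hedged sketch of the typical check against the descriptor's write-back status word (the exact call sites are not part of this hunk):

u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
	/* CRC/symbol/sequence errors: drop the frame rather than pass
	 * a corrupt packet up the stack
	 */
	dev_kfree_skb_irq(skb);
} else {
	igbvf_rx_checksum_adv(adapter, staterr, skb);
	/* ... hand the skb to the stack ... */
}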
@@ -36,7 +35,6 @@ #include "igbvf.h" #include <linux/if_vlan.h> - struct igbvf_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) static int igbvf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev, } static int igbvf_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { return -EOPNOTSUPP; } static void igbvf_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { } static int igbvf_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { return -EOPNOTSUPP; } @@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev, static u32 igbvf_get_msglevel(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void igbvf_set_msglevel(struct net_device *netdev, u32 data) { struct igbvf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev) } static void igbvf_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) + struct ethtool_regs *regs, void *p) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev) } static int igbvf_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static int igbvf_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static void igbvf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) + struct ethtool_drvinfo *drvinfo) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev, } static void igbvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev, } static int igbvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; @@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); + new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); + new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD); 
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring->count) && @@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { adapter->tx_ring->count = new_tx_count; @@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_down(adapter); - /* - * We can't just free everything and then setup again, + /* We can't just free everything and then setup again, * because the ISRs in MSI-X mode get passed pointers - * to the tx and rx ring structs. + * to the Tx and Rx ring structs. */ if (new_tx_count != adapter->tx_ring->count) { memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); @@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_free_rx_resources(adapter->rx_ring); - memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); + memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); } err_setup: igbvf_up(adapter); @@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) } static void igbvf_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) + struct ethtool_test *eth_test, u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); set_bit(__IGBVF_TESTING, &adapter->state); - /* - * Link test performed before hardware reset so autoneg doesn't + /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (igbvf_link_test(adapter, &data[0])) @@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev, } static void igbvf_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; } static int igbvf_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { return -EOPNOTSUPP; } static int igbvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev, } static int igbvf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && - (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { + (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { adapter->current_itr = ec->rx_coalesce_usecs << 2; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); @@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = IGBVF_START_ITR; adapter->requested_itr = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { - /* - * The user's desire is to turn off interrupt throttling + /* The user's desire is to turn off interrupt throttling * altogether, but due to HW limitations, we can't do that. 
* Instead we set a very small value in EITR, which would * allow ~967k interrupts per second, but allow the adapter's @@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = 4; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); - } else + } else { return -EINVAL; + } writel(adapter->current_itr, hw->hw_addr + adapter->rx_ring->itr_register); @@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev, static int igbvf_nway_reset(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) igbvf_reinit_locked(adapter); return 0; } - static void igbvf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, - u64 *data) + struct ethtool_stats *stats, + u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); int i; @@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev, igbvf_update_stats(adapter); for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { char *p = (char *)adapter + - igbvf_gstrings_stats[i].stat_offset; + igbvf_gstrings_stats[i].stat_offset; char *b = (char *)adapter + - igbvf_gstrings_stats[i].base_stat_offset; + igbvf_gstrings_stats[i].base_stat_offset; data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : - (*(u32 *)p - *(u32 *)b)); + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); } - } static int igbvf_get_sset_count(struct net_device *dev, int stringset) { - switch(stringset) { + switch (stringset) { case ETH_SS_TEST: return IGBVF_TEST_LEN; case ETH_SS_STATS: @@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset) } static void igbvf_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) + u8 *data) { u8 *p = data; int i; diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 7d6a25c8f889..f166baab8d7e 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
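A worked example of the interrupt-throttle conversion in igbvf_set_coalesce above, assuming the register ticks in roughly 256 ns units (which is what the "* 256" in the rate conversion implies):

u32 usecs = 100;			/* ethtool rx-usecs input, within [10, 10000] */
u32 itr = usecs << 2;			/* = 400 register units */
u32 rate = 1000000000U / (itr * 256);	/* ~= 9765 interrupts/sec */
/* close to the 10000/sec a 100 us interval suggests; the small gap comes
 * from 4 * 256 ns = 1.024 us per requested microsecond
 */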
@@ -43,10 +42,10 @@ struct igbvf_info; struct igbvf_adapter; /* Interrupt defines */ -#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ -#define IGBVF_4K_ITR 980 -#define IGBVF_20K_ITR 196 -#define IGBVF_70K_ITR 56 +#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ +#define IGBVF_4K_ITR 980 +#define IGBVF_20K_ITR 196 +#define IGBVF_70K_ITR 56 enum latency_range { lowest_latency = 0, @@ -55,56 +54,55 @@ enum latency_range { latency_invalid = 255 }; - /* Interrupt modes, as used by the IntMode parameter */ -#define IGBVF_INT_MODE_LEGACY 0 -#define IGBVF_INT_MODE_MSI 1 -#define IGBVF_INT_MODE_MSIX 2 +#define IGBVF_INT_MODE_LEGACY 0 +#define IGBVF_INT_MODE_MSI 1 +#define IGBVF_INT_MODE_MSIX 2 /* Tx/Rx descriptor defines */ -#define IGBVF_DEFAULT_TXD 256 -#define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 +#define IGBVF_DEFAULT_TXD 256 +#define IGBVF_MAX_TXD 4096 +#define IGBVF_MIN_TXD 80 -#define IGBVF_DEFAULT_RXD 256 -#define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 +#define IGBVF_DEFAULT_RXD 256 +#define IGBVF_MAX_RXD 4096 +#define IGBVF_MIN_RXD 80 -#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ -#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ +#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ /* RX descriptor control thresholds. * PTHRESH - MAC will consider prefetch if it has fewer than this number of - * descriptors available in its onboard memory. - * Setting this to 0 disables RX descriptor prefetch. + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. * HTHRESH - MAC will only prefetch if there are at least this many descriptors - * available in host memory. - * If PTHRESH is 0, this should also be 0. + * available in host memory. + * If PTHRESH is 0, this should also be 0. * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back - * descriptors until either it has this many to write back, or the - * ITR timer expires. + * descriptors until either it has this many to write back, or the + * ITR timer expires. */ -#define IGBVF_RX_PTHRESH 16 -#define IGBVF_RX_HTHRESH 8 -#define IGBVF_RX_WTHRESH 1 +#define IGBVF_RX_PTHRESH 16 +#define IGBVF_RX_HTHRESH 8 +#define IGBVF_RX_WTHRESH 1 /* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 -#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ +#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ /* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define IGBVF_TX_QUEUE_WAKE 32 +#define IGBVF_TX_QUEUE_WAKE 32 /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ -#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define AUTO_ALL_MODES 0 -#define IGBVF_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 +#define IGBVF_EEPROM_APME 0x0400 -#define IGBVF_MNG_VLAN_NONE (-1) +#define IGBVF_MNG_VLAN_NONE (-1) /* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) enum igbvf_boards { board_vf, @@ -116,8 +114,7 @@ struct igbvf_queue_stats { u64 bytes; }; -/* - * wrappers around a pointer to a socket buffer, +/* wrappers around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct igbvf_buffer { @@ -148,10 +145,10 @@ union igbvf_desc { struct igbvf_ring { struct igbvf_adapter *adapter; /* backlink */ - union igbvf_desc *desc; /* pointer to ring memory */ - dma_addr_t dma; /* phys address of ring */ - unsigned int size; /* length of ring in bytes */ - unsigned int count; /* number of desc. in ring */ + union igbvf_desc *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. in ring */ u16 next_to_use; u16 next_to_clean; @@ -202,9 +199,7 @@ struct igbvf_adapter { u32 requested_itr; /* ints/sec or adaptive */ u32 current_itr; /* Actual ITR register value, not ints/sec */ - /* - * Tx - */ + /* Tx */ struct igbvf_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; @@ -226,9 +221,7 @@ struct igbvf_adapter { u32 tx_fifo_size; u32 tx_dma_failed; - /* - * Rx - */ + /* Rx */ struct igbvf_ring *rx_ring; u32 rx_int_delay; @@ -249,7 +242,7 @@ struct igbvf_adapter { struct net_device *netdev; struct pci_dev *pdev; struct net_device_stats net_stats; - spinlock_t stats_lock; /* prevent concurrent stats updates */ + spinlock_t stats_lock; /* prevent concurrent stats updates */ /* structs defined in e1000_hw.h */ struct e1000_hw hw; @@ -286,16 +279,16 @@ struct igbvf_adapter { }; struct igbvf_info { - enum e1000_mac_type mac; - unsigned int flags; - u32 pba; - void (*init_ops)(struct e1000_hw *); - s32 (*get_variants)(struct igbvf_adapter *); + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); }; /* hardware capability, feature, and workaround flags */ -#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) -#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) +#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) +#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) #define IGBVF_RX_DESC_ADV(R, i) \ (&((((R).desc))[i].rx_desc)) #define IGBVF_TX_DESC_ADV(R, i) \ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index b4b65bc9fc5d..7b6cb4c3764c 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
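The next_to_use/next_to_clean fields in struct igbvf_ring above are the producer/consumer cursors for the descriptor ring. The free-descriptor count that drives refill and queue-wake decisions is conventionally derived as in this sketch (mirroring what igbvf_desc_unused does; the function name here is illustrative):

static int ring_space(const struct igbvf_ring *ring)
{
	/* one slot is deliberately left unused so that
	 * next_to_use == next_to_clean can only mean "empty", never "full"
	 */
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;
	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}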
@@ -54,10 +53,10 @@ out: } /** - * e1000_poll_for_ack - Wait for message acknowledgement + * e1000_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * - * returns SUCCESS if it successfully received a message acknowledgement + * returns SUCCESS if it successfully received a message acknowledgment **/ static s32 e1000_poll_for_ack(struct e1000_hw *hw) { @@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) s32 ret_val = -E1000_ERR_MBX; if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | - E1000_V2PMAILBOX_RSTI))) { + E1000_V2PMAILBOX_RSTI))) { ret_val = E1000_SUCCESS; hw->mbx.stats.rsts++; } @@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) /* Take ownership of the buffer */ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - /* reserve mailbox for vf use */ + /* reserve mailbox for VF use */ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) ret_val = E1000_SUCCESS; @@ -283,7 +282,7 @@ out_no_write: } /** - * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * e1000_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -315,17 +314,18 @@ out_no_read: } /** - * e1000_init_mbx_params_vf - set initial values for vf mailbox + * e1000_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * - * Initializes the hw->mbx struct to correct values for vf mailbox + * Initializes the hw->mbx struct to correct values for VF mailbox */ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout - * value to being communications */ + * value to being communications + */ mbx->timeout = 0; mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; @@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) return E1000_SUCCESS; } - diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 24370bcb0e22..f800bf8eedae 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
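The e1000_obtain_mbx_lock_vf change above is only comment/whitespace cleanup, but the write-then-read-back it performs is worth spelling out: the VFU bit only sticks if the PF is not currently holding the mailbox buffer, so the read-back is what actually confirms ownership. A minimal sketch of that handshake:

static s32 try_take_mailbox(struct e1000_hw *hw)
{
	/* request ownership of the VF/PF mailbox buffer */
	ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);

	/* the write is ignored while the PF owns the buffer, so only a
	 * read-back showing VFU set means the VF really holds the lock
	 */
	if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
		return E1000_SUCCESS;

	return -E1000_ERR_MBX;
}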
@@ -30,44 +29,44 @@ #include "vf.h" -#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is E1000_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* We have a total wait time of 1s for vf mailbox posted messages */ -#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ -#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ -#define E1000_VT_MSGINFO_SHIFT 16 +#define E1000_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for exra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define E1000_PF_CONTROL_MSG 
0x0100 /* PF control message */ void e1000_init_mbx_ops_generic(struct e1000_hw *hw); s32 e1000_init_mbx_params_vf(struct e1000_hw *); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ebf9d4a42fdd..c17ea4b8f84d 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *); static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); static struct igbvf_info igbvf_vf_info = { - .mac = e1000_vfadapt, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static struct igbvf_info igbvf_i350_vf_info = { - .mac = e1000_vfadapt_i350, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt_i350, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static const struct igbvf_info *igbvf_info_tbl[] = { - [board_vf] = &igbvf_vf_info, - [board_i350_vf] = &igbvf_i350_vf_info, + [board_vf] = &igbvf_vf_info, + [board_i350_vf] = &igbvf_i350_vf_info, }; /** * igbvf_desc_unused - calculate if we have unused descriptors + * @rx_ring: address of receive ring structure **/ static int igbvf_desc_unused(struct igbvf_ring *ring) { @@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring) * @skb: pointer to sk_buff to be indicated to stack **/ static void igbvf_receive_skb(struct igbvf_adapter *adapter, - struct net_device *netdev, - struct sk_buff *skb, - u32 status, u16 vlan) + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) { u16 vid; @@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter, } static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, - u32 status_err, struct sk_buff *skb) + u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, * @cleaned_count: number of buffers to repopulate **/ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, - int cleaned_count) + int cleaned_count) { struct igbvf_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; @@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } buffer_info->page_dma = dma_map_page(&pdev->dev, buffer_info->page, - buffer_info->page_offset, - PAGE_SIZE / 2, + buffer_info->page_offset, + PAGE_SIZE / 2, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->page_dma)) { @@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, buffer_info->skb = skb; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, - bufsz, + bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_kfree_skb(buffer_info->skb); @@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } } /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases 
this info. */ + * each write-back erases this info. + */ if (adapter->rx_ps_hdr_size) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->dma); + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } @@ -247,7 +247,8 @@ no_buffers: /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); writel(i, adapter->hw.hw_addr + rx_ring->tail); } @@ -261,7 +262,7 @@ no_buffers: * is no guarantee that everything was cleaned **/ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, - int *work_done, int work_to_do) + int *work_done, int work_to_do) { struct igbvf_ring *rx_ring = adapter->rx_ring; struct net_device *netdev = adapter->netdev; @@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, * that case, it fills the header buffer and spills the rest * into the page. */ - hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) + & E1000_RXDADV_HDRBUFLEN_MASK) >> + E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > adapter->rx_ps_hdr_size) hlen = adapter->rx_ps_hdr_size; @@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, buffer_info->skb = NULL; if (!adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; skb_put(skb, length); @@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, if (!skb_shinfo(skb)->nr_frags) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); skb_put(skb, hlen); } if (length) { dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); + buffer_info->page, + buffer_info->page_offset, + length); if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || (page_count(buffer_info->page) != 1)) @@ -370,7 +372,7 @@ send_up: skb->protocol = eth_type_trans(skb, netdev); igbvf_receive_skb(adapter, netdev, skb, staterr, - rx_desc->wb.upper.vlan); + rx_desc->wb.upper.vlan); next_desc: rx_desc->wb.upper.status_error = 0; @@ -402,7 +404,7 @@ next_desc: } static void igbvf_put_txbuf(struct igbvf_adapter *adapter, - struct igbvf_buffer *buffer_info) + struct igbvf_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) @@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter, * Return 0 on success, negative on failure **/ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring) + struct igbvf_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; @@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, err: vfree(tx_ring->buffer_info); dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the transmit descriptor ring\n"); + "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } @@ -501,7 +503,7 @@ err: vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; 
dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the receive descriptor ring\n"); + "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } @@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) { - if (adapter->rx_ps_hdr_size){ + if (adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); } else { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); } buffer_info->dma = 0; @@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) if (buffer_info->page_dma) dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); put_page(buffer_info->page); buffer_info->page = NULL; @@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) rx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); + rx_ring->dma); rx_ring->desc = NULL; } @@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) * @packets: the number of packets during this measurement interval * @bytes: the number of bytes during this measurement interval * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte counts during the last + * interrupt. The advantage of per interrupt computation is faster updates + * and more accurate ITR for the current traffic pattern. Constants in this + * function were computed based on theoretical maximum wire speed and thresholds + * were set based on testing data as well as attempting to minimize response + * time while increasing bulk throughput. **/ static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, enum latency_range itr_setting, @@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); - if (new_itr != adapter->tx_ring->itr_val) { u32 current_itr = adapter->tx_ring->itr_val; - /* - * this attempts to bias the interrupt rate towards Bulk + /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ new_itr = new_itr > current_itr ? - min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->tx_ring->itr_val = new_itr; adapter->tx_ring->set_itr = 1; @@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) if (new_itr != adapter->rx_ring->itr_val) { u32 current_itr = adapter->rx_ring->itr_val; + new_itr = new_itr > current_itr ? 
- min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->rx_ring->itr_val = new_itr; adapter->rx_ring->set_itr = 1; @@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) segs = skb_shinfo(skb)->gso_segs ?: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; + skb->len; total_packets += segs; total_bytes += bytecount; } @@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) tx_ring->next_to_clean = i; - if (unlikely(count && - netif_carrier_ok(netdev) && - igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + if (unlikely(count && netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; - /* auto mask will automatically reenable the interrupt when we write - * EICS */ + /* auto mask will automatically re-enable the interrupt when we write + * EICS + */ if (!igbvf_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ ew32(EICS, tx_ring->eims_value); @@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) #define IGBVF_NO_QUEUE -1 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, - int tx_queue, int msix_vector) + int tx_queue, int msix_vector) { struct e1000_hw *hw = &adapter->hw; u32 ivar, index; /* 82576 uses a table-based method for assigning vectors. - Each queue has a single entry in the table to which we write - a vector number along with a "valid" bit. Sadly, the layout - of the table is somewhat counterintuitive. */ + * Each queue has a single entry in the table to which we write + * a vector number along with a "valid" bit. Sadly, the layout + * of the table is somewhat counterintuitive. + */ if (rx_queue > IGBVF_NO_QUEUE) { index = (rx_queue >> 1); ivar = array_er32(IVAR0, index); @@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, /** * igbvf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure * * igbvf_configure_msix sets up the hardware to properly * generate MSI-X interrupts. @@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) /** * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. 
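A worked example of the ramp logic above (a larger ITR value means a longer interval, i.e. fewer interrupts per second): with current_itr = 196 (~20k ints/s) and a requested new_itr = 980 (~4k ints/s), successive adjustments give min(196 + 245, 980) = 441, then 686, then 931, then 980, so the move toward bulk throughput happens in steps, while a move back toward low latency (new_itr < current_itr) is applied at once.

/* illustrative only; mirrors the expression used for both Tx and Rx rings */
static u32 step_itr(u32 current_itr, u32 new_itr)
{
	return new_itr > current_itr ?
	       min(current_itr + (new_itr >> 2), new_itr) : new_itr;
}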
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) int err = -ENOMEM; int i; - /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ + /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), - GFP_KERNEL); + GFP_KERNEL); if (adapter->msix_entries) { for (i = 0; i < 3; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix_range(adapter->pdev, - adapter->msix_entries, 3, 3); + adapter->msix_entries, 3, 3); } if (err < 0) { /* MSI-X failed */ dev_err(&adapter->pdev->dev, - "Failed to initialize MSI-X interrupts.\n"); + "Failed to initialize MSI-X interrupts.\n"); igbvf_reset_interrupt_capability(adapter); } } /** * igbvf_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure * * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. @@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) } err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_tx, 0, adapter->tx_ring->name, - netdev); + igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); if (err) goto out; @@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_rx, 0, adapter->rx_ring->name, - netdev); + igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); if (err) goto out; @@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_msix_other, 0, netdev->name, netdev); + igbvf_msix_other, 0, netdev->name, netdev); if (err) goto out; @@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter) /** * igbvf_request_irq - initialize interrupts + * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. @@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter) return err; dev_err(&adapter->pdev->dev, - "Unable to allocate interrupt, Error: %d\n", err); + "Unable to allocate interrupt, Error: %d\n", err); return err; } @@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter) /** * igbvf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure **/ static void igbvf_irq_disable(struct igbvf_adapter *adapter) { @@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter) /** * igbvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure **/ static void igbvf_irq_enable(struct igbvf_adapter *adapter) { @@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, - "Failed to remove vlan id %d\n", vid); + "Failed to remove vlan id %d\n", vid); return -EINVAL; } clear_bit(vid, adapter->active_vlans); @@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) /* Turn off Relaxed Ordering on head write-backs. The writebacks * MUST be delivered in order or it will completely screw up - * our bookeeping. + * our bookkeeping. 
*/ dca_txctrl = er32(DCA_TXCTRL(0)); dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; @@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) u32 srrctl = 0; srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | - E1000_SRRCTL_BSIZEHDR_MASK | - E1000_SRRCTL_BSIZEPKT_MASK); + E1000_SRRCTL_BSIZEHDR_MASK | + E1000_SRRCTL_BSIZEPKT_MASK); /* Enable queue drop to avoid head of line blocking */ srrctl |= E1000_SRRCTL_DROP_EN; /* Setup buffer sizes */ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> - E1000_SRRCTL_BSIZEPKT_SHIFT; + E1000_SRRCTL_BSIZEPKT_SHIFT; if (adapter->rx_buffer_len < 2048) { adapter->rx_ps_hdr_size = 0; @@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) } else { adapter->rx_ps_hdr_size = 128; srrctl |= adapter->rx_ps_hdr_size << - E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } @@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and + /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; @@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter) igbvf_setup_srrctl(adapter); igbvf_configure_rx(adapter); igbvf_alloc_rx_buffers(adapter->rx_ring, - igbvf_desc_unused(adapter->rx_ring)); + igbvf_desc_unused(adapter->rx_ring)); } /* igbvf_reset - bring the hardware into a known good state + * @adapter: private board structure * * This function boots the hardware and enables some settings that * require a configuration cycle of the hardware - those cannot be @@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter) hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies + 1); - return 0; } @@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rxdctl, txdctl; - /* - * signal that we're down so the interrupt handler does not + /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -1547,7 +1552,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter) { might_sleep(); while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); igbvf_down(adapter); igbvf_up(adapter); clear_bit(__IGBVF_RESETTING, &adapter->state); @@ -1662,8 +1667,7 @@ static int igbvf_open(struct net_device *netdev) if (err) goto err_setup_rx; - /* - * before we allocate an interrupt, we must be ready to handle it. + /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. 
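The SRRCTL packet-buffer size programmed in igbvf_setup_srrctl above is expressed in 1 KB units, which is what the ALIGN to 1024 followed by the 10-bit right shift achieves. A worked sketch (the single-buffer branch body is abridged in the hunk; ADV_ONEBUF is the descriptor type it presumably selects):

u32 srrctl = 0;
u32 rx_buffer_len = 1522;	/* MAXIMUM_ETHERNET_VLAN_SIZE, for example */

/* ALIGN(1522, 1024) = 2048; 2048 >> 10 = 2  ->  a 2 KB packet buffer */
srrctl |= ALIGN(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;

if (rx_buffer_len < 2048) {
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;	/* no header split */
} else {
	/* 128-byte header buffer plus header-split-always descriptors */
	srrctl |= (128 << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT) |
		  E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
}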
@@ -1725,6 +1729,7 @@ static int igbvf_close(struct net_device *netdev) return 0; } + /** * igbvf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -1753,15 +1758,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) return 0; } -#define UPDATE_VF_COUNTER(reg, name) \ - { \ - u32 current_counter = er32(reg); \ - if (current_counter < adapter->stats.last_##name) \ - adapter->stats.name += 0x100000000LL; \ - adapter->stats.last_##name = current_counter; \ - adapter->stats.name &= 0xFFFFFFFF00000000LL; \ - adapter->stats.name |= current_counter; \ - } +#define UPDATE_VF_COUNTER(reg, name) \ +{ \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ +} /** * igbvf_update_stats - Update the board statistics counters @@ -1772,8 +1777,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - /* - * Prevent stats update while adapter is being reset, link is down + /* Prevent stats update while adapter is being reset, link is down * or if the pci connection is down. */ if (adapter->link_speed == 0) @@ -1832,7 +1836,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) **/ static void igbvf_watchdog(unsigned long data) { - struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; + struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -1841,8 +1845,8 @@ static void igbvf_watchdog(unsigned long data) static void igbvf_watchdog_task(struct work_struct *work) { struct igbvf_adapter *adapter = container_of(work, - struct igbvf_adapter, - watchdog_task); + struct igbvf_adapter, + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -1855,8 +1859,8 @@ static void igbvf_watchdog_task(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); igbvf_print_link_info(adapter); netif_carrier_on(netdev); @@ -1876,10 +1880,9 @@ static void igbvf_watchdog_task(struct work_struct *work) igbvf_update_stats(adapter); } else { tx_pending = (igbvf_desc_unused(tx_ring) + 1 < - tx_ring->count); + tx_ring->count); if (tx_pending) { - /* - * We've lost link, so the controller stops DMA, + /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
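The UPDATE_VF_COUNTER macro reformatted above folds a 32-bit hardware register into a 64-bit software counter. The same arithmetic on plain locals, as a worked illustration:

static u64 fold_counter(u64 sw_total, u32 *last_raw, u32 current_counter)
{
	if (current_counter < *last_raw)	/* the 32-bit register wrapped */
		sw_total += 0x100000000ULL;
	*last_raw = current_counter;
	sw_total &= 0xFFFFFFFF00000000ULL;	/* keep the accumulated high half */
	sw_total |= current_counter;		/* splice in the fresh low 32 bits */
	return sw_total;
}
/* e.g. last_raw = 0xFFFFFFF0 and current = 0x10: the wrap adds 2^32 and the
 * low word becomes 0x10, so the 64-bit total keeps increasing monotonically
 */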
@@ -1898,15 +1901,15 @@ static void igbvf_watchdog_task(struct work_struct *work) round_jiffies(jiffies + (2 * HZ))); } -#define IGBVF_TX_FLAGS_CSUM 0x00000001 -#define IGBVF_TX_FLAGS_VLAN 0x00000002 -#define IGBVF_TX_FLAGS_TSO 0x00000004 -#define IGBVF_TX_FLAGS_IPV4 0x00000008 -#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 static int igbvf_tso(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, __be16 protocol) { @@ -1930,17 +1933,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter, if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + iph->daddr, 0, + IPPROTO_TCP, + 0); } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); } i = tx_ring->next_to_use; @@ -1984,7 +1988,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, } static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, __be16 protocol) { @@ -2005,8 +2009,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); if (skb->ip_summed == CHECKSUM_PARTIAL) info |= (skb_transport_header(skb) - - skb_network_header(skb)); - + skb_network_header(skb)); context_desc->vlan_macip_lens = cpu_to_le32(info); @@ -2055,6 +2058,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. 
+ */ smp_mb(); /* We need to check again just in case room has been made available */ @@ -2067,11 +2074,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) return 0; } -#define IGBVF_MAX_TXD_PWR 16 -#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb) { struct igbvf_buffer *buffer_info; @@ -2093,7 +2100,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { const struct skb_frag_struct *frag; @@ -2111,7 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, - DMA_TO_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; } @@ -2133,7 +2139,7 @@ dma_error: /* clear timestamp and dma mappings for remaining portion of packet */ while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -2144,10 +2150,10 @@ dma_error: } static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, int tx_flags, int count, unsigned int first, u32 paylen, - u8 hdr_len) + u8 hdr_len) { union e1000_adv_tx_desc *tx_desc = NULL; struct igbvf_buffer *buffer_info; @@ -2155,7 +2161,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, unsigned int i; cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | - E1000_ADVTXD_DCMD_DEXT); + E1000_ADVTXD_DCMD_DEXT); if (tx_flags & IGBVF_TX_FLAGS_VLAN) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; @@ -2182,7 +2188,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | buffer_info->length); + cpu_to_le32(cmd_type_len | buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; if (i == tx_ring->count) @@ -2193,14 +2199,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). 
+ */ wmb(); tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); /* we need this if more than one processor can write to our tail - * at a time, it syncronizes IO on IA64/Altix systems */ + * at a time, it synchronizes IO on IA64/Altix systems + */ mmiowb(); } @@ -2225,11 +2233,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, return NETDEV_TX_OK; } - /* - * need: count + 4 desc gap to keep tail from touching - * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, - * + 1 desc for context descriptor, + /* need: count + 4 desc gap to keep tail from touching + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, * head, otherwise try next time */ if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { @@ -2258,11 +2265,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && - (skb->ip_summed == CHECKSUM_PARTIAL)) + (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; - /* - * count reflects descriptors mapped, if 0 then mapping error + /* count reflects descriptors mapped, if 0 then mapping error * has occurred and we need to rewind the descriptor queue */ count = igbvf_tx_map_adv(adapter, tx_ring, skb); @@ -2313,6 +2319,7 @@ static void igbvf_tx_timeout(struct net_device *netdev) static void igbvf_reset_task(struct work_struct *work) { struct igbvf_adapter *adapter; + adapter = container_of(work, struct igbvf_adapter, reset_task); igbvf_reinit_locked(adapter); @@ -2356,14 +2363,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); /* igbvf_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; if (netif_running(netdev)) igbvf_down(adapter); - /* - * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. RXBUFFER_2048 --> size-4096 slab @@ -2382,15 +2388,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = PAGE_SIZE / 2; #endif - /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + - ETH_FCS_LEN; + ETH_FCS_LEN; dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -2477,8 +2482,7 @@ static void igbvf_shutdown(struct pci_dev *pdev) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs +/* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ @@ -2503,7 +2507,7 @@ static void igbvf_netpoll(struct net_device *netdev) * this device has been detected. 
*/ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2583,7 +2587,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) } static int igbvf_set_features(struct net_device *netdev, - netdev_features_t features) + netdev_features_t features) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2596,21 +2600,21 @@ static int igbvf_set_features(struct net_device *netdev, } static const struct net_device_ops igbvf_netdev_ops = { - .ndo_open = igbvf_open, - .ndo_stop = igbvf_close, - .ndo_start_xmit = igbvf_xmit_frame, - .ndo_get_stats = igbvf_get_stats, - .ndo_set_rx_mode = igbvf_set_multi, - .ndo_set_mac_address = igbvf_set_mac, - .ndo_change_mtu = igbvf_change_mtu, - .ndo_do_ioctl = igbvf_ioctl, - .ndo_tx_timeout = igbvf_tx_timeout, - .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, + .ndo_open = igbvf_open, + .ndo_stop = igbvf_close, + .ndo_start_xmit = igbvf_xmit_frame, + .ndo_get_stats = igbvf_get_stats, + .ndo_set_rx_mode = igbvf_set_multi, + .ndo_set_mac_address = igbvf_set_mac, + .ndo_change_mtu = igbvf_change_mtu, + .ndo_do_ioctl = igbvf_ioctl, + .ndo_tx_timeout = igbvf_tx_timeout, + .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igbvf_netpoll, + .ndo_poll_controller = igbvf_netpoll, #endif - .ndo_set_features = igbvf_set_features, + .ndo_set_features = igbvf_set_features, }; /** @@ -2645,8 +2649,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_dma; } } @@ -2686,7 +2690,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -EIO; adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + pci_resource_len(pdev, 0)); if (!adapter->hw.hw_addr) goto err_ioremap; @@ -2712,16 +2716,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->bd_number = cards_found++; netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; @@ -2742,7 +2746,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pdev->dev, "Error reading MAC address.\n"); else if (is_zero_ether_addr(adapter->hw.mac.addr)) - dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); } @@ -2751,11 +2756,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); memcpy(adapter->hw.mac.addr, netdev->dev_addr, - netdev->addr_len); + netdev->addr_len); } 
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, - (unsigned long) adapter); + (unsigned long)adapter); INIT_WORK(&adapter->reset_task, igbvf_reset_task); INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); @@ -2818,8 +2823,7 @@ static void igbvf_remove(struct pci_dev *pdev) struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* - * The watchdog timer may be rescheduled, so explicitly + /* The watchdog timer may be rescheduled, so explicitly * disable it from being rescheduled. */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -2832,9 +2836,8 @@ static void igbvf_remove(struct pci_dev *pdev) igbvf_reset_interrupt_capability(adapter); - /* - * it is important to delete the napi struct prior to freeing the - * rx ring so that you do not end up with null pointer refs + /* it is important to delete the NAPI struct prior to freeing the + * Rx ring so that you do not end up with null pointer refs */ netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); @@ -2866,17 +2869,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { - .name = igbvf_driver_name, - .id_table = igbvf_pci_tbl, - .probe = igbvf_probe, - .remove = igbvf_remove, + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = igbvf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = igbvf_suspend, - .resume = igbvf_resume, + .suspend = igbvf_suspend, + .resume = igbvf_resume, #endif - .shutdown = igbvf_shutdown, - .err_handler = &igbvf_err_handler + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler }; /** @@ -2888,6 +2891,7 @@ static struct pci_driver igbvf_driver = { static int __init igbvf_init_module(void) { int ret; + pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); pr_info("%s\n", igbvf_copyright); @@ -2909,7 +2913,6 @@ static void __exit igbvf_exit_module(void) } module_exit(igbvf_exit_module); - MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 7dc6341715dc..86a7c120b574 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -28,81 +27,81 @@ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -/* - * Convenience macros +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ + +/* Convenience macros * * Note: "_n" is the queue number of the register to be written to. * * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ - (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ - (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ - (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ - (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ - (0x0C010 + ((_n) * 0x40))) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ - (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ - (0x0C028 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ - (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ - (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ - (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ - (0x0E010 + ((_n) * 0x40))) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ - (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ - (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) /* Statistics registers */ -#define E1000_VFGPRC 0x00F10 -#define E1000_VFGORC 0x00F18 -#define E1000_VFMPRC 0x00F3C -#define E1000_VFGPTC 0x00F14 -#define E1000_VFGOTC 0x00F34 -#define E1000_VFGOTLBC 0x00F50 -#define E1000_VFGPTLBC 0x00F44 -#define E1000_VFGORLBC 0x00F48 -#define E1000_VFGPRLBC 0x00F40 +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 /* These act per VF so an array friendly macro is used */ -#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) /* Define macros for handling registers */ -#define er32(reg) readl(hw->hw_addr + E1000_##reg) -#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) #define array_er32(reg, offset) \ readl(hw->hw_addr + E1000_##reg + (offset << 2)) #define array_ew32(reg, offset, val) \ writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) -#define e1e_flush() er32(STATUS) +#define e1e_flush() er32(STATUS) #endif diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 955ad8c2c534..a13baa90ae20 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,17 +24,16 @@ *******************************************************************************/ - #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex); + u16 *duplex); static s32 e1000_init_hw_vf(struct e1000_hw *hw); static s32 e1000_reset_hw_vf(struct e1000_hw *hw); static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, - u32, u32, u32); + u32, u32, u32); static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); static s32 e1000_read_mac_addr_vf(struct e1000_hw *); static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); @@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw) * the status register's data which is often stale and inaccurate. 
**/ static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex) + u16 *duplex) { s32 status; @@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) u8 *addr = (u8 *)(&msgbuf[1]); u32 ctrl; - /* assert vf queue/interrupt reset */ + /* assert VF queue/interrupt reset */ ctrl = er32(CTRL); ew32(CTRL, ctrl | E1000_CTRL_RST); @@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* mailbox timeout can now become active */ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; - /* notify pf of vf reset completion */ + /* notify PF of VF reset completion */ msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); @@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) memcpy(hw->mac.perm_addr, addr, ETH_ALEN); else ret_val = -E1000_ERR_MAC_INIT; @@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; - /* - * The bit_shift is the number of left-shifts + /* The bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. */ while (hash_mask >> bit_shift != 0xFF) bit_shift++; hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); + (((u16)mc_addr[5]) << bit_shift))); return hash_value; } @@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) * unless there are workarounds that change this. **/ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 rar_used_count, u32 rar_count) + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[E1000_VFMAILBOX_SIZE]; @@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) * @addr: pointer to the receive address * @index: receive address array register **/ -static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; @@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) s32 ret_val = E1000_SUCCESS; u32 in_msg = 0; - /* - * We only want to run this if there has been a rst asserted. + /* We only want to run this if there has been a rst asserted. 
* in this case that could mean a link change, device reset, * or a virtual function reset */ @@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) if (!mac->get_link_status) goto out; - /* if link status is down no point in checking to see if pf is up */ + /* if link status is down no point in checking to see if PF is up */ if (!(er32(STATUS) & E1000_STATUS_LU)) goto out; /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ + * until we are called again and don't report an error + */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; /* if incoming message isn't clear to send we are waiting on response */ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { - /* message is not CTS and is NACK we must have lost CTS status */ + /* msg is not CTS and is NACK we must have lost CTS status */ if (in_msg & E1000_VT_MSGTYPE_NACK) ret_val = -E1000_ERR_MAC_INIT; goto out; } - /* the pf is talking, if we timed out in the past we reinit */ + /* the PF is talking, if we timed out in the past we reinit */ if (!mbx->timeout) { ret_val = -E1000_ERR_MAC_INIT; goto out; } /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ + * longer need to check for link + */ mac->get_link_status = false; out: diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 57db3c68dfcd..0f1eca639f68 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -38,30 +37,29 @@ struct e1000_hw; -#define E1000_DEV_ID_82576_VF 0x10CA -#define E1000_DEV_ID_I350_VF 0x1520 -#define E1000_REVISION_0 0 -#define E1000_REVISION_1 1 -#define E1000_REVISION_2 2 -#define E1000_REVISION_3 3 -#define E1000_REVISION_4 4 +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 -/* - * Receive Address Register Count +/* Receive Address Register Count * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * These entries are also used for MAC-based filtering. 
*/ -#define E1000_RAR_ENTRIES_VF 1 +#define E1000_RAR_ENTRIES_VF 1 /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { - u64 pkt_addr; /* Packet buffer address */ - u64 hdr_addr; /* Header buffer address */ + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ } read; struct { struct { @@ -69,53 +67,53 @@ union e1000_adv_rx_desc { u32 data; struct { u16 pkt_info; /* RSS/Packet type */ - u16 hdr_info; /* Split Header, - * hdr buffer length */ + /* Split Header, hdr buffer length */ + u16 hdr_info; } hs_rss; } lo_dword; union { - u32 rss; /* RSS Hash */ + u32 rss; /* RSS Hash */ struct { - u16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - u32 status_error; /* ext status/error */ - u16 length; /* Packet length */ - u16 vlan; /* VLAN tag */ + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { struct { - u64 buffer_addr; /* Address of descriptor's data buf */ + u64 buffer_addr; /* Address of descriptor's data buf */ u32 cmd_type_len; u32 olinfo_status; } read; struct { - u64 rsvd; /* Reserved */ + u64 rsvd; /* Reserved */ u32 nxtseq_seed; u32 status; } wb; }; /* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { @@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc { u32 mss_l4len_idx; }; -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define 
E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ enum e1000_mac_type { e1000_undefined = 0, @@ -262,5 +260,4 @@ struct e1000_hw { void e1000_rlpml_set_vf(struct e1000_hw *, u16); void e1000_init_function_pointers_vf(struct e1000_hw *hw); - #endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 70cc4c5c0a01..903664ff6904 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3924,7 +3924,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) for (i = 0; i < hw->mac.num_rar_entries; i++) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; } ixgbe_sync_mac_table(adapter); @@ -3992,7 +3992,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) adapter->mac_table[i].queue == queue) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; ixgbe_sync_mac_table(adapter); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4efb89..c59ed925adaf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1685,7 +1685,7 @@ int mlx4_en_start_port(struct net_device *dev) } /* Attach rx QP to bradcast address */ - memset(&mc_list[10], 0xff, ETH_ALEN); + eth_broadcast_addr(&mc_list[10]); mc_list[5] = priv->port; /* needed for B0 steering support */ if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, priv->port, 0, MLX4_PROT_ETH, @@ -1788,7 +1788,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) } /* Detach All multicasts */ - memset(&mc_list[10], 0xff, ETH_ALEN); + eth_broadcast_addr(&mc_list[10]); mc_list[5] = priv->port; /* needed for B0 steering support */ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, priv->broadcast_id); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index a61009f4b2df..b66e03d9711f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -66,7 +66,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); packet = (unsigned char *)skb_put(skb, packet_size); memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); - memset(ethh->h_source, 0, ETH_ALEN); + eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_set_mac_header(skb, 0); for (i = 0; i < packet_size; ++i) /* fill our packet */ diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 10988fbf47eb..6f332ebdf3b5 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4144,7 +4144,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) for (i = 0; i < hw->addr_list_size; i++) { if (ether_addr_equal(hw->address[i], mac_addr)) { - memset(hw->address[i], 0, ETH_ALEN); + eth_zero_addr(hw->address[i]); writel(0, hw->io 
+ ADD_ADDR_INCR * i + KS_ADD_ADDR_0_HI); return 0; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 716fc37ada5a..db80eb1c6d4f 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -537,7 +537,7 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev) u8 null_addr[ETH_ALEN]; int i; - memset(null_addr, 0, ETH_ALEN); + eth_zero_addr(null_addr); if (netdev->flags & IFF_PROMISC) { diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8011ef3e7707..25800a1dedcb 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -460,7 +460,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set) netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Set Mac addr %pM\n", addr); } else { - memset(zero_mac_addr, 0, ETH_ALEN); + eth_zero_addr(zero_mac_addr); addr = &zero_mac_addr[0]; netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Clearing MAC address\n"); diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 9fb6948e14c6..a5d1e6ea7d58 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -49,12 +49,12 @@ struct rocker_flow_tbl_key { enum rocker_of_dpa_table_id tbl_id; union { struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; enum rocker_of_dpa_table_id goto_tbl; } ig_port; struct { - u32 in_lport; + u32 in_pport; __be16 vlan_id; __be16 vlan_id_mask; enum rocker_of_dpa_table_id goto_tbl; @@ -62,8 +62,8 @@ struct rocker_flow_tbl_key { __be16 new_vlan_id; } vlan; struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; __be16 eth_type; u8 eth_dst[ETH_ALEN]; u8 eth_dst_mask[ETH_ALEN]; @@ -91,8 +91,8 @@ struct rocker_flow_tbl_key { bool copy_to_cpu; } bridge; struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; u8 eth_src[ETH_ALEN]; u8 eth_src_mask[ETH_ALEN]; u8 eth_dst[ETH_ALEN]; @@ -148,7 +148,7 @@ struct rocker_fdb_tbl_entry { u32 key_crc32; /* key */ bool learned; struct rocker_fdb_tbl_key { - u32 lport; + u32 pport; u8 addr[ETH_ALEN]; __be16 vlan_id; } key; @@ -200,7 +200,7 @@ struct rocker_port { struct net_device *bridge_dev; struct rocker *rocker; unsigned int port_number; - u32 lport; + u32 pport; __be16 internal_vlan_id; int stp_state; u32 brport_flags; @@ -789,7 +789,30 @@ static u32 __pos_inc(u32 pos, size_t limit) static int rocker_desc_err(struct rocker_desc_info *desc_info) { - return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN); + int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; + + switch (err) { + case ROCKER_OK: + return 0; + case -ROCKER_ENOENT: + return -ENOENT; + case -ROCKER_ENXIO: + return -ENXIO; + case -ROCKER_ENOMEM: + return -ENOMEM; + case -ROCKER_EEXIST: + return -EEXIST; + case -ROCKER_EINVAL: + return -EINVAL; + case -ROCKER_EMSGSIZE: + return -EMSGSIZE; + case -ROCKER_ENOTSUP: + return -EOPNOTSUPP; + case -ROCKER_ENOBUFS: + return -ENOBUFS; + } + + return -EINVAL; } static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) @@ -1257,9 +1280,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); if (enable) - val |= 1ULL << rocker_port->lport; + val |= 1ULL << rocker_port->pport; else - val &= ~(1ULL << 
rocker_port->lport); + val &= ~(1ULL << rocker_port->pport); rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); } @@ -1312,11 +1335,11 @@ static int rocker_event_link_change(struct rocker *rocker, struct rocker_port *rocker_port; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] || + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) return -EIO; port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1; + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); if (port_number >= rocker->port_count) @@ -1353,12 +1376,12 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker, __be16 vlan_id; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] || + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) return -EIO; port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1; + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); @@ -1517,8 +1540,8 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_info); return 0; @@ -1606,8 +1629,8 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, ethtool_cmd_speed(ecmd))) @@ -1637,8 +1660,8 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, ETH_ALEN, macaddr)) @@ -1661,8 +1684,8 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, !!(rocker_port->brport_flags & BR_LEARNING))) @@ -1715,11 +1738,11 @@ static int rocker_port_set_learning(struct rocker_port *rocker_port) static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.ig_port.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.ig_port.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.ig_port.goto_tbl)) @@ -1731,8 +1754,8 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.vlan.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) return -EMSGSIZE; if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->key.vlan.vlan_id)) @@ -1754,11 +1777,11 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.term_mac.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.term_mac.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.term_mac.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, entry->key.term_mac.eth_type)) @@ -1845,11 +1868,11 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.acl.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.acl.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, ETH_ALEN, entry->key.acl.eth_src)) @@ -1993,7 +2016,7 @@ static int rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT, + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, ROCKER_GROUP_PORT_GET(entry->group_id))) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, @@ -2311,7 +2334,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port, } static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, - int flags, u32 in_lport, u32 in_lport_mask, + int flags, u32 in_pport, u32 in_pport_mask, enum rocker_of_dpa_table_id goto_tbl) { struct rocker_flow_tbl_entry *entry; @@ -2322,15 +2345,15 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, entry->key.priority = ROCKER_PRIORITY_IG_PORT; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; - entry->key.ig_port.in_lport = in_lport; - entry->key.ig_port.in_lport_mask = in_lport_mask; + entry->key.ig_port.in_pport = in_pport; + 
entry->key.ig_port.in_pport_mask = in_pport_mask; entry->key.ig_port.goto_tbl = goto_tbl; return rocker_flow_tbl_do(rocker_port, flags, entry); } static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, - int flags, u32 in_lport, + int flags, u32 in_pport, __be16 vlan_id, __be16 vlan_id_mask, enum rocker_of_dpa_table_id goto_tbl, bool untagged, __be16 new_vlan_id) @@ -2343,7 +2366,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, entry->key.priority = ROCKER_PRIORITY_VLAN; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; - entry->key.vlan.in_lport = in_lport; + entry->key.vlan.in_pport = in_pport; entry->key.vlan.vlan_id = vlan_id; entry->key.vlan.vlan_id_mask = vlan_id_mask; entry->key.vlan.goto_tbl = goto_tbl; @@ -2355,7 +2378,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, } static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, - u32 in_lport, u32 in_lport_mask, + u32 in_pport, u32 in_pport_mask, __be16 eth_type, const u8 *eth_dst, const u8 *eth_dst_mask, __be16 vlan_id, __be16 vlan_id_mask, bool copy_to_cpu, @@ -2378,8 +2401,8 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, } entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - entry->key.term_mac.in_lport = in_lport; - entry->key.term_mac.in_lport_mask = in_lport_mask; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; entry->key.term_mac.eth_type = eth_type; ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); @@ -2445,8 +2468,8 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, } static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, - int flags, u32 in_lport, - u32 in_lport_mask, + int flags, u32 in_pport, + u32 in_pport_mask, const u8 *eth_src, const u8 *eth_src_mask, const u8 *eth_dst, const u8 *eth_dst_mask, __be16 eth_type, @@ -2472,8 +2495,8 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, entry->key.priority = priority; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - entry->key.acl.in_lport = in_lport; - entry->key.acl.in_lport_mask = in_lport_mask; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; if (eth_src) ether_addr_copy(entry->key.acl.eth_src, eth_src); @@ -2604,7 +2627,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port, static int rocker_group_l2_interface(struct rocker_port *rocker_port, int flags, __be16 vlan_id, - u32 out_lport, int pop_vlan) + u32 out_pport, int pop_vlan) { struct rocker_group_tbl_entry *entry; @@ -2612,7 +2635,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port, if (!entry) return -ENOMEM; - entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); entry->l2_interface.pop_vlan = pop_vlan; return rocker_group_tbl_do(rocker_port, flags, entry); @@ -2674,8 +2697,7 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, continue; if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { group_ids[group_count++] = - ROCKER_GROUP_L2_INTERFACE(vlan_id, - p->lport); + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); } } @@ -2700,7 +2722,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, struct rocker *rocker = rocker_port->rocker; struct rocker_port *p; bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - u32 out_lport; + u32 out_pport; int ref = 0; int err; int i; @@ 
-2711,14 +2733,14 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, if (rocker_port->stp_state == BR_STATE_LEARNING || rocker_port->stp_state == BR_STATE_FORWARDING) { - out_lport = rocker_port->lport; + out_pport = rocker_port->pport; err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for lport %d\n", - err, out_lport); + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); return err; } } @@ -2737,9 +2759,9 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, if ((!adding || ref != 1) && (adding || ref != 0)) return 0; - out_lport = 0; + out_pport = 0; err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, @@ -2799,9 +2821,9 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, int flags, struct rocker_ctrl *ctrl, __be16 vlan_id) { - u32 in_lport = rocker_port->lport; - u32 in_lport_mask = 0xffffffff; - u32 out_lport = 0; + u32 in_pport = rocker_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; u8 *eth_src = NULL; u8 *eth_src_mask = NULL; __be16 vlan_id_mask = htons(0xffff); @@ -2809,11 +2831,11 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, u8 ip_proto_mask = 0; u8 ip_tos = 0; u8 ip_tos_mask = 0; - u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); int err; err = rocker_flow_tbl_acl(rocker_port, flags, - in_lport, in_lport_mask, + in_pport, in_pport_mask, eth_src, eth_src_mask, ctrl->eth_dst, ctrl->eth_dst_mask, ctrl->eth_type, @@ -2856,7 +2878,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, int flags, struct rocker_ctrl *ctrl, __be16 vlan_id) { - u32 in_lport_mask = 0xffffffff; + u32 in_pport_mask = 0xffffffff; __be16 vlan_id_mask = htons(0xffff); int err; @@ -2864,7 +2886,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, vlan_id = rocker_port->internal_vlan_id; err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, ctrl->eth_type, ctrl->eth_dst, ctrl->eth_dst_mask, vlan_id, vlan_id_mask, ctrl->copy_to_cpu, @@ -2934,7 +2956,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, { enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - u32 in_lport = rocker_port->lport; + u32 in_pport = rocker_port->pport; __be16 vlan_id = htons(vid); __be16 vlan_id_mask = htons(0xffff); __be16 internal_vlan_id; @@ -2978,7 +3000,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, } err = rocker_flow_tbl_vlan(rocker_port, flags, - in_lport, vlan_id, vlan_id_mask, + in_pport, vlan_id, vlan_id_mask, goto_tbl, untagged, internal_vlan_id); if (err) netdev_err(rocker_port->dev, @@ -2990,20 +3012,20 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) { enum rocker_of_dpa_table_id goto_tbl; - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; int err; /* Normal Ethernet Frames. Matches pkts from any local physical * ports. Goto VLAN tbl. 
*/ - in_lport = 0; - in_lport_mask = 0xffff0000; + in_pport = 0; + in_pport_mask = 0xffff0000; goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; err = rocker_flow_tbl_ig_port(rocker_port, flags, - in_lport, in_lport_mask, + in_pport, in_pport_mask, goto_tbl); if (err) netdev_err(rocker_port->dev, @@ -3047,7 +3069,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, struct rocker_fdb_learn_work *lw; enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 out_lport = rocker_port->lport; + u32 out_pport = rocker_port->pport; u32 tunnel_id = 0; u32 group_id = ROCKER_GROUP_NONE; bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); @@ -3055,7 +3077,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, int err; if (rocker_port_is_bridged(rocker_port)) - group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); if (!(flags & ROCKER_OP_FLAG_REFRESH)) { err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, @@ -3114,7 +3136,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, return -ENOMEM; fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); - fdb->key.lport = rocker_port->lport; + fdb->key.pport = rocker_port->pport; ether_addr_copy(fdb->key.addr, addr); fdb->key.vlan_id = vlan_id; fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); @@ -3161,7 +3183,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port) spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.lport != rocker_port->lport) + if (found->key.pport != rocker_port->pport) continue; if (!found->learned) continue; @@ -3182,7 +3204,7 @@ err_out: static int rocker_port_router_mac(struct rocker_port *rocker_port, int flags, __be16 vlan_id) { - u32 in_lport_mask = 0xffffffff; + u32 in_pport_mask = 0xffffffff; __be16 eth_type; const u8 *dst_mac_mask = ff_mac; __be16 vlan_id_mask = htons(0xffff); @@ -3194,7 +3216,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, eth_type = htons(ETH_P_IP); err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, eth_type, rocker_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); @@ -3203,7 +3225,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, eth_type = htons(ETH_P_IPV6); err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, eth_type, rocker_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); @@ -3214,7 +3236,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, static int rocker_port_fwding(struct rocker_port *rocker_port) { bool pop_vlan; - u32 out_lport; + u32 out_pport; __be16 vlan_id; u16 vid; int flags = ROCKER_OP_FLAG_NOWAIT; @@ -3231,19 +3253,19 @@ static int rocker_port_fwding(struct rocker_port *rocker_port) rocker_port->stp_state != BR_STATE_FORWARDING) flags |= ROCKER_OP_FLAG_REMOVE; - out_lport = rocker_port->lport; + out_pport = rocker_port->pport; for (vid = 1; vid < VLAN_N_VID; vid++) { if (!test_bit(vid, rocker_port->vlan_bitmap)) continue; vlan_id = htons(vid); pop_vlan = rocker_vlan_id_is_internal(vlan_id); err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for lport %d\n", - err, out_lport); + 
"Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); return err; } } @@ -3302,6 +3324,26 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state) return rocker_port_fwding(rocker_port); } +static int rocker_port_fwd_enable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING); +} + +static int rocker_port_fwd_disable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); +} + static struct rocker_internal_vlan_tbl_entry * rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) { @@ -3394,8 +3436,6 @@ not_found: static int rocker_port_open(struct net_device *dev) { struct rocker_port *rocker_port = netdev_priv(dev); - u8 stp_state = rocker_port_is_bridged(rocker_port) ? - BR_STATE_BLOCKING : BR_STATE_FORWARDING; int err; err = rocker_port_dma_rings_init(rocker_port); @@ -3418,9 +3458,9 @@ static int rocker_port_open(struct net_device *dev) goto err_request_rx_irq; } - err = rocker_port_stp_update(rocker_port, stp_state); + err = rocker_port_fwd_enable(rocker_port); if (err) - goto err_stp_update; + goto err_fwd_enable; napi_enable(&rocker_port->napi_tx); napi_enable(&rocker_port->napi_rx); @@ -3428,7 +3468,7 @@ static int rocker_port_open(struct net_device *dev) netif_start_queue(dev); return 0; -err_stp_update: +err_fwd_enable: free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); err_request_rx_irq: free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); @@ -3445,7 +3485,7 @@ static int rocker_port_stop(struct net_device *dev) rocker_port_set_enable(rocker_port, false); napi_disable(&rocker_port->napi_rx); napi_disable(&rocker_port->napi_tx); - rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); + rocker_port_fwd_disable(rocker_port); free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); rocker_port_dma_rings_fini(rocker_port); @@ -3702,7 +3742,7 @@ static int rocker_port_fdb_dump(struct sk_buff *skb, spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.lport != rocker_port->lport) + if (found->key.pport != rocker_port->pport) continue; if (idx < cb->args[0]) goto skip; @@ -3882,8 +3922,8 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker, if (!cmd_stats) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, + rocker_port->pport)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_stats); @@ -3900,7 +3940,7 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; struct rocker_tlv *pattr; - u32 lport; + u32 pport; u64 *data = priv; int i; @@ -3912,11 +3952,11 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, attrs[ROCKER_TLV_CMD_INFO]); - if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]) + if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) return 
-EIO; - lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]); - if (lport != rocker_port->lport) + pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); + if (pport != rocker_port->pport) return -EIO; for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { @@ -4104,7 +4144,7 @@ static void rocker_carrier_init(struct rocker_port *rocker_port) u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); bool link_up; - link_up = link_status & (1 << rocker_port->lport); + link_up = link_status & (1 << rocker_port->pport); if (link_up) netif_carrier_on(rocker_port->dev); else @@ -4152,7 +4192,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port->dev = dev; rocker_port->rocker = rocker; rocker_port->port_number = port_number; - rocker_port->lport = port_number + 1; + rocker_port->pport = port_number + 1; rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; rocker_port_dev_addr_init(rocker, rocker_port); @@ -4436,9 +4476,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port, rocker_port->internal_vlan_id = rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - err = rocker_port_vlan(rocker_port, 0, 0); - - return err; + return rocker_port_vlan(rocker_port, 0, 0); } static int rocker_port_bridge_leave(struct rocker_port *rocker_port) @@ -4458,6 +4496,11 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); err = rocker_port_vlan(rocker_port, 0, 0); + if (err) + return err; + + if (rocker_port->dev->flags & IFF_UP) + err = rocker_port_fwd_enable(rocker_port); return err; } diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index a5bc432feada..0a94b7c300be 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -14,6 +14,19 @@ #include <linux/types.h> +/* Return codes */ +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + #define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 @@ -136,7 +149,7 @@ enum { enum { ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, - ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ @@ -151,7 +164,7 @@ enum { enum { ROCKER_TLV_CMD_PORT_STATS_UNSPEC, - ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ @@ -191,7 +204,7 @@ enum { enum { ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, - ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, @@ -201,7 +214,7 @@ enum { enum { ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, - ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ @@ -275,9 +288,9 @@ enum { ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ - ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */ - ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */ - 
ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ @@ -291,7 +304,7 @@ enum { ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ - ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 2965c6ae7d6e..41047c9143d0 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -843,7 +843,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) unsigned long flags; /* Initialise tx packet using broadcast destination address */ - memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); + eth_broadcast_addr(pdata->loopback_tx_pkt); /* Use incrementing source address */ for (i = 6; i < 12; i++) diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 22e0cad1b4b5..401abf7254d3 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1411,6 +1411,8 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(err < 0)) { netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; dev->stats.tx_carrier_errors++; goto out_dropped; } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a31a8c3c8e7c..9f14d8b515c7 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1320,7 +1320,7 @@ static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp, if (addr) ether_addr_copy(naddr->addr, addr); else - memset(naddr->addr, 0, ETH_ALEN); + eth_zero_addr(naddr->addr); list_add_tail(&naddr->node, &netcp->addr_list); return naddr; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 0a7f2e77557f..13214a6492ac 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1167,7 +1167,7 @@ static int gelic_wl_set_ap(struct net_device *netdev, } else { pr_debug("%s: clear bssid\n", __func__); clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); } spin_unlock_irqrestore(&wl->lock, irqflag); pr_debug("%s: ->\n", __func__); @@ -1189,7 +1189,7 @@ static int gelic_wl_get_ap(struct net_device *netdev, memcpy(data->ap_addr.sa_data, wl->active_bssid, ETH_ALEN); } else - memset(data->ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(data->ap_addr.sa_data); spin_unlock_irqrestore(&wl->lock, irqflag); mutex_unlock(&wl->assoc_stat_lock); diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 17e276651601..8fb807ea1caa 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for compile efficiency. - The compiler will convert <unsigned>'%'<2^N> into a bit mask. 
- Making the Tx ring too large decreases the effectiveness of channel - bonding and packet priority. - There are no ill effects from too-large receive rings. */ -#define TX_RING_SIZE 16 -#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ + * The compiler will convert <unsigned>'%'<2^N> into a bit mask. + * Making the Tx ring too large decreases the effectiveness of channel + * bonding and packet priority. + * With BQL support, we can increase TX ring safely. + * There are no ill effects from too-large receive rings. + */ +#define TX_RING_SIZE 64 +#define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */ #define RX_RING_SIZE 64 /* Operational parameters that usually are not changed. */ @@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev) } rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); + netdev_reset_queue(dev); } static void free_tbufs(struct net_device* dev) @@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, else rp->tx_ring[entry].tx_status = 0; + netdev_sent_queue(dev, skb->len); /* lock eth irq */ wmb(); rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); @@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev) struct rhine_private *rp = netdev_priv(dev); struct device *hwdev = dev->dev.parent; int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; + unsigned int pkts_compl = 0, bytes_compl = 0; + struct sk_buff *skb; /* find and cleanup dirty tx descriptors */ while (rp->dirty_tx != rp->cur_tx) { @@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev) entry, txstatus); if (txstatus & DescOwn) break; + skb = rp->tx_skbuff[entry]; if (txstatus & 0x8000) { netif_dbg(rp, tx_done, dev, "Transmit error, Tx status %08x\n", txstatus); @@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev) (txstatus >> 3) & 0xF, txstatus & 0xF); u64_stats_update_begin(&rp->tx_stats.syncp); - rp->tx_stats.bytes += rp->tx_skbuff[entry]->len; + rp->tx_stats.bytes += skb->len; rp->tx_stats.packets++; u64_stats_update_end(&rp->tx_stats.syncp); } @@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev) if (rp->tx_skbuff_dma[entry]) { dma_unmap_single(hwdev, rp->tx_skbuff_dma[entry], - rp->tx_skbuff[entry]->len, + skb->len, DMA_TO_DEVICE); } - dev_consume_skb_any(rp->tx_skbuff[entry]); + bytes_compl += skb->len; + pkts_compl++; + dev_consume_skb_any(skb); rp->tx_skbuff[entry] = NULL; entry = (++rp->dirty_tx) % TX_RING_SIZE; } + + netdev_completed_queue(dev, pkts_compl, bytes_compl); if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 9e16a2819d48..5138407941cf 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -954,7 +954,7 @@ static void eth_set_mcast_list(struct net_device *dev) return; } - memset(diffs, 0, ETH_ALEN); + eth_zero_addr(diffs); addr = NULL; netdev_for_each_mc_addr(ha, dev) { diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index daca0dee88f3..7c4a4151ef0f 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -247,6 +247,9 @@ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + spin_lock_bh(&sp->lock); /* We were not busy, so we are now... 
:-) */ netif_stop_queue(dev); @@ -284,18 +287,6 @@ static int sp_close(struct net_device *dev) return 0; } -/* Return the frame type ID */ -static int sp_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *daddr, - const void *saddr, unsigned len) -{ -#ifdef CONFIG_INET - if (type != ETH_P_AX25) - return ax25_hard_header(skb, dev, type, daddr, saddr, len); -#endif - return 0; -} - static int sp_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr_ax25 *sa = addr; @@ -309,20 +300,6 @@ static int sp_set_mac_address(struct net_device *dev, void *addr) return 0; } -static int sp_rebuild_header(struct sk_buff *skb) -{ -#ifdef CONFIG_INET - return ax25_rebuild_header(skb); -#else - return 0; -#endif -} - -static const struct header_ops sp_header_ops = { - .create = sp_header, - .rebuild = sp_rebuild_header, -}; - static const struct net_device_ops sp_netdev_ops = { .ndo_open = sp_open_dev, .ndo_stop = sp_close, @@ -337,7 +314,7 @@ static void sp_setup(struct net_device *dev) dev->destructor = free_netdev; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; - dev->header_ops = &sp_header_ops; + dev->header_ops = &ax25_header_ops; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index a98c153f371e..83c7cce0d172 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -772,6 +772,9 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (skb->data[0] != 0) { do_kiss_params(bc, skb->data, skb->len); dev_kfree_skb(skb); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index c2894e43840e..63ff08a26da8 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -251,6 +251,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *orig_dev; int size; + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + /* * Just to be *really* sure not to send anything if the interface * is down, the ethernet device may have gone. 
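The hamradio hunks above (6pack, baycom_epp, bpqether) and the dmascc, hdlcdrv, mkiss, scc and yam hunks that follow all make the same two-part change: IP frames are diverted to the shared ax25_ip_xmit() helper at the top of the driver's transmit handler, and the per-driver header_ops copies are dropped in favour of the common ax25_header_ops. A rough sketch of the resulting shape, with hypothetical example_* names standing in for the driver-specific parts (this is an illustration, not a hunk from this diff):

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <net/ax25.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* IP traffic takes the common AX.25 path before any driver-specific
	 * queueing; everything else continues through the KISS/SCC code.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	/* ... driver-specific transmit handling would follow here ... */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void example_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->header_ops = &ax25_header_ops;	/* shared ops replace the local copy */
}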
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0fad408f24aa..c3d377770616 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -920,6 +920,9 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) unsigned long flags; int i; + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + /* Temporarily stop the scheduler feeding us packets */ netif_stop_queue(dev); diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index c67a27245072..49fe59b180a8 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -404,6 +404,9 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb, { struct hdlcdrv_state *sm = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (skb->data[0] != 0) { do_kiss_params(sm, skb->data, skb->len); dev_kfree_skb(skb); diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index f990bb1c3e02..17058c490b79 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -529,6 +529,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev) { struct mkiss *ax = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (!netif_running(dev)) { printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name); return NETDEV_TX_BUSY; @@ -573,32 +576,6 @@ static int ax_open_dev(struct net_device *dev) return 0; } -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) - -/* Return the frame type ID */ -static int ax_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *daddr, - const void *saddr, unsigned len) -{ -#ifdef CONFIG_INET - if (type != ETH_P_AX25) - return ax25_hard_header(skb, dev, type, daddr, saddr, len); -#endif - return 0; -} - - -static int ax_rebuild_header(struct sk_buff *skb) -{ -#ifdef CONFIG_INET - return ax25_rebuild_header(skb); -#else - return 0; -#endif -} - -#endif /* CONFIG_{AX25,AX25_MODULE} */ - /* Open the low-level part of the AX25 channel. Easy! 
*/ static int ax_open(struct net_device *dev) { @@ -662,11 +639,6 @@ static int ax_close(struct net_device *dev) return 0; } -static const struct header_ops ax_header_ops = { - .create = ax_header, - .rebuild = ax_rebuild_header, -}; - static const struct net_device_ops ax_netdev_ops = { .ndo_open = ax_open_dev, .ndo_stop = ax_close, @@ -682,7 +654,7 @@ static void ax_setup(struct net_device *dev) dev->addr_len = 0; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; - dev->header_ops = &ax_header_ops; + dev->header_ops = &ax25_header_ops; dev->netdev_ops = &ax_netdev_ops; diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 57be9e0e98a6..ce88df33fe17 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -1639,6 +1639,9 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) unsigned long flags; char kisscmd; + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (skb->len > scc->stat.bufsize || skb->len < 2) { scc->dev_stat.tx_dropped++; /* bogus frame */ dev_kfree_skb(skb); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 717433cfb81d..1a4729c36aa4 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -597,6 +597,9 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb, { struct yam_port *yp = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + skb_queue_tail(&yp->send_queue, skb); dev->trans_start = jiffies; return NETDEV_TX_OK; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 384ca4f4de4a..4815843a6019 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -634,6 +634,7 @@ struct netvsc_device { struct vmbus_channel *chn_table[NR_CPUS]; u32 send_table[VRSS_SEND_TAB_SIZE]; + u32 max_chn; u32 num_chn; atomic_t queue_sends[NR_CPUS]; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 15d82eda0baf..a06bd6614007 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -687,6 +687,19 @@ static void netvsc_get_drvinfo(struct net_device *net, strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); } +static void netvsc_get_channels(struct net_device *net, + struct ethtool_channels *channel) +{ + struct net_device_context *net_device_ctx = netdev_priv(net); + struct hv_device *dev = net_device_ctx->device_ctx; + struct netvsc_device *nvdev = hv_get_drvdata(dev); + + if (nvdev) { + channel->max_combined = nvdev->max_chn; + channel->combined_count = nvdev->num_chn; + } +} + static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); @@ -760,6 +773,7 @@ static void netvsc_poll_controller(struct net_device *net) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, + .get_channels = netvsc_get_channels, }; static const struct net_device_ops device_ops = { diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 7816d98bdddc..ca81de04bc76 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1027,6 +1027,7 @@ int rndis_filter_device_add(struct hv_device *dev, /* Initialize the rndis device */ net_device = hv_get_drvdata(dev); + net_device->max_chn = 1; net_device->num_chn = 1; net_device->extension = rndis_device; @@ -1094,6 +1095,7 @@ int rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que 
< 2) goto out; + net_device->max_chn = rsscap.num_recv_que; net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ? num_online_cpus() : rsscap.num_recv_que; if (net_device->num_chn == 1) @@ -1140,8 +1142,10 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); out: - if (ret) + if (ret) { + net_device->max_chn = 1; net_device->num_chn = 1; + } return 0; /* return 0 because primary channel can be used alone */ err_dev_remv: diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7b051eacb7f1..1d438bc54189 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -46,8 +46,6 @@ struct at86rf2xx_chip_data { u16 t_off_to_tx_on; u16 t_frame; u16 t_p_ack; - /* completion timeout for tx in msecs */ - u16 t_tx_timeout; int rssi_base_val; int (*set_channel)(struct at86rf230_local *, u8, u8); @@ -689,7 +687,7 @@ at86rf230_sync_state_change_complete(void *context) static int at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state) { - int rc; + unsigned long rc; at86rf230_async_state_change(lp, &lp->state, state, at86rf230_sync_state_change_complete, @@ -1281,7 +1279,6 @@ static struct at86rf2xx_chip_data at86rf233_data = { .t_off_to_tx_on = 80, .t_frame = 4096, .t_p_ack = 545, - .t_tx_timeout = 2000, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, .get_desense_steps = at86rf23x_get_desens_steps @@ -1295,7 +1292,6 @@ static struct at86rf2xx_chip_data at86rf231_data = { .t_off_to_tx_on = 110, .t_frame = 4096, .t_p_ack = 545, - .t_tx_timeout = 2000, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, .get_desense_steps = at86rf23x_get_desens_steps @@ -1309,13 +1305,12 @@ static struct at86rf2xx_chip_data at86rf212_data = { .t_off_to_tx_on = 200, .t_frame = 4096, .t_p_ack = 545, - .t_tx_timeout = 2000, .rssi_base_val = -100, .set_channel = at86rf212_set_channel, .get_desense_steps = at86rf212_get_desens_steps }; -static int at86rf230_hw_init(struct at86rf230_local *lp) +static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim) { int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH; unsigned int dvdd; @@ -1326,7 +1321,12 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) return rc; irq_type = irq_get_trigger_type(lp->spi->irq); - if (irq_type == IRQ_TYPE_EDGE_FALLING) + if (irq_type == IRQ_TYPE_EDGE_RISING || + irq_type == IRQ_TYPE_EDGE_FALLING) + dev_warn(&lp->spi->dev, + "Using edge triggered irq's are not recommended!\n"); + if (irq_type == IRQ_TYPE_EDGE_FALLING || + irq_type == IRQ_TYPE_LEVEL_LOW) irq_pol = IRQ_ACTIVE_LOW; rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol); @@ -1341,6 +1341,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) if (rc) return rc; + /* reset values differs in at86rf231 and at86rf233 */ + rc = at86rf230_write_subreg(lp, SR_IRQ_MASK_MODE, 0); + if (rc) + return rc; + get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed)); rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]); if (rc) @@ -1362,6 +1367,45 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) usleep_range(lp->data->t_sleep_cycle, lp->data->t_sleep_cycle + 100); + /* xtal_trim value is calculated by: + * CL = 0.5 * (CX + CTRIM + CPAR) + * + * whereas: + * CL = capacitor of used crystal + * CX = connected capacitors at xtal pins + * CPAR = in all at86rf2xx datasheets this is a constant value 3 pF, + * but this is different on each board setup. 
You need to fine + * tuning this value via CTRIM. + * CTRIM = variable capacitor setting. Resolution is 0.3 pF range is + * 0 pF upto 4.5 pF. + * + * Examples: + * atben transceiver: + * + * CL = 8 pF + * CX = 12 pF + * CPAR = 3 pF (We assume the magic constant from datasheet) + * CTRIM = 0.9 pF + * + * (12+0.9+3)/2 = 7.95 which is nearly at 8 pF + * + * xtal_trim = 0x3 + * + * openlabs transceiver: + * + * CL = 16 pF + * CX = 22 pF + * CPAR = 3 pF (We assume the magic constant from datasheet) + * CTRIM = 4.5 pF + * + * (22+4.5+3)/2 = 14.75 which is the nearest value to 16 pF + * + * xtal_trim = 0xf + */ + rc = at86rf230_write_subreg(lp, SR_XTAL_TRIM, xtal_trim); + if (rc) + return rc; + rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd); if (rc) return rc; @@ -1377,24 +1421,30 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0); } -static struct at86rf230_platform_data * -at86rf230_get_pdata(struct spi_device *spi) +static int +at86rf230_get_pdata(struct spi_device *spi, int *rstn, int *slp_tr, + u8 *xtal_trim) { - struct at86rf230_platform_data *pdata; + struct at86rf230_platform_data *pdata = spi->dev.platform_data; + int ret; - if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) - return spi->dev.platform_data; + if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) { + if (!pdata) + return -ENOENT; - pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - goto done; + *rstn = pdata->rstn; + *slp_tr = pdata->slp_tr; + *xtal_trim = pdata->xtal_trim; + return 0; + } - pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); - pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); + *rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); + *slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); + ret = of_property_read_u8(spi->dev.of_node, "xtal-trim", xtal_trim); + if (ret < 0 && ret != -EINVAL) + return ret; - spi->dev.platform_data = pdata; -done: - return pdata; + return 0; } static int @@ -1501,43 +1551,43 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp) static int at86rf230_probe(struct spi_device *spi) { - struct at86rf230_platform_data *pdata; struct ieee802154_hw *hw; struct at86rf230_local *lp; unsigned int status; - int rc, irq_type; + int rc, irq_type, rstn, slp_tr; + u8 xtal_trim; if (!spi->irq) { dev_err(&spi->dev, "no IRQ specified\n"); return -EINVAL; } - pdata = at86rf230_get_pdata(spi); - if (!pdata) { - dev_err(&spi->dev, "no platform_data\n"); - return -EINVAL; + rc = at86rf230_get_pdata(spi, &rstn, &slp_tr, &xtal_trim); + if (rc < 0) { + dev_err(&spi->dev, "failed to parse platform_data: %d\n", rc); + return rc; } - if (gpio_is_valid(pdata->rstn)) { - rc = devm_gpio_request_one(&spi->dev, pdata->rstn, + if (gpio_is_valid(rstn)) { + rc = devm_gpio_request_one(&spi->dev, rstn, GPIOF_OUT_INIT_HIGH, "rstn"); if (rc) return rc; } - if (gpio_is_valid(pdata->slp_tr)) { - rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr, + if (gpio_is_valid(slp_tr)) { + rc = devm_gpio_request_one(&spi->dev, slp_tr, GPIOF_OUT_INIT_LOW, "slp_tr"); if (rc) return rc; } /* Reset */ - if (gpio_is_valid(pdata->rstn)) { + if (gpio_is_valid(rstn)) { udelay(1); - gpio_set_value(pdata->rstn, 0); + gpio_set_value(rstn, 0); udelay(1); - gpio_set_value(pdata->rstn, 1); + gpio_set_value(rstn, 1); usleep_range(120, 240); } @@ -1571,7 +1621,7 @@ static int at86rf230_probe(struct spi_device *spi) spi_set_drvdata(spi, lp); - rc = at86rf230_hw_init(lp); + rc = 
at86rf230_hw_init(lp, xtal_trim); if (rc) goto free_dev; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 4f4099d5603d..2950c3780230 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -336,7 +336,6 @@ static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev, static const struct header_ops ipvlan_header_ops = { .create = ipvlan_hard_header, - .rebuild = eth_rebuild_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 1df38bdae2ee..b5e3320ca506 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -550,7 +550,6 @@ static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, static const struct header_ops macvlan_hard_header_ops = { .create = macvlan_hard_header, - .rebuild = eth_rebuild_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 27ecc5c4fa26..8362aef0c15e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1130,16 +1130,15 @@ static const struct file_operations macvtap_fops = { #endif }; -static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len) +static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, + size_t total_len) { struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); } -static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, - int flags) +static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, + size_t total_len, int flags) { struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); int ret; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ba2f5e710af1..15731d1db918 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -47,6 +47,7 @@ #include <linux/netpoll.h> #include <linux/inet.h> #include <linux/configfs.h> +#include <linux/etherdevice.h> MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>"); MODULE_DESCRIPTION("Console driver for network interfaces"); @@ -185,7 +186,7 @@ static struct netconsole_target *alloc_param_target(char *target_config) nt->np.local_port = 6665; nt->np.remote_port = 6666; mutex_init(&nt->mutex); - memset(nt->np.remote_mac, 0xff, ETH_ALEN); + eth_broadcast_addr(nt->np.remote_mac); /* Parse parameters and setup netpoll */ err = netpoll_parse_options(&nt->np, target_config); @@ -604,7 +605,7 @@ static struct config_item *make_netconsole_target(struct config_group *group, nt->np.local_port = 6665; nt->np.remote_port = 6666; mutex_init(&nt->mutex); - memset(nt->np.remote_mac, 0xff, ETH_ALEN); + eth_broadcast_addr(nt->np.remote_mac); /* Initialize the config_item member */ config_item_init_type_name(&nt->item, name, &netconsole_target_type); diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index d2408a5e43a6..ff059e1d8ac6 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -455,6 +455,18 @@ out: return NET_RX_DROP; } +static void pppoe_unbind_sock_work(struct work_struct *work) +{ + struct pppox_sock *po = container_of(work, struct pppox_sock, + proto.pppoe.padt_work); + struct sock *sk = sk_pppox(po); + + lock_sock(sk); + pppox_unbind_sock(sk); + release_sock(sk); + sock_put(sk); +} + 
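The netconsole hunk above, and many of the hunks further down (catc, cdc_mbim, qmi_wwan, airo, atmel, hostap and the other wireless drivers), replace open-coded memset()s of MAC addresses with the <linux/etherdevice.h> helpers. As a minimal illustration of the substitution the series applies (the example_addr_helpers wrapper is made up; the helpers themselves are equivalent to the memset()s they replace, just clearer about intent):

#include <linux/etherdevice.h>

static void example_addr_helpers(u8 *mac)
{
	eth_broadcast_addr(mac);	/* was: memset(mac, 0xff, ETH_ALEN); */
	eth_zero_addr(mac);		/* was: memset(mac, 0x00, ETH_ALEN); */
}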
/************************************************************************ * * Receive a PPPoE Discovery frame. @@ -500,7 +512,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, } bh_unlock_sock(sk); - sock_put(sk); + if (!schedule_work(&po->proto.pppoe.padt_work)) + sock_put(sk); } abort: @@ -613,6 +626,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); + INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work); + error = -EINVAL; if (sp->sa_protocol != PX_PROTO_OE) goto end; @@ -820,8 +835,8 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, return err; } -static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len) +static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, + size_t total_len) { struct sk_buff *skb; struct sock *sk = sock->sk; @@ -962,8 +977,8 @@ static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, }; -static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, int flags) +static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, + size_t total_len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index f1ee71e22241..9d3366f7c9ad 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1935,6 +1935,9 @@ static netdev_features_t team_fix_features(struct net_device *dev, mask); } rcu_read_unlock(); + + features = netdev_add_tso_features(features, mask); + return features; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 857dca47bf80..b96b94cee760 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1448,8 +1448,7 @@ static void tun_sock_write_space(struct sock *sk) kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } -static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len) +static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { int ret; struct tun_file *tfile = container_of(sock, struct tun_file, socket); @@ -1464,8 +1463,7 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, return ret; } -static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, +static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 5c55f11572ba..724a9b50df7a 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); skb_put(skb, sizeof(padbytes)); } + + usbnet_set_skb_tx_stats(skb, 1); return skb; } diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 8cfc3bb0c6a6..4e2b26a88b15 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -641,7 +641,7 @@ static void catc_set_multicast_list(struct net_device *netdev) u8 broadcast[ETH_ALEN]; u8 rx = RxEnable | RxPolarity | RxMultiCast; - memset(broadcast, 0xff, ETH_ALEN); + eth_broadcast_addr(broadcast); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); @@ -880,7 +880,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id dev_dbg(dev, "Filling the multicast list.\n"); - memset(broadcast, 
0xff, ETH_ALEN); + eth_broadcast_addr(broadcast); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 96fc8a5bde84..e4b7a47a825c 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -394,7 +394,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_ skb_put(skb, ETH_HLEN); skb_reset_mac_header(skb); eth_hdr(skb)->h_proto = proto; - memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); + eth_zero_addr(eth_hdr(skb)->h_source); memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); /* add datagram */ diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 80a844e0ae03..70cbea551139 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1172,7 +1172,6 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) /* return skb */ ctx->tx_curr_skb = NULL; - dev->net->stats.tx_packets += ctx->tx_curr_frame_num; /* keep private stats: framing overhead and number of NTBs */ ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; @@ -1184,6 +1183,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) */ dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; + usbnet_set_skb_tx_stats(skb_out, n); + return skb_out; exit_no_skb: diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 778e91531fac..111d907e0c11 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1477,6 +1477,7 @@ static void tiocmget_intr_callback(struct urb *urb) struct uart_icount *icount; struct hso_serial_state_notification *serial_state_notification; struct usb_device *usb; + struct usb_interface *interface; int if_num; /* Sanity checks */ @@ -1494,7 +1495,9 @@ static void tiocmget_intr_callback(struct urb *urb) BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); usb = serial->parent->usb; - if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; + interface = serial->parent->interface; + + if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* wIndex should be the USB interface number of the port to which the * notification applies, which should always be the Modem port. 
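The hso hunks above stop reading the interface number through interface->altsetting, which is the array of every alternate setting the interface offers (so dereferencing it gives altsetting[0]), and use interface->cur_altsetting instead, which points at whichever setting is currently installed. A minimal sketch of the pattern, with a hypothetical example_if_num() helper:

#include <linux/usb.h>

static u8 example_if_num(struct usb_interface *interface)
{
	/* cur_altsetting tracks the active alternate setting; altsetting[0]
	 * is only correct when no other setting has been selected.
	 */
	return interface->cur_altsetting->desc.bInterfaceNumber;
}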
@@ -1675,6 +1678,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty, unsigned long flags; int if_num; struct hso_serial *serial = tty->driver_data; + struct usb_interface *interface; /* sanity check */ if (!serial) { @@ -1685,7 +1689,8 @@ static int hso_serial_tiocmset(struct tty_struct *tty, if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) return -EINVAL; - if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; + interface = serial->parent->interface; + if_num = interface->cur_altsetting->desc.bInterfaceNumber; spin_lock_irqsave(&serial->serial_lock, flags); if (set & TIOCM_RTS) @@ -2808,7 +2813,7 @@ static int hso_get_config_data(struct usb_interface *interface) { struct usb_device *usbdev = interface_to_usbdev(interface); u8 *config_data = kmalloc(17, GFP_KERNEL); - u32 if_num = interface->altsetting->desc.bInterfaceNumber; + u32 if_num = interface->cur_altsetting->desc.bInterfaceNumber; s32 result; if (!config_data) @@ -2886,7 +2891,7 @@ static int hso_probe(struct usb_interface *interface, return -ENODEV; } - if_num = interface->altsetting->desc.bInterfaceNumber; + if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* Get the interface/port specification from either driver_info or from * the device itself */ diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c index 8f37efd2d2fb..5714107533bb 100644 --- a/drivers/net/usb/lg-vl600.c +++ b/drivers/net/usb/lg-vl600.c @@ -201,7 +201,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb) &buf->data[sizeof(*ethhdr) + 0x12], ETH_ALEN); } else { - memset(ethhdr->h_source, 0, ETH_ALEN); + eth_zero_addr(ethhdr->h_source); memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN); /* Inbound IPv6 packets have an IPv4 ethertype (0x800) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 602dc6668c3a..f603f362504b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -108,7 +108,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_push(skb, ETH_HLEN); skb_reset_mac_header(skb); eth_hdr(skb)->h_proto = proto; - memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); + eth_zero_addr(eth_hdr(skb)->h_source); fix_dest: memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); return 1; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 438fc6bcaef1..5065538dd03b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -104,7 +104,8 @@ #define USB_TX_AGG 0xd40a #define USB_RX_BUF_TH 0xd40c #define USB_USB_TIMER 0xd428 -#define USB_RX_EARLY_AGG 0xd42c +#define USB_RX_EARLY_TIMEOUT 0xd42c +#define USB_RX_EARLY_SIZE 0xd42e #define USB_PM_CTRL_STATUS 0xd432 #define USB_TX_DMA 0xd434 #define USB_TOLERANCE 0xd490 @@ -349,10 +350,10 @@ /* USB_MISC_0 */ #define PCUT_STATUS 0x0001 -/* USB_RX_EARLY_AGG */ -#define EARLY_AGG_SUPPER 0x0e832981 -#define EARLY_AGG_HIGH 0x0e837a12 -#define EARLY_AGG_SLOW 0x0e83ffff +/* USB_RX_EARLY_TIMEOUT */ +#define COALESCE_SUPER 85000U +#define COALESCE_HIGH 250000U +#define COALESCE_SLOW 524280U /* USB_WDT11_CTRL */ #define TIMER11_EN 0x0001 @@ -606,6 +607,7 @@ struct r8152 { u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; + u32 coalesce; u16 ocp_base; u8 *intr_buff; u8 version; @@ -2142,28 +2144,19 @@ static int rtl8152_enable(struct r8152 *tp) return rtl_enable(tp); } -static void r8153_set_rx_agg(struct r8152 *tp) +static void r8153_set_rx_early_timeout(struct r8152 *tp) { - u8 speed; + u32 ocp_data = tp->coalesce / 8; - speed = rtl8152_get_speed(tp); - if 
(speed & _1000bps) { - if (tp->udev->speed == USB_SPEED_SUPER) { - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, - RX_THR_SUPPER); - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, - EARLY_AGG_SUPPER); - } else { - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, - RX_THR_HIGH); - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, - EARLY_AGG_HIGH); - } - } else { - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW); - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, - EARLY_AGG_SLOW); - } + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data); +} + +static void r8153_set_rx_early_size(struct r8152 *tp) +{ + u32 mtu = tp->netdev->mtu; + u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4; + + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); } static int rtl8153_enable(struct r8152 *tp) @@ -2173,7 +2166,8 @@ static int rtl8153_enable(struct r8152 *tp) set_tx_qlen(tp); rtl_set_eee_plus(tp); - r8153_set_rx_agg(tp); + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); return rtl_enable(tp); } @@ -3719,6 +3713,61 @@ out: return ret; } +static int rtl8152_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct r8152 *tp = netdev_priv(netdev); + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + return -EOPNOTSUPP; + default: + break; + } + + coalesce->rx_coalesce_usecs = tp->coalesce; + + return 0; +} + +static int rtl8152_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct r8152 *tp = netdev_priv(netdev); + int ret; + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + return -EOPNOTSUPP; + default: + break; + } + + if (coalesce->rx_coalesce_usecs > COALESCE_SLOW) + return -EINVAL; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + return ret; + + mutex_lock(&tp->control); + + if (tp->coalesce != coalesce->rx_coalesce_usecs) { + tp->coalesce = coalesce->rx_coalesce_usecs; + + if (netif_running(tp->netdev) && netif_carrier_ok(netdev)) + r8153_set_rx_early_timeout(tp); + } + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + + return ret; +} + static struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, .get_settings = rtl8152_get_settings, @@ -3732,6 +3781,8 @@ static struct ethtool_ops ops = { .get_strings = rtl8152_get_strings, .get_sset_count = rtl8152_get_sset_count, .get_ethtool_stats = rtl8152_get_ethtool_stats, + .get_coalesce = rtl8152_get_coalesce, + .set_coalesce = rtl8152_set_coalesce, .get_eee = rtl_ethtool_get_eee, .set_eee = rtl_ethtool_set_eee, }; @@ -3783,6 +3834,7 @@ out: static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) { struct r8152 *tp = netdev_priv(dev); + int ret; switch (tp->version) { case RTL_VER_01: @@ -3795,9 +3847,22 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU) return -EINVAL; + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + return ret; + + mutex_lock(&tp->control); + dev->mtu = new_mtu; - return 0; + if (netif_running(dev) && netif_carrier_ok(dev)) + r8153_set_rx_early_size(tp); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + + return ret; } static const struct net_device_ops rtl8152_netdev_ops = { @@ -3966,6 +4031,18 @@ static int rtl8152_probe(struct usb_interface *intf, tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID; + switch (udev->speed) { + case USB_SPEED_SUPER: + tp->coalesce = COALESCE_SUPER; + break; + case 
USB_SPEED_HIGH: + tp->coalesce = COALESCE_HIGH; + break; + default: + tp->coalesce = COALESCE_SLOW; + break; + } + intf->needs_remote_wakeup = 1; tp->rtl_ops.init(tp); diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index b94a0fbb8b3b..7650cdc8fe6b 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, skb_put(skb, sizeof(padbytes)); } + usbnet_set_skb_tx_stats(skb, 1); return skb; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 449835f4331e..0f3ff285f6a1 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb) struct usbnet *dev = entry->dev; if (urb->status == 0) { - if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) - dev->net->stats.tx_packets++; + dev->net->stats.tx_packets += entry->packets; dev->net->stats.tx_bytes += entry->length; } else { dev->net->stats.tx_errors++; @@ -1348,6 +1347,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, urb->transfer_flags |= URB_ZERO_PACKET; } entry->length = urb->transfer_buffer_length = length; + if (!(info->flags & FLAG_MULTI_PACKET)) + usbnet_set_skb_tx_stats(skb, 1); spin_lock_irqsave(&dev->txq.lock, flags); retval = usb_autopm_get_interface_async(dev->intf); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 294214c15292..61c0840c448c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -819,6 +819,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { struct Vmxnet3_TxDataDesc *tdd; + u8 protocol = 0; if (ctx->mss) { /* TSO */ ctx->eth_ip_hdr_size = skb_transport_offset(skb); @@ -831,16 +832,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, if (ctx->ipv4) { const struct iphdr *iph = ip_hdr(skb); - if (iph->protocol == IPPROTO_TCP) - ctx->l4_hdr_size = tcp_hdrlen(skb); - else if (iph->protocol == IPPROTO_UDP) - ctx->l4_hdr_size = sizeof(struct udphdr); - else - ctx->l4_hdr_size = 0; - } else { - /* for simplicity, don't copy L4 headers */ + protocol = iph->protocol; + } else if (ctx->ipv6) { + const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + + protocol = ipv6h->nexthdr; + } + + switch (protocol) { + case IPPROTO_TCP: + ctx->l4_hdr_size = tcp_hdrlen(skb); + break; + case IPPROTO_UDP: + ctx->l4_hdr_size = sizeof(struct udphdr); + break; + default: ctx->l4_hdr_size = 0; + break; } + ctx->copy_size = min(ctx->eth_ip_hdr_size + ctx->l4_hdr_size, skb->len); } else { @@ -887,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb, iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); - } else { + } else if (ctx->ipv6) { struct ipv6hdr *iph = ipv6_hdr(skb); tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, @@ -938,6 +948,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, count = txd_estimate(skb); ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); + ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6)); ctx.mss = skb_shinfo(skb)->gso_size; if (ctx.mss) { diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index cd71c77f78f2..6bb769ae7de9 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.3.4.0-k" +#define 
VMXNET3_DRIVER_VERSION_STRING "1.3.5.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01030400 +#define VMXNET3_DRIVER_VERSION_NUM 0x01030500 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ @@ -211,6 +211,7 @@ struct vmxnet3_tq_driver_stats { struct vmxnet3_tx_ctx { bool ipv4; + bool ipv6; u16 mss; u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum * offloading diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index e71a2ce7a448..627443283e1d 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -2676,7 +2676,7 @@ static void wifi_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; dev->tx_queue_len = 100; - memset(dev->broadcast,0xFF, ETH_ALEN); + eth_broadcast_addr(dev->broadcast); dev->flags = IFF_BROADCAST|IFF_MULTICAST; } @@ -3273,7 +3273,7 @@ static void airo_handle_link(struct airo_info *ai) } /* Send event to user space */ - memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL); } diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index da92bfa76b7c..49219c508963 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -1166,7 +1166,7 @@ static int at76_start_monitor(struct at76_priv *priv) int ret; memset(&scan, 0, sizeof(struct at76_req_scan)); - memset(scan.bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(scan.bssid); scan.channel = priv->channel; scan.scan_type = SCAN_TYPE_PASSIVE; @@ -1427,7 +1427,7 @@ static int at76_startup_device(struct at76_priv *priv) at76_wait_completion(priv, CMD_STARTUP); /* remove BSSID from previous run */ - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); priv->scanning = false; @@ -1973,7 +1973,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw, ieee80211_stop_queues(hw); memset(&scan, 0, sizeof(struct at76_req_scan)); - memset(scan.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(scan.bssid); if (req->n_ssids) { scan.scan_type = SCAN_TYPE_ACTIVE; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index d6d2f0f00caa..6c364bb98924 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1182,7 +1182,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, if (is_zero_ether_addr(arvif->bssid)) return; - memset(arvif->bssid, 0, ETH_ALEN); + eth_zero_addr(arvif->bssid); return; } diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index bc9cb356fa69..57a80e89822d 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -528,7 +528,7 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah, * together with the BSSID mask when matching addresses. 
*/ iter_data.hw_macaddr = common->macaddr; - memset(&iter_data.mask, 0xff, ETH_ALEN); + eth_broadcast_addr(iter_data.mask); iter_data.found_active = false; iter_data.need_set_hw_addr = true; iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 85da63a67faf..e2978037d858 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -2033,7 +2033,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif) int ret; /* Setup unicast pkt pattern */ - memset(mac_mask, 0xff, ETH_ALEN); + eth_broadcast_addr(mac_mask); ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx, WOW_LIST_ID, ETH_ALEN, 0, ndev->dev_addr, diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index b42ba46b5030..1af3fed5a72c 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -105,7 +105,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i) memset(&ar->ap_stats.sta[sta->aid - 1], 0, sizeof(struct wmi_per_sta_stat)); - memset(sta->mac, 0, ETH_ALEN); + eth_zero_addr(sta->mac); memset(sta->wpa_ie, 0, ATH6KL_MAX_IE); sta->aid = 0; sta->sta_flags = 0; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 92d5a6c5a225..564923c0df87 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -149,7 +149,7 @@ static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, * when matching addresses. */ iter_data.hw_macaddr = NULL; - memset(&iter_data.mask, 0xff, ETH_ALEN); + eth_broadcast_addr(iter_data.mask); if (vif) ath9k_htc_bssid_iter(&iter_data, vif->addr, vif); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9ede991b8d76..93ed99a72542 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -994,7 +994,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, * BSSID mask when matching addresses. 
*/ memset(iter_data, 0, sizeof(*iter_data)); - memset(&iter_data->mask, 0xff, ETH_ALEN); + eth_broadcast_addr(iter_data->mask); iter_data->slottime = ATH9K_SLOT_TIME_9; list_for_each_entry(avp, &ctx->vifs, list) @@ -1139,7 +1139,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ctx->primary_sta = iter_data.primary_sta; } else { ctx->primary_sta = NULL; - memset(common->curbssid, 0, ETH_ALEN); + eth_zero_addr(common->curbssid); common->curaid = 0; ath9k_hw_write_associd(sc->sc_ah); if (ath9k_hw_mci_is_enabled(sc->sc_ah)) diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 55db9f03eb2a..6a1f03c271c1 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1004,7 +1004,7 @@ static void frag_rx_path(struct atmel_private *priv, atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); if ((crc ^ 0xffffffff) != netcrc) { priv->dev->stats.rx_crc_errors++; - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); } } @@ -1022,7 +1022,7 @@ static void frag_rx_path(struct atmel_private *priv, atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); if ((crc ^ 0xffffffff) != netcrc) { priv->dev->stats.rx_crc_errors++; - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); more_frags = 1; /* don't send broken assembly */ } } @@ -1031,7 +1031,7 @@ static void frag_rx_path(struct atmel_private *priv, priv->frag_no++; if (!more_frags) { /* last one */ - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); if (!(skb = dev_alloc_skb(priv->frag_len + 14))) { priv->dev->stats.rx_dropped++; } else { @@ -1127,7 +1127,7 @@ static void rx_done_irq(struct atmel_private *priv) atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); /* we use the same buffer for frag reassembly and control packets */ - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); if (priv->do_rx_crc) { /* last 4 octets is crc */ @@ -1379,7 +1379,7 @@ static int atmel_close(struct net_device *dev) wrqu.data.length = 0; wrqu.data.flags = 0; wrqu.ap_addr.sa_family = ARPHRD_ETHER; - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); } @@ -1555,7 +1555,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, priv->last_qual = jiffies; priv->last_beacon_timestamp = 0; memset(priv->frag_source, 0xff, sizeof(priv->frag_source)); - memset(priv->BSSID, 0, ETH_ALEN); + eth_zero_addr(priv->BSSID); priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... 
*/ priv->station_was_associated = 0; @@ -2760,7 +2760,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid) u8 SSID_size; } cmd; - memset(cmd.BSSID, 0xff, ETH_ALEN); + eth_broadcast_addr(cmd.BSSID); if (priv->fast_scan) { cmd.SSID_size = priv->SSID_size; @@ -4049,7 +4049,7 @@ static int reset_atmel_card(struct net_device *dev) wrqu.data.length = 0; wrqu.data.flags = 0; wrqu.ap_addr.sa_family = ARPHRD_ETHER; - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); } diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ccbdb05b28cd..31c7e4d41a9a 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -4132,7 +4132,7 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw, if (conf->bssid) memcpy(wl->bssid, conf->bssid, ETH_ALEN); else - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); } if (b43_status(dev) >= B43_STAT_INITIALIZED) { @@ -5051,7 +5051,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw, wl->operating = false; b43_adjust_opmode(dev); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->mac_addr); b43_upload_card_macaddress(dev); mutex_unlock(&wl->mutex); @@ -5067,8 +5067,8 @@ static int b43_op_start(struct ieee80211_hw *hw) /* Kill all old instance specific information to make sure * the card won't use it in the short timeframe between start * and mac80211 reconfiguring it. */ - memset(wl->bssid, 0, ETH_ALEN); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); + eth_zero_addr(wl->mac_addr); wl->filter_flags = 0; wl->radiotap_enabled = false; b43_qos_clear(wl); diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 4e58c0069830..c77b7f59505c 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -2866,7 +2866,7 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw, if (conf->bssid) memcpy(wl->bssid, conf->bssid, ETH_ALEN); else - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); } if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { @@ -3470,7 +3470,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, spin_lock_irqsave(&wl->irq_lock, flags); b43legacy_adjust_opmode(dev); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->mac_addr); b43legacy_upload_card_macaddress(dev); spin_unlock_irqrestore(&wl->irq_lock, flags); @@ -3487,8 +3487,8 @@ static int b43legacy_op_start(struct ieee80211_hw *hw) /* Kill all old instance specific information to make sure * the card won't use it in the short timeframe between start * and mac80211 reconfiguring it. 
*/ - memset(wl->bssid, 0, ETH_ALEN); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); + eth_zero_addr(wl->mac_addr); wl->filter_flags = 0; wl->beacon0_uploaded = false; wl->beacon1_uploaded = false; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index b59b8c6c42ab..06727a61b438 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -700,7 +700,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, /* Do a scan abort to stop the driver's scan engine */ brcmf_dbg(SCAN, "ABORT scan in firmware\n"); memset(¶ms_le, 0, sizeof(params_le)); - memset(params_le.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(params_le.bssid); params_le.bss_type = DOT11_BSSTYPE_ANY; params_le.scan_type = 0; params_le.channel_num = cpu_to_le32(1); @@ -866,7 +866,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, char *ptr; struct brcmf_ssid_le ssid_le; - memset(params_le->bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(params_le->bssid); params_le->bss_type = DOT11_BSSTYPE_ANY; params_le->scan_type = 0; params_le->channel_num = 0; @@ -1375,8 +1375,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, BRCMF_ASSOC_PARAMS_FIXED_SIZE; memcpy(profile->bssid, params->bssid, ETH_ALEN); } else { - memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); - memset(profile->bssid, 0, ETH_ALEN); + eth_broadcast_addr(join_params.params_le.bssid); + eth_zero_addr(profile->bssid); } /* Channel */ @@ -1850,7 +1850,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, if (sme->bssid) memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN); else - memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(ext_join_params->assoc_le.bssid); if (cfg->channel) { ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1); @@ -1895,7 +1895,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, if (sme->bssid) memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN); else - memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(join_params.params_le.bssid); if (cfg->channel) { join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index 910fbb561469..eb1325371d3a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c @@ -236,7 +236,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) brcmf_flowring_block(flow, flowid, false); hash_idx = ring->hash_id; flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; - memset(flow->hash[hash_idx].mac, 0, ETH_ALEN); + eth_zero_addr(flow->hash[hash_idx].mac); flow->rings[flowid] = NULL; skb = skb_dequeue(&ring->skblist); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index effb48ebd864..98d82ec52de1 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -697,7 +697,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, else sparams->scan_type = 1; - memset(&sparams->bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(sparams->bssid); if (ssid.SSID_len) memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len); sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); diff --git 
a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 4a47c7f8a246..89bc18cd6700 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c @@ -293,7 +293,7 @@ void cw1200_remove_interface(struct ieee80211_hw *dev, } priv->vif = NULL; priv->mode = NL80211_IFTYPE_MONITOR; - memset(priv->mac_addr, 0, ETH_ALEN); + eth_zero_addr(priv->mac_addr); memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); cw1200_free_keys(priv); cw1200_setup_mac(priv); diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c index 0bd541175ecd..d28bd49cb5fd 100644 --- a/drivers/net/wireless/cw1200/txrx.c +++ b/drivers/net/wireless/cw1200/txrx.c @@ -1429,7 +1429,7 @@ void cw1200_link_id_gc_work(struct work_struct *work) priv->link_id_map &= ~mask; priv->sta_asleep_mask &= ~mask; priv->pspoll_mask &= ~mask; - memset(map_link.mac_addr, 0, ETH_ALEN); + eth_zero_addr(map_link.mac_addr); spin_unlock_bh(&priv->ps_state_lock); reset.link_id = i + 1; wsm_reset(priv, &reset); diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 8bde77689469..055e11d353ca 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -174,8 +174,8 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb, /* send broadcast and multicast frames to broadcast RA, if * configured; otherwise, use unicast RA of the WDS link */ if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) && - skb->data[0] & 0x01) - memset(&hdr.addr1, 0xff, ETH_ALEN); + is_multicast_ether_addr(skb->data)) + eth_broadcast_addr(hdr.addr1); else if (iface->type == HOSTAP_INTERFACE_WDS) memcpy(&hdr.addr1, iface->u.wds.remote_addr, ETH_ALEN); diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index fd8d83dd4f62..c995ace153ee 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -309,7 +309,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap, int i; PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name); - memset(addr, 0xff, ETH_ALEN); + eth_broadcast_addr(addr); resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID); @@ -1015,8 +1015,8 @@ static void prism2_send_mgmt(struct net_device *dev, memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */ } else if (ieee80211_is_ctl(hdr->frame_control)) { /* control:ACK does not have addr2 or addr3 */ - memset(hdr->addr2, 0, ETH_ALEN); - memset(hdr->addr3, 0, ETH_ALEN); + eth_zero_addr(hdr->addr2); + eth_zero_addr(hdr->addr3); } else { memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */ memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */ @@ -1601,7 +1601,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, memcpy(prev_ap, pos, ETH_ALEN); pos++; pos++; pos++; left -= 6; } else - memset(prev_ap, 0, ETH_ALEN); + eth_zero_addr(prev_ap); if (left >= 2) { unsigned int ileft; diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index de7c4ffec309..7635ac4f6679 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c @@ -442,7 +442,7 @@ static void handle_info_queue_linkstatus(local_info_t *local) } else { netif_carrier_off(local->dev); netif_carrier_off(local->ddev); - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); } wrqu.ap_addr.sa_family = ARPHRD_ETHER; diff --git 
a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 52919ad42726..01de1a3bf94e 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -224,7 +224,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr, if (selected) { if (do_not_remove) - memset(selected->u.wds.remote_addr, 0, ETH_ALEN); + eth_zero_addr(selected->u.wds.remote_addr); else { hostap_remove_interface(selected->dev, rtnl_locked, 0); local->wds_connections--; @@ -798,7 +798,6 @@ static void prism2_tx_timeout(struct net_device *dev) const struct header_ops hostap_80211_ops = { .create = eth_header, - .rebuild = eth_rebuild_header, .cache = eth_header_cache, .cache_update = eth_header_cache_update, .parse = hostap_80211_header_parse, @@ -1088,7 +1087,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason) ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH, (u8 *) &val, 2); - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL); return ret; } diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index 57904015380f..ca25283e1c92 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h @@ -4,6 +4,7 @@ #include <linux/interrupt.h> #include <linux/wireless.h> #include <linux/netdevice.h> +#include <linux/etherdevice.h> #include <linux/mutex.h> #include <net/iw_handler.h> #include <net/ieee80211_radiotap.h> @@ -85,16 +86,16 @@ struct hfa384x_rx_frame { /* 802.11 */ __le16 frame_control; __le16 duration_id; - u8 addr1[6]; - u8 addr2[6]; - u8 addr3[6]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; __le16 seq_ctrl; - u8 addr4[6]; + u8 addr4[ETH_ALEN]; __le16 data_len; /* 802.3 */ - u8 dst_addr[6]; - u8 src_addr[6]; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; __be16 len; /* followed by frame data; max 2304 bytes */ @@ -114,16 +115,16 @@ struct hfa384x_tx_frame { /* 802.11 */ __le16 frame_control; /* parts not used */ __le16 duration_id; - u8 addr1[6]; - u8 addr2[6]; /* filled by firmware */ - u8 addr3[6]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; /* filled by firmware */ + u8 addr3[ETH_ALEN]; __le16 seq_ctrl; /* filled by firmware */ - u8 addr4[6]; + u8 addr4[ETH_ALEN]; __le16 data_len; /* 802.3 */ - u8 dst_addr[6]; - u8 src_addr[6]; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; __be16 len; /* followed by frame data; max 2304 bytes */ @@ -156,7 +157,7 @@ struct hfa384x_hostscan_request { } __packed; struct hfa384x_join_request { - u8 bssid[6]; + u8 bssid[ETH_ALEN]; __le16 channel; } __packed; @@ -228,7 +229,7 @@ struct hfa384x_scan_result { __le16 chid; __le16 anl; __le16 sl; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; __le16 beacon_interval; __le16 capability; __le16 ssid_len; @@ -241,7 +242,7 @@ struct hfa384x_hostscan_result { __le16 chid; __le16 anl; __le16 sl; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; __le16 beacon_interval; __le16 capability; __le16 ssid_len; @@ -824,7 +825,7 @@ struct local_info { #define PRISM2_INFO_PENDING_SCANRESULTS 1 int prev_link_status; /* previous received LinkStatus info */ int prev_linkstatus_connected; - u8 preferred_ap[6]; /* use this AP if possible */ + u8 preferred_ap[ETH_ALEN]; /* use this AP if possible */ #ifdef PRISM2_CALLBACK void *callback_data; /* Can be used in callbacks; e.g., allocate diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c 
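The memset()-to-helper conversions running through these hunks all lean on <linux/etherdevice.h>: eth_zero_addr() writes 00:00:00:00:00:00, eth_broadcast_addr() writes ff:ff:ff:ff:ff:ff, and is_multicast_ether_addr() tests the group bit that the hostap change above used to open-code as skb->data[0] & 0x01. A minimal stand-alone sketch of those semantics (user-space re-creations for illustration, not the kernel definitions):

/*
 * Sketch of the <linux/etherdevice.h> address helpers used in the
 * conversions in this series: all-zero fill, all-ones fill, and the
 * group bit in the first octet. Re-created here for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* bytes in an Ethernet (MAC) address */

static void eth_zero_addr(uint8_t *addr)
{
	memset(addr, 0x00, ETH_ALEN);	/* 00:00:00:00:00:00 */
}

static void eth_broadcast_addr(uint8_t *addr)
{
	memset(addr, 0xff, ETH_ALEN);	/* ff:ff:ff:ff:ff:ff */
}

static bool is_multicast_ether_addr(const uint8_t *addr)
{
	return addr[0] & 0x01;	/* group bit; also set for broadcast */
}

int main(void)
{
	uint8_t bssid[ETH_ALEN];

	eth_broadcast_addr(bssid);	/* wildcard BSSID, e.g. for a scan */
	printf("broadcast? %d  multicast bit? %d\n",
	       memcmp(bssid, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) == 0,
	       is_multicast_ether_addr(bssid));

	eth_zero_addr(bssid);		/* "no BSSID", e.g. after disconnect */
	printf("first octet after zeroing: %02x\n", bssid[0]);
	return 0;
}

Because the helpers take the address buffer directly, the converted call sites also drop the explicit ETH_ALEN length argument and the stray & that some of the old memset() calls applied to array fields.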
index 6fabea0309dd..08eb229e7816 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -2147,8 +2147,8 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) return; } - memset(priv->bssid, 0, ETH_ALEN); - memset(priv->ieee->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); + eth_zero_addr(priv->ieee->bssid); netif_carrier_off(priv->net_dev); netif_stop_queue(priv->net_dev); @@ -6956,7 +6956,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev, wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); } else - memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu->ap_addr.sa_data); IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); return 0; @@ -8300,7 +8300,7 @@ static void ipw2100_wx_event_work(struct work_struct *work) priv->status & STATUS_RF_KILL_MASK || ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &priv->bssid, &len)) { - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); } else { /* We now have the BSSID, so can finish setting to the full * associated state */ diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 67cad9b05ad8..39f3e6f5cbcd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -1964,7 +1964,7 @@ static void notify_wx_assoc_event(struct ipw_priv *priv) if (priv->status & STATUS_ASSOCIATED) memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); else - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); } @@ -7400,7 +7400,7 @@ static int ipw_associate_network(struct ipw_priv *priv, memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); if (priv->ieee->iw_mode == IW_MODE_ADHOC) { - memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); + eth_broadcast_addr(priv->assoc_request.dest); priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); } else { memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); @@ -8986,7 +8986,7 @@ static int ipw_wx_get_wap(struct net_device *dev, wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); } else - memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu->ap_addr.sa_data); IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 2c4fa49686ef..887114582583 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -4634,7 +4634,7 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) il->vif = NULL; il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; il_teardown_interface(il, vif); - memset(il->bssid, 0, ETH_ALEN); + eth_zero_addr(il->bssid); D_MAC80211("leave\n"); mutex_unlock(&il->mutex); diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2620dd0c45f9..33bbdde0046f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -66,6 +66,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/etherdevice.h> #include <net/mac80211.h> @@ -491,7 +492,7 @@ void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, ETH_ALEN)) - 
memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN); + eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); } static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 569b64ecc607..8079560f4965 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -667,7 +667,7 @@ static int lbs_setup_firmware(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_FW); /* Read MAC address from firmware */ - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); ret = lbs_update_hw_spec(priv); if (ret) goto done; @@ -871,7 +871,7 @@ static int lbs_init_adapter(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_MAIN); - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); priv->connect_status = LBS_DISCONNECTED; priv->channel = DEFAULT_AD_HOC_CHANNEL; diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index 25c5acc78bd1..ed02e4bf2c26 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c @@ -152,7 +152,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv) /* * Read priv address from HW */ - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); ret = lbtf_update_hw_spec(priv); if (ret) { ret = -1; @@ -199,7 +199,7 @@ out: static int lbtf_init_adapter(struct lbtf_private *priv) { lbtf_deb_enter(LBTF_DEB_MAIN); - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); mutex_init(&priv->lock); priv->vif = NULL; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 8908be6dbc48..d56b7859a437 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1911,7 +1911,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, printk(KERN_DEBUG "hwsim sw_scan_complete\n"); hwsim->scanning = false; - memset(hwsim->scan_addr, 0, ETH_ALEN); + eth_zero_addr(hwsim->scan_addr); mutex_unlock(&hwsim->mutex); } @@ -2267,7 +2267,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, skb_queue_head_init(&data->pending); SET_IEEE80211_DEV(hw, data->dev); - memset(addr, 0, ETH_ALEN); + eth_zero_addr(addr); addr[0] = 0x02; addr[3] = idx >> 8; addr[4] = idx; @@ -2600,7 +2600,7 @@ static void hwsim_mon_setup(struct net_device *dev) ether_setup(dev); dev->tx_queue_len = 0; dev->type = ARPHRD_IEEE80211_RADIOTAP; - memset(dev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(dev->dev_addr); dev->dev_addr[0] = 0x12; } diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 41c8e25df954..7c3ca2f50186 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1563,7 +1563,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac); - memset(deauth_mac, 0, ETH_ALEN); + eth_zero_addr(deauth_mac); spin_lock_irqsave(&priv->sta_list_spinlock, flags); sta_node = mwifiex_get_sta_entry(priv, params->mac); @@ -1786,7 +1786,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" " reason code %d\n", priv->cfg_bssid, reason_code); - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); priv->hs2_enabled = false; return 0; @@ 
-2046,7 +2046,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, dev_dbg(priv->adapter->dev, "info: association to bssid %pM failed\n", priv->cfg_bssid); - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); if (ret > 0) cfg80211_connect_result(priv->netdev, priv->cfg_bssid, @@ -2194,7 +2194,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); return 0; } diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index b77ba743e1c4..0978b1cc58b6 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -76,7 +76,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) u32 i; priv->media_connected = false; - memset(priv->curr_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->curr_addr); priv->pkt_tx_ctrl = 0; priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; @@ -299,7 +299,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->ext_scan = false; adapter->key_api_major_ver = 0; adapter->key_api_minor_ver = 0; - memset(adapter->perm_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(adapter->perm_addr); adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM; adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM; adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM; diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 80ffe7412496..64c4223a1e1e 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -135,7 +135,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, GFP_KERNEL); } - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index ef717acec8b7..0cd4f6bed9fc 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -730,7 +730,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, } else { memcpy(ra, skb->data, ETH_ALEN); if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb)) - memset(ra, 0xff, ETH_ALEN); + eth_broadcast_addr(ra); ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); } diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index f9b1218c761a..95921167b53f 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1277,7 +1277,7 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, struct mwl8k_priv *priv = hw->priv; priv->capture_beacon = false; - memset(priv->capture_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->capture_bssid); /* * Use GFP_ATOMIC as rxq_process is called from diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 6abdaf0aa052..1d4dae422106 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c @@ -168,7 +168,7 @@ static int orinoco_ioctl_setwap(struct net_device *dev, if (is_zero_ether_addr(ap_addr->sa_data) || is_broadcast_ether_addr(ap_addr->sa_data)) { priv->bssid_fixed = 0; - memset(priv->desired_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->desired_bssid); /* "off" means keep existing connection */ if (ap_addr->sa_data[0] == 0) { diff --git 
a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c index 5367d510b22d..275408eaf95e 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c @@ -671,7 +671,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len, if (addr) memcpy(rxkey->mac, addr, ETH_ALEN); else - memset(rxkey->mac, ~0, ETH_ALEN); + eth_broadcast_addr(rxkey->mac); switch (algo) { case P54_CRYPTO_WEP: diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index b9250d75d253..e79674f73dc5 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -182,7 +182,7 @@ static int p54_start(struct ieee80211_hw *dev) if (err) goto out; - memset(priv->bssid, ~0, ETH_ALEN); + eth_broadcast_addr(priv->bssid); priv->mode = NL80211_IFTYPE_MONITOR; err = p54_setup_mac(priv); if (err) { @@ -274,8 +274,8 @@ static void p54_remove_interface(struct ieee80211_hw *dev, wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ); } priv->mode = NL80211_IFTYPE_MONITOR; - memset(priv->mac_addr, 0, ETH_ALEN); - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->mac_addr); + eth_zero_addr(priv->bssid); p54_setup_mac(priv); mutex_unlock(&priv->conf_mutex); } @@ -794,7 +794,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len) init_completion(&priv->beacon_comp); INIT_DELAYED_WORK(&priv->work, p54_work); - memset(&priv->mc_maclist[0], ~0, ETH_ALEN); + eth_broadcast_addr(priv->mc_maclist[0]); priv->curchan = NULL; p54_reset_stats(priv); return dev; diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 8330fa33e50b..477f86354dc5 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -808,7 +808,7 @@ static int ray_dev_init(struct net_device *dev) /* copy mac and broadcast addresses to linux device */ memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); - memset(dev->broadcast, 0xff, ETH_ALEN); + eth_broadcast_addr(dev->broadcast); dev_dbg(&link->dev, "ray_dev_init ending\n"); return 0; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 60d44ce9c017..d72ff8e7125d 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -199,13 +199,13 @@ enum ndis_80211_pmkid_cand_list_flag_bits { struct ndis_80211_auth_request { __le32 length; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[2]; __le32 flags; } __packed; struct ndis_80211_pmkid_candidate { - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[2]; __le32 flags; } __packed; @@ -248,7 +248,7 @@ struct ndis_80211_conf { struct ndis_80211_bssid_ex { __le32 length; - u8 mac[6]; + u8 mac[ETH_ALEN]; u8 padding[2]; struct ndis_80211_ssid ssid; __le32 privacy; @@ -283,7 +283,7 @@ struct ndis_80211_key { __le32 size; __le32 index; __le32 length; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[6]; u8 rsc[8]; u8 material[32]; @@ -292,7 +292,7 @@ struct ndis_80211_key { struct ndis_80211_remove_key { __le32 size; __le32 index; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[2]; } __packed; @@ -310,7 +310,7 @@ struct ndis_80211_assoc_info { struct req_ie { __le16 capa; __le16 listen_interval; - u8 cur_ap_address[6]; + u8 cur_ap_address[ETH_ALEN]; } req_ie; __le32 req_ie_length; __le32 offset_req_ies; @@ -338,7 +338,7 @@ struct ndis_80211_capability { } __packed; struct ndis_80211_bssid_info { - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 pmkid[16]; } __packed; @@ -1037,7 +1037,7 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) bssid, &len); 
if (ret != 0) - memset(bssid, 0, ETH_ALEN); + eth_zero_addr(bssid); return ret; } @@ -1391,7 +1391,7 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, priv->encr_keys[index].len = key_len; priv->encr_keys[index].cipher = cipher; memcpy(&priv->encr_keys[index].material, key, key_len); - memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->encr_keys[index].bssid); return 0; } @@ -1466,7 +1466,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, } else { /* group key */ if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) - memset(ndis_key.bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(ndis_key.bssid); else get_bssid(usbdev, ndis_key.bssid); } @@ -1486,7 +1486,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN); else - memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->encr_keys[index].bssid); if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY) priv->encr_tx_key_index = index; @@ -2280,7 +2280,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code); priv->connected = false; - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); return deauthenticate(usbdev); } @@ -2392,7 +2392,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev) netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n"); priv->connected = false; - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); return deauthenticate(usbdev); } @@ -2857,7 +2857,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) if (priv->connected) { priv->connected = false; - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); deauthenticate(usbdev); diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index a31a12775f1a..3b3a88b53b11 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -195,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) if (!(support_remote_wakeup && rtlhal->enter_pnp_sleep)) { mac->link_state = MAC80211_NOLINK; - memset(mac->bssid, 0, 6); + eth_zero_addr(mac->bssid); mac->vendor = PEER_UNKNOWN; /* reset sec info */ @@ -357,7 +357,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw, mac->p2p = 0; mac->vif = NULL; mac->link_state = MAC80211_NOLINK; - memset(mac->bssid, 0, ETH_ALEN); + eth_zero_addr(mac->bssid); mac->vendor = PEER_UNKNOWN; mac->opmode = NL80211_IFTYPE_UNSPECIFIED; rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); @@ -1157,7 +1157,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); mac->link_state = MAC80211_NOLINK; - memset(mac->bssid, 0, ETH_ALEN); + eth_zero_addr(mac->bssid); mac->vendor = PEER_UNKNOWN; mac->mode = 0; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index d4ba009ac9aa..d1e9a13be910 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -468,7 +468,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw) wl1251_tx_flush(wl); wl1251_power_off(wl); - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); wl->listen_int = 1; wl->bss_type = MAX_BSS_TYPE; @@ -547,7 +547,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw, 
mutex_lock(&wl->mutex); wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface"); wl->vif = NULL; - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); mutex_unlock(&wl->mutex); } diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index c26fc2106e5b..68919f8d4310 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -367,7 +367,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) wl->links[*hlid].allocated_pkts = 0; wl->links[*hlid].prev_freed_pkts = 0; wl->links[*hlid].ba_bitmap = 0; - memset(wl->links[*hlid].addr, 0, ETH_ALEN); + eth_zero_addr(wl->links[*hlid].addr); /* * At this point op_tx() will not add more packets to the queues. We @@ -1293,7 +1293,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif) hdr->frame_control = cpu_to_le16(fc); memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN); memcpy(hdr->addr2, vif->addr, ETH_ALEN); - memset(hdr->addr3, 0xff, ETH_ALEN); + eth_broadcast_addr(hdr->addr3); ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP, skb->data, skb->len, 0, diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..4ae98e2ad719 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -438,7 +438,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, * stolen by an Ethernet bridge for STP purposes. * (FE:FF:FF:FF:FF:FF) */ - memset(dev->dev_addr, 0xFF, ETH_ALEN); + eth_broadcast_addr(dev->dev_addr); dev->dev_addr[0] &= ~0x01; netif_carrier_off(dev); diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index f1b5111bbaba..b2837b1c70b7 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -57,17 +57,6 @@ config SMSGIUCV_EVENT To compile as a module, choose M. The module name is "smsgiucv_app". -config CLAW - def_tristate m - prompt "CLAW device support" - depends on CCW && NETDEVICES - help - This driver supports channel attached CLAW devices. - CLAW is Common Link Access for Workstation. Common devices - that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices. - To compile as a module, choose M. The module name is claw. - To compile into the kernel, choose Y. - config QETH def_tristate y prompt "Gigabit Ethernet device support" @@ -106,6 +95,6 @@ config QETH_IPV6 config CCWGROUP tristate - default (LCS || CTCM || QETH || CLAW) + default (LCS || CTCM || QETH) endmenu diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index d28f05d0c75a..c351b07603e0 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o obj-$(CONFIG_LCS) += lcs.o -obj-$(CONFIG_CLAW) += claw.o qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o obj-$(CONFIG_QETH) += qeth.o qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c deleted file mode 100644 index d609ca09aa94..000000000000 --- a/drivers/s390/net/claw.c +++ /dev/null @@ -1,3377 +0,0 @@ -/* - * ESCON CLAW network driver - * - * Linux for zSeries version - * Copyright IBM Corp. 
2002, 2009 - * Author(s) Original code written by: - * Kazuo Iimura <iimura@jp.ibm.com> - * Rewritten by - * Andy Richter <richtera@us.ibm.com> - * Marc Price <mwprice@us.ibm.com> - * - * sysfs parms: - * group x.x.rrrr,x.x.wwww - * read_buffer nnnnnnn - * write_buffer nnnnnn - * host_name aaaaaaaa - * adapter_name aaaaaaaa - * api_type aaaaaaaa - * - * eg. - * group 0.0.0200 0.0.0201 - * read_buffer 25 - * write_buffer 20 - * host_name LINUX390 - * adapter_name RS6K - * api_type TCPIP - * - * where - * - * The device id is decided by the order entries - * are added to the group the first is claw0 the second claw1 - * up to CLAW_MAX_DEV - * - * rrrr - the first of 2 consecutive device addresses used for the - * CLAW protocol. - * The specified address is always used as the input (Read) - * channel and the next address is used as the output channel. - * - * wwww - the second of 2 consecutive device addresses used for - * the CLAW protocol. - * The specified address is always used as the output - * channel and the previous address is used as the input channel. - * - * read_buffer - specifies number of input buffers to allocate. - * write_buffer - specifies number of output buffers to allocate. - * host_name - host name - * adaptor_name - adaptor name - * api_type - API type TCPIP or API will be sent and expected - * as ws_name - * - * Note the following requirements: - * 1) host_name must match the configured adapter_name on the remote side - * 2) adaptor_name must match the configured host name on the remote side - * - * Change History - * 1.00 Initial release shipped - * 1.10 Changes for Buffer allocation - * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower - * 1.25 Added Packing support - * 1.5 - */ - -#define KMSG_COMPONENT "claw" - -#include <asm/ccwdev.h> -#include <asm/ccwgroup.h> -#include <asm/debug.h> -#include <asm/idals.h> -#include <asm/io.h> -#include <linux/bitops.h> -#include <linux/ctype.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/if_arp.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/ip.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/proc_fs.h> -#include <linux/sched.h> -#include <linux/signal.h> -#include <linux/skbuff.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/tcp.h> -#include <linux/timer.h> -#include <linux/types.h> - -#include "claw.h" - -/* - CLAW uses the s390dbf file system see claw_trace and claw_setup -*/ - -static char version[] __initdata = "CLAW driver"; -static char debug_buffer[255]; -/** - * Debug Facility Stuff - */ -static debug_info_t *claw_dbf_setup; -static debug_info_t *claw_dbf_trace; - -/** - * CLAW Debug Facility functions - */ -static void -claw_unregister_debug_facility(void) -{ - debug_unregister(claw_dbf_setup); - debug_unregister(claw_dbf_trace); -} - -static int -claw_register_debug_facility(void) -{ - claw_dbf_setup = debug_register("claw_setup", 2, 1, 8); - claw_dbf_trace = debug_register("claw_trace", 2, 2, 8); - if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) { - claw_unregister_debug_facility(); - return -ENOMEM; - } - debug_register_view(claw_dbf_setup, &debug_hex_ascii_view); - debug_set_level(claw_dbf_setup, 2); - debug_register_view(claw_dbf_trace, &debug_hex_ascii_view); - debug_set_level(claw_dbf_trace, 2); - return 0; -} - -static inline void -claw_set_busy(struct net_device *dev) -{ - ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; -} - 
-static inline void -claw_clear_busy(struct net_device *dev) -{ - clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); - netif_wake_queue(dev); -} - -static inline int -claw_check_busy(struct net_device *dev) -{ - return ((struct claw_privbk *) dev->ml_priv)->tbusy; -} - -static inline void -claw_setbit_busy(int nr,struct net_device *dev) -{ - netif_stop_queue(dev); - set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); -} - -static inline void -claw_clearbit_busy(int nr,struct net_device *dev) -{ - clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); - netif_wake_queue(dev); -} - -static inline int -claw_test_and_setbit_busy(int nr,struct net_device *dev) -{ - netif_stop_queue(dev); - return test_and_set_bit(nr, - (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy)); -} - - -/* Functions for the DEV methods */ - -static int claw_probe(struct ccwgroup_device *cgdev); -static void claw_remove_device(struct ccwgroup_device *cgdev); -static void claw_purge_skb_queue(struct sk_buff_head *q); -static int claw_new_device(struct ccwgroup_device *cgdev); -static int claw_shutdown_device(struct ccwgroup_device *cgdev); -static int claw_tx(struct sk_buff *skb, struct net_device *dev); -static int claw_change_mtu( struct net_device *dev, int new_mtu); -static int claw_open(struct net_device *dev); -static void claw_irq_handler(struct ccw_device *cdev, - unsigned long intparm, struct irb *irb); -static void claw_irq_tasklet ( unsigned long data ); -static int claw_release(struct net_device *dev); -static void claw_write_retry ( struct chbk * p_ch ); -static void claw_write_next ( struct chbk * p_ch ); -static void claw_timer ( struct chbk * p_ch ); - -/* Functions */ -static int add_claw_reads(struct net_device *dev, - struct ccwbk* p_first, struct ccwbk* p_last); -static void ccw_check_return_code (struct ccw_device *cdev, int return_code); -static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense ); -static int find_link(struct net_device *dev, char *host_name, char *ws_name ); -static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); -static int init_ccw_bk(struct net_device *dev); -static void probe_error( struct ccwgroup_device *cgdev); -static struct net_device_stats *claw_stats(struct net_device *dev); -static int pages_to_order_of_mag(int num_of_pages); -static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); -/* sysfs Functions */ -static ssize_t claw_hname_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_hname_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_adname_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_adname_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_apname_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_apname_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_wbuff_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_wbuff_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_rbuff_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_rbuff_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t 
count); - -/* Functions for System Validate */ -static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw); -static int claw_send_control(struct net_device *dev, __u8 type, __u8 link, - __u8 correlator, __u8 rc , char *local_name, char *remote_name); -static int claw_snd_conn_req(struct net_device *dev, __u8 link); -static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl); -static int claw_snd_sys_validate_rsp(struct net_device *dev, - struct clawctl * p_ctl, __u32 return_code); -static int claw_strt_conn_req(struct net_device *dev ); -static void claw_strt_read(struct net_device *dev, int lock); -static void claw_strt_out_IO(struct net_device *dev); -static void claw_free_wrt_buf(struct net_device *dev); - -/* Functions for unpack reads */ -static void unpack_read(struct net_device *dev); - -static int claw_pm_prepare(struct ccwgroup_device *gdev) -{ - return -EPERM; -} - -/* the root device for claw group devices */ -static struct device *claw_root_dev; - -/* ccwgroup table */ - -static struct ccwgroup_driver claw_group_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "claw", - }, - .setup = claw_probe, - .remove = claw_remove_device, - .set_online = claw_new_device, - .set_offline = claw_shutdown_device, - .prepare = claw_pm_prepare, -}; - -static struct ccw_device_id claw_ids[] = { - {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw}, - {}, -}; -MODULE_DEVICE_TABLE(ccw, claw_ids); - -static struct ccw_driver claw_ccw_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "claw", - }, - .ids = claw_ids, - .probe = ccwgroup_probe_ccwdev, - .remove = ccwgroup_remove_ccwdev, - .int_class = IRQIO_CLW, -}; - -static ssize_t claw_driver_group_store(struct device_driver *ddrv, - const char *buf, size_t count) -{ - int err; - err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf); - return err ? 
err : count; -} -static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store); - -static struct attribute *claw_drv_attrs[] = { - &driver_attr_group.attr, - NULL, -}; -static struct attribute_group claw_drv_attr_group = { - .attrs = claw_drv_attrs, -}; -static const struct attribute_group *claw_drv_attr_groups[] = { - &claw_drv_attr_group, - NULL, -}; - -/* -* Key functions -*/ - -/*-------------------------------------------------------------------* - * claw_tx * - *-------------------------------------------------------------------*/ - -static int -claw_tx(struct sk_buff *skb, struct net_device *dev) -{ - int rc; - struct claw_privbk *privptr = dev->ml_priv; - unsigned long saveflags; - struct chbk *p_ch; - - CLAW_DBF_TEXT(4, trace, "claw_tx"); - p_ch = &privptr->channel[WRITE_CHANNEL]; - spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); - rc=claw_hw_tx( skb, dev, 1 ); - spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); - CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc); - if (rc) - rc = NETDEV_TX_BUSY; - else - rc = NETDEV_TX_OK; - return rc; -} /* end of claw_tx */ - -/*------------------------------------------------------------------* - * pack the collect queue into an skb and return it * - * If not packing just return the top skb from the queue * - *------------------------------------------------------------------*/ - -static struct sk_buff * -claw_pack_skb(struct claw_privbk *privptr) -{ - struct sk_buff *new_skb,*held_skb; - struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL]; - struct claw_env *p_env = privptr->p_env; - int pkt_cnt,pk_ind,so_far; - - new_skb = NULL; /* assume no dice */ - pkt_cnt = 0; - CLAW_DBF_TEXT(4, trace, "PackSKBe"); - if (!skb_queue_empty(&p_ch->collect_queue)) { - /* some data */ - held_skb = skb_dequeue(&p_ch->collect_queue); - if (held_skb) - dev_kfree_skb_any(held_skb); - else - return NULL; - if (p_env->packing != DO_PACKED) - return held_skb; - /* get a new SKB we will pack at least one */ - new_skb = dev_alloc_skb(p_env->write_size); - if (new_skb == NULL) { - atomic_inc(&held_skb->users); - skb_queue_head(&p_ch->collect_queue,held_skb); - return NULL; - } - /* we have packed packet and a place to put it */ - pk_ind = 1; - so_far = 0; - new_skb->cb[1] = 'P'; /* every skb on queue has pack header */ - while ((pk_ind) && (held_skb != NULL)) { - if (held_skb->len+so_far <= p_env->write_size-8) { - memcpy(skb_put(new_skb,held_skb->len), - held_skb->data,held_skb->len); - privptr->stats.tx_packets++; - so_far += held_skb->len; - pkt_cnt++; - dev_kfree_skb_any(held_skb); - held_skb = skb_dequeue(&p_ch->collect_queue); - if (held_skb) - atomic_dec(&held_skb->users); - } else { - pk_ind = 0; - atomic_inc(&held_skb->users); - skb_queue_head(&p_ch->collect_queue,held_skb); - } - } - } - CLAW_DBF_TEXT(4, trace, "PackSKBx"); - return new_skb; -} - -/*-------------------------------------------------------------------* - * claw_change_mtu * - * * - *-------------------------------------------------------------------*/ - -static int -claw_change_mtu(struct net_device *dev, int new_mtu) -{ - struct claw_privbk *privptr = dev->ml_priv; - int buff_size; - CLAW_DBF_TEXT(4, trace, "setmtu"); - buff_size = privptr->p_env->write_size; - if ((new_mtu < 60) || (new_mtu > buff_size)) { - return -EINVAL; - } - dev->mtu = new_mtu; - return 0; -} /* end of claw_change_mtu */ - - -/*-------------------------------------------------------------------* - * claw_open * - * * - *-------------------------------------------------------------------*/ -static int 
-claw_open(struct net_device *dev) -{ - - int rc; - int i; - unsigned long saveflags=0; - unsigned long parm; - struct claw_privbk *privptr; - DECLARE_WAITQUEUE(wait, current); - struct timer_list timer; - struct ccwbk *p_buf; - - CLAW_DBF_TEXT(4, trace, "open"); - privptr = (struct claw_privbk *)dev->ml_priv; - /* allocate and initialize CCW blocks */ - if (privptr->buffs_alloc == 0) { - rc=init_ccw_bk(dev); - if (rc) { - CLAW_DBF_TEXT(2, trace, "openmem"); - return -ENOMEM; - } - } - privptr->system_validate_comp=0; - privptr->release_pend=0; - if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { - privptr->p_env->read_size=DEF_PACK_BUFSIZE; - privptr->p_env->write_size=DEF_PACK_BUFSIZE; - privptr->p_env->packing=PACKING_ASK; - } else { - privptr->p_env->packing=0; - privptr->p_env->read_size=CLAW_FRAME_SIZE; - privptr->p_env->write_size=CLAW_FRAME_SIZE; - } - claw_set_busy(dev); - tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet, - (unsigned long) &privptr->channel[READ_CHANNEL]); - for ( i = 0; i < 2; i++) { - CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); - init_waitqueue_head(&privptr->channel[i].wait); - /* skb_queue_head_init(&p_ch->io_queue); */ - if (i == WRITE_CHANNEL) - skb_queue_head_init( - &privptr->channel[WRITE_CHANNEL].collect_queue); - privptr->channel[i].flag_a = 0; - privptr->channel[i].IO_active = 0; - privptr->channel[i].flag &= ~CLAW_TIMER; - init_timer(&timer); - timer.function = (void *)claw_timer; - timer.data = (unsigned long)(&privptr->channel[i]); - timer.expires = jiffies + 15*HZ; - add_timer(&timer); - spin_lock_irqsave(get_ccwdev_lock( - privptr->channel[i].cdev), saveflags); - parm = (unsigned long) &privptr->channel[i]; - privptr->channel[i].claw_state = CLAW_START_HALT_IO; - rc = 0; - add_wait_queue(&privptr->channel[i].wait, &wait); - rc = ccw_device_halt( - (struct ccw_device *)privptr->channel[i].cdev,parm); - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore( - get_ccwdev_lock(privptr->channel[i].cdev), saveflags); - schedule(); - remove_wait_queue(&privptr->channel[i].wait, &wait); - if(rc != 0) - ccw_check_return_code(privptr->channel[i].cdev, rc); - if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) - del_timer(&timer); - } - if ((((privptr->channel[READ_CHANNEL].last_dstat | - privptr->channel[WRITE_CHANNEL].last_dstat) & - ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || - (((privptr->channel[READ_CHANNEL].flag | - privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) { - dev_info(&privptr->channel[READ_CHANNEL].cdev->dev, - "%s: remote side is not ready\n", dev->name); - CLAW_DBF_TEXT(2, trace, "notrdy"); - - for ( i = 0; i < 2; i++) { - spin_lock_irqsave( - get_ccwdev_lock(privptr->channel[i].cdev), - saveflags); - parm = (unsigned long) &privptr->channel[i]; - privptr->channel[i].claw_state = CLAW_STOP; - rc = ccw_device_halt( - (struct ccw_device *)&privptr->channel[i].cdev, - parm); - spin_unlock_irqrestore( - get_ccwdev_lock(privptr->channel[i].cdev), - saveflags); - if (rc != 0) { - ccw_check_return_code( - privptr->channel[i].cdev, rc); - } - } - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); - if (privptr->p_env->read_size < PAGE_SIZE) { - free_pages((unsigned long)privptr->p_buff_read, - (int)pages_to_order_of_mag( - privptr->p_buff_read_num)); - } - else { - p_buf=privptr->p_read_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread 
)); - p_buf=p_buf->next; - } - } - if (privptr->p_env->write_size < PAGE_SIZE ) { - free_pages((unsigned long)privptr->p_buff_write, - (int)pages_to_order_of_mag( - privptr->p_buff_write_num)); - } - else { - p_buf=privptr->p_write_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite )); - p_buf=p_buf->next; - } - } - privptr->buffs_alloc = 0; - privptr->channel[READ_CHANNEL].flag = 0x00; - privptr->channel[WRITE_CHANNEL].flag = 0x00; - privptr->p_buff_ccw=NULL; - privptr->p_buff_read=NULL; - privptr->p_buff_write=NULL; - claw_clear_busy(dev); - CLAW_DBF_TEXT(2, trace, "open EIO"); - return -EIO; - } - - /* Send SystemValidate command */ - - claw_clear_busy(dev); - CLAW_DBF_TEXT(4, trace, "openok"); - return 0; -} /* end of claw_open */ - -/*-------------------------------------------------------------------* -* * -* claw_irq_handler * -* * -*--------------------------------------------------------------------*/ -static void -claw_irq_handler(struct ccw_device *cdev, - unsigned long intparm, struct irb *irb) -{ - struct chbk *p_ch = NULL; - struct claw_privbk *privptr = NULL; - struct net_device *dev = NULL; - struct claw_env *p_env; - struct chbk *p_ch_r=NULL; - - CLAW_DBF_TEXT(4, trace, "clawirq"); - /* Bypass all 'unsolicited interrupts' */ - privptr = dev_get_drvdata(&cdev->dev); - if (!privptr) { - dev_warn(&cdev->dev, "An uninitialized CLAW device received an" - " IRQ, c-%02x d-%02x\n", - irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); - CLAW_DBF_TEXT(2, trace, "badirq"); - return; - } - - /* Try to extract channel from driver data. */ - if (privptr->channel[READ_CHANNEL].cdev == cdev) - p_ch = &privptr->channel[READ_CHANNEL]; - else if (privptr->channel[WRITE_CHANNEL].cdev == cdev) - p_ch = &privptr->channel[WRITE_CHANNEL]; - else { - dev_warn(&cdev->dev, "The device is not a CLAW device\n"); - CLAW_DBF_TEXT(2, trace, "badchan"); - return; - } - CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag); - - dev = (struct net_device *) (p_ch->ndev); - p_env=privptr->p_env; - - /* Copy interruption response block. 
*/ - memcpy(p_ch->irb, irb, sizeof(struct irb)); - - /* Check for good subchannel return code, otherwise info message */ - if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { - dev_info(&cdev->dev, - "%s: subchannel check for device: %04x -" - " Sch Stat %02x Dev Stat %02x CPA - %04x\n", - dev->name, p_ch->devno, - irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, - irb->scsw.cmd.cpa); - CLAW_DBF_TEXT(2, trace, "chanchk"); - /* return; */ - } - - /* Check the reason-code of a unit check */ - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) - ccw_check_unit_check(p_ch, irb->ecw[0]); - - /* State machine to bring the connection up, down and to restart */ - p_ch->last_dstat = irb->scsw.cmd.dstat; - - switch (p_ch->claw_state) { - case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) - return; - wake_up(&p_ch->wait); /* wake up claw_release */ - CLAW_DBF_TEXT(4, trace, "stop"); - return; - case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */ - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { - CLAW_DBF_TEXT(4, trace, "haltio"); - return; - } - if (p_ch->flag == CLAW_READ) { - p_ch->claw_state = CLAW_START_READ; - wake_up(&p_ch->wait); /* wake claw_open (READ)*/ - } else if (p_ch->flag == CLAW_WRITE) { - p_ch->claw_state = CLAW_START_WRITE; - /* send SYSTEM_VALIDATE */ - claw_strt_read(dev, LOCK_NO); - claw_send_control(dev, - SYSTEM_VALIDATE_REQUEST, - 0, 0, 0, - p_env->host_name, - p_env->adapter_name); - } else { - dev_warn(&cdev->dev, "The CLAW device received" - " an unexpected IRQ, " - "c-%02x d-%02x\n", - irb->scsw.cmd.cstat, - irb->scsw.cmd.dstat); - return; - } - CLAW_DBF_TEXT(4, trace, "haltio"); - return; - case CLAW_START_READ: - CLAW_DBF_TEXT(4, trace, "ReadIRQ"); - if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - clear_bit(0, (void *)&p_ch->IO_active); - if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || - (p_ch->irb->ecw[0] & 0x40) == 0x40 || - (p_ch->irb->ecw[0]) == 0) { - privptr->stats.rx_errors++; - dev_info(&cdev->dev, - "%s: Restart is required after remote " - "side recovers \n", - dev->name); - } - CLAW_DBF_TEXT(4, trace, "notrdy"); - return; - } - if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) && - (p_ch->irb->scsw.cmd.dstat == 0)) { - if (test_and_set_bit(CLAW_BH_ACTIVE, - (void *)&p_ch->flag_a) == 0) - tasklet_schedule(&p_ch->tasklet); - else - CLAW_DBF_TEXT(4, trace, "PCINoBH"); - CLAW_DBF_TEXT(4, trace, "PCI_read"); - return; - } - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { - CLAW_DBF_TEXT(4, trace, "SPend_rd"); - return; - } - clear_bit(0, (void *)&p_ch->IO_active); - claw_clearbit_busy(TB_RETRY, dev); - if (test_and_set_bit(CLAW_BH_ACTIVE, - (void *)&p_ch->flag_a) == 0) - tasklet_schedule(&p_ch->tasklet); - else - CLAW_DBF_TEXT(4, trace, "RdBHAct"); - CLAW_DBF_TEXT(4, trace, "RdIRQXit"); - return; - case CLAW_START_WRITE: - if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - dev_info(&cdev->dev, - "%s: Unit Check Occurred in " - "write channel\n", dev->name); - clear_bit(0, (void *)&p_ch->IO_active); - if 
(p_ch->irb->ecw[0] & 0x80) { - dev_info(&cdev->dev, - "%s: Resetting Event " - "occurred:\n", dev->name); - init_timer(&p_ch->timer); - p_ch->timer.function = - (void *)claw_write_retry; - p_ch->timer.data = (unsigned long)p_ch; - p_ch->timer.expires = jiffies + 10*HZ; - add_timer(&p_ch->timer); - dev_info(&cdev->dev, - "%s: write connection " - "restarting\n", dev->name); - } - CLAW_DBF_TEXT(4, trace, "rstrtwrt"); - return; - } - if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { - clear_bit(0, (void *)&p_ch->IO_active); - dev_info(&cdev->dev, - "%s: Unit Exception " - "occurred in write channel\n", - dev->name); - } - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { - CLAW_DBF_TEXT(4, trace, "writeUE"); - return; - } - clear_bit(0, (void *)&p_ch->IO_active); - if (claw_test_and_setbit_busy(TB_TX, dev) == 0) { - claw_write_next(p_ch); - claw_clearbit_busy(TB_TX, dev); - claw_clear_busy(dev); - } - p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL]; - if (test_and_set_bit(CLAW_BH_ACTIVE, - (void *)&p_ch_r->flag_a) == 0) - tasklet_schedule(&p_ch_r->tasklet); - CLAW_DBF_TEXT(4, trace, "StWtExit"); - return; - default: - dev_warn(&cdev->dev, - "The CLAW device for %s received an unexpected IRQ\n", - dev->name); - CLAW_DBF_TEXT(2, trace, "badIRQ"); - return; - } - -} /* end of claw_irq_handler */ - - -/*-------------------------------------------------------------------* -* claw_irq_tasklet * -* * -*--------------------------------------------------------------------*/ -static void -claw_irq_tasklet ( unsigned long data ) -{ - struct chbk * p_ch; - struct net_device *dev; - - p_ch = (struct chbk *) data; - dev = (struct net_device *)p_ch->ndev; - CLAW_DBF_TEXT(4, trace, "IRQtask"); - unpack_read(dev); - clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); - CLAW_DBF_TEXT(4, trace, "TskletXt"); - return; -} /* end of claw_irq_bh */ - -/*-------------------------------------------------------------------* -* claw_release * -* * -*--------------------------------------------------------------------*/ -static int -claw_release(struct net_device *dev) -{ - int rc; - int i; - unsigned long saveflags; - unsigned long parm; - struct claw_privbk *privptr; - DECLARE_WAITQUEUE(wait, current); - struct ccwbk* p_this_ccw; - struct ccwbk* p_buf; - - if (!dev) - return 0; - privptr = (struct claw_privbk *)dev->ml_priv; - if (!privptr) - return 0; - CLAW_DBF_TEXT(4, trace, "release"); - privptr->release_pend=1; - claw_setbit_busy(TB_STOP,dev); - for ( i = 1; i >=0 ; i--) { - spin_lock_irqsave( - get_ccwdev_lock(privptr->channel[i].cdev), saveflags); - /* del_timer(&privptr->channel[READ_CHANNEL].timer); */ - privptr->channel[i].claw_state = CLAW_STOP; - privptr->channel[i].IO_active = 0; - parm = (unsigned long) &privptr->channel[i]; - if (i == WRITE_CHANNEL) - claw_purge_skb_queue( - &privptr->channel[WRITE_CHANNEL].collect_queue); - rc = ccw_device_halt (privptr->channel[i].cdev, parm); - if (privptr->system_validate_comp==0x00) /* never opened? 
*/ - init_waitqueue_head(&privptr->channel[i].wait); - add_wait_queue(&privptr->channel[i].wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore( - get_ccwdev_lock(privptr->channel[i].cdev), saveflags); - schedule(); - remove_wait_queue(&privptr->channel[i].wait, &wait); - if (rc != 0) { - ccw_check_return_code(privptr->channel[i].cdev, rc); - } - } - if (privptr->pk_skb != NULL) { - dev_kfree_skb_any(privptr->pk_skb); - privptr->pk_skb = NULL; - } - if(privptr->buffs_alloc != 1) { - CLAW_DBF_TEXT(4, trace, "none2fre"); - return 0; - } - CLAW_DBF_TEXT(4, trace, "freebufs"); - if (privptr->p_buff_ccw != NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); - } - CLAW_DBF_TEXT(4, trace, "freeread"); - if (privptr->p_env->read_size < PAGE_SIZE) { - if (privptr->p_buff_read != NULL) { - free_pages((unsigned long)privptr->p_buff_read, - (int)pages_to_order_of_mag(privptr->p_buff_read_num)); - } - } - else { - p_buf=privptr->p_read_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread )); - p_buf=p_buf->next; - } - } - CLAW_DBF_TEXT(4, trace, "freewrit"); - if (privptr->p_env->write_size < PAGE_SIZE ) { - free_pages((unsigned long)privptr->p_buff_write, - (int)pages_to_order_of_mag(privptr->p_buff_write_num)); - } - else { - p_buf=privptr->p_write_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite )); - p_buf=p_buf->next; - } - } - CLAW_DBF_TEXT(4, trace, "clearptr"); - privptr->buffs_alloc = 0; - privptr->p_buff_ccw=NULL; - privptr->p_buff_read=NULL; - privptr->p_buff_write=NULL; - privptr->system_validate_comp=0; - privptr->release_pend=0; - /* Remove any writes that were pending and reset all reads */ - p_this_ccw=privptr->p_read_active_first; - while (p_this_ccw!=NULL) { - p_this_ccw->header.length=0xffff; - p_this_ccw->header.opcode=0xff; - p_this_ccw->header.flag=0x00; - p_this_ccw=p_this_ccw->next; - } - - while (privptr->p_write_active_first!=NULL) { - p_this_ccw=privptr->p_write_active_first; - p_this_ccw->header.flag=CLAW_PENDING; - privptr->p_write_active_first=p_this_ccw->next; - p_this_ccw->next=privptr->p_write_free_chain; - privptr->p_write_free_chain=p_this_ccw; - ++privptr->write_free_count; - } - privptr->p_write_active_last=NULL; - privptr->mtc_logical_link = -1; - privptr->mtc_skipping = 1; - privptr->mtc_offset=0; - - if (((privptr->channel[READ_CHANNEL].last_dstat | - privptr->channel[WRITE_CHANNEL].last_dstat) & - ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { - dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev, - "Deactivating %s completed with incorrect" - " subchannel status " - "(read %02x, write %02x)\n", - dev->name, - privptr->channel[READ_CHANNEL].last_dstat, - privptr->channel[WRITE_CHANNEL].last_dstat); - CLAW_DBF_TEXT(2, trace, "badclose"); - } - CLAW_DBF_TEXT(4, trace, "rlsexit"); - return 0; -} /* end of claw_release */ - -/*-------------------------------------------------------------------* -* claw_write_retry * -* * -*--------------------------------------------------------------------*/ - -static void -claw_write_retry ( struct chbk *p_ch ) -{ - - struct net_device *dev=p_ch->ndev; - - CLAW_DBF_TEXT(4, trace, "w_retry"); - if (p_ch->claw_state == CLAW_STOP) { - return; - } - claw_strt_out_IO( dev ); - CLAW_DBF_TEXT(4, trace, "rtry_xit"); - return; -} /* end of claw_write_retry */ - - 
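The claw_open()/claw_release() error and teardown paths above free each buffer pool with free_pages(addr, order), where the order comes from pages_to_order_of_mag(), whose body appears just below: the page count is rounded up to the next power of two and the order is capped at 9 (512 pages). A stand-alone sketch of that mapping with a few sample counts (an illustrative re-creation, not the driver code):

/*
 * Page-count -> allocation-order mapping used by the removed CLAW
 * buffer management: ceil(log2(pages)), capped at order 9 (512 pages).
 */
#include <stdio.h>

static int pages_to_order(int pages)
{
	int order = 0;
	int span = 1;			/* pages covered by this order */

	while (span < pages && order < 9) {
		span <<= 1;
		order++;
	}
	return order;			/* 1 -> 0, 2 -> 1, 3 -> 2, 512+ -> 9 */
}

int main(void)
{
	int samples[] = { 1, 2, 3, 20, 25, 512, 600 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%3d pages -> order %d\n",
		       samples[i], pages_to_order(samples[i]));
	return 0;
}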
-/*-------------------------------------------------------------------* -* claw_write_next * -* * -*--------------------------------------------------------------------*/ - -static void -claw_write_next ( struct chbk * p_ch ) -{ - - struct net_device *dev; - struct claw_privbk *privptr=NULL; - struct sk_buff *pk_skb; - - CLAW_DBF_TEXT(4, trace, "claw_wrt"); - if (p_ch->claw_state == CLAW_STOP) - return; - dev = (struct net_device *) p_ch->ndev; - privptr = (struct claw_privbk *) dev->ml_priv; - claw_free_wrt_buf( dev ); - if ((privptr->write_free_count > 0) && - !skb_queue_empty(&p_ch->collect_queue)) { - pk_skb = claw_pack_skb(privptr); - while (pk_skb != NULL) { - claw_hw_tx(pk_skb, dev, 1); - if (privptr->write_free_count > 0) { - pk_skb = claw_pack_skb(privptr); - } else - pk_skb = NULL; - } - } - if (privptr->p_write_active_first!=NULL) { - claw_strt_out_IO(dev); - } - return; -} /* end of claw_write_next */ - -/*-------------------------------------------------------------------* -* * -* claw_timer * -*--------------------------------------------------------------------*/ - -static void -claw_timer ( struct chbk * p_ch ) -{ - CLAW_DBF_TEXT(4, trace, "timer"); - p_ch->flag |= CLAW_TIMER; - wake_up(&p_ch->wait); - return; -} /* end of claw_timer */ - -/* -* -* functions -*/ - - -/*-------------------------------------------------------------------* -* * -* pages_to_order_of_mag * -* * -* takes a number of pages from 1 to 512 and returns the * -* log(num_pages)/log(2) get_free_pages() needs a base 2 order * -* of magnitude get_free_pages() has an upper order of 9 * -*--------------------------------------------------------------------*/ - -static int -pages_to_order_of_mag(int num_of_pages) -{ - int order_of_mag=1; /* assume 2 pages */ - int nump; - - CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages); - if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ - /* 512 pages = 2Meg on 4k page systems */ - if (num_of_pages >= 512) {return 9; } - /* we have two or more pages order is at least 1 */ - for (nump=2 ;nump <= 512;nump*=2) { - if (num_of_pages <= nump) - break; - order_of_mag +=1; - } - if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ - CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag); - return order_of_mag; -} - -/*-------------------------------------------------------------------* -* * -* add_claw_reads * -* * -*--------------------------------------------------------------------*/ -static int -add_claw_reads(struct net_device *dev, struct ccwbk* p_first, - struct ccwbk* p_last) -{ - struct claw_privbk *privptr; - struct ccw1 temp_ccw; - struct endccw * p_end; - CLAW_DBF_TEXT(4, trace, "addreads"); - privptr = dev->ml_priv; - p_end = privptr->p_end_ccw; - - /* first CCW and last CCW contains a new set of read channel programs - * to apend the running channel programs - */ - if ( p_first==NULL) { - CLAW_DBF_TEXT(4, trace, "addexit"); - return 0; - } - - /* set up ending CCW sequence for this segment */ - if (p_end->read1) { - p_end->read1=0x00; /* second ending CCW is now active */ - /* reset ending CCWs and setup TIC CCWs */ - p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1); - p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1); - p_end->read2_nop2.cda=0; - p_end->read2_nop2.count=1; - } - else { - p_end->read1=0x01; /* first ending CCW is now active */ - /* reset ending CCWs and setup TIC CCWs */ - p_end->read1_nop2.cmd_code = 
CCW_CLAW_CMD_READFF; - p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1); - p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1); - p_end->read1_nop2.cda=0; - p_end->read1_nop2.count=1; - } - - if ( privptr-> p_read_active_first ==NULL ) { - privptr->p_read_active_first = p_first; /* set new first */ - privptr->p_read_active_last = p_last; /* set new last */ - } - else { - - /* set up TIC ccw */ - temp_ccw.cda= (__u32)__pa(&p_first->read); - temp_ccw.count=0; - temp_ccw.flags=0; - temp_ccw.cmd_code = CCW_CLAW_CMD_TIC; - - - if (p_end->read1) { - - /* first set of CCW's is chained to the new read */ - /* chain, so the second set is chained to the active chain. */ - /* Therefore modify the second set to point to the new */ - /* read chain set up TIC CCWs */ - /* make sure we update the CCW so channel doesn't fetch it */ - /* when it's only half done */ - memcpy( &p_end->read2_nop2, &temp_ccw , - sizeof(struct ccw1)); - privptr->p_read_active_last->r_TIC_1.cda= - (__u32)__pa(&p_first->read); - privptr->p_read_active_last->r_TIC_2.cda= - (__u32)__pa(&p_first->read); - } - else { - /* make sure we update the CCW so channel doesn't */ - /* fetch it when it is only half done */ - memcpy( &p_end->read1_nop2, &temp_ccw , - sizeof(struct ccw1)); - privptr->p_read_active_last->r_TIC_1.cda= - (__u32)__pa(&p_first->read); - privptr->p_read_active_last->r_TIC_2.cda= - (__u32)__pa(&p_first->read); - } - /* chain in new set of blocks */ - privptr->p_read_active_last->next = p_first; - privptr->p_read_active_last=p_last; - } /* end of if ( privptr-> p_read_active_first ==NULL) */ - CLAW_DBF_TEXT(4, trace, "addexit"); - return 0; -} /* end of add_claw_reads */ - -/*-------------------------------------------------------------------* - * ccw_check_return_code * - * * - *-------------------------------------------------------------------*/ - -static void -ccw_check_return_code(struct ccw_device *cdev, int return_code) -{ - CLAW_DBF_TEXT(4, trace, "ccwret"); - if (return_code != 0) { - switch (return_code) { - case -EBUSY: /* BUSY is a transient state no action needed */ - break; - case -ENODEV: - dev_err(&cdev->dev, "The remote channel adapter is not" - " available\n"); - break; - case -EINVAL: - dev_err(&cdev->dev, - "The status of the remote channel adapter" - " is not valid\n"); - break; - default: - dev_err(&cdev->dev, "The common device layer" - " returned error code %d\n", - return_code); - } - } - CLAW_DBF_TEXT(4, trace, "ccwret"); -} /* end of ccw_check_return_code */ - -/*-------------------------------------------------------------------* -* ccw_check_unit_check * -*--------------------------------------------------------------------*/ - -static void -ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) -{ - struct net_device *ndev = p_ch->ndev; - struct device *dev = &p_ch->cdev->dev; - - CLAW_DBF_TEXT(4, trace, "unitchek"); - dev_warn(dev, "The communication peer of %s disconnected\n", - ndev->name); - - if (sense & 0x40) { - if (sense & 0x01) { - dev_warn(dev, "The remote channel adapter for" - " %s has been reset\n", - ndev->name); - } - } else if (sense & 0x20) { - if (sense & 0x04) { - dev_warn(dev, "A data streaming timeout occurred" - " for %s\n", - ndev->name); - } else if (sense & 0x10) { - dev_warn(dev, "The remote channel adapter for %s" - " is faulty\n", - ndev->name); - } else { - dev_warn(dev, "A data transfer parity error occurred" - " for %s\n", - ndev->name); - } - } else if (sense & 0x10) { - dev_warn(dev, "A 
read data parity error occurred" - " for %s\n", - ndev->name); - } - -} /* end of ccw_check_unit_check */ - -/*-------------------------------------------------------------------* -* find_link * -*--------------------------------------------------------------------*/ -static int -find_link(struct net_device *dev, char *host_name, char *ws_name ) -{ - struct claw_privbk *privptr; - struct claw_env *p_env; - int rc=0; - - CLAW_DBF_TEXT(2, setup, "findlink"); - privptr = dev->ml_priv; - p_env=privptr->p_env; - switch (p_env->packing) - { - case PACKING_ASK: - if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) || - (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 )) - rc = EINVAL; - break; - case DO_PACKED: - case PACK_SEND: - if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) || - (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 )) - rc = EINVAL; - break; - default: - if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) || - (memcmp(p_env->api_type , ws_name, 8)!=0)) - rc = EINVAL; - break; - } - - return rc; -} /* end of find_link */ - -/*-------------------------------------------------------------------* - * claw_hw_tx * - * * - * * - *-------------------------------------------------------------------*/ - -static int -claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) -{ - int rc=0; - struct claw_privbk *privptr; - struct ccwbk *p_this_ccw; - struct ccwbk *p_first_ccw; - struct ccwbk *p_last_ccw; - __u32 numBuffers; - signed long len_of_data; - unsigned long bytesInThisBuffer; - unsigned char *pDataAddress; - struct endccw *pEnd; - struct ccw1 tempCCW; - struct claw_env *p_env; - struct clawph *pk_head; - struct chbk *ch; - - CLAW_DBF_TEXT(4, trace, "hw_tx"); - privptr = (struct claw_privbk *)(dev->ml_priv); - p_env =privptr->p_env; - claw_free_wrt_buf(dev); /* Clean up free chain if posible */ - /* scan the write queue to free any completed write packets */ - p_first_ccw=NULL; - p_last_ccw=NULL; - if ((p_env->packing >= PACK_SEND) && - (skb->cb[1] != 'P')) { - skb_push(skb,sizeof(struct clawph)); - pk_head=(struct clawph *)skb->data; - pk_head->len=skb->len-sizeof(struct clawph); - if (pk_head->len%4) { - pk_head->len+= 4-(pk_head->len%4); - skb_pad(skb,4-(pk_head->len%4)); - skb_put(skb,4-(pk_head->len%4)); - } - if (p_env->packing == DO_PACKED) - pk_head->link_num = linkid; - else - pk_head->link_num = 0; - pk_head->flag = 0x00; - skb_pad(skb,4); - skb->cb[1] = 'P'; - } - if (linkid == 0) { - if (claw_check_busy(dev)) { - if (privptr->write_free_count!=0) { - claw_clear_busy(dev); - } - else { - claw_strt_out_IO(dev ); - claw_free_wrt_buf( dev ); - if (privptr->write_free_count==0) { - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - goto Done; - } - else { - claw_clear_busy(dev); - } - } - } - /* tx lock */ - if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - claw_strt_out_IO(dev ); - rc=-EBUSY; - goto Done2; - } - } - /* See how many write buffers are required to hold this data */ - numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size); - - /* If that number of buffers isn't available, give up for now */ - if (privptr->write_free_count < numBuffers || - privptr->p_write_free_chain == NULL ) { - - claw_setbit_busy(TB_NOBUFFER,dev); - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - CLAW_DBF_TEXT(2, trace, "clawbusy"); - goto 
Done2; - } - pDataAddress=skb->data; - len_of_data=skb->len; - - while (len_of_data > 0) { - p_this_ccw=privptr->p_write_free_chain; /* get a block */ - if (p_this_ccw == NULL) { /* lost the race */ - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - goto Done2; - } - privptr->p_write_free_chain=p_this_ccw->next; - p_this_ccw->next=NULL; - --privptr->write_free_count; /* -1 */ - if (len_of_data >= privptr->p_env->write_size) - bytesInThisBuffer = privptr->p_env->write_size; - else - bytesInThisBuffer = len_of_data; - memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer); - len_of_data-=bytesInThisBuffer; - pDataAddress+=(unsigned long)bytesInThisBuffer; - /* setup write CCW */ - p_this_ccw->write.cmd_code = (linkid * 8) +1; - if (len_of_data>0) { - p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG; - } - p_this_ccw->write.count=bytesInThisBuffer; - /* now add to end of this chain */ - if (p_first_ccw==NULL) { - p_first_ccw=p_this_ccw; - } - if (p_last_ccw!=NULL) { - p_last_ccw->next=p_this_ccw; - /* set up TIC ccws */ - p_last_ccw->w_TIC_1.cda= - (__u32)__pa(&p_this_ccw->write); - } - p_last_ccw=p_this_ccw; /* save new last block */ - } - - /* FirstCCW and LastCCW now contain a new set of write channel - * programs to append to the running channel program - */ - - if (p_first_ccw!=NULL) { - /* setup ending ccw sequence for this segment */ - pEnd=privptr->p_end_ccw; - if (pEnd->write1) { - pEnd->write1=0x00; /* second end ccw is now active */ - /* set up Tic CCWs */ - p_last_ccw->w_TIC_1.cda= - (__u32)__pa(&pEnd->write2_nop1); - pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - pEnd->write2_nop2.flags = - CCW_FLAG_SLI | CCW_FLAG_SKIP; - pEnd->write2_nop2.cda=0; - pEnd->write2_nop2.count=1; - } - else { /* end of if (pEnd->write1)*/ - pEnd->write1=0x01; /* first end ccw is now active */ - /* set up Tic CCWs */ - p_last_ccw->w_TIC_1.cda= - (__u32)__pa(&pEnd->write1_nop1); - pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; - pEnd->write1_nop2.flags = - CCW_FLAG_SLI | CCW_FLAG_SKIP; - pEnd->write1_nop2.cda=0; - pEnd->write1_nop2.count=1; - } /* end if if (pEnd->write1) */ - - if (privptr->p_write_active_first==NULL ) { - privptr->p_write_active_first=p_first_ccw; - privptr->p_write_active_last=p_last_ccw; - } - else { - /* set up Tic CCWs */ - - tempCCW.cda=(__u32)__pa(&p_first_ccw->write); - tempCCW.count=0; - tempCCW.flags=0; - tempCCW.cmd_code=CCW_CLAW_CMD_TIC; - - if (pEnd->write1) { - - /* - * first set of ending CCW's is chained to the new write - * chain, so the second set is chained to the active chain - * Therefore modify the second set to point the new write chain. 
- * make sure we update the CCW atomically - * so channel does not fetch it when it's only half done - */ - memcpy( &pEnd->write2_nop2, &tempCCW , - sizeof(struct ccw1)); - privptr->p_write_active_last->w_TIC_1.cda= - (__u32)__pa(&p_first_ccw->write); - } - else { - - /*make sure we update the CCW atomically - *so channel does not fetch it when it's only half done - */ - memcpy(&pEnd->write1_nop2, &tempCCW , - sizeof(struct ccw1)); - privptr->p_write_active_last->w_TIC_1.cda= - (__u32)__pa(&p_first_ccw->write); - - } /* end if if (pEnd->write1) */ - - privptr->p_write_active_last->next=p_first_ccw; - privptr->p_write_active_last=p_last_ccw; - } - - } /* endif (p_first_ccw!=NULL) */ - dev_kfree_skb_any(skb); - claw_strt_out_IO(dev ); - /* if write free count is zero , set NOBUFFER */ - if (privptr->write_free_count==0) { - claw_setbit_busy(TB_NOBUFFER,dev); - } -Done2: - claw_clearbit_busy(TB_TX,dev); -Done: - return(rc); -} /* end of claw_hw_tx */ - -/*-------------------------------------------------------------------* -* * -* init_ccw_bk * -* * -*--------------------------------------------------------------------*/ - -static int -init_ccw_bk(struct net_device *dev) -{ - - __u32 ccw_blocks_required; - __u32 ccw_blocks_perpage; - __u32 ccw_pages_required; - __u32 claw_reads_perpage=1; - __u32 claw_read_pages; - __u32 claw_writes_perpage=1; - __u32 claw_write_pages; - void *p_buff=NULL; - struct ccwbk*p_free_chain; - struct ccwbk*p_buf; - struct ccwbk*p_last_CCWB; - struct ccwbk*p_first_CCWB; - struct endccw *p_endccw=NULL; - addr_t real_address; - struct claw_privbk *privptr = dev->ml_priv; - struct clawh *pClawH=NULL; - addr_t real_TIC_address; - int i,j; - CLAW_DBF_TEXT(4, trace, "init_ccw"); - - /* initialize statistics field */ - privptr->active_link_ID=0; - /* initialize ccwbk pointers */ - privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/ - privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/ - privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/ - privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/ - privptr->p_read_active_last=NULL; /* pointer to the last read ccw */ - privptr->p_end_ccw=NULL; /* pointer to ending ccw */ - privptr->p_claw_signal_blk=NULL; /* pointer to signal block */ - privptr->buffs_alloc = 0; - memset(&privptr->end_ccw, 0x00, sizeof(struct endccw)); - memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl)); - /* initialize free write ccwbk counter */ - privptr->write_free_count=0; /* number of free bufs on write chain */ - p_last_CCWB = NULL; - p_first_CCWB= NULL; - /* - * We need 1 CCW block for each read buffer, 1 for each - * write buffer, plus 1 for ClawSignalBlock - */ - ccw_blocks_required = - privptr->p_env->read_buffers+privptr->p_env->write_buffers+1; - /* - * compute number of CCW blocks that will fit in a page - */ - ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE; - ccw_pages_required= - DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage); - - /* - * read and write sizes are set by 2 constants in claw.h - * 4k and 32k. Unpacked values other than 4k are not going to - * provide good performance. With packing buffers support 32k - * buffers are used. 
- */ - if (privptr->p_env->read_size < PAGE_SIZE) { - claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size; - claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers, - claw_reads_perpage); - } - else { /* > or equal */ - privptr->p_buff_pages_perread = - DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); - claw_read_pages = privptr->p_env->read_buffers * - privptr->p_buff_pages_perread; - } - if (privptr->p_env->write_size < PAGE_SIZE) { - claw_writes_perpage = - PAGE_SIZE / privptr->p_env->write_size; - claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers, - claw_writes_perpage); - - } - else { /* > or equal */ - privptr->p_buff_pages_perwrite = - DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); - claw_write_pages = privptr->p_env->write_buffers * - privptr->p_buff_pages_perwrite; - } - /* - * allocate ccw_pages_required - */ - if (privptr->p_buff_ccw==NULL) { - privptr->p_buff_ccw= - (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag(ccw_pages_required )); - if (privptr->p_buff_ccw==NULL) { - return -ENOMEM; - } - privptr->p_buff_ccw_num=ccw_pages_required; - } - memset(privptr->p_buff_ccw, 0x00, - privptr->p_buff_ccw_num * PAGE_SIZE); - - /* - * obtain ending ccw block address - * - */ - privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw; - real_address = (__u32)__pa(privptr->p_end_ccw); - /* Initialize ending CCW block */ - p_endccw=privptr->p_end_ccw; - p_endccw->real=real_address; - p_endccw->write1=0x00; - p_endccw->read1=0x00; - - /* write1_nop1 */ - p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->write1_nop1.count = 1; - p_endccw->write1_nop1.cda = 0; - - /* write1_nop2 */ - p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->write1_nop2.count = 1; - p_endccw->write1_nop2.cda = 0; - - /* write2_nop1 */ - p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->write2_nop1.count = 1; - p_endccw->write2_nop1.cda = 0; - - /* write2_nop2 */ - p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->write2_nop2.count = 1; - p_endccw->write2_nop2.cda = 0; - - /* read1_nop1 */ - p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->read1_nop1.count = 1; - p_endccw->read1_nop1.cda = 0; - - /* read1_nop2 */ - p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->read1_nop2.count = 1; - p_endccw->read1_nop2.cda = 0; - - /* read2_nop1 */ - p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->read2_nop1.count = 1; - p_endccw->read2_nop1.cda = 0; - - /* read2_nop2 */ - p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->read2_nop2.count = 1; - p_endccw->read2_nop2.cda = 0; - - /* - * Build a chain of CCWs - * - */ - p_buff=privptr->p_buff_ccw; - - p_free_chain=NULL; - for (i=0 ; i < ccw_pages_required; i++ ) { - real_address = (__u32)__pa(p_buff); - p_buf=p_buff; - for (j=0 ; j < ccw_blocks_perpage ; j++) { - p_buf->next = p_free_chain; - p_free_chain = p_buf; - p_buf->real=(__u32)__pa(p_buf); - ++p_buf; - } - p_buff+=PAGE_SIZE; - } - /* - * Initialize ClawSignalBlock - * - */ - if 
(privptr->p_claw_signal_blk==NULL) { - privptr->p_claw_signal_blk=p_free_chain; - p_free_chain=p_free_chain->next; - pClawH=(struct clawh *)privptr->p_claw_signal_blk; - pClawH->length=0xffff; - pClawH->opcode=0xff; - pClawH->flag=CLAW_BUSY; - } - - /* - * allocate write_pages_required and add to free chain - */ - if (privptr->p_buff_write==NULL) { - if (privptr->p_env->write_size < PAGE_SIZE) { - privptr->p_buff_write= - (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag(claw_write_pages )); - if (privptr->p_buff_write==NULL) { - privptr->p_buff_ccw=NULL; - return -ENOMEM; - } - /* - * Build CLAW write free chain - * - */ - - memset(privptr->p_buff_write, 0x00, - ccw_pages_required * PAGE_SIZE); - privptr->p_write_free_chain=NULL; - - p_buff=privptr->p_buff_write; - - for (i=0 ; i< privptr->p_env->write_buffers ; i++) { - p_buf = p_free_chain; /* get a CCW */ - p_free_chain = p_buf->next; - p_buf->next =privptr->p_write_free_chain; - privptr->p_write_free_chain = p_buf; - p_buf-> p_buffer = (struct clawbuf *)p_buff; - p_buf-> write.cda = (__u32)__pa(p_buff); - p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.count = 1; - p_buf-> w_read_FF.cda = - (__u32)__pa(&p_buf-> header.flag); - p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> w_TIC_1.flags = 0; - p_buf-> w_TIC_1.count = 0; - - if (((unsigned long)p_buff + - privptr->p_env->write_size) >= - ((unsigned long)(p_buff+2* - (privptr->p_env->write_size) - 1) & PAGE_MASK)) { - p_buff = p_buff+privptr->p_env->write_size; - } - } - } - else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */ - { - privptr->p_write_free_chain=NULL; - for (i = 0; i< privptr->p_env->write_buffers ; i++) { - p_buff=(void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite) ); - if (p_buff==NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag( - privptr->p_buff_ccw_num)); - privptr->p_buff_ccw=NULL; - p_buf=privptr->p_buff_write; - while (p_buf!=NULL) { - free_pages((unsigned long) - p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite)); - p_buf=p_buf->next; - } - return -ENOMEM; - } /* Error on get_pages */ - memset(p_buff, 0x00, privptr->p_env->write_size ); - p_buf = p_free_chain; - p_free_chain = p_buf->next; - p_buf->next = privptr->p_write_free_chain; - privptr->p_write_free_chain = p_buf; - privptr->p_buff_write = p_buf; - p_buf->p_buffer=(struct clawbuf *)p_buff; - p_buf-> write.cda = (__u32)__pa(p_buff); - p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.count = 1; - p_buf-> w_read_FF.cda = - (__u32)__pa(&p_buf-> header.flag); - p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> w_TIC_1.flags = 0; - p_buf-> w_TIC_1.count = 0; - } /* for all write_buffers */ - - } /* else buffers are PAGE_SIZE or bigger */ - - } - privptr->p_buff_write_num=claw_write_pages; - privptr->write_free_count=privptr->p_env->write_buffers; - - - /* - * allocate read_pages_required and chain to free chain - */ - if (privptr->p_buff_read==NULL) { - if (privptr->p_env->read_size < PAGE_SIZE) { - privptr->p_buff_read= - (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag(claw_read_pages) ); - if (privptr->p_buff_read==NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - 
(int)pages_to_order_of_mag( - privptr->p_buff_ccw_num)); - /* free the write pages size is < page size */ - free_pages((unsigned long)privptr->p_buff_write, - (int)pages_to_order_of_mag( - privptr->p_buff_write_num)); - privptr->p_buff_ccw=NULL; - privptr->p_buff_write=NULL; - return -ENOMEM; - } - memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE); - privptr->p_buff_read_num=claw_read_pages; - /* - * Build CLAW read free chain - * - */ - p_buff=privptr->p_buff_read; - for (i=0 ; i< privptr->p_env->read_buffers ; i++) { - p_buf = p_free_chain; - p_free_chain = p_buf->next; - - if (p_last_CCWB==NULL) { - p_buf->next=NULL; - real_TIC_address=0; - p_last_CCWB=p_buf; - } - else { - p_buf->next=p_first_CCWB; - real_TIC_address= - (__u32)__pa(&p_first_CCWB -> read ); - } - - p_first_CCWB=p_buf; - - p_buf->p_buffer=(struct clawbuf *)p_buff; - /* initialize read command */ - p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; - p_buf-> read.cda = (__u32)__pa(p_buff); - p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read.count = privptr->p_env->read_size; - - /* initialize read_h command */ - p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; - p_buf-> read_h.cda = - (__u32)__pa(&(p_buf->header)); - p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read_h.count = sizeof(struct clawh); - - /* initialize Signal command */ - p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; - p_buf-> signal.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> signal.count = 1; - - /* initialize r_TIC_1 command */ - p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; - p_buf-> r_TIC_1.flags = 0; - p_buf-> r_TIC_1.count = 0; - - /* initialize r_read_FF command */ - p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> r_read_FF.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> r_read_FF.flags = - CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; - p_buf-> r_read_FF.count = 1; - - /* initialize r_TIC_2 */ - memcpy(&p_buf->r_TIC_2, - &p_buf->r_TIC_1, sizeof(struct ccw1)); - - /* initialize Header */ - p_buf->header.length=0xffff; - p_buf->header.opcode=0xff; - p_buf->header.flag=CLAW_PENDING; - - if (((unsigned long)p_buff+privptr->p_env->read_size) >= - ((unsigned long)(p_buff+2*(privptr->p_env->read_size) - -1) - & PAGE_MASK)) { - p_buff= p_buff+privptr->p_env->read_size; - } - else { - p_buff= - (void *)((unsigned long) - (p_buff+2*(privptr->p_env->read_size)-1) - & PAGE_MASK) ; - } - } /* for read_buffers */ - } /* read_size < PAGE_SIZE */ - else { /* read Size >= PAGE_SIZE */ - for (i=0 ; i< privptr->p_env->read_buffers ; i++) { - p_buff = (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread)); - if (p_buff==NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag(privptr-> - p_buff_ccw_num)); - /* free the write pages */ - p_buf=privptr->p_buff_write; - while (p_buf!=NULL) { - free_pages( - (unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite)); - p_buf=p_buf->next; - } - /* free any read pages already alloc */ - p_buf=privptr->p_buff_read; - while (p_buf!=NULL) { - free_pages( - (unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread)); - p_buf=p_buf->next; - } - privptr->p_buff_ccw=NULL; - privptr->p_buff_write=NULL; - return -ENOMEM; - } - memset(p_buff, 0x00, privptr->p_env->read_size); - p_buf = p_free_chain; - privptr->p_buff_read = p_buf; - 
p_free_chain = p_buf->next; - - if (p_last_CCWB==NULL) { - p_buf->next=NULL; - real_TIC_address=0; - p_last_CCWB=p_buf; - } - else { - p_buf->next=p_first_CCWB; - real_TIC_address= - (addr_t)__pa( - &p_first_CCWB -> read ); - } - - p_first_CCWB=p_buf; - /* save buff address */ - p_buf->p_buffer=(struct clawbuf *)p_buff; - /* initialize read command */ - p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; - p_buf-> read.cda = (__u32)__pa(p_buff); - p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read.count = privptr->p_env->read_size; - - /* initialize read_h command */ - p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; - p_buf-> read_h.cda = - (__u32)__pa(&(p_buf->header)); - p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read_h.count = sizeof(struct clawh); - - /* initialize Signal command */ - p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; - p_buf-> signal.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> signal.count = 1; - - /* initialize r_TIC_1 command */ - p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; - p_buf-> r_TIC_1.flags = 0; - p_buf-> r_TIC_1.count = 0; - - /* initialize r_read_FF command */ - p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> r_read_FF.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> r_read_FF.flags = - CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; - p_buf-> r_read_FF.count = 1; - - /* initialize r_TIC_2 */ - memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1, - sizeof(struct ccw1)); - - /* initialize Header */ - p_buf->header.length=0xffff; - p_buf->header.opcode=0xff; - p_buf->header.flag=CLAW_PENDING; - - } /* For read_buffers */ - } /* read_size >= PAGE_SIZE */ - } /* pBuffread = NULL */ - add_claw_reads( dev ,p_first_CCWB , p_last_CCWB); - privptr->buffs_alloc = 1; - - return 0; -} /* end of init_ccw_bk */ - -/*-------------------------------------------------------------------* -* * -* probe_error * -* * -*--------------------------------------------------------------------*/ - -static void -probe_error( struct ccwgroup_device *cgdev) -{ - struct claw_privbk *privptr; - - CLAW_DBF_TEXT(4, trace, "proberr"); - privptr = dev_get_drvdata(&cgdev->dev); - if (privptr != NULL) { - dev_set_drvdata(&cgdev->dev, NULL); - kfree(privptr->p_env); - kfree(privptr->p_mtc_envelope); - kfree(privptr); - } -} /* probe_error */ - -/*-------------------------------------------------------------------* -* claw_process_control * -* * -* * -*--------------------------------------------------------------------*/ - -static int -claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) -{ - - struct clawbuf *p_buf; - struct clawctl ctlbk; - struct clawctl *p_ctlbk; - char temp_host_name[8]; - char temp_ws_name[8]; - struct claw_privbk *privptr; - struct claw_env *p_env; - struct sysval *p_sysval; - struct conncmd *p_connect=NULL; - int rc; - struct chbk *p_ch = NULL; - struct device *tdev; - CLAW_DBF_TEXT(2, setup, "clw_cntl"); - udelay(1000); /* Wait a ms for the control packets to - *catch up to each other */ - privptr = dev->ml_priv; - p_env=privptr->p_env; - tdev = &privptr->channel[READ_CHANNEL].cdev->dev; - memcpy( &temp_host_name, p_env->host_name, 8); - memcpy( &temp_ws_name, p_env->adapter_name , 8); - dev_info(tdev, "%s: CLAW device %.8s: " - "Received Control Packet\n", - dev->name, temp_ws_name); - if (privptr->release_pend==1) { - return 0; - } - p_buf=p_ccw->p_buffer; - p_ctlbk=&ctlbk; - if (p_env->packing == DO_PACKED) { /* packing in 
progress?*/ - memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl)); - } else { - memcpy(p_ctlbk, p_buf, sizeof(struct clawctl)); - } - switch (p_ctlbk->command) - { - case SYSTEM_VALIDATE_REQUEST: - if (p_ctlbk->version != CLAW_VERSION_ID) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_WRONG_VERSION); - dev_warn(tdev, "The communication peer of %s" - " uses an incorrect API version %d\n", - dev->name, p_ctlbk->version); - } - p_sysval = (struct sysval *)&(p_ctlbk->data); - dev_info(tdev, "%s: Recv Sys Validate Request: " - "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s," - "Host name=%.8s\n", - dev->name, p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_sysval->WS_name, - p_sysval->host_name); - if (memcmp(temp_host_name, p_sysval->host_name, 8)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_NAME_MISMATCH); - CLAW_DBF_TEXT(2, setup, "HSTBAD"); - CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name); - CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name); - dev_warn(tdev, - "Host name %s for %s does not match the" - " remote adapter name %s\n", - p_sysval->host_name, - dev->name, - temp_host_name); - } - if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_NAME_MISMATCH); - CLAW_DBF_TEXT(2, setup, "WSNBAD"); - CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name); - CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name); - dev_warn(tdev, "Adapter name %s for %s does not match" - " the remote host name %s\n", - p_sysval->WS_name, - dev->name, - temp_ws_name); - } - if ((p_sysval->write_frame_size < p_env->write_size) && - (p_env->packing == 0)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_HOST_RCV_TOO_SMALL); - dev_warn(tdev, - "The local write buffer is smaller than the" - " remote read buffer\n"); - CLAW_DBF_TEXT(2, setup, "wrtszbad"); - } - if ((p_sysval->read_frame_size < p_env->read_size) && - (p_env->packing == 0)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_HOST_RCV_TOO_SMALL); - dev_warn(tdev, - "The local read buffer is smaller than the" - " remote write buffer\n"); - CLAW_DBF_TEXT(2, setup, "rdsizbad"); - } - claw_snd_sys_validate_rsp(dev, p_ctlbk, 0); - dev_info(tdev, - "CLAW device %.8s: System validate" - " completed.\n", temp_ws_name); - dev_info(tdev, - "%s: sys Validate Rsize:%d Wsize:%d\n", - dev->name, p_sysval->read_frame_size, - p_sysval->write_frame_size); - privptr->system_validate_comp = 1; - if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) - p_env->packing = PACKING_ASK; - claw_strt_conn_req(dev); - break; - case SYSTEM_VALIDATE_RESPONSE: - p_sysval = (struct sysval *)&(p_ctlbk->data); - dev_info(tdev, - "Settings for %s validated (version=%d, " - "remote device=%d, rc=%d, adapter name=%.8s, " - "host name=%.8s)\n", - dev->name, - p_ctlbk->version, - p_ctlbk->correlator, - p_ctlbk->rc, - p_sysval->WS_name, - p_sysval->host_name); - switch (p_ctlbk->rc) { - case 0: - dev_info(tdev, "%s: CLAW device " - "%.8s: System validate completed.\n", - dev->name, temp_ws_name); - if (privptr->system_validate_comp == 0) - claw_strt_conn_req(dev); - privptr->system_validate_comp = 1; - break; - case CLAW_RC_NAME_MISMATCH: - dev_warn(tdev, "Validating %s failed because of" - " a host or adapter name mismatch\n", - dev->name); - break; - case CLAW_RC_WRONG_VERSION: - dev_warn(tdev, "Validating %s failed because of a" - " version conflict\n", - dev->name); - break; - case CLAW_RC_HOST_RCV_TOO_SMALL: - dev_warn(tdev, "Validating %s failed because of a" - " frame size conflict\n", - dev->name); - 
break; - default: - dev_warn(tdev, "The communication peer of %s rejected" - " the connection\n", - dev->name); - break; - } - break; - - case CONNECTION_REQUEST: - p_connect = (struct conncmd *)&(p_ctlbk->data); - dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d," - "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", - dev->name, - p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_connect->host_name, - p_connect->WS_name); - if (privptr->active_link_ID != 0) { - claw_snd_disc(dev, p_ctlbk); - dev_info(tdev, "%s rejected a connection request" - " because it is already active\n", - dev->name); - } - if (p_ctlbk->linkid != 1) { - claw_snd_disc(dev, p_ctlbk); - dev_info(tdev, "%s rejected a request to open multiple" - " connections\n", - dev->name); - } - rc = find_link(dev, p_connect->host_name, p_connect->WS_name); - if (rc != 0) { - claw_snd_disc(dev, p_ctlbk); - dev_info(tdev, "%s rejected a connection request" - " because of a type mismatch\n", - dev->name); - } - claw_send_control(dev, - CONNECTION_CONFIRM, p_ctlbk->linkid, - p_ctlbk->correlator, - 0, p_connect->host_name, - p_connect->WS_name); - if (p_env->packing == PACKING_ASK) { - p_env->packing = PACK_SEND; - claw_snd_conn_req(dev, 0); - } - dev_info(tdev, "%s: CLAW device %.8s: Connection " - "completed link_id=%d.\n", - dev->name, temp_ws_name, - p_ctlbk->linkid); - privptr->active_link_ID = p_ctlbk->linkid; - p_ch = &privptr->channel[WRITE_CHANNEL]; - wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ - break; - case CONNECTION_RESPONSE: - p_connect = (struct conncmd *)&(p_ctlbk->data); - dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d," - "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", - dev->name, - p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_ctlbk->rc, - p_connect->host_name, - p_connect->WS_name); - - if (p_ctlbk->rc != 0) { - dev_warn(tdev, "The communication peer of %s rejected" - " a connection request\n", - dev->name); - return 1; - } - rc = find_link(dev, - p_connect->host_name, p_connect->WS_name); - if (rc != 0) { - claw_snd_disc(dev, p_ctlbk); - dev_warn(tdev, "The communication peer of %s" - " rejected a connection " - "request because of a type mismatch\n", - dev->name); - } - /* should be until CONNECTION_CONFIRM */ - privptr->active_link_ID = -(p_ctlbk->linkid); - break; - case CONNECTION_CONFIRM: - p_connect = (struct conncmd *)&(p_ctlbk->data); - dev_info(tdev, - "%s: Recv Conn Confirm:Vers=%d,link_id=%d," - "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", - dev->name, - p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_connect->host_name, - p_connect->WS_name); - if (p_ctlbk->linkid == -(privptr->active_link_ID)) { - privptr->active_link_ID = p_ctlbk->linkid; - if (p_env->packing > PACKING_ASK) { - dev_info(tdev, - "%s: Confirmed Now packing\n", dev->name); - p_env->packing = DO_PACKED; - } - p_ch = &privptr->channel[WRITE_CHANNEL]; - wake_up(&p_ch->wait); - } else { - dev_warn(tdev, "Activating %s failed because of" - " an incorrect link ID=%d\n", - dev->name, p_ctlbk->linkid); - claw_snd_disc(dev, p_ctlbk); - } - break; - case DISCONNECT: - dev_info(tdev, "%s: Disconnect: " - "Vers=%d,link_id=%d,Corr=%d\n", - dev->name, p_ctlbk->version, - p_ctlbk->linkid, p_ctlbk->correlator); - if ((p_ctlbk->linkid == 2) && - (p_env->packing == PACK_SEND)) { - privptr->active_link_ID = 1; - p_env->packing = DO_PACKED; - } else - privptr->active_link_ID = 0; - break; - case CLAW_ERROR: - dev_warn(tdev, "The communication peer of %s failed\n", - dev->name); - break; - default: - 
dev_warn(tdev, "The communication peer of %s sent" - " an unknown command code\n", - dev->name); - break; - } - - return 0; -} /* end of claw_process_control */ - - -/*-------------------------------------------------------------------* -* claw_send_control * -* * -*--------------------------------------------------------------------*/ - -static int -claw_send_control(struct net_device *dev, __u8 type, __u8 link, - __u8 correlator, __u8 rc, char *local_name, char *remote_name) -{ - struct claw_privbk *privptr; - struct clawctl *p_ctl; - struct sysval *p_sysval; - struct conncmd *p_connect; - struct sk_buff *skb; - - CLAW_DBF_TEXT(2, setup, "sndcntl"); - privptr = dev->ml_priv; - p_ctl=(struct clawctl *)&privptr->ctl_bk; - - p_ctl->command=type; - p_ctl->version=CLAW_VERSION_ID; - p_ctl->linkid=link; - p_ctl->correlator=correlator; - p_ctl->rc=rc; - - p_sysval=(struct sysval *)&p_ctl->data; - p_connect=(struct conncmd *)&p_ctl->data; - - switch (p_ctl->command) { - case SYSTEM_VALIDATE_REQUEST: - case SYSTEM_VALIDATE_RESPONSE: - memcpy(&p_sysval->host_name, local_name, 8); - memcpy(&p_sysval->WS_name, remote_name, 8); - if (privptr->p_env->packing > 0) { - p_sysval->read_frame_size = DEF_PACK_BUFSIZE; - p_sysval->write_frame_size = DEF_PACK_BUFSIZE; - } else { - /* how big is the biggest group of packets */ - p_sysval->read_frame_size = - privptr->p_env->read_size; - p_sysval->write_frame_size = - privptr->p_env->write_size; - } - memset(&p_sysval->reserved, 0x00, 4); - break; - case CONNECTION_REQUEST: - case CONNECTION_RESPONSE: - case CONNECTION_CONFIRM: - case DISCONNECT: - memcpy(&p_sysval->host_name, local_name, 8); - memcpy(&p_sysval->WS_name, remote_name, 8); - if (privptr->p_env->packing > 0) { - /* How big is the biggest packet */ - p_connect->reserved1[0]=CLAW_FRAME_SIZE; - p_connect->reserved1[1]=CLAW_FRAME_SIZE; - } else { - memset(&p_connect->reserved1, 0x00, 4); - memset(&p_connect->reserved2, 0x00, 4); - } - break; - default: - break; - } - - /* write Control Record to the device */ - - - skb = dev_alloc_skb(sizeof(struct clawctl)); - if (!skb) { - return -ENOMEM; - } - memcpy(skb_put(skb, sizeof(struct clawctl)), - p_ctl, sizeof(struct clawctl)); - if (privptr->p_env->packing >= PACK_SEND) - claw_hw_tx(skb, dev, 1); - else - claw_hw_tx(skb, dev, 0); - return 0; -} /* end of claw_send_control */ - -/*-------------------------------------------------------------------* -* claw_snd_conn_req * -* * -*--------------------------------------------------------------------*/ -static int -claw_snd_conn_req(struct net_device *dev, __u8 link) -{ - int rc; - struct claw_privbk *privptr = dev->ml_priv; - struct clawctl *p_ctl; - - CLAW_DBF_TEXT(2, setup, "snd_conn"); - rc = 1; - p_ctl=(struct clawctl *)&privptr->ctl_bk; - p_ctl->linkid = link; - if ( privptr->system_validate_comp==0x00 ) { - return rc; - } - if (privptr->p_env->packing == PACKING_ASK ) - rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, - WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED); - if (privptr->p_env->packing == PACK_SEND) { - rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, - WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME); - } - if (privptr->p_env->packing == 0) - rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, - HOST_APPL_NAME, privptr->p_env->api_type); - return rc; - -} /* end of claw_snd_conn_req */ - - -/*-------------------------------------------------------------------* -* claw_snd_disc * -* * -*--------------------------------------------------------------------*/ - -static int -claw_snd_disc(struct 
net_device *dev, struct clawctl * p_ctl) -{ - int rc; - struct conncmd * p_connect; - - CLAW_DBF_TEXT(2, setup, "snd_dsc"); - p_connect=(struct conncmd *)&p_ctl->data; - - rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid, - p_ctl->correlator, 0, - p_connect->host_name, p_connect->WS_name); - return rc; -} /* end of claw_snd_disc */ - - -/*-------------------------------------------------------------------* -* claw_snd_sys_validate_rsp * -* * -*--------------------------------------------------------------------*/ - -static int -claw_snd_sys_validate_rsp(struct net_device *dev, - struct clawctl *p_ctl, __u32 return_code) -{ - struct claw_env * p_env; - struct claw_privbk *privptr; - int rc; - - CLAW_DBF_TEXT(2, setup, "chkresp"); - privptr = dev->ml_priv; - p_env=privptr->p_env; - rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, - p_ctl->linkid, - p_ctl->correlator, - return_code, - p_env->host_name, - p_env->adapter_name ); - return rc; -} /* end of claw_snd_sys_validate_rsp */ - -/*-------------------------------------------------------------------* -* claw_strt_conn_req * -* * -*--------------------------------------------------------------------*/ - -static int -claw_strt_conn_req(struct net_device *dev ) -{ - int rc; - - CLAW_DBF_TEXT(2, setup, "conn_req"); - rc=claw_snd_conn_req(dev, 1); - return rc; -} /* end of claw_strt_conn_req */ - - - -/*-------------------------------------------------------------------* - * claw_stats * - *-------------------------------------------------------------------*/ - -static struct -net_device_stats *claw_stats(struct net_device *dev) -{ - struct claw_privbk *privptr; - - CLAW_DBF_TEXT(4, trace, "stats"); - privptr = dev->ml_priv; - return &privptr->stats; -} /* end of claw_stats */ - - -/*-------------------------------------------------------------------* -* unpack_read * -* * -*--------------------------------------------------------------------*/ -static void -unpack_read(struct net_device *dev ) -{ - struct sk_buff *skb; - struct claw_privbk *privptr; - struct claw_env *p_env; - struct ccwbk *p_this_ccw; - struct ccwbk *p_first_ccw; - struct ccwbk *p_last_ccw; - struct clawph *p_packh; - void *p_packd; - struct clawctl *p_ctlrec=NULL; - struct device *p_dev; - - __u32 len_of_data; - __u32 pack_off; - __u8 link_num; - __u8 mtc_this_frm=0; - __u32 bytes_to_mov; - int i=0; - int p=0; - - CLAW_DBF_TEXT(4, trace, "unpkread"); - p_first_ccw=NULL; - p_last_ccw=NULL; - p_packh=NULL; - p_packd=NULL; - privptr = dev->ml_priv; - - p_dev = &privptr->channel[READ_CHANNEL].cdev->dev; - p_env = privptr->p_env; - p_this_ccw=privptr->p_read_active_first; - while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { - pack_off = 0; - p = 0; - p_this_ccw->header.flag=CLAW_PENDING; - privptr->p_read_active_first=p_this_ccw->next; - p_this_ccw->next=NULL; - p_packh = (struct clawph *)p_this_ccw->p_buffer; - if ((p_env->packing == PACK_SEND) && - (p_packh->len == 32) && - (p_packh->link_num == 0)) { /* is it a packed ctl rec? 
*/ - p_packh++; /* peek past pack header */ - p_ctlrec = (struct clawctl *)p_packh; - p_packh--; /* un peek */ - if ((p_ctlrec->command == CONNECTION_RESPONSE) || - (p_ctlrec->command == CONNECTION_CONFIRM)) - p_env->packing = DO_PACKED; - } - if (p_env->packing == DO_PACKED) - link_num=p_packh->link_num; - else - link_num=p_this_ccw->header.opcode / 8; - if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) { - mtc_this_frm=1; - if (p_this_ccw->header.length!= - privptr->p_env->read_size ) { - dev_warn(p_dev, - "The communication peer of %s" - " sent a faulty" - " frame of length %02x\n", - dev->name, p_this_ccw->header.length); - } - } - - if (privptr->mtc_skipping) { - /* - * We're in the mode of skipping past a - * multi-frame message - * that we can't process for some reason or other. - * The first frame without the More-To-Come flag is - * the last frame of the skipped message. - */ - /* in case of More-To-Come not set in this frame */ - if (mtc_this_frm==0) { - privptr->mtc_skipping=0; /* Ok, the end */ - privptr->mtc_logical_link=-1; - } - goto NextFrame; - } - - if (link_num==0) { - claw_process_control(dev, p_this_ccw); - CLAW_DBF_TEXT(4, trace, "UnpkCntl"); - goto NextFrame; - } -unpack_next: - if (p_env->packing == DO_PACKED) { - if (pack_off > p_env->read_size) - goto NextFrame; - p_packd = p_this_ccw->p_buffer+pack_off; - p_packh = (struct clawph *) p_packd; - if ((p_packh->len == 0) || /* done with this frame? */ - (p_packh->flag != 0)) - goto NextFrame; - bytes_to_mov = p_packh->len; - pack_off += bytes_to_mov+sizeof(struct clawph); - p++; - } else { - bytes_to_mov=p_this_ccw->header.length; - } - if (privptr->mtc_logical_link<0) { - - /* - * if More-To-Come is set in this frame then we don't know - * length of entire message, and hence have to allocate - * large buffer */ - - /* We are starting a new envelope */ - privptr->mtc_offset=0; - privptr->mtc_logical_link=link_num; - } - - if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) { - /* error */ - privptr->stats.rx_frame_errors++; - goto NextFrame; - } - if (p_env->packing == DO_PACKED) { - memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, - p_packd+sizeof(struct clawph), bytes_to_mov); - - } else { - memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, - p_this_ccw->p_buffer, bytes_to_mov); - } - if (mtc_this_frm==0) { - len_of_data=privptr->mtc_offset+bytes_to_mov; - skb=dev_alloc_skb(len_of_data); - if (skb) { - memcpy(skb_put(skb,len_of_data), - privptr->p_mtc_envelope, - len_of_data); - skb->dev=dev; - skb_reset_mac_header(skb); - skb->protocol=htons(ETH_P_IP); - skb->ip_summed=CHECKSUM_UNNECESSARY; - privptr->stats.rx_packets++; - privptr->stats.rx_bytes+=len_of_data; - netif_rx(skb); - } - else { - dev_info(p_dev, "Allocating a buffer for" - " incoming data failed\n"); - privptr->stats.rx_dropped++; - } - privptr->mtc_offset=0; - privptr->mtc_logical_link=-1; - } - else { - privptr->mtc_offset+=bytes_to_mov; - } - if (p_env->packing == DO_PACKED) - goto unpack_next; -NextFrame: - /* - * Remove ThisCCWblock from active read queue, and add it - * to queue of free blocks to be reused. 
- */ - i++; - p_this_ccw->header.length=0xffff; - p_this_ccw->header.opcode=0xff; - /* - * add this one to the free queue for later reuse - */ - if (p_first_ccw==NULL) { - p_first_ccw = p_this_ccw; - } - else { - p_last_ccw->next = p_this_ccw; - } - p_last_ccw = p_this_ccw; - /* - * chain to next block on active read queue - */ - p_this_ccw = privptr->p_read_active_first; - CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p); - } /* end of while */ - - /* check validity */ - - CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i); - add_claw_reads(dev, p_first_ccw, p_last_ccw); - claw_strt_read(dev, LOCK_YES); - return; -} /* end of unpack_read */ - -/*-------------------------------------------------------------------* -* claw_strt_read * -* * -*--------------------------------------------------------------------*/ -static void -claw_strt_read (struct net_device *dev, int lock ) -{ - int rc = 0; - __u32 parm; - unsigned long saveflags = 0; - struct claw_privbk *privptr = dev->ml_priv; - struct ccwbk*p_ccwbk; - struct chbk *p_ch; - struct clawh *p_clawh; - p_ch = &privptr->channel[READ_CHANNEL]; - - CLAW_DBF_TEXT(4, trace, "StRdNter"); - p_clawh=(struct clawh *)privptr->p_claw_signal_blk; - p_clawh->flag=CLAW_IDLE; /* 0x00 */ - - if ((privptr->p_write_active_first!=NULL && - privptr->p_write_active_first->header.flag!=CLAW_PENDING) || - (privptr->p_read_active_first!=NULL && - privptr->p_read_active_first->header.flag!=CLAW_PENDING )) { - p_clawh->flag=CLAW_BUSY; /* 0xff */ - } - if (lock==LOCK_YES) { - spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); - } - if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { - CLAW_DBF_TEXT(4, trace, "HotRead"); - p_ccwbk=privptr->p_read_active_first; - parm = (unsigned long) p_ch; - rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm, - 0xff, 0); - if (rc != 0) { - ccw_check_return_code(p_ch->cdev, rc); - } - } - else { - CLAW_DBF_TEXT(2, trace, "ReadAct"); - } - - if (lock==LOCK_YES) { - spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); - } - CLAW_DBF_TEXT(4, trace, "StRdExit"); - return; -} /* end of claw_strt_read */ - -/*-------------------------------------------------------------------* -* claw_strt_out_IO * -* * -*--------------------------------------------------------------------*/ - -static void -claw_strt_out_IO( struct net_device *dev ) -{ - int rc = 0; - unsigned long parm; - struct claw_privbk *privptr; - struct chbk *p_ch; - struct ccwbk *p_first_ccw; - - if (!dev) { - return; - } - privptr = (struct claw_privbk *)dev->ml_priv; - p_ch = &privptr->channel[WRITE_CHANNEL]; - - CLAW_DBF_TEXT(4, trace, "strt_io"); - p_first_ccw=privptr->p_write_active_first; - - if (p_ch->claw_state == CLAW_STOP) - return; - if (p_first_ccw == NULL) { - return; - } - if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { - parm = (unsigned long) p_ch; - CLAW_DBF_TEXT(2, trace, "StWrtIO"); - rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm, - 0xff, 0); - if (rc != 0) { - ccw_check_return_code(p_ch->cdev, rc); - } - } - dev->trans_start = jiffies; - return; -} /* end of claw_strt_out_IO */ - -/*-------------------------------------------------------------------* -* Free write buffers * -* * -*--------------------------------------------------------------------*/ - -static void -claw_free_wrt_buf( struct net_device *dev ) -{ - - struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv; - struct ccwbk*p_this_ccw; - struct ccwbk*p_next_ccw; - - CLAW_DBF_TEXT(4, trace, "freewrtb"); - /* scan the write queue to free any completed write 
packets */ - p_this_ccw=privptr->p_write_active_first; - while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) - { - p_next_ccw = p_this_ccw->next; - if (((p_next_ccw!=NULL) && - (p_next_ccw->header.flag!=CLAW_PENDING)) || - ((p_this_ccw == privptr->p_write_active_last) && - (p_this_ccw->header.flag!=CLAW_PENDING))) { - /* The next CCW is OK or this is */ - /* the last CCW...free it @A1A */ - privptr->p_write_active_first=p_this_ccw->next; - p_this_ccw->header.flag=CLAW_PENDING; - p_this_ccw->next=privptr->p_write_free_chain; - privptr->p_write_free_chain=p_this_ccw; - ++privptr->write_free_count; - privptr->stats.tx_bytes+= p_this_ccw->write.count; - p_this_ccw=privptr->p_write_active_first; - privptr->stats.tx_packets++; - } - else { - break; - } - } - if (privptr->write_free_count!=0) { - claw_clearbit_busy(TB_NOBUFFER,dev); - } - /* whole chain removed? */ - if (privptr->p_write_active_first==NULL) { - privptr->p_write_active_last=NULL; - } - CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count); - return; -} - -/*-------------------------------------------------------------------* -* claw free netdevice * -* * -*--------------------------------------------------------------------*/ -static void -claw_free_netdevice(struct net_device * dev, int free_dev) -{ - struct claw_privbk *privptr; - - CLAW_DBF_TEXT(2, setup, "free_dev"); - if (!dev) - return; - CLAW_DBF_TEXT_(2, setup, "%s", dev->name); - privptr = dev->ml_priv; - if (dev->flags & IFF_RUNNING) - claw_release(dev); - if (privptr) { - privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */ - } - dev->ml_priv = NULL; -#ifdef MODULE - if (free_dev) { - free_netdev(dev); - } -#endif - CLAW_DBF_TEXT(2, setup, "free_ok"); -} - -/** - * Claw init netdevice - * Initialize everything of the net device except the name and the - * channel structs. - */ -static const struct net_device_ops claw_netdev_ops = { - .ndo_open = claw_open, - .ndo_stop = claw_release, - .ndo_get_stats = claw_stats, - .ndo_start_xmit = claw_tx, - .ndo_change_mtu = claw_change_mtu, -}; - -static void -claw_init_netdevice(struct net_device * dev) -{ - CLAW_DBF_TEXT(2, setup, "init_dev"); - CLAW_DBF_TEXT_(2, setup, "%s", dev->name); - dev->mtu = CLAW_DEFAULT_MTU_SIZE; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->type = ARPHRD_SLIP; - dev->tx_queue_len = 1300; - dev->flags = IFF_POINTOPOINT | IFF_NOARP; - dev->netdev_ops = &claw_netdev_ops; - CLAW_DBF_TEXT(2, setup, "initok"); - return; -} - -/** - * Init a new channel in the privptr->channel[i]. - * - * @param cdev The ccw_device to be added. - * - * @return 0 on success, !0 on error. - */ -static int -add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) -{ - struct chbk *p_ch; - struct ccw_dev_id dev_id; - - CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev)); - privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ - p_ch = &privptr->channel[i]; - p_ch->cdev = cdev; - snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev)); - ccw_device_get_id(cdev, &dev_id); - p_ch->devno = dev_id.devno; - if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { - return -ENOMEM; - } - return 0; -} - - -/** - * - * Setup an interface. - * - * @param cgdev Device to be setup. - * - * @returns 0 on success, !0 on failure. 
- */ -static int -claw_new_device(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *privptr; - struct claw_env *p_env; - struct net_device *dev; - int ret; - struct ccw_dev_id dev_id; - - dev_info(&cgdev->dev, "add for %s\n", - dev_name(&cgdev->cdev[READ_CHANNEL]->dev)); - CLAW_DBF_TEXT(2, setup, "new_dev"); - privptr = dev_get_drvdata(&cgdev->dev); - dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); - dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); - if (!privptr) - return -ENODEV; - p_env = privptr->p_env; - ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id); - p_env->devno[READ_CHANNEL] = dev_id.devno; - ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id); - p_env->devno[WRITE_CHANNEL] = dev_id.devno; - ret = add_channel(cgdev->cdev[0],0,privptr); - if (ret == 0) - ret = add_channel(cgdev->cdev[1],1,privptr); - if (ret != 0) { - dev_warn(&cgdev->dev, "Creating a CLAW group device" - " failed with error code %d\n", ret); - goto out; - } - ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]); - if (ret != 0) { - dev_warn(&cgdev->dev, - "Setting the read subchannel online" - " failed with error code %d\n", ret); - goto out; - } - ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]); - if (ret != 0) { - dev_warn(&cgdev->dev, - "Setting the write subchannel online " - "failed with error code %d\n", ret); - goto out; - } - dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice); - if (!dev) { - dev_warn(&cgdev->dev, - "Activating the CLAW device failed\n"); - goto out; - } - dev->ml_priv = privptr; - dev_set_drvdata(&cgdev->dev, privptr); - dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); - dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); - /* sysfs magic */ - SET_NETDEV_DEV(dev, &cgdev->dev); - if (register_netdev(dev) != 0) { - claw_free_netdevice(dev, 1); - CLAW_DBF_TEXT(2, trace, "regfail"); - goto out; - } - dev->flags &=~IFF_RUNNING; - if (privptr->buffs_alloc == 0) { - ret=init_ccw_bk(dev); - if (ret !=0) { - unregister_netdev(dev); - claw_free_netdevice(dev,1); - CLAW_DBF_TEXT(2, trace, "ccwmem"); - goto out; - } - } - privptr->channel[READ_CHANNEL].ndev = dev; - privptr->channel[WRITE_CHANNEL].ndev = dev; - privptr->p_env->ndev = dev; - - dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " - "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", - dev->name, p_env->read_size, - p_env->write_size, p_env->read_buffers, - p_env->write_buffers, p_env->devno[READ_CHANNEL], - p_env->devno[WRITE_CHANNEL]); - dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " - ":%.8s api_type: %.8s\n", - dev->name, p_env->host_name, - p_env->adapter_name , p_env->api_type); - return 0; -out: - ccw_device_set_offline(cgdev->cdev[1]); - ccw_device_set_offline(cgdev->cdev[0]); - return -ENODEV; -} - -static void -claw_purge_skb_queue(struct sk_buff_head *q) -{ - struct sk_buff *skb; - - CLAW_DBF_TEXT(4, trace, "purgque"); - while ((skb = skb_dequeue(q))) { - atomic_dec(&skb->users); - dev_kfree_skb_any(skb); - } -} - -/** - * Shutdown an interface. - * - * @param cgdev Device to be shut down. - * - * @returns 0 on success, !0 on failure. 
- */ -static int -claw_shutdown_device(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *priv; - struct net_device *ndev; - int ret = 0; - - CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); - priv = dev_get_drvdata(&cgdev->dev); - if (!priv) - return -ENODEV; - ndev = priv->channel[READ_CHANNEL].ndev; - if (ndev) { - /* Close the device */ - dev_info(&cgdev->dev, "%s: shutting down\n", - ndev->name); - if (ndev->flags & IFF_RUNNING) - ret = claw_release(ndev); - ndev->flags &=~IFF_RUNNING; - unregister_netdev(ndev); - ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ - claw_free_netdevice(ndev, 1); - priv->channel[READ_CHANNEL].ndev = NULL; - priv->channel[WRITE_CHANNEL].ndev = NULL; - priv->p_env->ndev = NULL; - } - ccw_device_set_offline(cgdev->cdev[1]); - ccw_device_set_offline(cgdev->cdev[0]); - return ret; -} - -static void -claw_remove_device(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *priv; - - CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); - priv = dev_get_drvdata(&cgdev->dev); - dev_info(&cgdev->dev, " will be removed.\n"); - if (cgdev->state == CCWGROUP_ONLINE) - claw_shutdown_device(cgdev); - kfree(priv->p_mtc_envelope); - priv->p_mtc_envelope=NULL; - kfree(priv->p_env); - priv->p_env=NULL; - kfree(priv->channel[0].irb); - priv->channel[0].irb=NULL; - kfree(priv->channel[1].irb); - priv->channel[1].irb=NULL; - kfree(priv); - dev_set_drvdata(&cgdev->dev, NULL); - dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL); - dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL); - put_device(&cgdev->dev); - - return; -} - - -/* - * sysfs attributes - */ -static ssize_t -claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%s\n",p_env->host_name); -} - -static ssize_t -claw_hname_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - if (count > MAX_NAME_LEN+1) - return -EINVAL; - memset(p_env->host_name, 0x20, MAX_NAME_LEN); - strncpy(p_env->host_name,buf, count); - p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */ - p_env->host_name[MAX_NAME_LEN] = 0x00; - CLAW_DBF_TEXT(2, setup, "HstnSet"); - CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name); - - return count; -} - -static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write); - -static ssize_t -claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%s\n", p_env->adapter_name); -} - -static ssize_t -claw_adname_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - if (count > MAX_NAME_LEN+1) - return -EINVAL; - memset(p_env->adapter_name, 0x20, MAX_NAME_LEN); - strncpy(p_env->adapter_name,buf, count); - p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */ - p_env->adapter_name[MAX_NAME_LEN] = 0x00; - CLAW_DBF_TEXT(2, setup, "AdnSet"); - CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name); - - return count; -} - -static 
DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write); - -static ssize_t -claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%s\n", - p_env->api_type); -} - -static ssize_t -claw_apname_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - if (count > MAX_NAME_LEN+1) - return -EINVAL; - memset(p_env->api_type, 0x20, MAX_NAME_LEN); - strncpy(p_env->api_type,buf, count); - p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */ - p_env->api_type[MAX_NAME_LEN] = 0x00; - if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { - p_env->read_size=DEF_PACK_BUFSIZE; - p_env->write_size=DEF_PACK_BUFSIZE; - p_env->packing=PACKING_ASK; - CLAW_DBF_TEXT(2, setup, "PACKING"); - } - else { - p_env->packing=0; - p_env->read_size=CLAW_FRAME_SIZE; - p_env->write_size=CLAW_FRAME_SIZE; - CLAW_DBF_TEXT(2, setup, "ApiSet"); - } - CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type); - return count; -} - -static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write); - -static ssize_t -claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%d\n", p_env->write_buffers); -} - -static ssize_t -claw_wbuff_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - int nnn,max; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - sscanf(buf, "%i", &nnn); - if (p_env->packing) { - max = 64; - } - else { - max = 512; - } - if ((nnn > max ) || (nnn < 2)) - return -EINVAL; - p_env->write_buffers = nnn; - CLAW_DBF_TEXT(2, setup, "Wbufset"); - CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers); - return count; -} - -static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write); - -static ssize_t -claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%d\n", p_env->read_buffers); -} - -static ssize_t -claw_rbuff_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env *p_env; - int nnn,max; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - sscanf(buf, "%i", &nnn); - if (p_env->packing) { - max = 64; - } - else { - max = 512; - } - if ((nnn > max ) || (nnn < 2)) - return -EINVAL; - p_env->read_buffers = nnn; - CLAW_DBF_TEXT(2, setup, "Rbufset"); - CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers); - return count; -} -static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write); - -static struct attribute *claw_attr[] = { - &dev_attr_read_buffer.attr, - &dev_attr_write_buffer.attr, - &dev_attr_adapter_name.attr, - &dev_attr_api_type.attr, - &dev_attr_host_name.attr, - NULL, -}; -static struct attribute_group claw_attr_group = { - .attrs = claw_attr, -}; -static const struct 
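The write_buffer and read_buffer store handlers above parse the value with sscanf() and clamp it to 2..64 when packing is enabled, or 2..512 otherwise, but never check whether sscanf() matched anything, so non-numeric input reuses whatever happens to be in the local variable. A minimal sketch of the same bounds check written with kstrtou16(), reusing the claw_env fields declared in claw.h later in this patch; the function name is illustrative, not driver code:

#include <linux/kernel.h>

/* Illustrative only: same 2..max validation as claw_wbuff_write(), but
 * rejecting non-numeric input explicitly.
 */
static ssize_t example_wbuff_store(struct claw_env *p_env,
                                   const char *buf, size_t count)
{
        u16 max = p_env->packing ? 64 : 512;
        u16 nnn;
        int rc;

        rc = kstrtou16(buf, 0, &nnn);
        if (rc)
                return rc;
        if (nnn < 2 || nnn > max)
                return -EINVAL;
        p_env->write_buffers = nnn;
        return count;
}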
attribute_group *claw_attr_groups[] = { - &claw_attr_group, - NULL, -}; -static const struct device_type claw_devtype = { - .name = "claw", - .groups = claw_attr_groups, -}; - -/*----------------------------------------------------------------* - * claw_probe * - * this function is called for each CLAW device. * - *----------------------------------------------------------------*/ -static int claw_probe(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *privptr = NULL; - - CLAW_DBF_TEXT(2, setup, "probe"); - if (!get_device(&cgdev->dev)) - return -ENODEV; - privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); - dev_set_drvdata(&cgdev->dev, privptr); - if (privptr == NULL) { - probe_error(cgdev); - put_device(&cgdev->dev); - CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); - return -ENOMEM; - } - privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL); - privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL); - if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) { - probe_error(cgdev); - put_device(&cgdev->dev); - CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); - return -ENOMEM; - } - memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8); - memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8); - memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8); - privptr->p_env->packing = 0; - privptr->p_env->write_buffers = 5; - privptr->p_env->read_buffers = 5; - privptr->p_env->read_size = CLAW_FRAME_SIZE; - privptr->p_env->write_size = CLAW_FRAME_SIZE; - privptr->p_env->p_priv = privptr; - cgdev->cdev[0]->handler = claw_irq_handler; - cgdev->cdev[1]->handler = claw_irq_handler; - cgdev->dev.type = &claw_devtype; - CLAW_DBF_TEXT(2, setup, "prbext 0"); - - return 0; -} /* end of claw_probe */ - -/*--------------------------------------------------------------------* -* claw_init and cleanup * -*---------------------------------------------------------------------*/ - -static void __exit claw_cleanup(void) -{ - ccwgroup_driver_unregister(&claw_group_driver); - ccw_driver_unregister(&claw_ccw_driver); - root_device_unregister(claw_root_dev); - claw_unregister_debug_facility(); - pr_info("Driver unloaded\n"); -} - -/** - * Initialize module. - * This is called just after the module is loaded. - * - * @return 0 on success, !0 on error. - */ -static int __init claw_init(void) -{ - int ret = 0; - - pr_info("Loading %s\n", version); - ret = claw_register_debug_facility(); - if (ret) { - pr_err("Registering with the S/390 debug feature" - " failed with error code %d\n", ret); - goto out_err; - } - CLAW_DBF_TEXT(2, setup, "init_mod"); - claw_root_dev = root_device_register("claw"); - ret = PTR_ERR_OR_ZERO(claw_root_dev); - if (ret) - goto register_err; - ret = ccw_driver_register(&claw_ccw_driver); - if (ret) - goto ccw_err; - claw_group_driver.driver.groups = claw_drv_attr_groups; - ret = ccwgroup_driver_register(&claw_group_driver); - if (ret) - goto ccwgroup_err; - return 0; - -ccwgroup_err: - ccw_driver_unregister(&claw_ccw_driver); -ccw_err: - root_device_unregister(claw_root_dev); -register_err: - CLAW_DBF_TEXT(2, setup, "init_bad"); - claw_unregister_debug_facility(); -out_err: - pr_err("Initializing the claw device driver failed\n"); - return ret; -} - -module_init(claw_init); -module_exit(claw_cleanup); - -MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>"); -MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \ - "Copyright IBM Corp. 
2000, 2008\n"); -MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h deleted file mode 100644 index 3339b9b607b3..000000000000 --- a/drivers/s390/net/claw.h +++ /dev/null @@ -1,348 +0,0 @@ -/******************************************************* -* Define constants * -* * -********************************************************/ - -/*-----------------------------------------------------* -* CCW command codes for CLAW protocol * -*------------------------------------------------------*/ - -#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */ -#define CCW_CLAW_CMD_READ 0x02 /* read */ -#define CCW_CLAW_CMD_NOP 0x03 /* NOP */ -#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */ -#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */ -#define CCW_CLAW_CMD_TIC 0x08 /* TIC */ -#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */ -#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */ -#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */ - - -/*-----------------------------------------------------* -* CLAW Unique constants * -*------------------------------------------------------*/ - -#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */ -#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */ -#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */ -#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */ -#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */ - -/*-----------------------------------------------------* -* CLAW control command code * -*------------------------------------------------------*/ - -#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */ -#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */ -#define CONNECTION_REQUEST 0x21 /* Connection request */ -#define CONNECTION_RESPONSE 0x22 /* Connection response */ -#define CONNECTION_CONFIRM 0x23 /* Connection confirm */ -#define DISCONNECT 0x24 /* Disconnect */ -#define CLAW_ERROR 0x41 /* CLAW error message */ -#define CLAW_VERSION_ID 2 /* CLAW version ID */ - -/*-----------------------------------------------------* -* CLAW adater sense bytes * -*------------------------------------------------------*/ - -#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */ - -/*-----------------------------------------------------* -* CLAW control command return codes * -*------------------------------------------------------*/ - -#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */ -#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */ -#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */ - /* less than Linux on zSeries*/ - /* transmit size */ - -/*-----------------------------------------------------* -* CLAW Constants application name * -*------------------------------------------------------*/ - -#define HOST_APPL_NAME "TCPIP " -#define WS_APPL_NAME_IP_LINK "TCPIP " -#define WS_APPL_NAME_IP_NAME "IP " -#define WS_APPL_NAME_API_LINK "API " -#define WS_APPL_NAME_PACKED "PACKED " -#define WS_NAME_NOT_DEF "NOT_DEF " -#define PACKING_ASK 1 -#define PACK_SEND 2 -#define DO_PACKED 3 - -#define MAX_ENVELOPE_SIZE 65536 -#define CLAW_DEFAULT_MTU_SIZE 4096 -#define DEF_PACK_BUFSIZE 32768 -#define READ_CHANNEL 0 -#define WRITE_CHANNEL 1 - -#define TB_TX 0 /* sk buffer handling in process */ -#define TB_STOP 1 /* network device stop in process */ -#define TB_RETRY 2 /* retry in process */ -#define TB_NOBUFFER 3 /* no buffer on free queue */ -#define CLAW_MAX_LINK_ID 1 -#define 
CLAW_MAX_DEV 256 /* max claw devices */ -#define MAX_NAME_LEN 8 /* host name, adapter name length */ -#define CLAW_FRAME_SIZE 4096 -#define CLAW_ID_SIZE 20+3 - -/* state machine codes used in claw_irq_handler */ - -#define CLAW_STOP 0 -#define CLAW_START_HALT_IO 1 -#define CLAW_START_SENSEID 2 -#define CLAW_START_READ 3 -#define CLAW_START_WRITE 4 - -/*-----------------------------------------------------* -* Lock flag * -*------------------------------------------------------*/ -#define LOCK_YES 0 -#define LOCK_NO 1 - -/*-----------------------------------------------------* -* DBF Debug macros * -*------------------------------------------------------*/ -#define CLAW_DBF_TEXT(level, name, text) \ - do { \ - debug_text_event(claw_dbf_##name, level, text); \ - } while (0) - -#define CLAW_DBF_HEX(level,name,addr,len) \ -do { \ - debug_event(claw_dbf_##name,level,(void*)(addr),len); \ -} while (0) - -#define CLAW_DBF_TEXT_(level,name,text...) \ - do { \ - if (debug_level_enabled(claw_dbf_##name, level)) { \ - sprintf(debug_buffer, text); \ - debug_text_event(claw_dbf_##name, level, \ - debug_buffer); \ - } \ - } while (0) - -/** - * Enum for classifying detected devices. - */ -enum claw_channel_types { - /* Device is not a channel */ - claw_channel_type_none, - - /* Device is a CLAW channel device */ - claw_channel_type_claw -}; - - -/******************************************************* -* Define Control Blocks * -* * -********************************************************/ - -/*------------------------------------------------------*/ -/* CLAW header */ -/*------------------------------------------------------*/ - -struct clawh { - __u16 length; /* length of data read by preceding read CCW */ - __u8 opcode; /* equivalent read CCW */ - __u8 flag; /* flag of FF to indicate read was completed */ -}; - -/*------------------------------------------------------*/ -/* CLAW Packing header 4 bytes */ -/*------------------------------------------------------*/ -struct clawph { - __u16 len; /* Length of Packed Data Area */ - __u8 flag; /* Reserved not used */ - __u8 link_num; /* Link ID */ -}; - -/*------------------------------------------------------*/ -/* CLAW Ending struct ccwbk */ -/*------------------------------------------------------*/ -struct endccw { - __u32 real; /* real address of this block */ - __u8 write1; /* write 1 is active */ - __u8 read1; /* read 1 is active */ - __u16 reserved; /* reserved for future use */ - struct ccw1 write1_nop1; - struct ccw1 write1_nop2; - struct ccw1 write2_nop1; - struct ccw1 write2_nop2; - struct ccw1 read1_nop1; - struct ccw1 read1_nop2; - struct ccw1 read2_nop1; - struct ccw1 read2_nop2; -}; - -/*------------------------------------------------------*/ -/* CLAW struct ccwbk */ -/*------------------------------------------------------*/ -struct ccwbk { - void *next; /* pointer to next ccw block */ - __u32 real; /* real address of this ccw */ - void *p_buffer; /* virtual address of data */ - struct clawh header; /* claw header */ - struct ccw1 write; /* write CCW */ - struct ccw1 w_read_FF; /* read FF */ - struct ccw1 w_TIC_1; /* TIC */ - struct ccw1 read; /* read CCW */ - struct ccw1 read_h; /* read header */ - struct ccw1 signal; /* signal SMOD */ - struct ccw1 r_TIC_1; /* TIC1 */ - struct ccw1 r_read_FF; /* read FF */ - struct ccw1 r_TIC_2; /* TIC2 */ -}; - -/*------------------------------------------------------*/ -/* CLAW control block */ -/*------------------------------------------------------*/ -struct clawctl { - __u8 command; /* control 
command */ - __u8 version; /* CLAW protocol version */ - __u8 linkid; /* link ID */ - __u8 correlator; /* correlator */ - __u8 rc; /* return code */ - __u8 reserved1; /* reserved */ - __u8 reserved2; /* reserved */ - __u8 reserved3; /* reserved */ - __u8 data[24]; /* command specific fields */ -}; - -/*------------------------------------------------------*/ -/* Data for SYSTEMVALIDATE command */ -/*------------------------------------------------------*/ -struct sysval { - char WS_name[8]; /* Workstation System name */ - char host_name[8]; /* Host system name */ - __u16 read_frame_size; /* read frame size */ - __u16 write_frame_size; /* write frame size */ - __u8 reserved[4]; /* reserved */ -}; - -/*------------------------------------------------------*/ -/* Data for Connect command */ -/*------------------------------------------------------*/ -struct conncmd { - char WS_name[8]; /* Workstation application name */ - char host_name[8]; /* Host application name */ - __u16 reserved1[2]; /* read frame size */ - __u8 reserved2[4]; /* reserved */ -}; - -/*------------------------------------------------------*/ -/* Data for CLAW error */ -/*------------------------------------------------------*/ -struct clawwerror { - char reserved1[8]; /* reserved */ - char reserved2[8]; /* reserved */ - char reserved3[8]; /* reserved */ -}; - -/*------------------------------------------------------*/ -/* Data buffer for CLAW */ -/*------------------------------------------------------*/ -struct clawbuf { - char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */ -}; - -/*------------------------------------------------------*/ -/* Channel control block for read and write channel */ -/*------------------------------------------------------*/ - -struct chbk { - unsigned int devno; - int irq; - char id[CLAW_ID_SIZE]; - __u32 IO_active; - __u8 claw_state; - struct irb *irb; - struct ccw_device *cdev; /* pointer to the channel device */ - struct net_device *ndev; - wait_queue_head_t wait; - struct tasklet_struct tasklet; - struct timer_list timer; - unsigned long flag_a; /* atomic flags */ -#define CLAW_BH_ACTIVE 0 - unsigned long flag_b; /* atomic flags */ -#define CLAW_WRITE_ACTIVE 0 - __u8 last_dstat; - __u8 flag; - struct sk_buff_head collect_queue; - spinlock_t collect_lock; -#define CLAW_WRITE 0x02 /* - Set if this is a write channel */ -#define CLAW_READ 0x01 /* - Set if this is a read channel */ -#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */ -}; - -/*--------------------------------------------------------------* -* CLAW environment block * -*---------------------------------------------------------------*/ - -struct claw_env { - unsigned int devno[2]; /* device number */ - char host_name[9]; /* Host name */ - char adapter_name [9]; /* adapter name */ - char api_type[9]; /* TCPIP, API or PACKED */ - void *p_priv; /* privptr */ - __u16 read_buffers; /* read buffer number */ - __u16 write_buffers; /* write buffer number */ - __u16 read_size; /* read buffer size */ - __u16 write_size; /* write buffer size */ - __u16 dev_id; /* device ident */ - __u8 packing; /* are we packing? 
*/ - __u8 in_use; /* device active flag */ - struct net_device *ndev; /* backward ptr to the net dev*/ -}; - -/*--------------------------------------------------------------* -* CLAW main control block * -*---------------------------------------------------------------*/ - -struct claw_privbk { - void *p_buff_ccw; - __u32 p_buff_ccw_num; - void *p_buff_read; - __u32 p_buff_read_num; - __u32 p_buff_pages_perread; - void *p_buff_write; - __u32 p_buff_write_num; - __u32 p_buff_pages_perwrite; - long active_link_ID; /* Active logical link ID */ - struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */ - struct ccwbk *p_write_active_first; /* ptr to the first write ccw */ - struct ccwbk *p_write_active_last; /* ptr to the last write ccw */ - struct ccwbk *p_read_active_first; /* ptr to the first read ccw */ - struct ccwbk *p_read_active_last; /* ptr to the last read ccw */ - struct endccw *p_end_ccw; /*ptr to ending ccw */ - struct ccwbk *p_claw_signal_blk; /* ptr to signal block */ - __u32 write_free_count; /* number of free bufs for write */ - struct net_device_stats stats; /* device status */ - struct chbk channel[2]; /* Channel control blocks */ - __u8 mtc_skipping; - int mtc_offset; - int mtc_logical_link; - void *p_mtc_envelope; - struct sk_buff *pk_skb; /* packing buffer */ - int pk_cnt; - struct clawctl ctl_bk; - struct claw_env *p_env; - __u8 system_validate_comp; - __u8 release_pend; - __u8 checksum_received_ip_pkts; - __u8 buffs_alloc; - struct endccw end_ccw; - unsigned long tbusy; - -}; - - -/************************************************************/ -/* define global constants */ -/************************************************************/ - -#define CCWBK_SIZE sizeof(struct ccwbk) - - diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 642c77c76b84..3466d3cb7647 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4218,7 +4218,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) QETH_CARD_TEXT_(card, 4, "mode:%x", mode); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, - sizeof(struct qeth_ipacmd_setadpparms)); + sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8); if (!iob) return; cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); @@ -4290,7 +4290,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) QETH_CARD_TEXT(card, 4, "chgmac"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, - sizeof(struct qeth_ipacmd_setadpparms)); + sizeof(struct qeth_ipacmd_setadpparms_hdr) + + sizeof(struct qeth_change_addr)); if (!iob) return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2bbfc25e582c..18f05bff8826 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -390,7 +390,7 @@ static void handle_tx(struct vhost_net *net) ubufs = NULL; } /* TODO: Check specific error and bomb out unless ENOBUFS? 
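The two qeth hunks above shrink the allocated command length from the size of the whole setadpparms structure to the setadapterparms header plus the payload actually sent (8 bytes for the promiscuous-mode data, sizeof(struct qeth_change_addr) for the MAC change). A minimal illustration of that sizing pattern with made-up structures; none of the names below are qeth definitions:

#include <linux/types.h>

/* Illustrative only: size a command as header plus the payload that is
 * actually transmitted, not header plus the largest member of a union.
 */
struct example_setadp_hdr {
        __u32 supported_hw_command;
        __u16 cmdlength;
        __u16 command_code;
};

struct example_change_addr {
        __u32 cmd;
        __u32 addr_size;
        __u8  addr[6];
};

static inline size_t example_change_addr_len(void)
{
        return sizeof(struct example_setadp_hdr) +
               sizeof(struct example_change_addr);
}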
*/ - err = sock->ops->sendmsg(NULL, sock, &msg, len); + err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { if (zcopy_used) { vhost_net_ubuf_put(ubufs); @@ -566,7 +566,7 @@ static void handle_rx(struct vhost_net *net) /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); - err = sock->ops->recvmsg(NULL, sock, &msg, + err = sock->ops->recvmsg(sock, &msg, 1, MSG_DONTWAIT | MSG_TRUNC); pr_debug("Discarded rx packet: len %zd\n", sock_len); continue; @@ -592,7 +592,7 @@ static void handle_rx(struct vhost_net *net) */ iov_iter_advance(&msg.msg_iter, vhost_hlen); } - err = sock->ops->recvmsg(NULL, sock, &msg, + err = sock->ops->recvmsg(sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); /* Userspace might have consumed the packet meanwhile: * it's not supposed to do this usually, but might be hard |
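The vhost hunks above drop the leading NULL argument from the sendmsg and recvmsg socket operations, matching the removal of the struct kiocb parameter from those ops. A minimal sketch of the calling convention the new code assumes; the wrapper names are illustrative, not vhost code:

#include <linux/net.h>
#include <linux/socket.h>

/* Illustrative only: post-change proto_ops calls take the socket, the
 * msghdr and the length/flags; no kiocb is passed.
 */
static int example_tx(struct socket *sock, struct msghdr *msg, size_t len)
{
        return sock->ops->sendmsg(sock, msg, len);
}

static int example_rx(struct socket *sock, struct msghdr *msg, size_t len)
{
        return sock->ops->recvmsg(sock, msg, len, MSG_DONTWAIT | MSG_TRUNC);
}

Both operations still return the number of bytes transferred or a negative errno, so the surrounding error handling in handle_tx() and handle_rx() is unchanged.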