Diffstat (limited to 'drivers/s390')
-rw-r--r--   drivers/s390/net/qeth_core.h      |  43
-rw-r--r--   drivers/s390/net/qeth_core_main.c | 831
-rw-r--r--   drivers/s390/net/qeth_core_mpc.h  |  26
-rw-r--r--   drivers/s390/net/qeth_core_sys.c  |  15
-rw-r--r--   drivers/s390/net/qeth_l2_main.c   | 150
-rw-r--r--   drivers/s390/net/qeth_l3_main.c   | 363
6 files changed, 694 insertions(+), 734 deletions(-)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 34e0d476c5c6..6843bc7ee9f2 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -26,6 +26,7 @@ #include <net/ipv6.h> #include <net/if_inet6.h> #include <net/addrconf.h> +#include <net/tcp.h> #include <asm/debug.h> #include <asm/qdio.h> @@ -389,8 +390,9 @@ enum qeth_layer2_frame_flags { enum qeth_header_ids { QETH_HEADER_TYPE_LAYER3 = 0x01, QETH_HEADER_TYPE_LAYER2 = 0x02, - QETH_HEADER_TYPE_TSO = 0x03, + QETH_HEADER_TYPE_L3_TSO = 0x03, QETH_HEADER_TYPE_OSN = 0x04, + QETH_HEADER_TYPE_L2_TSO = 0x06, }; /* flags for qeth_hdr.ext_flags */ #define QETH_HDR_EXT_VLAN_FRAME 0x01 @@ -581,7 +583,8 @@ struct qeth_cmd_buffer { struct qeth_channel *channel; unsigned char *data; int rc; - void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); + void (*callback)(struct qeth_card *card, struct qeth_channel *channel, + struct qeth_cmd_buffer *iob); }; static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) @@ -638,7 +641,6 @@ struct qeth_reply { atomic_t received; int rc; void *param; - struct qeth_card *card; refcount_t refcnt; }; @@ -671,6 +673,12 @@ struct qeth_card_info { __u32 hwtrap; }; +enum qeth_discipline_id { + QETH_DISCIPLINE_UNDETERMINED = -1, + QETH_DISCIPLINE_LAYER3 = 0, + QETH_DISCIPLINE_LAYER2 = 1, +}; + struct qeth_card_options { struct qeth_routing_info route4; struct qeth_ipa_info ipa4; @@ -680,7 +688,7 @@ struct qeth_card_options { struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; - int layer2; + enum qeth_discipline_id layer; int performance_stats; int rx_sg_cb; enum qeth_ipa_isolation_modes isolation; @@ -690,6 +698,9 @@ struct qeth_card_options { char hsuid[9]; }; +#define IS_LAYER2(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER2) +#define IS_LAYER3(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER3) + /* * thread bits for qeth_card thread masks */ @@ -702,12 +713,6 @@ struct qeth_osn_info { int (*data_cb)(struct sk_buff *skb); }; -enum qeth_discipline_id { - QETH_DISCIPLINE_UNDETERMINED = -1, - QETH_DISCIPLINE_LAYER3 = 0, - QETH_DISCIPLINE_LAYER2 = 1, -}; - struct qeth_discipline { const struct device_type *devtype; int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); @@ -759,7 +764,6 @@ struct qeth_switch_info { struct qeth_card { struct list_head list; enum qeth_card_states state; - int lan_online; spinlock_t lock; struct ccwgroup_device *gdev; struct qeth_channel read; @@ -892,11 +896,6 @@ static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv) if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) || (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) *flags |= QETH_HDR_EXT_UDP; - if (ipv == 4) { - /* some HW requires combined L3+L4 csum offload: */ - *flags |= QETH_HDR_EXT_CSUM_HDR_REQ; - ip_hdr(skb)->check = 0; - } } static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, @@ -1007,9 +1006,7 @@ int qeth_query_switch_attributes(struct qeth_card *card, int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param); -int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, - int extra_elems, int data_offset); -int qeth_get_elements_for_frags(struct sk_buff *); +unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset); int qeth_do_send_packet_fast(struct qeth_qdio_out_q 
*queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len); @@ -1027,7 +1024,6 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd); int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); -int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); void qeth_trace_features(struct qeth_card *); @@ -1052,6 +1048,13 @@ int qeth_vm_request_mac(struct qeth_card *card); int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int hdr_len, unsigned int proto_len, unsigned int *elements); +void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len, + struct sk_buff *skb, unsigned int proto_len); +int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type, + void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb, int ipv, int cast_type, + unsigned int data_len)); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ffce6f39828a..3274f13aad57 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -16,6 +16,7 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/log2.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/mii.h> @@ -61,10 +62,10 @@ static struct kmem_cache *qeth_qdio_outbuf_cache; static struct device *qeth_core_root_dev; static struct lock_class_key qdio_out_skb_queue_key; -static struct mutex qeth_mod_mutex; -static void qeth_send_control_data_cb(struct qeth_channel *, - struct qeth_cmd_buffer *); +static void qeth_send_control_data_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); @@ -591,7 +592,6 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) if (reply) { refcount_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); - reply->card = card; } return reply; } @@ -626,80 +626,61 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, } static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_ipa_cmd *cmd) { - struct qeth_ipa_cmd *cmd = NULL; - QETH_CARD_TEXT(card, 5, "chkipad"); - if (IS_IPA(iob->data)) { - cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); - if (IS_IPA_REPLY(cmd)) { - if (cmd->hdr.command != IPA_CMD_SETCCID && - cmd->hdr.command != IPA_CMD_DELCCID && - cmd->hdr.command != IPA_CMD_MODCCID && - cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) - qeth_issue_ipa_msg(cmd, - cmd->hdr.return_code, card); - return cmd; + + if (IS_IPA_REPLY(cmd)) { + if (cmd->hdr.command != IPA_CMD_SETCCID && + cmd->hdr.command != IPA_CMD_DELCCID && + cmd->hdr.command != IPA_CMD_MODCCID && + cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) + qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); + return cmd; + } + + /* handle unsolicited event: */ + switch (cmd->hdr.command) { + case IPA_CMD_STOPLAN: + if (cmd->hdr.return_code == 
IPA_RC_VEPA_TO_VEB_TRANSITION) { + dev_err(&card->gdev->dev, + "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", + QETH_CARD_IFNAME(card)); + qeth_close_dev(card); } else { - switch (cmd->hdr.command) { - case IPA_CMD_STOPLAN: - if (cmd->hdr.return_code == - IPA_RC_VEPA_TO_VEB_TRANSITION) { - dev_err(&card->gdev->dev, - "Interface %s is down because the " - "adjacent port is no longer in " - "reflective relay mode\n", - QETH_CARD_IFNAME(card)); - qeth_close_dev(card); - } else { - dev_warn(&card->gdev->dev, - "The link for interface %s on CHPID" - " 0x%X failed\n", - QETH_CARD_IFNAME(card), - card->info.chpid); - qeth_issue_ipa_msg(cmd, - cmd->hdr.return_code, card); - } - card->lan_online = 0; - netif_carrier_off(card->dev); - return NULL; - case IPA_CMD_STARTLAN: - dev_info(&card->gdev->dev, - "The link for %s on CHPID 0x%X has" - " been restored\n", - QETH_CARD_IFNAME(card), - card->info.chpid); - netif_carrier_on(card->dev); - card->lan_online = 1; - if (card->info.hwtrap) - card->info.hwtrap = 2; - qeth_schedule_recovery(card); - return NULL; - case IPA_CMD_SETBRIDGEPORT_IQD: - case IPA_CMD_SETBRIDGEPORT_OSA: - case IPA_CMD_ADDRESS_CHANGE_NOTIF: - if (card->discipline->control_event_handler - (card, cmd)) - return cmd; - else - return NULL; - case IPA_CMD_MODCCID: - return cmd; - case IPA_CMD_REGISTER_LOCAL_ADDR: - QETH_CARD_TEXT(card, 3, "irla"); - break; - case IPA_CMD_UNREGISTER_LOCAL_ADDR: - QETH_CARD_TEXT(card, 3, "urla"); - break; - default: - QETH_DBF_MESSAGE(2, "Received data is IPA " - "but not a reply!\n"); - break; - } + dev_warn(&card->gdev->dev, + "The link for interface %s on CHPID 0x%X failed\n", + QETH_CARD_IFNAME(card), card->info.chpid); + qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); + netif_carrier_off(card->dev); } + return NULL; + case IPA_CMD_STARTLAN: + dev_info(&card->gdev->dev, + "The link for %s on CHPID 0x%X has been restored\n", + QETH_CARD_IFNAME(card), card->info.chpid); + if (card->info.hwtrap) + card->info.hwtrap = 2; + qeth_schedule_recovery(card); + return NULL; + case IPA_CMD_SETBRIDGEPORT_IQD: + case IPA_CMD_SETBRIDGEPORT_OSA: + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + if (card->discipline->control_event_handler(card, cmd)) + return cmd; + return NULL; + case IPA_CMD_MODCCID: + return cmd; + case IPA_CMD_REGISTER_LOCAL_ADDR: + QETH_CARD_TEXT(card, 3, "irla"); + return NULL; + case IPA_CMD_UNREGISTER_LOCAL_ADDR: + QETH_CARD_TEXT(card, 3, "urla"); + return NULL; + default: + QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); + return cmd; } - return cmd; } void qeth_clear_ipacmd_list(struct qeth_card *card) @@ -746,18 +727,10 @@ static int qeth_check_idx_response(struct qeth_card *card, return 0; } -static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) -{ - struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) - dev_get_drvdata(&cdev->dev))->dev); - return card; -} - static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) { __u8 index; - QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff"); index = channel->io_buf_no; do { if (channel->iob[index].state == BUF_STATE_FREE) { @@ -778,9 +751,7 @@ void qeth_release_buffer(struct qeth_channel *channel, { unsigned long flags; - QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff"); spin_lock_irqsave(&channel->iob_lock, flags); - memset(iob->data, 0, QETH_BUFSIZE); iob->state = BUF_STATE_FREE; iob->callback = qeth_send_control_data_cb; iob->rc = 0; @@ -789,6 +760,13 @@ void 
qeth_release_buffer(struct qeth_channel *channel, } EXPORT_SYMBOL_GPL(qeth_release_buffer); +static void qeth_release_buffer_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + qeth_release_buffer(channel, iob); +} + static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) { struct qeth_cmd_buffer *buffer = NULL; @@ -819,17 +797,16 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); -static void qeth_send_control_data_cb(struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) +static void qeth_send_control_data_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) { - struct qeth_card *card; + struct qeth_ipa_cmd *cmd = NULL; struct qeth_reply *reply, *r; - struct qeth_ipa_cmd *cmd; unsigned long flags; int keep_reply; int rc = 0; - card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 4, "sndctlcb"); rc = qeth_check_idx_response(card, iob->data); switch (rc) { @@ -843,16 +820,20 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, goto out; } - cmd = qeth_check_ipa_data(card, iob); - if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) - goto out; - /*in case of OSN : check if cmd is set */ - if (card->info.type == QETH_CARD_TYPE_OSN && - cmd && - cmd->hdr.command != IPA_CMD_STARTLAN && - card->osn_info.assist_cb != NULL) { - card->osn_info.assist_cb(card->dev, cmd); - goto out; + if (IS_IPA(iob->data)) { + cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); + cmd = qeth_check_ipa_data(card, cmd); + if (!cmd) + goto out; + if (IS_OSN(card) && card->osn_info.assist_cb && + cmd->hdr.command != IPA_CMD_STARTLAN) { + card->osn_info.assist_cb(card->dev, cmd); + goto out; + } + } else { + /* non-IPA commands should only flow during initialization */ + if (card->state != CARD_STATE_DOWN) + goto out; } spin_lock_irqsave(&card->lock, flags); @@ -900,44 +881,6 @@ out: qeth_release_buffer(channel, iob); } -static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) -{ - int cnt; - - QETH_DBF_TEXT(SETUP, 2, "setupch"); - - channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); - if (!channel->ccw) - return -ENOMEM; - channel->state = CH_STATE_DOWN; - atomic_set(&channel->irq_pending, 0); - init_waitqueue_head(&channel->wait_q); - - if (!alloc_buffers) - return 0; - - for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { - channel->iob[cnt].data = - kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); - if (channel->iob[cnt].data == NULL) - break; - channel->iob[cnt].state = BUF_STATE_FREE; - channel->iob[cnt].channel = channel; - channel->iob[cnt].callback = qeth_send_control_data_cb; - channel->iob[cnt].rc = 0; - } - if (cnt < QETH_CMD_BUFFER_NO) { - kfree(channel->ccw); - while (cnt-- > 0) - kfree(channel->iob[cnt].data); - return -ENOMEM; - } - channel->io_buf_no = 0; - spin_lock_init(&channel->iob_lock); - - return 0; -} - static int qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) { @@ -1013,16 +956,15 @@ void qeth_schedule_recovery(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_schedule_recovery); -static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) +static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, + struct irb *irb) { int dstat, cstat; char *sense; - struct qeth_card *card; sense = (char *) irb->ecw; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; - card = CARD_FROM_CDEV(cdev); if (cstat & 
(SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | @@ -1062,14 +1004,11 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) return 0; } -static long __qeth_check_irb_error(struct ccw_device *cdev, - unsigned long intparm, struct irb *irb) +static long qeth_check_irb_error(struct qeth_card *card, + struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) { - struct qeth_card *card; - - card = CARD_FROM_CDEV(cdev); - - if (!card || !IS_ERR(irb)) + if (!IS_ERR(irb)) return 0; switch (PTR_ERR(irb)) { @@ -1106,10 +1045,13 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, int rc; int cstat, dstat; struct qeth_cmd_buffer *iob = NULL; + struct ccwgroup_device *gdev; struct qeth_channel *channel; struct qeth_card *card; - card = CARD_FROM_CDEV(cdev); + /* while we hold the ccwdev lock, this stays valid: */ + gdev = dev_get_drvdata(&cdev->dev); + card = dev_get_drvdata(&gdev->dev); if (!card) return; @@ -1129,7 +1071,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (qeth_intparm_is_iob(intparm)) iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - if (__qeth_check_irb_error(cdev, intparm, irb)) { + if (qeth_check_irb_error(card, cdev, intparm, irb)) { /* IO was terminated, free its resources. */ if (iob) qeth_release_buffer(iob->channel, iob); @@ -1184,7 +1126,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state = CH_STATE_DOWN; goto out; } - rc = qeth_get_problem(cdev, irb); + rc = qeth_get_problem(card, cdev, irb); if (rc) { card->read_or_write_problem = 1; qeth_clear_ipacmd_list(card); @@ -1204,7 +1146,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, __qeth_issue_next_read(card); if (iob && iob->callback) - iob->callback(iob->channel, iob); + iob->callback(card, iob->channel, iob); out: wake_up(&card->wait_q); @@ -1217,54 +1159,23 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, { struct sk_buff *skb; - if (skb_queue_empty(&buf->skb_list)) - goto out; - skb = skb_peek(&buf->skb_list); - while (skb) { + skb_queue_walk(&buf->skb_list, skb) { QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); - if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { - if (skb->sk) { - struct iucv_sock *iucv = iucv_sk(skb->sk); - iucv->sk_txnotify(skb, notification); - } - } - if (skb_queue_is_last(&buf->skb_list, skb)) - skb = NULL; - else - skb = skb_queue_next(&buf->skb_list, skb); + if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk) + iucv_sk(skb->sk)->sk_txnotify(skb, notification); } -out: - return; } static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { - struct sk_buff *skb; - struct iucv_sock *iucv; - int notify_general_error = 0; - - if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) - notify_general_error = 1; - /* release may never happen from within CQ tasklet scope */ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); - skb = skb_dequeue(&buf->skb_list); - while (skb) { - QETH_CARD_TEXT(buf->q->card, 5, "skbr"); - QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); - if (notify_general_error && - be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { - if (skb->sk) { - iucv = iucv_sk(skb->sk); - iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); - } - } - refcount_dec(&skb->users); - dev_kfree_skb_any(skb); - skb = skb_dequeue(&buf->skb_list); - } + if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) + qeth_notify_skbs(buf->q, buf, 
TX_NOTIFY_GENERALERROR); + + __skb_queue_purge(&buf->skb_list); } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, @@ -1336,14 +1247,61 @@ static void qeth_free_buffer_pool(struct qeth_card *card) static void qeth_clean_channel(struct qeth_channel *channel) { + struct ccw_device *cdev = channel->ccwdev; int cnt; QETH_DBF_TEXT(SETUP, 2, "freech"); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) kfree(channel->iob[cnt].data); kfree(channel->ccw); } +static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) +{ + struct ccw_device *cdev = channel->ccwdev; + int cnt; + + QETH_DBF_TEXT(SETUP, 2, "setupch"); + + channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (!channel->ccw) + return -ENOMEM; + channel->state = CH_STATE_DOWN; + atomic_set(&channel->irq_pending, 0); + init_waitqueue_head(&channel->wait_q); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = qeth_irq; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + if (!alloc_buffers) + return 0; + + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { + channel->iob[cnt].data = kmalloc(QETH_BUFSIZE, + GFP_KERNEL | GFP_DMA); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = BUF_STATE_FREE; + channel->iob[cnt].channel = channel; + channel->iob[cnt].callback = qeth_send_control_data_cb; + channel->iob[cnt].rc = 0; + } + if (cnt < QETH_CMD_BUFFER_NO) { + qeth_clean_channel(channel); + return -ENOMEM; + } + channel->io_buf_no = 0; + spin_lock_init(&channel->iob_lock); + + return 0; +} + static void qeth_set_single_write_queues(struct qeth_card *card) { if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && @@ -1421,6 +1379,7 @@ static void qeth_set_initial_options(struct qeth_card *card) card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; card->options.cq = QETH_CQ_DISABLED; + card->options.layer = QETH_DISCIPLINE_UNDETERMINED; } static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) @@ -1494,7 +1453,7 @@ static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) CARD_BUS_ID(card), card->info.mcl_level); } -static struct qeth_card *qeth_alloc_card(void) +static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) { struct qeth_card *card; @@ -1503,13 +1462,18 @@ static struct qeth_card *qeth_alloc_card(void) if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + card->gdev = gdev; + dev_set_drvdata(&gdev->dev, card); + CARD_RDEV(card) = gdev->cdev[0]; + CARD_WDEV(card) = gdev->cdev[1]; + CARD_DDEV(card) = gdev->cdev[2]; if (qeth_setup_channel(&card->read, true)) goto out_ip; if (qeth_setup_channel(&card->write, true)) goto out_channel; if (qeth_setup_channel(&card->data, false)) goto out_data; - card->options.layer2 = -1; card->qeth_service_level.seq_print = qeth_core_sl_print; register_service_level(&card->qeth_service_level); return card; @@ -1519,22 +1483,21 @@ out_data: out_channel: qeth_clean_channel(&card->read); out_ip: + dev_set_drvdata(&gdev->dev, NULL); kfree(card); out: return NULL; } -static int qeth_clear_channel(struct qeth_channel *channel) +static int qeth_clear_channel(struct qeth_card *card, + struct qeth_channel *channel) { - unsigned long flags; - struct qeth_card *card; int rc; - card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 3, "clearch"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + 
spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) return rc; @@ -1548,17 +1511,15 @@ static int qeth_clear_channel(struct qeth_channel *channel) return 0; } -static int qeth_halt_channel(struct qeth_channel *channel) +static int qeth_halt_channel(struct qeth_card *card, + struct qeth_channel *channel) { - unsigned long flags; - struct qeth_card *card; int rc; - card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 3, "haltch"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) return rc; @@ -1576,9 +1537,9 @@ static int qeth_halt_channels(struct qeth_card *card) int rc1 = 0, rc2 = 0, rc3 = 0; QETH_CARD_TEXT(card, 3, "haltchs"); - rc1 = qeth_halt_channel(&card->read); - rc2 = qeth_halt_channel(&card->write); - rc3 = qeth_halt_channel(&card->data); + rc1 = qeth_halt_channel(card, &card->read); + rc2 = qeth_halt_channel(card, &card->write); + rc3 = qeth_halt_channel(card, &card->data); if (rc1) return rc1; if (rc2) @@ -1591,9 +1552,9 @@ static int qeth_clear_channels(struct qeth_card *card) int rc1 = 0, rc2 = 0, rc3 = 0; QETH_CARD_TEXT(card, 3, "clearchs"); - rc1 = qeth_clear_channel(&card->read); - rc2 = qeth_clear_channel(&card->write); - rc3 = qeth_clear_channel(&card->data); + rc1 = qeth_clear_channel(card, &card->read); + rc2 = qeth_clear_channel(card, &card->write); + rc3 = qeth_clear_channel(card, &card->data); if (rc1) return rc1; if (rc2) @@ -1652,7 +1613,6 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer, char *rcd_buf; int ret; struct qeth_channel *channel = &card->data; - unsigned long flags; /* * scan for RCD command in extended SenseID data @@ -1666,11 +1626,11 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer, qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf); channel->state = CH_STATE_RCD; - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw, QETH_RCD_PARM, LPM_ANYPATH, 0, QETH_RCD_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (!ret) wait_event(card->wait_q, (channel->state == CH_STATE_RCD_DONE || @@ -1823,30 +1783,29 @@ static void qeth_init_func_level(struct qeth_card *card) } } -static int qeth_idx_activate_get_answer(struct qeth_channel *channel, - void (*idx_reply_cb)(struct qeth_channel *, - struct qeth_cmd_buffer *)) +static int qeth_idx_activate_get_answer(struct qeth_card *card, + struct qeth_channel *channel, + void (*reply_cb)(struct qeth_card *, + struct qeth_channel *, + struct qeth_cmd_buffer *)) { struct qeth_cmd_buffer *iob; - unsigned long flags; int rc; - struct qeth_card *card; QETH_DBF_TEXT(SETUP, 2, "idxanswr"); - card = CARD_FROM_CDEV(channel->ccwdev); iob = qeth_get_buffer(channel); if (!iob) return -ENOMEM; - iob->callback = idx_reply_cb; + iob->callback = reply_cb; qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - 
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); @@ -1867,26 +1826,24 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, return rc; } -static int qeth_idx_activate_channel(struct qeth_channel *channel, - void (*idx_reply_cb)(struct qeth_channel *, - struct qeth_cmd_buffer *)) +static int qeth_idx_activate_channel(struct qeth_card *card, + struct qeth_channel *channel, + void (*reply_cb)(struct qeth_card *, + struct qeth_channel *, + struct qeth_cmd_buffer *)) { - struct qeth_card *card; struct qeth_cmd_buffer *iob; - unsigned long flags; __u16 temp; __u8 tmp; int rc; struct ccw_dev_id temp_devid; - card = CARD_FROM_CDEV(channel->ccwdev); - QETH_DBF_TEXT(SETUP, 2, "idxactch"); iob = qeth_get_buffer(channel); if (!iob) return -ENOMEM; - iob->callback = idx_reply_cb; + iob->callback = reply_cb; qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, iob->data); if (channel == &card->write) { @@ -1913,10 +1870,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "Error1 in activating channel. 
rc=%d\n", @@ -1938,7 +1895,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); return -ETIME; } - return qeth_idx_activate_get_answer(channel, idx_reply_cb); + return qeth_idx_activate_get_answer(card, channel, reply_cb); } static int qeth_peer_func_level(int level) @@ -1950,10 +1907,10 @@ static int qeth_peer_func_level(int level) return level; } -static void qeth_idx_write_cb(struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) +static void qeth_idx_write_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) { - struct qeth_card *card; __u16 temp; QETH_DBF_TEXT(SETUP , 2, "idxwrcb"); @@ -1962,7 +1919,6 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, channel->state = CH_STATE_ACTIVATING; goto out; } - card = CARD_FROM_CDEV(channel->ccwdev); if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) @@ -1988,10 +1944,10 @@ out: qeth_release_buffer(channel, iob); } -static void qeth_idx_read_cb(struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) +static void qeth_idx_read_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) { - struct qeth_card *card; __u16 temp; QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); @@ -2000,7 +1956,6 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, goto out; } - card = CARD_FROM_CDEV(channel->ccwdev); if (qeth_check_idx_response(card, iob->data)) goto out; @@ -2049,7 +2004,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); - iob->callback = qeth_release_buffer; + iob->callback = qeth_release_buffer_cb; memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); @@ -2097,7 +2052,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, { struct qeth_channel *channel = iob->channel; int rc; - unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; struct qeth_ipa_cmd *cmd = NULL; @@ -2130,26 +2084,26 @@ int qeth_send_control_data(struct qeth_card *card, int len, } qeth_prepare_control_data(card, len, iob); - spin_lock_irqsave(&card->lock, flags); + spin_lock_irq(&card->lock); list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); + spin_unlock_irq(&card->lock); timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, event_timeout); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " "ccw_device_start rc = %i\n", dev_name(&channel->ccwdev->dev), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); - spin_lock_irqsave(&card->lock, flags); + spin_lock_irq(&card->lock); list_del_init(&reply->list); qeth_put_reply(reply); - spin_unlock_irqrestore(&card->lock, flags); + spin_unlock_irq(&card->lock); qeth_release_buffer(channel, iob); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); @@ -2177,9 +2131,9 @@ int qeth_send_control_data(struct qeth_card *card, int len, time_err: reply->rc = -ETIME; - spin_lock_irqsave(&reply->card->lock, flags); + spin_lock_irq(&card->lock); 
list_del_init(&reply->list); - spin_unlock_irqrestore(&reply->card->lock, flags); + spin_unlock_irq(&card->lock); atomic_inc(&reply->received); rc = reply->rc; qeth_put_reply(reply); @@ -2198,7 +2152,6 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.cm_filter_r, QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2224,7 +2177,6 @@ static int qeth_cm_enable(struct qeth_card *card) static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); @@ -2233,7 +2185,6 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.cm_connection_r, QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), QETH_MPC_TOKEN_LENGTH); - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2255,7 +2206,6 @@ static int qeth_cm_setup(struct qeth_card *card) rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, qeth_cm_setup_cb, NULL); return rc; - } static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) @@ -2284,7 +2234,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) if (dev->mtu) new_mtu = dev->mtu; /* default MTUs for first setup: */ - else if (card->options.layer2) + else if (IS_LAYER2(card)) new_mtu = ETH_DATA_LEN; else new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ @@ -2315,7 +2265,6 @@ static int qeth_get_mtu_outof_framesize(int framesize) static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - __u16 mtu, framesize; __u16 len; __u8 link_type; @@ -2343,7 +2292,6 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, } else card->info.link_type = 0; QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2351,7 +2299,7 @@ static u8 qeth_mpc_select_prot_type(struct qeth_card *card) { if (IS_OSN(card)) return QETH_PROT_OSN2; - return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; + return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; } static int qeth_ulp_enable(struct qeth_card *card) @@ -2880,23 +2828,18 @@ static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) } static void qeth_fill_ipacmd_header(struct qeth_card *card, - struct qeth_ipa_cmd *cmd, __u8 command, - enum qeth_prot_versions prot) + struct qeth_ipa_cmd *cmd, + enum qeth_ipa_cmds command, + enum qeth_prot_versions prot) { - memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port; - if (card->options.layer2) - cmd->hdr.prim_version_no = 2; - else - cmd->hdr.prim_version_no = 1; + cmd->hdr.prim_version_no = IS_LAYER2(card) ? 
2 : 1; cmd->hdr.param_count = 1; cmd->hdr.prot_version = prot; - cmd->hdr.ipa_supported = 0; - cmd->hdr.ipa_enabled = 0; } struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, @@ -3043,7 +2986,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; - return -0; + return 0; default: if (cmd->hdr.return_code) { QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " @@ -3787,7 +3730,7 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue); * Returns the number of pages, and thus QDIO buffer elements, needed to cover * fragmented part of the SKB. Returns zero for linear SKB. */ -int qeth_get_elements_for_frags(struct sk_buff *skb) +static int qeth_get_elements_for_frags(struct sk_buff *skb) { int cnt, elements = 0; @@ -3800,9 +3743,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb) } return elements; } -EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); -static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset) +/** + * qeth_count_elements() - Counts the number of QDIO buffer elements needed + * to transmit an skb. + * @skb: the skb to operate on. + * @data_offset: skip this part of the skb's linear data + * + * Returns the number of pages, and thus QDIO buffer elements, needed to map the + * skb's data (both its linear part and paged fragments). + */ +unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) { unsigned int elements = qeth_get_elements_for_frags(skb); addr_t end = (addr_t)skb->data + skb_headlen(skb); @@ -3812,54 +3763,10 @@ static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset) elements += qeth_get_elements_for_range(start, end); return elements; } +EXPORT_SYMBOL_GPL(qeth_count_elements); -/** - * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags. - * @card: qeth card structure, to check max. elems. - * @skb: SKB address - * @extra_elems: extra elems needed, to check against max. - * @data_offset: range starts at skb->data + data_offset - * - * Returns the number of pages, and thus QDIO buffer elements, needed to cover - * skb data, including linear part and fragments. Checks if the result plus - * extra_elems fits under the limit for the card. Returns 0 if it does not. - * Note: extra_elems is not included in the returned result. - */ -int qeth_get_elements_no(struct qeth_card *card, - struct sk_buff *skb, int extra_elems, int data_offset) -{ - int elements = qeth_count_elements(skb, data_offset); - - if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { - QETH_DBF_MESSAGE(2, "Invalid size of IP packet " - "(Number=%d / Length=%d). 
Discarded.\n", - elements + extra_elems, skb->len); - return 0; - } - return elements; -} -EXPORT_SYMBOL_GPL(qeth_get_elements_no); - -int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) -{ - int hroom, inpage, rest; - - if (((unsigned long)skb->data & PAGE_MASK) != - (((unsigned long)skb->data + len - 1) & PAGE_MASK)) { - hroom = skb_headroom(skb); - inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE); - rest = len - inpage; - if (rest > hroom) - return 1; - memmove(skb->data - rest, skb->data, skb_headlen(skb)); - skb->data -= rest; - skb->tail -= rest; - *hdr = (struct qeth_hdr *)skb->data; - QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); - } - return 0; -} -EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); +#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ + MAX_TCP_HEADER) /** * qeth_add_hw_header() - add a HW header to an skb. @@ -3894,7 +3801,11 @@ check_layout: if (qeth_get_elements_for_range(start, end + contiguous) == 1) { /* Push HW header into same page as first protocol header. */ push_ok = true; - __elements = qeth_count_elements(skb, 0); + /* ... but TSO always needs a separate element for headers: */ + if (skb_is_gso(skb)) + __elements = 1 + qeth_count_elements(skb, proto_len); + else + __elements = qeth_count_elements(skb, 0); } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) { /* Push HW header into a new page. */ push_ok = true; @@ -3935,6 +3846,8 @@ check_layout: return hdr_len; } /* fall back */ + if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) + return -E2BIG; *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!*hdr) return -ENOMEM; @@ -4026,8 +3939,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, bool is_first_elem = true; int flush_cnt = 0; - refcount_inc(&skb->users); - skb_queue_tail(&buf->skb_list, skb); + __skb_queue_tail(&buf->skb_list, skb); /* build dedicated header element */ if (hd_len) { @@ -4176,6 +4088,97 @@ out: } EXPORT_SYMBOL_GPL(qeth_do_send_packet); +void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len, + struct sk_buff *skb, unsigned int proto_len) +{ + struct qeth_hdr_ext_tso *ext = &hdr->ext; + + ext->hdr_tot_len = sizeof(*ext); + ext->imb_hdr_no = 1; + ext->hdr_type = 1; + ext->hdr_version = 1; + ext->hdr_len = 28; + ext->payload_len = payload_len; + ext->mss = skb_shinfo(skb)->gso_size; + ext->dg_hdr_len = proto_len; +} +EXPORT_SYMBOL_GPL(qeth_fill_tso_ext); + +int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type, + void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb, int ipv, int cast_type, + unsigned int data_len)) +{ + unsigned int proto_len, hw_hdr_len; + unsigned int frame_len = skb->len; + bool is_tso = skb_is_gso(skb); + unsigned int data_offset = 0; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; + unsigned int elements; + int push_len, rc; + bool is_sg; + + if (is_tso) { + hw_hdr_len = sizeof(struct qeth_hdr_tso); + proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + } else { + hw_hdr_len = sizeof(struct qeth_hdr); + proto_len = IS_IQD(card) ? ETH_HLEN : 0; + } + + rc = skb_cow_head(skb, hw_hdr_len); + if (rc) + return rc; + + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, + &elements); + if (push_len < 0) + return push_len; + if (is_tso || !push_len) { + /* HW header needs its own buffer element. 
*/ + hd_len = hw_hdr_len + proto_len; + data_offset = push_len + proto_len; + } + memset(hdr, 0, hw_hdr_len); + fill_header(card, hdr, skb, ipv, cast_type, frame_len); + if (is_tso) + qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, + frame_len - proto_len, skb, proto_len); + + is_sg = skb_is_nonlinear(skb); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, + hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, + hd_len, elements); + } + + if (!rc) { + if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; + if (is_tso) { + card->perf_stats.large_send_bytes += frame_len; + card->perf_stats.large_send_cnt++; + } + } + } else { + if (!push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) + /* roll back to ETH header */ + skb_pull(skb, push_len); + } + return rc; +} +EXPORT_SYMBOL_GPL(qeth_xmit); + static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -4243,8 +4246,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, if (qeth_setadpparms_inspect_rc(cmd)) return 0; - if (!card->options.layer2 || - !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { + if (IS_LAYER3(card) || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { ether_addr_copy(card->dev->dev_addr, cmd->data.setadapterparms.data.change_addr.addr); card->info.mac_bits |= QETH_LAYER2_MAC_READ; @@ -4598,9 +4600,9 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) return -EOPNOTSUPP; if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && - (!card->options.layer2)) { + IS_LAYER3(card)) return -EOPNOTSUPP; - } + /* skip 4 bytes (data_len struct member) to get req_len */ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT; @@ -5044,6 +5046,7 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->data); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); + dev_set_drvdata(&card->gdev->dev, NULL); kfree(card); } @@ -5123,7 +5126,7 @@ retriable: qeth_determine_capabilities(card); qeth_init_tokens(card); qeth_init_func_level(card); - rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); + rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break2"); return rc; @@ -5134,7 +5137,7 @@ retriable: else goto retry; } - rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); + rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break3"); return rc; @@ -5158,13 +5161,14 @@ retriable: if (rc == IPA_RC_LAN_OFFLINE) { dev_warn(&card->gdev->dev, "The LAN is offline\n"); - card->lan_online = 0; + netif_carrier_off(card->dev); } else { rc = -ENODEV; goto out; } - } else - card->lan_online = 1; + } else { + netif_carrier_on(card->dev); + } card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; @@ -5421,6 +5425,21 @@ static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) return cmd->hdr.return_code; } +static int qeth_setassparms_get_caps_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipa_caps *caps = reply->param; + + if 
(qeth_setassparms_inspect_rc(cmd)) + return 0; + + caps->supported = cmd->data.setassparms.data.caps.supported; + caps->enabled = cmd->data.setassparms.data.caps.enabled; + return 0; +} + int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -5456,8 +5475,6 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, cmd->data.setassparms.hdr.assist_no = ipa_func; cmd->data.setassparms.hdr.length = 8 + len; cmd->data.setassparms.hdr.command_code = cmd_code; - cmd->data.setassparms.hdr.return_code = 0; - cmd->data.setassparms.hdr.seq_no = 0; } return iob; @@ -5560,11 +5577,11 @@ static int qeth_register_dbf_views(void) return 0; } +static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ + int qeth_core_load_discipline(struct qeth_card *card, enum qeth_discipline_id discipline) { - int rc = 0; - mutex_lock(&qeth_mod_mutex); switch (discipline) { case QETH_DISCIPLINE_LAYER3: @@ -5578,22 +5595,25 @@ int qeth_core_load_discipline(struct qeth_card *card, default: break; } + mutex_unlock(&qeth_mod_mutex); if (!card->discipline) { dev_err(&card->gdev->dev, "There is no kernel module to " "support discipline %d\n", discipline); - rc = -EINVAL; + return -EINVAL; } - mutex_unlock(&qeth_mod_mutex); - return rc; + + card->options.layer = discipline; + return 0; } void qeth_core_free_discipline(struct qeth_card *card) { - if (card->options.layer2) + if (IS_LAYER2(card)) symbol_put(qeth_l2_discipline); else symbol_put(qeth_l3_discipline); + card->options.layer = QETH_DISCIPLINE_UNDETERMINED; card->discipline = NULL; } @@ -5731,7 +5751,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) struct device *dev; int rc; enum qeth_discipline_id enforced_disc; - unsigned long flags; char dbf_name[DBF_NAME_LEN]; QETH_DBF_TEXT(SETUP, 2, "probedev"); @@ -5742,7 +5761,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); - card = qeth_alloc_card(); + card = qeth_alloc_card(gdev); if (!card) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); rc = -ENOMEM; @@ -5758,15 +5777,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) goto err_card; } - card->read.ccwdev = gdev->cdev[0]; - card->write.ccwdev = gdev->cdev[1]; - card->data.ccwdev = gdev->cdev[2]; - dev_set_drvdata(&gdev->dev, card); - card->gdev = gdev; - gdev->cdev[0]->handler = qeth_irq; - gdev->cdev[1]->handler = qeth_irq; - gdev->cdev[2]->handler = qeth_irq; - qeth_setup_card(card); qeth_update_from_chp_desc(card); @@ -5797,9 +5807,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) break; } - write_lock_irqsave(&qeth_core_card_list.rwlock, flags); + write_lock_irq(&qeth_core_card_list.rwlock); list_add_tail(&card->list, &qeth_core_card_list.list); - write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + write_unlock_irq(&qeth_core_card_list.rwlock); return 0; err_disc: @@ -5815,7 +5825,6 @@ err_dev: static void qeth_core_remove_device(struct ccwgroup_device *gdev) { - unsigned long flags; struct qeth_card *card = dev_get_drvdata(&gdev->dev); QETH_DBF_TEXT(SETUP, 2, "removedv"); @@ -5825,12 +5834,11 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) qeth_core_free_discipline(card); } - write_lock_irqsave(&qeth_core_card_list.rwlock, flags); + write_lock_irq(&qeth_core_card_list.rwlock); list_del(&card->list); - write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + write_unlock_irq(&qeth_core_card_list.rwlock); free_netdev(card->dev); 
qeth_core_free_card(card); - dev_set_drvdata(&gdev->dev, NULL); put_device(&gdev->dev); } @@ -6123,7 +6131,7 @@ void qeth_core_get_drvinfo(struct net_device *dev, { struct qeth_card *card = dev->ml_priv; - strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3", + strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3", sizeof(info->driver)); strlcpy(info->version, "1.0", sizeof(info->version)); strlcpy(info->fw_version, card->info.mcl_level, @@ -6434,27 +6442,85 @@ static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, return rc ? -EIO : 0; } -static int qeth_set_ipa_tso(struct qeth_card *card, int on) +static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_tso_start_data *tso_data = reply->param; + + if (qeth_setassparms_inspect_rc(cmd)) + return 0; + + tso_data->mss = cmd->data.setassparms.data.tso.mss; + tso_data->supported = cmd->data.setassparms.data.tso.supported; + return 0; +} + +static int qeth_set_tso_off(struct qeth_card *card, + enum qeth_prot_versions prot) { + return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_STOP, 0, prot); +} + +static int qeth_set_tso_on(struct qeth_card *card, + enum qeth_prot_versions prot) +{ + struct qeth_tso_start_data tso_data; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_caps caps; int rc; - QETH_CARD_TEXT(card, 3, "sttso"); + iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_START, 0, prot); + if (!iob) + return -ENOMEM; - if (on) { - rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, - IPA_CMD_ASS_START, 0); - if (rc) { - dev_warn(&card->gdev->dev, - "Starting outbound TCP segmentation offload for %s failed\n", - QETH_CARD_IFNAME(card)); - return -EIO; - } - dev_info(&card->gdev->dev, "Outbound TSO enabled\n"); - } else { - rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, - IPA_CMD_ASS_STOP, 0); + rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */, + qeth_start_tso_cb, &tso_data); + if (rc) + return rc; + + if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { + qeth_set_tso_off(card, prot); + return -EOPNOTSUPP; } - return rc; + + iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_ENABLE, sizeof(caps), prot); + if (!iob) { + qeth_set_tso_off(card, prot); + return -ENOMEM; + } + + /* enable TSO capability */ + caps.supported = 0; + caps.enabled = QETH_IPA_LARGE_SEND_TCP; + rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps, + qeth_setassparms_get_caps_cb, &caps); + if (rc) { + qeth_set_tso_off(card, prot); + return rc; + } + + if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || + !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { + qeth_set_tso_off(card, prot); + return -EOPNOTSUPP; + } + + dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, + tso_data.mss); + return 0; +} + +static int qeth_set_ipa_tso(struct qeth_card *card, bool on, + enum qeth_prot_versions prot) +{ + int rc = on ? qeth_set_tso_on(card, prot) : + qeth_set_tso_off(card, prot); + + return rc ? 
-EIO : 0; } static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) @@ -6481,7 +6547,7 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) } #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ - NETIF_F_IPV6_CSUM) + NETIF_F_IPV6_CSUM | NETIF_F_TSO6) /** * qeth_enable_hw_features() - (Re-)Enable HW functions for device features * @dev: a net_device @@ -6531,11 +6597,18 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features) if (rc) changed ^= NETIF_F_RXCSUM; } - if ((changed & NETIF_F_TSO)) { - rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0); + if (changed & NETIF_F_TSO) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, + QETH_PROT_IPV4); if (rc) changed ^= NETIF_F_TSO; } + if (changed & NETIF_F_TSO6) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, + QETH_PROT_IPV6); + if (rc) + changed ^= NETIF_F_TSO6; + } /* everything changed successfully? */ if ((dev->features ^ features) == changed) @@ -6561,6 +6634,8 @@ netdev_features_t qeth_fix_features(struct net_device *dev, features &= ~NETIF_F_RXCSUM; if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) features &= ~NETIF_F_TSO; + if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) + features &= ~NETIF_F_TSO6; /* if the card isn't up, remove features that require hw changes */ if (card->state == CARD_STATE_DOWN || card->state == CARD_STATE_RECOVER) @@ -6602,9 +6677,7 @@ static int __init qeth_core_init(void) pr_info("loading core functions\n"); INIT_LIST_HEAD(&qeth_core_card_list.list); - INIT_LIST_HEAD(&qeth_dbf_list); rwlock_init(&qeth_core_card_list.rwlock); - mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); if (!qeth_wq) { @@ -6619,8 +6692,10 @@ static int __init qeth_core_init(void) rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) goto register_err; - qeth_core_header_cache = kmem_cache_create("qeth_hdr", - sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); + qeth_core_header_cache = + kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, + roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), + 0, NULL); if (!qeth_core_header_cache) { rc = -ENOMEM; goto slab_err; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index aa5de1fe01e1..e85090467afe 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -56,6 +56,21 @@ static inline bool qeth_intparm_is_iob(unsigned long intparm) #define IPA_CMD_INITIATOR_OSA_REPLY 0x81 #define IPA_CMD_PRIM_VERSION_NO 0x01 +struct qeth_ipa_caps { + u32 supported; + u32 enabled; +}; + +static inline bool qeth_ipa_caps_supported(struct qeth_ipa_caps *caps, u32 mask) +{ + return (caps->supported & mask) == mask; +} + +static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask) +{ + return (caps->enabled & mask) == mask; +} + enum qeth_card_types { QETH_CARD_TYPE_OSD = 1, QETH_CARD_TYPE_IQD = 5, @@ -405,14 +420,25 @@ struct qeth_checksum_cmd { __u32 enabled; } __packed; +enum qeth_ipa_large_send_caps { + QETH_IPA_LARGE_SEND_TCP = 0x00000001, +}; + +struct qeth_tso_start_data { + u32 mss; + u32 supported; +}; + /* SETASSPARMS IPA Command: */ struct qeth_ipacmd_setassparms { struct qeth_ipacmd_setassparms_hdr hdr; union { __u32 flags_32bit; + struct qeth_ipa_caps caps; struct qeth_checksum_cmd chksum; struct qeth_arp_cache_entry add_arp_entry; struct qeth_arp_query_data query_arp; + struct qeth_tso_start_data tso; __u8 ip[16]; } data; } __attribute__ ((packed)); diff --git a/drivers/s390/net/qeth_core_sys.c 
b/drivers/s390/net/qeth_core_sys.c index 25d0be25bcb3..30f61608fa22 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -31,10 +31,9 @@ static ssize_t qeth_dev_state_show(struct device *dev, case CARD_STATE_SOFTSETUP: return sprintf(buf, "SOFTSETUP\n"); case CARD_STATE_UP: - if (card->lan_online) - return sprintf(buf, "UP (LAN ONLINE)\n"); - else - return sprintf(buf, "UP (LAN OFFLINE)\n"); + return sprintf(buf, "UP (LAN %s)\n", + netif_carrier_ok(card->dev) ? "ONLINE" : + "OFFLINE"); case CARD_STATE_RECOVER: return sprintf(buf, "RECOVER\n"); default: @@ -228,7 +227,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS; card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; } else if (sysfs_streq(buf, "prio_queueing_vlan")) { - if (!card->options.layer2) { + if (IS_LAYER3(card)) { rc = -ENOTSUPP; goto out; } @@ -379,7 +378,7 @@ static ssize_t qeth_dev_layer2_show(struct device *dev, if (!card) return -EINVAL; - return sprintf(buf, "%i\n", card->options.layer2); + return sprintf(buf, "%i\n", card->options.layer); } static ssize_t qeth_dev_layer2_store(struct device *dev, @@ -413,7 +412,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, goto out; } - if (card->options.layer2 == newdis) + if (card->options.layer == newdis) goto out; if (card->info.layer_enforced) { /* fixed layer, can't switch */ @@ -432,8 +431,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, card->discipline->remove(card->gdev); qeth_core_free_discipline(card); - card->options.layer2 = -1; - free_netdev(card->dev); card->dev = ndev; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b5e38531733f..23aaf373f631 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -193,15 +193,25 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return RTN_UNICAST; } -static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, - int cast_type, unsigned int data_len) +static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb, int ipv, int cast_type, + unsigned int data_len) { - struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); - memset(hdr, 0, sizeof(struct qeth_hdr)); - hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; hdr->hdr.l2.pkt_length = data_len; + if (skb_is_gso(skb)) { + hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO; + } else { + hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } + } + /* set byte byte 3 to casting flags */ if (cast_type == RTN_MULTICAST) hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST; @@ -641,84 +651,43 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } -static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int cast_type, int ipv) +static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue) { - const unsigned int proto_len = IS_IQD(card) ? 
ETH_HLEN : 0; - const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); - unsigned int frame_len = skb->len; - unsigned int data_offset = 0; - struct qeth_hdr *hdr = NULL; + struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data; + addr_t end = (addr_t)(skb->data + sizeof(*hdr)); + addr_t start = (addr_t)skb->data; + unsigned int elements = 0; unsigned int hd_len = 0; - unsigned int elements; - int push_len, rc; - bool is_sg; + int rc; - rc = skb_cow_head(skb, hw_hdr_len); - if (rc) - return rc; + if (skb->protocol == htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; - push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, - &elements); - if (push_len < 0) - return push_len; - if (!push_len) { - /* HW header needs its own buffer element. */ - hd_len = hw_hdr_len + proto_len; - data_offset = proto_len; - } - qeth_l2_fill_header(hdr, skb, cast_type, frame_len); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + if (qeth_get_elements_for_range(start, end) > 1) { + /* Misaligned HW header, move it to its own buffer element. */ + hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!hdr) + return -ENOMEM; + hd_len = sizeof(*hdr); + skb_copy_from_linear_data(skb, (char *)hdr, hd_len); + elements++; } - is_sg = skb_is_nonlinear(skb); - if (IS_IQD(card)) { - rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, - hd_len); - } else { - /* TODO: drop skb_orphan() once TX completion is fast enough */ - skb_orphan(skb); - rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, - hd_len, elements); + elements += qeth_count_elements(skb, hd_len); + if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) { + rc = -E2BIG; + goto out; } - if (!rc) { - if (card->options.performance_stats) { - card->perf_stats.buf_elements_sent += elements; - if (is_sg) - card->perf_stats.sg_skbs_sent++; - } - } else { - if (!push_len) - kmem_cache_free(qeth_core_header_cache, hdr); - if (rc == -EBUSY) - /* roll back to ETH header */ - skb_pull(skb, push_len); - } + rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len, + elements); +out: + if (rc && hd_len) + kmem_cache_free(qeth_core_header_cache, hdr); return rc; } -static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue) -{ - unsigned int elements; - struct qeth_hdr *hdr; - - if (skb->protocol == htons(ETH_P_IPV6)) - return -EPROTONOSUPPORT; - - hdr = (struct qeth_hdr *)skb->data; - elements = qeth_get_elements_no(card, skb, 0, 0); - if (!elements) - return -E2BIG; - if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) - return -EINVAL; - return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements); -} - static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -729,7 +698,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; - if ((card->state != CARD_STATE_UP) || !card->lan_online) { + if (card->state != CARD_STATE_UP) { card->stats.tx_carrier_errors++; goto tx_drop; } @@ -745,7 +714,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, if (IS_OSN(card)) rc = qeth_l2_xmit_osn(card, skb, queue); else - rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv); + rc = qeth_xmit(card, skb, queue, ipv, cast_type, + qeth_l2_fill_header); if (!rc) { card->stats.tx_packets++; @@ -789,7 +759,10 @@ static int __qeth_l2_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 
0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; @@ -837,7 +810,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) } INIT_LIST_HEAD(&card->vid_list); hash_init(card->mac_htable); - card->options.layer2 = 1; card->info.hwtrap = 0; qeth_l2_vnicc_set_defaults(card); return 0; @@ -929,6 +901,20 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->hw_features |= NETIF_F_RXCSUM; card->dev->vlan_features |= NETIF_F_RXCSUM; } + if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO; + card->dev->vlan_features |= NETIF_F_TSO; + } + if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO6; + card->dev->vlan_features |= NETIF_F_TSO6; + } + + if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) { + card->dev->needed_headroom = sizeof(struct qeth_hdr_tso); + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); + } qeth_l2_request_initial_mac(card); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); @@ -1029,10 +1015,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } card->state = CARD_STATE_SOFTSETUP; - if (card->lan_online) - netif_carrier_on(card->dev); - else - netif_carrier_off(card->dev); qeth_set_allowed_threads(card, 0xffffffff, 0); @@ -1178,9 +1160,6 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; - if (gdev->state == CCWGROUP_OFFLINE) - goto out; - if (card->state == CARD_STATE_RECOVER) { rc = __qeth_l2_set_online(card->gdev, 1); if (rc) { @@ -1190,7 +1169,7 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) } } else rc = __qeth_l2_set_online(card->gdev, 0); -out: + qeth_set_allowed_threads(card, 0xffffffff, 0); netif_device_attach(card->dev); if (rc) @@ -1240,7 +1219,6 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { struct qeth_channel *channel = iob->channel; - unsigned long flags; int rc = 0; QETH_CARD_TEXT(card, 5, "osndctrd"); @@ -1249,10 +1227,10 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " "ccw_device_start rc = %i\n", rc); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ada258c01a08..0b161cc1fd2e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -33,7 +33,6 @@ #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/ip6_fib.h> -#include <net/ip6_checksum.h> #include <net/iucv/af_iucv.h> #include <linux/hashtable.h> @@ -1349,6 +1348,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, static int qeth_l3_process_inbound_buffer(struct qeth_card *card, int budget, int *done) { + struct net_device *dev = card->dev; int work_done = 0; struct sk_buff *skb; struct qeth_hdr 
*hdr; @@ -1370,11 +1370,10 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, magic = *(__u16 *)skb->data; if ((card->info.type == QETH_CARD_TYPE_IQD) && (magic == ETH_P_AF_IUCV)) { - skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); len = skb->len; - card->dev->header_ops->create(skb, card->dev, 0, - card->dev->dev_addr, "FAKELL", len); - skb_reset_mac_header(skb); + dev_hard_header(skb, dev, ETH_P_AF_IUCV, + dev->dev_addr, "FAKELL", len); + skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); } else { qeth_l3_rebuild_skb(card, skb, hdr); @@ -1983,39 +1982,38 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) rcu_read_unlock(); /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ - if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) - return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? - RTN_MULTICAST : RTN_UNICAST; - else if (be16_to_cpu(skb->protocol) == ETH_P_IP) + switch (qeth_get_ip_version(skb)) { + case 4: return ipv4_is_multicast(ip_hdr(skb)->daddr) ? RTN_MULTICAST : RTN_UNICAST; - - /* ... and MAC address */ - if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast)) - return RTN_BROADCAST; - if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) - return RTN_MULTICAST; - - /* default to unicast */ - return RTN_UNICAST; + case 6: + return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? + RTN_MULTICAST : RTN_UNICAST; + default: + /* ... and MAC address */ + if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, + skb->dev->broadcast)) + return RTN_BROADCAST; + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) + return RTN_MULTICAST; + /* default to unicast */ + return RTN_UNICAST; + } } static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb, unsigned int data_len) { char daddr[16]; - struct af_iucv_trans_hdr *iucv_hdr; - memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; hdr->hdr.l3.length = data_len; hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; - iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN); memset(daddr, 0, sizeof(daddr)); daddr[0] = 0xfe; daddr[1] = 0x80; - memcpy(&daddr[8], iucv_hdr->destUserID, 8); + memcpy(&daddr[8], iucv_trans_hdr(skb)->destUserID, 8); memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16); } @@ -2034,26 +2032,33 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb, int ipv, int cast_type, unsigned int data_len) { - memset(hdr, 0, sizeof(struct qeth_hdr)); - hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + hdr->hdr.l3.length = data_len; - /* - * before we're going to overwrite this location with next hop ip. - * v6 uses passthrough, v4 sets the tag in the QDIO header. 
- */ - if (skb_vlan_tag_present(skb)) { - if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD)) - hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME; - else - hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG; - hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); + if (skb_is_gso(skb)) { + hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO; + } else { + hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); + /* some HW requires combined L3+L4 csum offload: */ + if (ipv == 4) + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } } - if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + if (ipv == 4 || IS_IQD(card)) { + /* NETIF_F_HW_VLAN_CTAG_TX */ + if (skb_vlan_tag_present(skb)) { + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME; + hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); + } + } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG; + hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); } /* OSA only: */ @@ -2094,85 +2099,41 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_unlock(); } -static void qeth_tso_fill_header(struct qeth_card *card, - struct qeth_hdr *qhdr, struct sk_buff *skb) +static void qeth_l3_fixup_headers(struct sk_buff *skb) { - struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr; - struct tcphdr *tcph = tcp_hdr(skb); struct iphdr *iph = ip_hdr(skb); - struct ipv6hdr *ip6h = ipv6_hdr(skb); - - /*fix header to TSO values ...*/ - hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; - /*set values which are fix for the first approach ...*/ - hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); - hdr->ext.imb_hdr_no = 1; - hdr->ext.hdr_type = 1; - hdr->ext.hdr_version = 1; - hdr->ext.hdr_len = 28; - /*insert non-fix values */ - hdr->ext.mss = skb_shinfo(skb)->gso_size; - hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb)); - hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - - sizeof(struct qeth_hdr_tso)); - tcph->check = 0; - if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) { - ip6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, - 0, IPPROTO_TCP, 0); - } else { - /*OSA want us to set these values ...*/ - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - iph->tot_len = 0; - iph->check = 0; - } -} -/** - * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso - * @card: qeth card structure, to check max. elems. - * @skb: SKB address - * @extra_elems: extra elems needed, to check against max. - * - * Returns the number of pages, and thus QDIO buffer elements, needed to cover - * skb data, including linear part and fragments, but excluding TCP header. - * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().) - * Checks if the result plus extra_elems fits under the limit for the card. - * Returns 0 if it does not. - * Note: extra_elems is not included in the returned result. 
- */ -static int qeth_l3_get_elements_no_tso(struct qeth_card *card, - struct sk_buff *skb, int extra_elems) -{ - addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); - addr_t end = (addr_t)skb->data + skb_headlen(skb); - int elements = qeth_get_elements_for_frags(skb); - - if (start != end) - elements += qeth_get_elements_for_range(start, end); - - if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { - QETH_DBF_MESSAGE(2, - "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n", - elements + extra_elems, skb->len); - return 0; + /* this is safe, IPv6 traffic takes a different path */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + iph->check = 0; + if (skb_is_gso(skb)) { + iph->tot_len = 0; + tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr, + iph->daddr, 0); } - return elements; } -static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int ipv, - int cast_type) +static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type) { - const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); - unsigned int frame_len, elements; + unsigned int hw_hdr_len, proto_len, frame_len, elements; unsigned char eth_hdr[ETH_HLEN]; + bool is_tso = skb_is_gso(skb); + unsigned int data_offset = 0; struct qeth_hdr *hdr = NULL; unsigned int hd_len = 0; int push_len, rc; bool is_sg; + if (is_tso) { + hw_hdr_len = sizeof(struct qeth_hdr_tso); + proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - + ETH_HLEN; + } else { + hw_hdr_len = sizeof(struct qeth_hdr); + proto_len = 0; + } + /* re-use the L2 header area for the HW header: */ rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); if (rc) @@ -2181,28 +2142,37 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, skb_pull(skb, ETH_HLEN); frame_len = skb->len; - push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0, + qeth_l3_fixup_headers(skb); + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, &elements); if (push_len < 0) return push_len; - if (!push_len) { - /* hdr was added discontiguous from skb->data */ - hd_len = hw_hdr_len; + if (is_tso || !push_len) { + /* HW header needs its own buffer element. 
*/ + hd_len = hw_hdr_len + proto_len; + data_offset = push_len + proto_len; } + memset(hdr, 0, hw_hdr_len); - if (skb->protocol == htons(ETH_P_AF_IUCV)) + if (skb->protocol == htons(ETH_P_AF_IUCV)) { qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); - else + } else { qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); + if (is_tso) + qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, + frame_len - proto_len, skb, + proto_len); + } is_sg = skb_is_nonlinear(skb); if (IS_IQD(card)) { - rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); + rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, + hd_len); } else { /* TODO: drop skb_orphan() once TX completion is fast enough */ skb_orphan(skb); - rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, - elements); + rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, + hd_len, elements); } if (!rc) { @@ -2210,6 +2180,10 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, card->perf_stats.buf_elements_sent += elements; if (is_sg) card->perf_stats.sg_skbs_sent++; + if (is_tso) { + card->perf_stats.large_send_bytes += frame_len; + card->perf_stats.large_send_cnt++; + } } } else { if (!push_len) @@ -2224,118 +2198,6 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, return rc; } -static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int ipv, int cast_type) -{ - int elements, len, rc; - __be16 *tag; - struct qeth_hdr *hdr = NULL; - int hdr_elements = 0; - struct sk_buff *new_skb = NULL; - int tx_bytes = skb->len; - unsigned int hd_len; - bool use_tso, is_sg; - - /* Ignore segment size from skb_is_gso(), 1 page is always used. */ - use_tso = skb_is_gso(skb) && - (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); - - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + - VLAN_HLEN); - if (!new_skb) - return -ENOMEM; - - if (ipv == 4) { - skb_pull(new_skb, ETH_HLEN); - } else if (skb_vlan_tag_present(new_skb)) { - skb_push(new_skb, VLAN_HLEN); - skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); - skb_copy_to_linear_data_offset(new_skb, 4, - new_skb->data + 8, 4); - skb_copy_to_linear_data_offset(new_skb, 8, - new_skb->data + 12, 4); - tag = (__be16 *)(new_skb->data + 12); - *tag = cpu_to_be16(ETH_P_8021Q); - *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); - } - - /* fix hardware limitation: as long as we do not have sbal - * chaining we can not send long frag lists - */ - if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || - (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) { - rc = skb_linearize(new_skb); - - if (card->options.performance_stats) { - if (rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (rc) - goto out; - } - - if (use_tso) { - hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); - memset(hdr, 0, sizeof(struct qeth_hdr_tso)); - qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, - new_skb->len - sizeof(struct qeth_hdr_tso)); - qeth_tso_fill_header(card, hdr, new_skb); - hdr_elements++; - } else { - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, - new_skb->len - sizeof(struct qeth_hdr)); - } - - elements = use_tso ? 
- qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : - qeth_get_elements_no(card, new_skb, hdr_elements, 0); - if (!elements) { - rc = -E2BIG; - goto out; - } - elements += hdr_elements; - - if (use_tso) { - hd_len = sizeof(struct qeth_hdr_tso) + - ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); - len = hd_len; - } else { - hd_len = 0; - len = sizeof(struct qeth_hdr_layer3); - } - - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { - rc = -EINVAL; - goto out; - } - - is_sg = skb_is_nonlinear(new_skb); - rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, - elements); -out: - if (!rc) { - if (new_skb != skb) - dev_kfree_skb_any(skb); - if (card->options.performance_stats) { - card->perf_stats.buf_elements_sent += elements; - if (is_sg) - card->perf_stats.sg_skbs_sent++; - if (use_tso) { - card->perf_stats.large_send_bytes += tx_bytes; - card->perf_stats.large_send_cnt++; - } - } - } else { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - } - return rc; -} - static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -2355,7 +2217,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, goto tx_drop; } - if (card->state != CARD_STATE_UP || !card->lan_online) { + if (card->state != CARD_STATE_UP) { card->stats.tx_carrier_errors++; goto tx_drop; } @@ -2371,10 +2233,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } netif_stop_queue(dev); - if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4)) - rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type); - else + if (ipv == 4 || IS_IQD(card)) rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); + else + rc = qeth_xmit(card, skb, queue, ipv, cast_type, + qeth_l3_fill_header); if (!rc) { card->stats.tx_packets++; @@ -2412,7 +2275,10 @@ static int __qeth_l3_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; @@ -2476,6 +2342,15 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) return 0; } +static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (qeth_get_ip_version(skb) != 4) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + return qeth_features_check(skb, dev, features); +} + static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_open = qeth_l3_open, .ndo_stop = qeth_l3_stop, @@ -2496,7 +2371,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_stop = qeth_l3_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, - .ndo_features_check = qeth_features_check, + .ndo_features_check = qeth_l3_osa_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -2510,6 +2385,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { static int qeth_l3_setup_netdev(struct qeth_card *card) { + unsigned int headroom; int rc; if (card->dev->netdev_ops) @@ -2542,9 +2418,22 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->hw_features |= NETIF_F_IPV6_CSUM; card->dev->vlan_features |= NETIF_F_IPV6_CSUM; } + if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO6; + card->dev->vlan_features |= NETIF_F_TSO6; + } + + /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */ + if (card->dev->hw_features & 
NETIF_F_TSO6) + headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN; + else if (card->dev->hw_features & NETIF_F_TSO) + headroom = sizeof(struct qeth_hdr_tso); + else + headroom = sizeof(struct qeth_hdr) + VLAN_HLEN; } else if (card->info.type == QETH_CARD_TYPE_IQD) { card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; + headroom = sizeof(struct qeth_hdr) - ETH_HLEN; rc = qeth_l3_iqd_read_initial_mac(card); if (rc) @@ -2555,14 +2444,14 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) } else return -ENODEV; + card->dev->needed_headroom = headroom; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; - card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netif_keep_dst(card->dev); - if (card->dev->hw_features & NETIF_F_TSO) + if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) netif_set_gso_max_size(card->dev, PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); @@ -2591,7 +2480,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) } hash_init(card->ip_htable); hash_init(card->ip_mc_htable); - card->options.layer2 = 0; card->info.hwtrap = 0; return 0; } @@ -2678,10 +2566,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_set_allowed_threads(card, 0xffffffff, 0); qeth_l3_recover_ip(card); - if (card->lan_online) - netif_carrier_on(card->dev); - else - netif_carrier_off(card->dev); qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { @@ -2819,9 +2703,6 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; - if (gdev->state == CCWGROUP_OFFLINE) - goto out; - if (card->state == CARD_STATE_RECOVER) { rc = __qeth_l3_set_online(card->gdev, 1); if (rc) { @@ -2831,7 +2712,7 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) } } else rc = __qeth_l3_set_online(card->gdev, 0); -out: + qeth_set_allowed_threads(card, 0xffffffff, 0); netif_device_attach(card->dev); if (rc) |
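
A note on the qeth_hdr slab change in qeth_core_init() above: the cache is now created with an alignment of roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE). A power-of-two alignment that is at least the object size, and that divides PAGE_SIZE, guarantees that no header object straddles a page boundary, so a header allocated from this cache should always map into a single QDIO buffer element. A minimal standalone sketch of that argument (illustrative only; obj_size is a made-up stand-in, not the real QETH_HDR_CACHE_OBJ_SIZE):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Smallest power of two >= x (x > 0), mirroring the kernel's
 * roundup_pow_of_two() for this sketch. */
static size_t roundup_pow_of_two(size_t x)
{
	size_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	size_t obj_size = 48;	/* stand-in value only */
	size_t align = roundup_pow_of_two(obj_size);
	size_t addr;

	/* Every aligned object [addr, addr + obj_size) stays within one page. */
	for (addr = 0; addr < 16 * PAGE_SIZE; addr += align)
		assert(addr / PAGE_SIZE == (addr + obj_size - 1) / PAGE_SIZE);

	printf("objects of %zu bytes at %zu-byte alignment never cross a page\n",
	       obj_size, align);
	return 0;
}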
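
The new qeth_ipa_caps helpers in qeth_core_mpc.h deliberately test that every bit of the requested mask is set, not merely any bit. A small standalone illustration (uint32_t stands in for the kernel's u32; the values in main() are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors struct qeth_ipa_caps from qeth_core_mpc.h. */
struct qeth_ipa_caps {
	uint32_t supported;
	uint32_t enabled;
};

#define QETH_IPA_LARGE_SEND_TCP 0x00000001

/* All bits of @mask must be present, not just one of them. */
static bool qeth_ipa_caps_supported(const struct qeth_ipa_caps *caps, uint32_t mask)
{
	return (caps->supported & mask) == mask;
}

static bool qeth_ipa_caps_enabled(const struct qeth_ipa_caps *caps, uint32_t mask)
{
	return (caps->enabled & mask) == mask;
}

int main(void)
{
	/* Example values only; a real reply is filled in by the card. */
	struct qeth_ipa_caps caps = { .supported = 0x1, .enabled = 0x0 };

	if (qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) &&
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP))
		printf("TSO supported but not yet enabled\n");
	return 0;
}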
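
Both __qeth_l2_open() and __qeth_l3_open() now wrap napi_schedule() in local_bh_disable()/local_bh_enable(). napi_schedule() only marks NET_RX_SOFTIRQ as pending; re-enabling bottom halves from process context then runs the pending softirq right away, so RX polling starts without waiting for the next device interrupt. A kernel-style sketch of the pattern (example_open() and struct example_priv are illustrative, not qeth code):

#include <linux/bottom_half.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);

	local_bh_disable();
	napi_schedule(&priv->napi);	/* only raises NET_RX_SOFTIRQ */
	local_bh_enable();		/* process-context enable runs it now */

	netif_start_queue(dev);
	return 0;
}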
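
In qeth_l3_fixup_headers() above, a GSO packet gets iph->tot_len zeroed and tcph->check seeded with ~tcp_v4_check(0, saddr, daddr, 0), i.e. the folded but not inverted IPv4 pseudo-header sum over source address, destination address and IPPROTO_TCP with a zero length, so the hardware can complete the per-segment TCP checksum on top of it. A standalone sketch of that seed value (tcp_pseudo_seed() and csum_fold32() are illustrative names, not kernel helpers):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* saddr/daddr are IPv4 addresses in network byte order, as byte arrays. */
static uint16_t tcp_pseudo_seed(const uint8_t saddr[4], const uint8_t daddr[4])
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < 4; i += 2) {
		sum += (uint32_t)(saddr[i] << 8 | saddr[i + 1]);
		sum += (uint32_t)(daddr[i] << 8 | daddr[i + 1]);
	}
	sum += IPPROTO_TCP;	/* TCP length is deliberately 0 for TSO */

	return htons(csum_fold32(sum));	/* not inverted: HW completes it */
}

int main(void)
{
	uint8_t src[4] = { 192, 168, 0, 1 };	/* example addresses */
	uint8_t dst[4] = { 192, 168, 0, 2 };

	printf("checksum seed: 0x%04x\n", ntohs(tcp_pseudo_seed(src, dst)));
	return 0;
}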