Diffstat (limited to 'drivers/s390/net/qeth_main.c')
-rw-r--r--	drivers/s390/net/qeth_main.c	158
1 file changed, 118 insertions(+), 40 deletions(-)
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 57f69434fbf9..f3e6fbeb2123 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -561,7 +561,7 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
}
static int
-qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
+qeth_threads_running(struct qeth_card *card, unsigned long threads);
static void
@@ -576,8 +576,7 @@ qeth_remove_device(struct ccwgroup_device *cgdev)
if (!card)
return;
- if (qeth_wait_for_threads(card, 0xffffffff))
- return;
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE){
card->use_hard_stop = 1;
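The removal path above no longer polls for worker threads: it sleeps on card->wait_q until the thread mask is clear. A minimal sketch of the pattern, assuming the predicate tests card->thread_running_mask under card->thread_mask_lock as in the qeth driver of this era (the helper body here is illustrative):

static int
qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc;

	/* test the running mask under the lock that writers hold */
	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

The caller then blocks uninterruptibly until every worker bit has dropped:

	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);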
@@ -1542,16 +1541,21 @@ qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
card = CARD_FROM_CDEV(channel->ccwdev);
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
- "reply\n", CARD_WDEV_ID(card));
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+ "adapter exclusively used by another host\n",
+ CARD_WDEV_ID(card));
+ else
+ PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+ "negative reply\n", CARD_WDEV_ID(card));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
- "function level mismatch "
- "(sent: 0x%x, received: 0x%x)\n",
- CARD_WDEV_ID(card), card->info.func_level, temp);
+ "function level mismatch "
+ "(sent: 0x%x, received: 0x%x)\n",
+ CARD_WDEV_ID(card), card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
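A negative IDX_ACTIVATE reply carries a cause code, and 0x19 specifically means the OSA adapter is configured for exclusive use by another host, so the error message now names that case instead of the generic "negative reply". The accessor lives in qeth_mpc.h; as a sketch, it simply reads one byte out of the reply buffer (the exact offset shown here is illustrative):

#define QETH_IDX_ACT_CAUSE_CODE(buffer) ((buffer)[0x09])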
@@ -1597,8 +1601,13 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
goto out;
}
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
- "reply\n", CARD_RDEV_ID(card));
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+ "adapter exclusively used by another host\n",
+ CARD_RDEV_ID(card));
+ else
+ PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+ "negative reply\n", CARD_RDEV_ID(card));
goto out;
}
@@ -1613,8 +1622,8 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
- CARD_RDEV_ID(card), card->info.func_level, temp);
+ "level mismatch (sent: 0x%x, received: 0x%x)\n",
+ CARD_RDEV_ID(card), card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
@@ -2496,7 +2505,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
struct iphdr *ip_hdr;
QETH_DBF_TEXT(trace,5,"skbfktr");
- skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR);
+ skb_set_mac_header(skb, (int)-QETH_FAKE_LL_LEN_TR);
/* this is a fake ethernet header */
fake_hdr = tr_hdr(skb);
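The added cast is presumably there to silence a sign-conversion warning: QETH_FAKE_LL_LEN_TR expands to a sizeof()-based, hence unsigned, constant, so its negation is a huge unsigned value until it is converted back to int for use as a negative offset:

	/* negative offset: the fake LL header sits before the data */
	skb_set_mac_header(skb, (int)-QETH_FAKE_LL_LEN_TR);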
@@ -2804,13 +2813,16 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
if (newcount < count) {
/* we are in memory shortage so we switch back to
traditional skb allocation and drop packets */
- if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1))
- printk(KERN_WARNING
- "qeth: switch to alloc skb\n");
+ if (!atomic_read(&card->force_alloc_skb) &&
+ net_ratelimit())
+ PRINT_WARN("Switch to alloc skb\n");
+ atomic_set(&card->force_alloc_skb, 3);
count = newcount;
} else {
- if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0))
- printk(KERN_WARNING "qeth: switch to sg\n");
+ if ((atomic_read(&card->force_alloc_skb) == 1) &&
+ net_ratelimit())
+ PRINT_WARN("Switch to sg\n");
+ atomic_add_unless(&card->force_alloc_skb, -1, 0);
}
/*
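Instead of a one-shot cmpxchg toggle, force_alloc_skb now acts as a hysteresis counter: a shortage arms it at 3, and each pressure-free pass decrements it without going below 0, so the driver only returns to scatter-gather after three consecutive good rounds; both messages are additionally throttled with net_ratelimit(). The decay step in isolation (counter name as above):

	/* decrement toward 0 but never past it; at 0 the driver is
	 * back in scatter-gather mode */
	atomic_add_unless(&card->force_alloc_skb, -1, 0);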
@@ -3354,10 +3366,12 @@ out_freeoutq:
while (i > 0)
kfree(card->qdio.out_qs[--i]);
kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
out_freepool:
qeth_free_buffer_pool(card);
out_freeinq:
kfree(card->qdio.in_q);
+ card->qdio.in_q = NULL;
out_nomem:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
return -ENOMEM;
@@ -3373,16 +3387,20 @@ qeth_free_qdio_buffers(struct qeth_card *card)
QETH_QDIO_UNINITIALIZED)
return;
kfree(card->qdio.in_q);
+ card->qdio.in_q = NULL;
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
- for (i = 0; i < card->qdio.no_out_queues; ++i){
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- &card->qdio.out_qs[i]->bufs[j]);
- kfree(card->qdio.out_qs[i]);
+ if (card->qdio.out_qs) {
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+ qeth_clear_output_buffer(card->qdio.out_qs[i],
+ &card->qdio.out_qs[i]->bufs[j]);
+ kfree(card->qdio.out_qs[i]);
+ }
+ kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
}
- kfree(card->qdio.out_qs);
}
static void
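Every kfree() in the setup-error and teardown paths is now paired with clearing the pointer, and the consumers check for NULL first, which makes the free routines safe to enter twice — for instance once from a failed qeth_alloc_qdio_buffers() and again from qeth_free_qdio_buffers(). The pattern in isolation:

	kfree(card->qdio.out_qs);
	card->qdio.out_qs = NULL;	/* a later kfree(NULL) is a harmless
					 * no-op, and the NULL checks skip
					 * the already-freed queues */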
@@ -3393,7 +3411,7 @@ qeth_clear_qdio_buffers(struct qeth_card *card)
QETH_DBF_TEXT(trace, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i)
- if (card->qdio.out_qs[i]){
+ if (card->qdio.out_qs && card->qdio.out_qs[i]) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
qeth_clear_output_buffer(card->qdio.out_qs[i],
&card->qdio.out_qs[i]->bufs[j]);
@@ -4553,6 +4571,53 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
return elements_needed;
}
+static void qeth_tx_csum(struct sk_buff *skb)
+{
+ int tlen;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ tcp_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = csum_tcpudp_magic(
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ tlen, ip_hdr(skb)->protocol,
+ skb_checksum(skb, skb_transport_offset(skb),
+ tlen, 0));
+ break;
+ case IPPROTO_UDP:
+ udp_hdr(skb)->check = 0;
+ udp_hdr(skb)->check = csum_tcpudp_magic(
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ tlen, ip_hdr(skb)->protocol,
+ skb_checksum(skb, skb_transport_offset(skb),
+ tlen, 0));
+ break;
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ switch (ipv6_hdr(skb)->nexthdr) {
+ case IPPROTO_TCP:
+ tcp_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ ipv6_hdr(skb)->payload_len,
+ ipv6_hdr(skb)->nexthdr,
+ skb_checksum(skb, skb_transport_offset(skb),
+ ipv6_hdr(skb)->payload_len, 0));
+ break;
+ case IPPROTO_UDP:
+ udp_hdr(skb)->check = 0;
+ udp_hdr(skb)->check = csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ ipv6_hdr(skb)->payload_len,
+ ipv6_hdr(skb)->nexthdr,
+ skb_checksum(skb, skb_transport_offset(skb),
+ ipv6_hdr(skb)->payload_len, 0));
+ break;
+ }
+ }
+}
static int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
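qeth_tx_csum() completes a checksum the stack left partial (skb->ip_summed == CHECKSUM_PARTIAL): csum_tcpudp_magic(), or csum_ipv6_magic() for IPv6, folds the pseudo-header of addresses, protocol and length together with the payload sum from skb_checksum() into the final 16-bit one's-complement value. An annotated restatement of the IPv4/TCP branch:

	/* segment length = total IP length minus the IP header
	 * (ihl counts 32-bit words, hence << 2) */
	int tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);

	tcp_hdr(skb)->check = 0;	/* field must be zero while summing */
	tcp_hdr(skb)->check = csum_tcpudp_magic(
		ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,	/* pseudo-header */
		tlen, IPPROTO_TCP,
		skb_checksum(skb, skb_transport_offset(skb), tlen, 0));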
@@ -4638,12 +4703,22 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
elements_needed += elems;
}
+ if ((large_send == QETH_LARGE_SEND_NO) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ qeth_tx_csum(new_skb);
+
if (card->info.type != QETH_CARD_TYPE_IQD)
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed, ctx);
- else
+ else {
+ if ((skb->protocol == htons(ETH_P_ARP)) &&
+ (card->dev->flags & IFF_NOARP)) {
+ __qeth_free_new_skb(skb, new_skb);
+ return -EPERM;
+ }
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, ctx);
+ }
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
@@ -6385,20 +6460,18 @@ qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
static u32
qeth_ethtool_get_tx_csum(struct net_device *dev)
{
- /* We may need to say that we support tx csum offload if
- * we do EDDP or TSO. There are discussions going on to
- * enforce rules in the stack and in ethtool that make
- * SG and TSO depend on HW_CSUM. At the moment there are
- * no such rules....
- * If we say yes here, we have to checksum outbound packets
- * any time. */
- return 0;
+ return (dev->features & NETIF_F_HW_CSUM) != 0;
}
static int
qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
{
- return -EINVAL;
+ if (data)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ dev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
}
static u32
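With get/set wired to NETIF_F_HW_CSUM, userspace can now toggle transmit checksumming at run time ("ethtool -K <dev> tx on|off"); when the bit is clear, the stack computes checksums itself and the qeth_tx_csum() fallback above is never reached. The callbacks are dispatched through the driver's ethtool_ops table, roughly like this (sketch; qeth's real table contains more entries):

static struct ethtool_ops qeth_ethtool_ops = {
	/* ... */
	.get_tx_csum = qeth_ethtool_get_tx_csum,
	.set_tx_csum = qeth_ethtool_set_tx_csum,
	/* ... */
};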
@@ -7412,7 +7485,8 @@ qeth_start_ipa_tso(struct qeth_card *card)
}
if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
card->options.large_send = QETH_LARGE_SEND_NO;
- card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
}
return rc;
}
@@ -7552,22 +7626,26 @@ qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
card->options.large_send = type;
switch (card->options.large_send) {
case QETH_LARGE_SEND_EDDP:
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+ card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM;
break;
case QETH_LARGE_SEND_TSO:
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+ card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM;
} else {
PRINT_WARN("TSO not supported on %s. "
"large_send set to 'no'.\n",
card->dev->name);
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
rc = -EOPNOTSUPP;
}
break;
default: /* includes QETH_LARGE_SEND_NO */
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
break;
}
if (card->state == CARD_STATE_UP)
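Throughout the large_send reconfiguration paths, NETIF_F_HW_CSUM is now kept in lock-step with NETIF_F_TSO and NETIF_F_SG, since segmentation offload requires the driver to take responsibility for checksums. Grouping the dependent bits in one mask would keep the three sites consistent (the macro name below is hypothetical, not part of the patch):

#define QETH_SW_OFFLOAD_FEATURES (NETIF_F_TSO | NETIF_F_SG | NETIF_F_HW_CSUM)

	card->dev->features |= QETH_SW_OFFLOAD_FEATURES;	/* enable */
	card->dev->features &= ~QETH_SW_OFFLOAD_FEATURES;	/* disable */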