-rw-r--r--  MAINTAINERS                     |  2
-rw-r--r--  arch/x86_64/kernel/smpboot.c    |  2
-rw-r--r--  drivers/net/e1000/e1000_main.c  |  1
-rw-r--r--  drivers/w1/w1.c                 |  2
-rw-r--r--  include/linux/netpoll.h         | 20
-rw-r--r--  include/linux/skbuff.h          |  2
-rw-r--r--  net/core/dev.c                  |  9
-rw-r--r--  net/core/netpoll.c              | 63
-rw-r--r--  net/ipv4/tcp_output.c           | 14
9 files changed, 72 insertions(+), 43 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 5fd00c075053..3b38d6ab06cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -784,7 +784,7 @@ DVB SUBSYSTEM AND DRIVERS
P: LinuxTV.org Project
M: linux-dvb-maintainer@linuxtv.org
L: linux-dvb@linuxtv.org (subscription required)
-W: http://linuxtv.org/developer/dvb.xml
+W: http://linuxtv.org/
S: Supported
EATA-DMA SCSI DRIVER
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 6e4807d64d46..b15761ff4101 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -334,7 +334,7 @@ static void __cpuinit tsc_sync_wait(void)
{
if (notscsync || !cpu_has_tsc)
return;
- sync_tsc(boot_cpu_id);
+ sync_tsc(0);
}
static __init int notscsync_setup(char *s)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e5d2c3c7ce4..b82fd15d0891 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
+ e1000_clean_tx_irq(adapter);
enable_irq(adapter->pdev->irq);
}
#endif
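
The e1000 hunk above makes the netpoll controller reclaim completed transmit buffers itself. The likely motivation: netpoll/netconsole can transmit heavily while no NAPI softirq is running, so without an explicit cleanup call the TX ring fills up with already-sent descriptors. A minimal sketch of the pattern, with hypothetical foo_* names standing in for the driver's real interrupt handler and TX-ring cleanup routine:

	/* Sketch only -- foo_intr() and foo_clean_tx_irq() are placeholders,
	 * not functions from this patch. */
	static void foo_netpoll(struct net_device *netdev)
	{
		struct foo_adapter *adapter = netdev_priv(netdev);

		disable_irq(adapter->pdev->irq);
		foo_intr(adapter->pdev->irq, netdev, NULL);  /* handle pending RX/TX events */
		foo_clean_tx_irq(adapter);                   /* free descriptors netpoll queued */
		enable_irq(adapter->pdev->irq);
	}
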
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 8a9c42822502..0bbf029b1ef1 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -593,7 +593,7 @@ void w1_search(struct w1_master *dev, w1_slave_found_callback cb)
* Return 0 - device(s) present, 1 - no devices present.
*/
if (w1_reset_bus(dev)) {
- dev_info(&dev->dev, "No devices present on the wire.\n");
+ dev_dbg(&dev->dev, "No devices present on the wire.\n");
break;
}
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bcd0ac33f592..5ade54a78dbb 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -9,6 +9,7 @@
#include <linux/netdevice.h>
#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
#include <linux/list.h>
struct netpoll;
@@ -26,6 +27,7 @@ struct netpoll {
struct netpoll_info {
spinlock_t poll_lock;
int poll_owner;
+ int tries;
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
return ret;
}
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
{
+ rcu_read_lock(); /* deal with race on ->npinfo */
if (dev->npinfo) {
spin_lock(&dev->npinfo->poll_lock);
dev->npinfo->poll_owner = smp_processor_id();
+ return dev->npinfo;
}
+ return NULL;
}
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
{
- if (dev->npinfo) {
- dev->npinfo->poll_owner = -1;
- spin_unlock(&dev->npinfo->poll_lock);
+ struct netpoll_info *npi = have;
+
+ if (npi) {
+ npi->poll_owner = -1;
+ spin_unlock(&npi->poll_lock);
}
+ rcu_read_unlock();
}
#else
#define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
#define netpoll_poll_unlock(a)
#endif
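
The netpoll.h change turns netpoll_poll_lock() into a function that takes the RCU read lock and returns an opaque cookie: the netpoll_info whose poll_lock was actually taken, or NULL if the device had none at that moment. netpoll_poll_unlock() then releases exactly what was locked, so a concurrent netpoll_setup() publishing dev->npinfo between the two calls can no longer make the unlock path touch a lock it never acquired. A caller-side sketch of the new contract (illustrative only; the real user is net_rx_action() in the next file):

	void *have;

	have = netpoll_poll_lock(dev);   /* rcu_read_lock(); maybe take npinfo->poll_lock */
	dev->poll(dev, &budget);         /* driver poll routine runs with poll_owner set  */
	netpoll_poll_unlock(have);       /* unlock whatever the cookie says; rcu_read_unlock() */
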
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0061c9470482..948527e42a60 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,7 +255,7 @@ struct sk_buff {
nohdr:1;
/* 3 bits spare */
__u8 pkt_type;
- __u16 protocol;
+ __be16 protocol;
void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
diff --git a/net/core/dev.c b/net/core/dev.c
index 52a3bf7ae177..faf59b02c4bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *queue = &__get_cpu_var(softnet_data);
unsigned long start_time = jiffies;
int budget = netdev_budget;
-
+ void *have;
+
local_irq_disable();
while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
dev = list_entry(queue->poll_list.next,
struct net_device, poll_list);
- netpoll_poll_lock(dev);
+ have = netpoll_poll_lock(dev);
if (dev->quota <= 0 || dev->poll(dev, &budget)) {
- netpoll_poll_unlock(dev);
+ netpoll_poll_unlock(have);
local_irq_disable();
list_del(&dev->poll_list);
list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
else
dev->quota = dev->weight;
} else {
- netpoll_poll_unlock(dev);
+ netpoll_poll_unlock(have);
dev_put(dev);
local_irq_disable();
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c327c9edadc5..a1a9a7abff50 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
int status;
struct netpoll_info *npinfo;
-repeat:
- if(!np || !np->dev || !netif_running(np->dev)) {
+ if (!np || !np->dev || !netif_running(np->dev)) {
__kfree_skb(skb);
return;
}
- /* avoid recursion */
npinfo = np->dev->npinfo;
+
+ /* avoid recursion */
if (npinfo->poll_owner == smp_processor_id() ||
np->dev->xmit_lock_owner == smp_processor_id()) {
if (np->drop)
@@ -265,30 +266,37 @@ repeat:
return;
}
- spin_lock(&np->dev->xmit_lock);
- np->dev->xmit_lock_owner = smp_processor_id();
+ do {
+ npinfo->tries--;
+ spin_lock(&np->dev->xmit_lock);
+ np->dev->xmit_lock_owner = smp_processor_id();
- /*
- * network drivers do not expect to be called if the queue is
- * stopped.
- */
- if (netif_queue_stopped(np->dev)) {
+ /*
+ * network drivers do not expect to be called if the queue is
+ * stopped.
+ */
+ if (netif_queue_stopped(np->dev)) {
+ np->dev->xmit_lock_owner = -1;
+ spin_unlock(&np->dev->xmit_lock);
+ netpoll_poll(np);
+ udelay(50);
+ continue;
+ }
+
+ status = np->dev->hard_start_xmit(skb, np->dev);
np->dev->xmit_lock_owner = -1;
spin_unlock(&np->dev->xmit_lock);
- netpoll_poll(np);
- goto repeat;
- }
-
- status = np->dev->hard_start_xmit(skb, np->dev);
- np->dev->xmit_lock_owner = -1;
- spin_unlock(&np->dev->xmit_lock);
+ /* success */
+ if(!status) {
+ npinfo->tries = MAX_RETRIES; /* reset */
+ return;
+ }
- /* transmit busy */
- if(status) {
+ /* transmit busy */
netpoll_poll(np);
- goto repeat;
- }
+ udelay(50);
+ } while (npinfo->tries > 0);
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
unsigned char *arp_ptr;
int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
u32 sip, tip;
- unsigned long flags;
struct sk_buff *send_skb;
struct netpoll *np = NULL;
- spin_lock_irqsave(&npinfo->rx_lock, flags);
if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
np = npinfo->rx_np;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
if (!np)
return;
@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
if (!npinfo)
goto release;
+ npinfo->rx_flags = 0;
npinfo->rx_np = NULL;
npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
npinfo->poll_owner = -1;
+ npinfo->tries = MAX_RETRIES;
npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
} else
npinfo = ndev->npinfo;
@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
npinfo->rx_np = np;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
+
+ /* fill up the skb queue */
+ refill_skbs();
+
/* last thing to do is link it to the net device structure */
ndev->npinfo = npinfo;
+ /* avoid racing with NAPI reading npinfo */
+ synchronize_rcu();
+
return 0;
release:
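
Two things in the netpoll.c changes are worth spelling out. First, netpoll_send_skb() no longer loops with goto repeat forever when the queue is stopped or hard_start_xmit() reports busy; it now spends a bounded budget (npinfo->tries, reset to MAX_RETRIES on every successful transmit), polling the device and waiting 50us between attempts, so a wedged NIC cannot hang the netconsole path indefinitely. Second, netpoll_setup() pre-fills the skb pool and only then publishes ndev->npinfo, followed by synchronize_rcu(), matching the rcu_read_lock() taken in netpoll_poll_lock() (the patch's own comment: avoid racing with NAPI reading npinfo). A compressed sketch of the bounded-retry idiom, with try_xmit() and reclaim_tx() as hypothetical stand-ins for hard_start_xmit() and netpoll_poll(); this is not the patch itself:

	#define MAX_RETRIES 20000

	static int send_bounded(struct sk_buff *skb, int *tries)
	{
		do {
			(*tries)--;
			if (try_xmit(skb) == 0) {      /* device accepted the frame */
				*tries = MAX_RETRIES;  /* success: restore the budget */
				return 0;
			}
			reclaim_tx();                  /* let the driver drain its TX ring */
			udelay(50);                    /* brief back-off before retrying */
		} while (*tries > 0);
		return -EBUSY;                         /* give up instead of spinning forever */
	}
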
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d076f0db100..3ed6fc15815b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1370,15 +1370,21 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (skb->len > cur_mss) {
int old_factor = tcp_skb_pcount(skb);
- int new_factor;
+ int diff;
if (tcp_fragment(sk, skb, cur_mss, cur_mss))
return -ENOMEM; /* We'll try again later. */
/* New SKB created, account for it. */
- new_factor = tcp_skb_pcount(skb);
- tp->packets_out -= old_factor - new_factor;
- tp->packets_out += tcp_skb_pcount(skb->next);
+ diff = old_factor - tcp_skb_pcount(skb) -
+ tcp_skb_pcount(skb->next);
+ tp->packets_out -= diff;
+
+ if (diff > 0) {
+ tp->fackets_out -= diff;
+ if ((int)tp->fackets_out < 0)
+ tp->fackets_out = 0;
+ }
}
/* Collapse two adjacent packets if worthwhile and we can. */
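
The tcp_output.c hunk replaces the two-step packets_out adjustment with a single signed diff and, when that diff is positive, also shrinks fackets_out (clamped at zero), which the removed code never did. A worked example of the arithmetic, with hypothetical numbers:

	old_factor                = 4   (e.g. a 2144-byte skb counted at an old MSS of 536)
	tcp_skb_pcount(skb)       = 1   (first 1460 bytes after tcp_fragment() at cur_mss = 1460)
	tcp_skb_pcount(skb->next) = 1   (remaining 684 bytes)

	diff = 4 - 1 - 1 = 2
	tp->packets_out -= 2;
	tp->fackets_out -= 2;           /* clamped at 0 if it would go negative */

The net change to packets_out is algebraically the same as with the removed code; what the new code adds is keeping fackets_out consistent whenever fragmentation reduces the total packet count.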