path: root/net/core
author	Stephen Hemminger <shemminger@osdl.org>	2006-10-26 15:46:51 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-02 21:22:33 -0800
commit	b6cd27ed33886a5ffaf0925a6d98e13e18e8a1af (patch)
tree	9657515409d89b8b43134e0a29f7c3730901277c /net/core
parent	93ec2c723e3f8a216dde2899aeb85c648672bc6b (diff)
netpoll per device txq
When the netpoll beast got really busy, it tended to clog things, so it stored them for later. But the beast was putting all its skbs in one basket. This was bad because maybe some pipes were clogged and others were not.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
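For context, the queue that replaces the old global one lives in each device's struct netpoll_info, which is declared in include/linux/netpoll.h and therefore sits outside this net/core-only diff. A minimal sketch of the two fields the patch initializes in netpoll_setup() and drains in queue_process(), with the pre-existing members elided and assuming the 2.6.19-era INIT_WORK() that still takes a data argument:

	struct netpoll_info {
		/* ... existing members (rx state, poll_lock, arp_tx, refcnt) elided ... */
		struct sk_buff_head txq;	/* per-device backlog of skbs awaiting transmit */
		struct work_struct tx_work;	/* work item that runs queue_process() to drain txq */
	};

Since sk_buff_head brings its own spinlock and length accounting, the hand-rolled queue_lock/queue_depth/queue_head/queue_tail bookkeeping can be dropped, and because every device schedules its own tx_work, one clogged device no longer stalls deferred transmits for all the others.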
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/netpoll.c	50
1 file changed, 15 insertions(+), 35 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c66df2f45d26..ac4e8b8f57d1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -38,10 +38,6 @@
 
 static struct sk_buff_head skb_pool;
 
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
 static atomic_t trapped;
 
 #define NETPOLL_RX_ENABLED 1
@@ -56,46 +52,25 @@ static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(void *p)
 {
-	unsigned long flags;
+	struct netpoll_info *npinfo = p;
 	struct sk_buff *skb;
 
-	while (queue_head) {
-		spin_lock_irqsave(&queue_lock, flags);
-
-		skb = queue_head;
-		queue_head = skb->next;
-		if (skb == queue_tail)
-			queue_head = NULL;
-
-		queue_depth--;
-
-		spin_unlock_irqrestore(&queue_lock, flags);
-
+	while ((skb = skb_dequeue(&npinfo->txq)))
 		dev_queue_xmit(skb);
-	}
-}
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
+}
 
 void netpoll_queue(struct sk_buff *skb)
 {
-	unsigned long flags;
+	struct net_device *dev = skb->dev;
+	struct netpoll_info *npinfo = dev->npinfo;
 
-	if (queue_depth == MAX_QUEUE_DEPTH) {
-		__kfree_skb(skb);
-		return;
+	if (!npinfo)
+		kfree_skb(skb);
+	else {
+		skb_queue_tail(&npinfo->txq, skb);
+		schedule_work(&npinfo->tx_work);
 	}
-
-	spin_lock_irqsave(&queue_lock, flags);
-	if (!queue_head)
-		queue_head = skb;
-	else
-		queue_tail->next = skb;
-	queue_tail = skb;
-	queue_depth++;
-	spin_unlock_irqrestore(&queue_lock, flags);
-
-	schedule_work(&send_queue);
 }
 
 static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -658,6 +633,9 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->tries = MAX_RETRIES;
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
 		npinfo = ndev->npinfo;
@@ -780,6 +758,8 @@ void netpoll_cleanup(struct netpoll *np)
 			np->dev->npinfo = NULL;
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
+				skb_queue_purge(&npinfo->txq);
+				flush_scheduled_work();
 				kfree(npinfo);
 			}