author    David S. Miller <davem@davemloft.net>  2008-03-05 12:26:41 -0800
committer David S. Miller <davem@davemloft.net>  2008-03-05 12:26:41 -0800
commit    255333c1db3ec63921de29b134418a4e56e5921e (patch)
tree      b1cd99373cabfa6fed020496d4d74500e7bc7e92 /net/core
parent    9a43b709a230705ca40a6f854a334a02334a3c1c (diff)
parent    0d66afe7805b169b6bf3c7a88cf8163298b8ef05 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts: net/mac80211/rc80211_pid_algo.c
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/neighbour.c |  2 +-
-rw-r--r--  net/core/netpoll.c   | 12 ++++++++----
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 31b6567f0b6a..23c0a10c0c37 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -839,7 +839,7 @@ static void neigh_timer_handler(unsigned long arg)
 		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
 		/* keep skb alive even if arp_queue overflows */
 		if (skb)
-			skb_get(skb);
+			skb = skb_copy(skb, GFP_ATOMIC);
 		write_unlock(&neigh->lock);
 		neigh->ops->solicit(neigh, skb);
 		atomic_inc(&neigh->probes);
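
The neighbour.c hunk replaces a reference-count bump (skb_get()) with a private copy (skb_copy(skb, GFP_ATOMIC)), so the solicit path no longer shares data with the skb still sitting on arp_queue; the copy can be NULL if the atomic allocation fails, which the caller has to tolerate. Below is a minimal, hedged userspace sketch of the same copy-instead-of-share idea; struct buf, buf_copy() and solicit() are hypothetical stand-ins for struct sk_buff, skb_copy() and neigh->ops->solicit(), not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for struct sk_buff */
struct buf {
	char data[64];
};

/* stand-in for skb_copy(skb, GFP_ATOMIC): a private duplicate, may return NULL */
static struct buf *buf_copy(const struct buf *src)
{
	struct buf *b = malloc(sizeof(*b));
	if (b)
		memcpy(b, src, sizeof(*b));
	return b;
}

/* stand-in for ops->solicit(): only reads the (possibly NULL) copy */
static void solicit(const struct buf *b)
{
	printf("soliciting with %s\n", b ? b->data : "(no payload)");
}

int main(void)
{
	struct buf queued = { .data = "queued probe payload" };

	/* take a private copy while "holding the lock" ... */
	struct buf *copy = buf_copy(&queued);

	/* ... so the queue can purge or reuse its entry without affecting us */
	memset(&queued, 0, sizeof(queued));	/* simulate arp_queue dropping it */

	solicit(copy);		/* still sees the original payload */
	free(copy);
	return 0;
}

The point of the pattern: once the copy is taken under the lock, whatever later happens to the queued original (overflow pruning, reuse) cannot affect the probe being sent.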
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 7ae98659d79d..d0c8bf585f06 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
 static atomic_t trapped;
 
 #define USEC_PER_POLL	50
+#define NETPOLL_RX_ENABLED  1
+#define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE \
 		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
 		return budget;
 
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
 
 	work = napi->poll(napi, budget);
 
 	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
 	return budget - work;
 }
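
The re-added rx_flags word gives the receive path a cheap per-device marker: poll_one_napi() sets NETPOLL_RX_DROP (alongside bumping the global trapped counter) for the duration of napi->poll(), so packets that arrive while netpoll is driving the poll loop itself can be recognised and dropped instead of being processed normally. A rough userspace sketch of that set-flag-around-callback pattern follows; rx_flags, poll_one(), rx_packet() and fake_napi_poll() are illustrative names, not the kernel's API.

#include <stdio.h>

#define RX_ENABLED 1	/* hypothetical analogue of NETPOLL_RX_ENABLED */
#define RX_DROP    2	/* hypothetical analogue of NETPOLL_RX_DROP */

static unsigned int rx_flags = RX_ENABLED;

/* rx path: drop anything that arrives while we are inside our own poll */
static void rx_packet(const char *pkt)
{
	if (rx_flags & RX_DROP) {
		printf("dropped %s (arrived during poll)\n", pkt);
		return;
	}
	printf("delivered %s\n", pkt);
}

/* analogue of poll_one_napi(): mark DROP around the poll callback */
static void poll_one(void (*poll_cb)(void))
{
	rx_flags |= RX_DROP;
	poll_cb();
	rx_flags &= ~RX_DROP;
}

static void fake_napi_poll(void)
{
	/* packets seen while polling hit rx_packet() re-entrantly */
	rx_packet("pkt-from-inside-poll");
}

int main(void)
{
	rx_packet("pkt-normal");	/* delivered */
	poll_one(fake_napi_poll);	/* dropped */
	rx_packet("pkt-after-poll");	/* delivered again */
	return 0;
}

In the kernel the same idea is split between the global trapped counter and the per-device flag; the sketch collapses them into one flag for brevity.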
@@ -470,7 +474,7 @@ int __netpoll_rx(struct sk_buff *skb)
 	if (skb->dev->type != ARPHRD_ETHER)
 		goto out;
 
-	/* if receive ARP during middle of NAPI poll, then queue */
+	/* check if netpoll clients need ARP */
 	if (skb->protocol == htons(ETH_P_ARP) &&
 	    atomic_read(&trapped)) {
 		skb_queue_tail(&npi->arp_tx, skb);
@@ -532,9 +536,6 @@ int __netpoll_rx(struct sk_buff *skb)
 	return 1;
 
 out:
-	/* If packet received while already in poll then just
-	 * silently drop.
-	 */
 	if (atomic_read(&trapped)) {
 		kfree_skb(skb);
 		return 1;
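
__netpoll_rx() treats ARP specially: while trapped is non-zero, ARP frames are queued on arp_tx to be serviced later rather than handled inline, and any other packet received while trapped is simply consumed and freed. The sketch below imitates that defer-or-drop split in plain C; the array-based arp_tx, netpoll_rx_sketch() and service_deferred_arp() are made-up stand-ins for the kernel's skb queue and service path.

#include <stdio.h>

#define MAX_DEFERRED 8

static int trapped;			/* analogue of the trapped counter */
static const char *arp_tx[MAX_DEFERRED];	/* analogue of npinfo->arp_tx */
static int arp_tx_len;

/* returns 1 when the packet was consumed (queued or dropped) */
static int netpoll_rx_sketch(const char *pkt, int is_arp)
{
	if (is_arp && trapped && arp_tx_len < MAX_DEFERRED) {
		arp_tx[arp_tx_len++] = pkt;	/* defer ARP for later */
		return 1;
	}
	if (trapped) {
		printf("dropping %s while trapped\n", pkt);
		return 1;
	}
	return 0;				/* let the normal stack see it */
}

static void service_deferred_arp(void)
{
	for (int i = 0; i < arp_tx_len; i++)
		printf("servicing deferred ARP: %s\n", arp_tx[i]);
	arp_tx_len = 0;
}

int main(void)
{
	trapped = 1;
	netpoll_rx_sketch("arp-request", 1);	/* queued */
	netpoll_rx_sketch("udp-frame", 0);	/* dropped */
	trapped = 0;
	if (!netpoll_rx_sketch("udp-frame", 0))
		printf("udp-frame passed to the stack\n");
	service_deferred_arp();
	return 0;
}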
@@ -673,6 +674,7 @@ int netpoll_setup(struct netpoll *np)
 			goto release;
 		}
 
+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 
 		spin_lock_init(&npinfo->rx_lock);
@@ -754,6 +756,7 @@ int netpoll_setup(struct netpoll *np)
 
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -795,6 +798,7 @@ void netpoll_cleanup(struct netpoll *np)
 			if (npinfo->rx_np == np) {
 				spin_lock_irqsave(&npinfo->rx_lock, flags);
 				npinfo->rx_np = NULL;
+				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 			}
 
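
Finally, netpoll_setup() and netpoll_cleanup() publish and retract the rx hook and the NETPOLL_RX_ENABLED bit together under rx_lock (taken with spin_lock_irqsave()), so the receive path never sees a half-updated pair. Below is a small userspace sketch of that publish-under-a-lock idea, using a pthread mutex where the kernel uses an IRQ-safe spinlock; struct np_info and its rx_enable()/rx_disable() helpers are illustrative, not the kernel structures.

#include <pthread.h>
#include <stdio.h>

#define RX_ENABLED 1		/* hypothetical analogue of NETPOLL_RX_ENABLED */

struct np_info {
	pthread_mutex_t rx_lock;	/* stands in for spinlock + irqsave */
	unsigned int rx_flags;
	void (*rx_hook)(const char *);	/* stands in for npinfo->rx_np */
};

static void my_rx_hook(const char *pkt)
{
	printf("rx hook got %s\n", pkt);
}

/* analogue of the netpoll_setup() tail: publish hook and flag together */
static void rx_enable(struct np_info *npi, void (*hook)(const char *))
{
	pthread_mutex_lock(&npi->rx_lock);
	npi->rx_flags |= RX_ENABLED;
	npi->rx_hook = hook;
	pthread_mutex_unlock(&npi->rx_lock);
}

/* analogue of the netpoll_cleanup() path: unpublish both atomically */
static void rx_disable(struct np_info *npi)
{
	pthread_mutex_lock(&npi->rx_lock);
	npi->rx_hook = NULL;
	npi->rx_flags &= ~RX_ENABLED;
	pthread_mutex_unlock(&npi->rx_lock);
}

int main(void)
{
	struct np_info npi = { .rx_lock = PTHREAD_MUTEX_INITIALIZER };

	rx_enable(&npi, my_rx_hook);
	/* a concurrent reader would also take rx_lock before this check */
	if ((npi.rx_flags & RX_ENABLED) && npi.rx_hook)
		npi.rx_hook("test-frame");
	rx_disable(&npi);
	return 0;
}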