author    David S. Miller <davem@davemloft.net>    2009-10-12 03:00:31 -0700
committer David S. Miller <davem@davemloft.net>    2009-10-12 03:00:31 -0700
commit    d5e63bded6e819ca77ee1a1d97c783a31f6caf30 (patch)
tree      bc8d38eb84b48476748e80e19cbfed102fc41953 /net/packet
parent    91b2a3f9bb0fa8d64b365a10b0624b0341e1a338 (diff)
Revert "af_packet: add interframe drop cmsg (v6)"
This reverts commit 977750076d98c7ff6cbda51858bb5a5894a9d9ab.

Neil is reimplementing this generically, outside of AF_PACKET.

Signed-off-by: David S. Miller <davem@davemloft.net>
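For context, the reverted patch delivered the number of frames dropped since the last successfully queued frame as a PACKET_GAPDATA control message on recvmsg(); as the removed recvmsg hunk below shows, the kernel attached the cmsg only when the gap was nonzero. What follows is a minimal sketch of the userspace consumer this revert makes obsolete, assuming an already-open, bound AF_PACKET socket fd. The PACKET_GAPDATA fallback value is a placeholder, since the real definition lived in the reverted if_packet.h change, outside this diffstat.

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/types.h>
    #include <linux/if_packet.h>

    #ifndef PACKET_GAPDATA
    #define PACKET_GAPDATA 0  /* placeholder: the real value was defined by the reverted patch */
    #endif

    /* Receive one frame and report the drop gap, if the kernel attached
     * one.  The gap cmsg carried the number of frames lost since the
     * previous frame that was queued to this socket. */
    static void recv_and_report_gap(int fd)
    {
            char frame[2048];
            char cbuf[CMSG_SPACE(sizeof(__u32))];
            struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
            struct msghdr msg = {
                    .msg_iov        = &iov,
                    .msg_iovlen     = 1,
                    .msg_control    = cbuf,
                    .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cmsg;

            if (recvmsg(fd, &msg, 0) < 0)
                    return;

            /* Walk the ancillary data looking for the gap counter. */
            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                    if (cmsg->cmsg_level == SOL_PACKET &&
                        cmsg->cmsg_type == PACKET_GAPDATA) {
                            __u32 gap;

                            memcpy(&gap, CMSG_DATA(cmsg), sizeof(gap));
                            printf("%u frame(s) dropped before this one\n", gap);
                    }
            }
    }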
Diffstat (limited to 'net/packet')
-rw-r--r--  net/packet/af_packet.c | 33 ----------------------------------
1 file changed, 0 insertions(+), 33 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 70073a0dea5d..f87ed4803c11 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -525,31 +525,6 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
}
/*
- * If we've lost frames since the last time we queued one to the
- * sk_receive_queue, we need to record it here.
- * This must be called under the protection of the socket lock
- * to prevent racing with other softirqs and user space
- */
-static inline void record_packet_gap(struct sk_buff *skb,
- struct packet_sock *po)
-{
- /*
- * We overload the mark field here, since we're about
- * to enqueue to a receive queue and no body else will
- * use this field at this point
- */
- skb->mark = po->stats.tp_gap;
- po->stats.tp_gap = 0;
- return;
-
-}
-
-static inline __u32 check_packet_gap(struct sk_buff *skb)
-{
- return skb->mark;
-}
-
-/*
This function makes lazy skb cloning in hope that most of packets
are discarded by BPF.
@@ -652,7 +627,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
spin_lock(&sk->sk_receive_queue.lock);
po->stats.tp_packets++;
- record_packet_gap(skb, po);
__skb_queue_tail(&sk->sk_receive_queue, skb);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk, skb->len);
@@ -661,7 +635,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
drop_n_acct:
spin_lock(&sk->sk_receive_queue.lock);
po->stats.tp_drops++;
- po->stats.tp_gap++;
spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
@@ -839,7 +812,6 @@ drop:
ring_is_full:
po->stats.tp_drops++;
- po->stats.tp_gap++;
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk, 0);
@@ -1449,7 +1421,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int copied, err;
struct sockaddr_ll *sll;
- __u32 gap;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1528,10 +1499,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
- gap = check_packet_gap(skb);
- if (gap)
- put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
-
/*
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.