author     Baruch Even <baruch@ev-en.org>    2007-02-04 23:36:42 -0800
committer  David S. Miller <davem@sunset.davemloft.net>    2007-02-08 12:38:49 -0800
commit     6f74651ae626ec672028587bc700538076dfbefb (patch)
tree       e00c9cf21c9d917a2392310980f9119ed9529221 /net
parent     fda03fbb56bf88f1fb1c57b2474082e5addaa884 (diff)
[TCP]: Separate DSACK from SACK fast path
Move the DSACK code outside the SACK fast-path checking code. If the DSACK check determined that the information was too old, we stayed with a partial cache copy. Most likely this matters very little, since the next packet will not be a DSACK and we will find the entry in the cache; but it's still not good form, and there is little reason to couple the two checks.

Since the SACK receive cache doesn't need the data to be in host order, we also remove the ntohl in the checking loop.

Signed-off-by: Baruch Even <baruch@ev-en.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
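For readers who want the hoisted rule in isolation, here is a minimal user-space sketch of the D-SACK test that the patch moves in front of the SACK fast path. This is not the kernel source; the struct and helper names (sack_block, seq_before, seq_after, is_dsack) are made up for illustration, with seq_before()/seq_after() standing in for the kernel's before()/after() macros.

/* Sketch only: user-space restatement of the hoisted D-SACK test. */
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

struct sack_block {		/* wire-format SACK block, network byte order */
	uint32_t start_seq;
	uint32_t end_seq;
};

/* Wrap-safe sequence comparison, mirroring the kernel's before()/after(). */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static int seq_after(uint32_t a, uint32_t b)
{
	return seq_before(b, a);
}

/*
 * Per RFC 2883, the first SACK block is a D-SACK when it reports data
 * below the cumulative ACK, or when it is fully contained in the second
 * block.  ack_seq is in host byte order; the blocks are as received.
 */
static int is_dsack(const struct sack_block *sp, int num_sacks, uint32_t ack_seq)
{
	if (seq_before(ntohl(sp[0].start_seq), ack_seq))
		return 1;
	if (num_sacks > 1 &&
	    !seq_after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
	    !seq_before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq)))
		return 1;
	return 0;
}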
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_input.c  |  66
1 file changed, 31 insertions(+), 35 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7670ef968dce..870f53afd363 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -951,16 +951,43 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
tp->fackets_out = 0;
prior_fackets = tp->fackets_out;
+ /* Check for D-SACK. */
+ if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
+ dup_sack = 1;
+ tp->rx_opt.sack_ok |= 4;
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+ } else if (num_sacks > 1 &&
+ !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
+ !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+ dup_sack = 1;
+ tp->rx_opt.sack_ok |= 4;
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+ }
+
+ /* D-SACK for already forgotten data...
+ * Do dumb counting. */
+ if (dup_sack &&
+ !after(ntohl(sp[0].end_seq), prior_snd_una) &&
+ after(ntohl(sp[0].end_seq), tp->undo_marker))
+ tp->undo_retrans--;
+
+ /* Eliminate too old ACKs, but take into
+ * account more or less fresh ones, they can
+ * contain valid SACK info.
+ */
+ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
+ return 0;
+
/* SACK fastpath:
* if the only SACK change is the increase of the end_seq of
* the first block then only apply that SACK block
* and use retrans queue hinting otherwise slowpath */
flag = 1;
- for (i = 0; i< num_sacks; i++) {
- __u32 start_seq = ntohl(sp[i].start_seq);
- __u32 end_seq = ntohl(sp[i].end_seq);
+ for (i = 0; i < num_sacks; i++) {
+ __be32 start_seq = sp[i].start_seq;
+ __be32 end_seq = sp[i].end_seq;
- if (i == 0){
+ if (i == 0) {
if (tp->recv_sack_cache[i].start_seq != start_seq)
flag = 0;
} else {
@@ -970,37 +997,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
}
tp->recv_sack_cache[i].start_seq = start_seq;
tp->recv_sack_cache[i].end_seq = end_seq;
-
- /* Check for D-SACK. */
- if (i == 0) {
- u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
-
- if (before(start_seq, ack)) {
- dup_sack = 1;
- tp->rx_opt.sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
- } else if (num_sacks > 1 &&
- !after(end_seq, ntohl(sp[1].end_seq)) &&
- !before(start_seq, ntohl(sp[1].start_seq))) {
- dup_sack = 1;
- tp->rx_opt.sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
- }
-
- /* D-SACK for already forgotten data...
- * Do dumb counting. */
- if (dup_sack &&
- !after(end_seq, prior_snd_una) &&
- after(end_seq, tp->undo_marker))
- tp->undo_retrans--;
-
- /* Eliminate too old ACKs, but take into
- * account more or less fresh ones, they can
- * contain valid SACK info.
- */
- if (before(ack, prior_snd_una - tp->max_window))
- return 0;
- }
}
first_sack_index = 0;
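The other half of the change is dropping the ntohl() conversions inside the fast-path loop: the receive cache is only ever compared for equality against the incoming SACK blocks, and equality of 32-bit words does not depend on byte order, so both sides can stay as __be32. A hedged sketch of that idea follows; the names (sack_cache_entry, cache_matches) are hypothetical and this is not the kernel's recv_sack_cache handling verbatim.

#include <stdint.h>

/* Cached SACK block, kept in network byte order (the kernel uses __be32). */
struct sack_cache_entry {
	uint32_t start_seq;
	uint32_t end_seq;
};

/*
 * Equality of raw 32-bit words is byte-order independent, so the cached
 * (network-order) values can be compared against the wire values directly,
 * with no ntohl() on either side.
 */
static int cache_matches(const struct sack_cache_entry *cached,
			 uint32_t wire_start, uint32_t wire_end)
{
	return cached->start_seq == wire_start && cached->end_seq == wire_end;
}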