author		Stephen Hemminger <shemminger@linux-foundation.org>	2007-07-25 23:49:34 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2007-07-31 02:27:57 -0700
commit		30cfd0baf0a0c4329fff1ef4b622919297969ec8 (patch)
tree		945a72f636ced1b443d894495704237e4cb7816d /net/ipv4
parent		6a302358d87fedaf7bda12b8e909265ebf1ce674 (diff)
download	linux-stable-30cfd0baf0a0c4329fff1ef4b622919297969ec8.tar.gz
		linux-stable-30cfd0baf0a0c4329fff1ef4b622919297969ec8.tar.bz2
		linux-stable-30cfd0baf0a0c4329fff1ef4b622919297969ec8.zip
[TCP]: congestion control API pass RTT in microseconds
This patch changes the API for the callback that is done after an ACK is received. It solves a couple of issues:

* Some congestion controls want a higher-resolution RTT value (controlled by the TCP_CONG_RTT_STAMP flag). These don't really want a ktime; they all compute an RTT in microseconds.

* Other congestion controls could use the RTT at jiffies resolution.

To keep the API consistent, the units should be the same for both cases; only the resolution changes.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
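For illustration (not part of the patch), a minimal sketch of a pkts_acked hook written against the new API; the names example_pkts_acked, struct example_ca and its min_rtt_us field are hypothetical:

    /* Sketch only: under the new API the third argument is the RTT sample
     * in microseconds, or a negative value when no valid sample exists
     * (e.g. the ACK covered retransmitted data). */
    static void example_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
    {
    	struct example_ca *ca = inet_csk_ca(sk);	/* hypothetical private state */

    	if (rtt_us < 0)
    		return;		/* duplicate/ambiguous ACK: no RTT sample */

    	/* rtt_us is in microseconds whether the core derived it from a
    	 * ktime stamp (TCP_CONG_RTT_STAMP) or from jiffies. */
    	if (ca->min_rtt_us == 0 || (u32)rtt_us < ca->min_rtt_us)
    		ca->min_rtt_us = rtt_us;
    }

Such a hook would be wired up through the .pkts_acked member of the module's struct tcp_congestion_ops, as the converted modules below do.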
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_bic.c	2
-rw-r--r--	net/ipv4/tcp_cubic.c	2
-rw-r--r--	net/ipv4/tcp_htcp.c	2
-rw-r--r--	net/ipv4/tcp_illinois.c	8
-rw-r--r--	net/ipv4/tcp_input.c	21
-rw-r--r--	net/ipv4/tcp_lp.c	6
-rw-r--r--	net/ipv4/tcp_vegas.c	6
-rw-r--r--	net/ipv4/tcp_vegas.h	2
-rw-r--r--	net/ipv4/tcp_veno.c	6
-rw-r--r--	net/ipv4/tcp_westwood.c	7
-rw-r--r--	net/ipv4/tcp_yeah.c	4
11 files changed, 38 insertions, 28 deletions
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 519de091a94d..4586211e3757 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -206,7 +206,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
/* Track delayed acknowledgment ratio using sliding window
* ratio = (15*ratio + sample) / 16
*/
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index d17da30d82d6..0c44bb67a671 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -334,7 +334,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
/* Track delayed acknowledgment ratio using sliding window
* ratio = (15*ratio + sample) / 16
*/
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 08a02e6045c9..fa61663ace3b 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -98,7 +98,7 @@ static inline void measure_rtt(struct sock *sk)
}
}
-static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index cc5de6f69d46..64f1cbaf96e8 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -83,18 +83,16 @@ static void tcp_illinois_init(struct sock *sk)
}
/* Measure RTT for each ack. */
-static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
{
struct illinois *ca = inet_csk_ca(sk);
- u32 rtt;
ca->acked = pkts_acked;
- if (ktime_equal(last, net_invalid_timestamp()))
+ /* dup ack, no rtt sample */
+ if (rtt < 0)
return;
- rtt = ktime_to_us(net_timedelta(last));
-
/* ignore bogus values, this prevents wraparound in alpha math */
if (rtt > RTT_MAX)
rtt = RTT_MAX;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fec8a7a4dbaf..4b255fe999d9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2490,12 +2490,23 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
tcp_ack_update_rtt(sk, acked, seq_rtt);
tcp_ack_packets_out(sk);
- /* Is the ACK triggering packet unambiguous? */
- if (acked & FLAG_RETRANS_DATA_ACKED)
- last_ackt = net_invalid_timestamp();
+ if (ca_ops->pkts_acked) {
+ s32 rtt_us = -1;
+
+ /* Is the ACK triggering packet unambiguous? */
+ if (!(acked & FLAG_RETRANS_DATA_ACKED)) {
+ /* High resolution needed and available? */
+ if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
+ !ktime_equal(last_ackt,
+ net_invalid_timestamp()))
+ rtt_us = ktime_us_delta(ktime_get_real(),
+ last_ackt);
+ else if (seq_rtt > 0)
+ rtt_us = jiffies_to_usecs(seq_rtt);
+ }
- if (ca_ops->pkts_acked)
- ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
+ ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+ }
}
#if FASTRETRANS_DEBUG > 0
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 80e140e3ec2d..e7f5ef92cbd8 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -260,13 +260,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
* newReno in increase case.
* We work it out by following the idea from TCP-LP's paper directly
*/
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
+static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
- if (!ktime_equal(last, net_invalid_timestamp()))
- tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last)));
+ if (rtt_us > 0)
+ tcp_lp_rtt_sample(sk, rtt_us);
/* calc inference */
if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 914e0307f7af..b49dedcda52d 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -112,16 +112,16 @@ EXPORT_SYMBOL_GPL(tcp_vegas_init);
* o min-filter RTT samples from a much longer window (forever for now)
* to find the propagation delay (baseRTT)
*/
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct vegas *vegas = inet_csk_ca(sk);
u32 vrtt;
- if (ktime_equal(last, net_invalid_timestamp()))
+ if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
- vrtt = ktime_to_us(net_timedelta(last)) + 1;
+ vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < vegas->baseRTT)
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 502fa8183634..6c0eea2f8249 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -17,7 +17,7 @@ struct vegas {
extern void tcp_vegas_init(struct sock *sk);
extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last);
+extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 7a55ddf86032..8fb2aee0b1a4 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -69,16 +69,16 @@ static void tcp_veno_init(struct sock *sk)
}
/* Do rtt sampling needed for Veno. */
-static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct veno *veno = inet_csk_ca(sk);
u32 vrtt;
- if (ktime_equal(last, net_invalid_timestamp()))
+ if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
- vrtt = ktime_to_us(net_timedelta(last)) + 1;
+ vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < veno->basertt)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index e61e09dd513e..20151d6a6241 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -100,11 +100,12 @@ static void westwood_filter(struct westwood *w, u32 delta)
* Called after processing group of packets.
* but all westwood needs is the last sample of srtt.
*/
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
struct westwood *w = inet_csk_ca(sk);
- if (cnt > 0)
- w->rtt = tcp_sk(sk)->srtt >> 3;
+
+ if (rtt > 0)
+ w->rtt = usecs_to_jiffies(rtt);
}
/*
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index c04b7c6ec702..c107fba7430e 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -58,7 +58,7 @@ static void tcp_yeah_init(struct sock *sk)
}
-static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct yeah *yeah = inet_csk_ca(sk);
@@ -66,7 +66,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
if (icsk->icsk_ca_state == TCP_CA_Open)
yeah->pkts_acked = pkts_acked;
- tcp_vegas_pkts_acked(sk, pkts_acked, last);
+ tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
}
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,