Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h  39
1 file changed, 28 insertions, 11 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bb18c4d69aba..42c29bfbcee3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -44,6 +44,7 @@
#include <net/dst.h>
#include <linux/seq_file.h>
+#include <linux/memcontrol.h>
extern struct inet_hashinfo tcp_hashinfo;
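
For context: the new <linux/memcontrol.h> include supports the memory-cgroup-aware accounting helpers that tcp_out_of_memory() below relies on. sk_memory_allocated() and sk_prot_mem_limits() pick the per-cgroup limits when memcg socket accounting is active and fall back to the global sysctl_tcp_mem[] otherwise. A sketch of the limit lookup, assuming the net/sock.h helpers of this era (details may differ):

    static inline long sk_prot_mem_limits(const struct sock *sk, int index)
    {
    	long *prot = sk->sk_prot->sysctl_mem;	/* global tcp_mem[] */

    	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
    		prot = sk->sk_cgrp->sysctl_mem;	/* per-cgroup limits */

    	return prot[index];			/* index 2 is the hard limit */
    }
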
@@ -229,7 +230,6 @@ extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
-extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
@@ -273,6 +273,14 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
return seq3 - seq2 >= seq1 - seq2;
}
+static inline bool tcp_out_of_memory(struct sock *sk)
+{
+ if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
+ return true;
+ return false;
+}
+
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -283,13 +291,11 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
if (orphans << shift > sysctl_tcp_max_orphans)
return true;
}
-
- if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
- return true;
return false;
}
+extern bool tcp_check_oom(struct sock *sk, int shift);
+
/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
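
The tcp_check_oom() declaration above takes over the memory check that used to live inline in tcp_too_many_orphans(), so the orphan test and the memory test can be evaluated and reported separately. A plausible definition in net/ipv4/tcp.c, combining the two predicates into one ratelimited verdict for callers such as tcp_close() (the log text here is illustrative):

    bool tcp_check_oom(struct sock *sk, int shift)
    {
    	bool too_many_orphans, out_of_socket_memory;

    	too_many_orphans = tcp_too_many_orphans(sk, shift);
    	out_of_socket_memory = tcp_out_of_memory(sk);

    	if (too_many_orphans && net_ratelimit())
    		pr_info("too many orphaned sockets\n");
    	if (out_of_socket_memory && net_ratelimit())
    		pr_info("out of memory -- consider tuning tcp_mem\n");

    	return too_many_orphans || out_of_socket_memory;
    }
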
@@ -311,6 +317,8 @@ extern struct proto tcp_prot;
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
+extern void tcp_init_mem(struct net *net);
+
extern void tcp_v4_err(struct sk_buff *skb, u32);
extern void tcp_shutdown (struct sock *sk, int how);
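
tcp_init_mem() takes a struct net because the tcp_mem[] limits are now set up per network namespace rather than once at boot (which is also why the sysctl_tcp_mem extern disappears above). A sketch of what the net/ipv4/tcp.c definition presumably does, scaling the low/pressure/high thresholds from available memory (the exact ratios and the 128-page floor are illustrative):

    void tcp_init_mem(struct net *net)
    {
    	unsigned long limit = nr_free_buffer_pages() / 8;

    	limit = max(limit, 128UL);			/* floor for small systems */
    	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;	/* below this: no pressure */
    	net->ipv4.sysctl_tcp_mem[1] = limit;		/* pressure threshold */
    	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;	/* hard limit */
    }
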
@@ -628,7 +636,7 @@ extern u32 __tcp_select_window(struct sock *sk);
struct tcp_skb_cb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header; /* For incoming frames */
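
IS_ENABLED() (from <linux/kconfig.h>) is the preferred spelling of the builtin-or-module test: it is nonzero when CONFIG_IPV6 is set to either 'y' or 'm', which is exactly what the old two-macro condition checked. In effect:

    #define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))
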
@@ -773,12 +781,12 @@ static inline int tcp_is_reno(const struct tcp_sock *tp)
static inline int tcp_is_fack(const struct tcp_sock *tp)
{
- return tp->rx_opt.sack_ok & 2;
+ return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}
static inline void tcp_enable_fack(struct tcp_sock *tp)
{
- tp->rx_opt.sack_ok |= 2;
+ tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
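
TCP_FACK_ENABLED replaces the magic constant 2 that used to be masked into rx_opt.sack_ok. The flag is presumably defined in <linux/tcp.h> next to the other sack_ok bits, along these lines:

    #define TCP_SACK_SEEN		(1 << 0)	/* SACK seen on the connection */
    #define TCP_FACK_ENABLED	(1 << 1)	/* FACK enabled locally */
    #define TCP_DSACK_SEEN		(1 << 2)	/* DSACK was received */
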
@@ -834,6 +842,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+/* The maximum number of MSS of available cwnd for which TSO defers
+ * sending when sysctl_tcp_tso_win_divisor is not in use.
+ */
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
+{
+ return 3;
+}
+
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto". This will be the default - same as
* the default reordering threshold - but if reordering increases,
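
tcp_max_tso_deferred_mss() gives a name to the TSO deferral bound that tcp_tso_should_defer() previously hardcoded as 3: the receiver ACKs roughly every other full-sized frame, so once about three MSS of cwnd are available it is better to send than to keep deferring. A simplified sketch of the call site in net/ipv4/tcp_output.c, assuming that function's surrounding variables (limit, win_divisor):

    	if (!win_divisor) {
    		/* Try not to defer past a single delayed ACK: with room
    		 * for more than 3 full-sized frames, send now. */
    		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
    			goto send_now;
    	}
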
@@ -1144,7 +1160,7 @@ struct tcp6_md5sig_key {
/* - sock block */
struct tcp_md5sig_info {
struct tcp4_md5sig_key *keys4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct tcp6_md5sig_key *keys6;
u32 entries6;
u32 alloced6;
@@ -1171,7 +1187,7 @@ struct tcp6_pseudohdr {
union tcp_md5sum_block {
struct tcp4_pseudohdr ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct tcp6_pseudohdr ip6;
#endif
};
@@ -1430,7 +1446,8 @@ extern struct request_sock_ops tcp6_request_sock_ops;
extern void tcp_v4_destroy_sock(struct sock *sk);
extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+ netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
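
The tcp_tso_segment() prototype change follows the tree-wide conversion of device feature masks from u32 to a dedicated type: the NETIF_F_* flags outgrew 32 bits, so <linux/netdev_features.h> carries them in a wider integer:

    typedef u64 netdev_features_t;	/* room for feature bits past bit 31 */
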