author    Borislav Petkov <bp@suse.de>  2021-06-23 17:42:12 +0200
committer Borislav Petkov <bp@suse.de>  2021-06-23 17:43:38 +0200
commit    c4cf5f61982e35348f522464010445efcc0aeb60 (patch)
tree      b3e65bfae5786e33ac82b30d6801983b10934a2f /include/net/sock.h
parent    b7c11876d24bdd7ae3feeaa771b8f903f6cf05eb (diff)
parent    f9dfb5e390fab2df9f7944bb91e7705aba14cd26 (diff)
Merge x86/urgent into x86/fpu
Pick up dependent changes which either went mainline as urgent fixes (x86/urgent is based on -rc7, which contains them) or sit in the current x86/urgent branch, which carries two more urgent fixes, so that the bigger FPU rework can be based on top.

Signed-off-by: Borislav Petkov <bp@suse.de>
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 42bc5e1a627f..7a7058f4f265 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1934,7 +1934,8 @@ static inline u32 net_tx_rndhash(void)
 
 static inline void sk_set_txhash(struct sock *sk)
 {
-	sk->sk_txhash = net_tx_rndhash();
+	/* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+	WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
 }
 
 static inline bool sk_rethink_txhash(struct sock *sk)
@@ -2206,9 +2207,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
 
 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
 {
-	if (sk->sk_txhash) {
+	/* This pairs with WRITE_ONCE() in sk_set_txhash() */
+	u32 txhash = READ_ONCE(sk->sk_txhash);
+
+	if (txhash) {
 		skb->l4_hash = 1;
-		skb->hash = sk->sk_txhash;
+		skb->hash = txhash;
 	}
 }
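
The two hunks above are one fix: the writer publishes sk_txhash with WRITE_ONCE() and the reader loads it exactly once with READ_ONCE() into a local, so the test and the copy can never observe two different values. Below is a minimal userspace sketch of that pattern; fake_sock and fake_skb are simplified stand-ins (not the kernel's struct sock / sk_buff), and C11 relaxed atomics model what READ_ONCE()/WRITE_ONCE() express in kernel code.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fake_sock {
        _Atomic uint32_t sk_txhash;   /* written and read concurrently */
    };

    struct fake_skb {
        uint32_t hash;
        bool l4_hash;
    };

    /* Writer side, analogous to WRITE_ONCE() in sk_set_txhash(). */
    static void set_txhash(struct fake_sock *sk, uint32_t h)
    {
        atomic_store_explicit(&sk->sk_txhash, h, memory_order_relaxed);
    }

    /* Reader side, analogous to READ_ONCE() in skb_set_hash_from_sk():
     * load once into a local, then test and use that SAME value.
     * Reading sk_txhash twice (once to test, once to copy) could see
     * two different values if the writer runs in between. */
    static void set_hash_from_sock(struct fake_skb *skb, struct fake_sock *sk)
    {
        uint32_t txhash = atomic_load_explicit(&sk->sk_txhash,
                                               memory_order_relaxed);

        if (txhash) {
            skb->l4_hash = true;
            skb->hash = txhash;
        }
    }

    int main(void)
    {
        struct fake_sock sk = { 0 };
        struct fake_skb skb = { 0, false };

        set_txhash(&sk, 0xdeadbeef);
        set_hash_from_sock(&skb, &sk);
        printf("hash=%#x l4_hash=%d\n", (unsigned)skb.hash, skb.l4_hash);
        return 0;
    }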
@@ -2231,13 +2235,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	sk_mem_charge(sk, skb->truesize);
 }
 
-static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
 {
 	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
 		skb_orphan(skb);
 		skb->destructor = sock_efree;
 		skb->sk = sk;
+		return true;
 	}
+	return false;
 }
 
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
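
The point of this hunk is the new __must_check bool return: a caller can now tell whether the skb actually took a reference on the socket and clean up when it did not (refcount_inc_not_zero() fails once the last reference is gone). A hedged userspace sketch of that pattern follows; the types and the ref_inc_not_zero() helper are illustrative stand-ins, not the kernel API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins, not the kernel's struct sock / sk_buff. */
    struct fake_sock { _Atomic int refcnt; };
    struct fake_skb  { struct fake_sock *sk; };

    /* Userspace analogue of refcount_inc_not_zero(): take a reference
     * only if the object is still live, never resurrect a dead one. */
    static bool ref_inc_not_zero(_Atomic int *r)
    {
        int old = atomic_load_explicit(r, memory_order_relaxed);

        do {
            if (old == 0)
                return false;    /* last reference already dropped */
        } while (!atomic_compare_exchange_weak(r, &old, old + 1));

        return true;
    }

    /* Mirrors the shape of the patched skb_set_owner_sk_safe(): report
     * whether the skb actually took ownership, so the caller can clean
     * up on failure instead of handing the skb to a dying socket. */
    static bool set_owner_safe(struct fake_skb *skb, struct fake_sock *sk)
    {
        if (sk && ref_inc_not_zero(&sk->refcnt)) {
            skb->sk = sk;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct fake_sock live = { 1 }, dead = { 0 };
        struct fake_skb skb = { 0 };

        printf("live socket: %d\n", set_owner_safe(&skb, &live));  /* 1 */
        printf("dead socket: %d\n", set_owner_safe(&skb, &dead));  /* 0 */
        return 0;
    }

__must_check makes the compiler warn whenever a caller ignores the result, which is what turns the old void version's silent failure mode into something every caller has to handle.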
@@ -2264,8 +2270,13 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
 static inline int sock_error(struct sock *sk)
 {
 	int err;
-	if (likely(!sk->sk_err))
+
+	/* Avoid an atomic operation for the common case.
+	 * This is racy since another cpu/thread can change sk_err under us.
+	 */
+	if (likely(data_race(!sk->sk_err)))
 		return 0;
+
 	err = xchg(&sk->sk_err, 0);
 	return -err;
 }
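
A sketch of the fast-path idea in this hunk, under the assumption that a relaxed C11 load is an acceptable userspace stand-in for the kernel's data_race()-annotated plain read: the common no-error case skips the atomic read-modify-write entirely, and missing a concurrently set error is harmless because a later call still consumes it via the atomic exchange. sk_err and sock_error_sketch() below are illustrative, not the kernel API.

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int sk_err;    /* stand-in for sk->sk_err */

    static int sock_error_sketch(void)
    {
        /* Fast path: cheap, deliberately racy read, standing in for
         * data_race(!sk->sk_err). If it misses a just-set error, the
         * next call will still see and consume it. */
        if (!atomic_load_explicit(&sk_err, memory_order_relaxed))
            return 0;

        /* Slow path: atomically read-and-clear, like xchg(&sk->sk_err, 0),
         * so each pending error is reported exactly once. */
        return -atomic_exchange(&sk_err, 0);
    }

    int main(void)
    {
        printf("%d\n", sock_error_sketch());    /* 0: nothing pending */
        atomic_store(&sk_err, 111);             /* e.g. ECONNREFUSED on Linux */
        printf("%d\n", sock_error_sketch());    /* -111: error consumed */
        printf("%d\n", sock_error_sketch());    /* 0: cleared by the exchange */
        return 0;
    }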