author		Kuniyuki Iwashima <kuniyu@amazon.com>	2022-08-23 10:46:47 -0700
committer	David S. Miller <davem@davemloft.net>	2022-08-24 13:46:57 +0100
commit		61adf447e38664447526698872e21c04623afb8e (patch)
tree		606904f24f02ff830860bbeac2006c4b936a3eda
parent		5dcd08cd19912892586c6082d56718333e2d19db (diff)
net: Fix data-races around netdev_tstamp_prequeue.
netdev_tstamp_prequeue can be changed concurrently while it is being read.
Thus, we need to add READ_ONCE() to its readers.
Fixes: 3b098e2d7c69 ("net: Consistent skb timestamping")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/core/dev.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 07da69c1ac0a..4705e6630efa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4928,7 +4928,7 @@ static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
-	net_timestamp_check(netdev_tstamp_prequeue, skb);
+	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	trace_netif_rx(skb);
 
@@ -5281,7 +5281,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 	int ret = NET_RX_DROP;
 	__be16 type;
 
-	net_timestamp_check(!netdev_tstamp_prequeue, skb);
+	net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	trace_netif_receive_skb(skb);
 
@@ -5664,7 +5664,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 {
 	int ret;
 
-	net_timestamp_check(netdev_tstamp_prequeue, skb);
+	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
@@ -5694,7 +5694,7 @@ void netif_receive_skb_list_internal(struct list_head *head)
 
 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
-		net_timestamp_check(netdev_tstamp_prequeue, skb);
+		net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 		skb_list_del_init(skb);
 		if (!skb_defer_rx_timestamp(skb))
 			list_add_tail(&skb->list, &sublist);
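
For context, the fix annotates every lockless read of the plain int netdev_tstamp_prequeue with READ_ONCE(), so the compiler emits exactly one load per check and cannot tear, fuse, or re-read the value while a writer updates it concurrently. Below is a minimal userspace sketch of that reader/writer pattern; the simplified READ_ONCE()/WRITE_ONCE() macros and the names demo_sysctl_value, reader_path() and writer_path() are illustrative assumptions, not the kernel's definitions.

/*
 * Minimal userspace sketch of the READ_ONCE()/WRITE_ONCE() pattern used in
 * the patch above. The macros below are simplified stand-ins for the
 * kernel's versions, and demo_sysctl_value, reader_path() and writer_path()
 * are illustrative names, not kernel code.
 */
#include <stdio.h>

/* Simplified stand-ins: force a single volatile access per read/write so the
 * compiler cannot tear, fuse, or re-load the value. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

/* Plain int that one thread may update while others read it, in the same way
 * a sysctl like netdev_tstamp_prequeue can change under the RX fast path. */
static int demo_sysctl_value = 1;

/* Fast-path reader: snapshot the value exactly once and act on the copy. */
static void reader_path(void)
{
	int prequeue = READ_ONCE(demo_sysctl_value);

	if (prequeue)
		printf("timestamp before queueing\n");
	else
		printf("timestamp after queueing\n");
}

/* Control-path writer: publish the new value with a single plain store. */
static void writer_path(int new_value)
{
	WRITE_ONCE(demo_sysctl_value, new_value);
}

int main(void)
{
	reader_path();
	writer_path(0);
	reader_path();
	return 0;
}

The point of the annotation is not ordering but access atomicity: each reader takes one consistent snapshot of the int, which is enough here because every call site only tests the value once.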