author      Reshetova, Elena <elena.reshetova@intel.com>    2017-06-30 13:07:59 +0300
committer   David S. Miller <davem@davemloft.net>           2017-07-01 07:39:08 -0700
commit      2638595afccf6554bfe55268ff9b2d3ac3dff2e6
tree        958d39026bc292e6048824012fdcb46f3a70d7e9 /net
parent      633547973ffc32fd2c815639d4675e1531f0896f
net: convert sk_buff_fclones.fclone_ref from atomic_t to refcount_t
The refcount_t type and its corresponding API should be used
instead of atomic_t when the variable serves as a reference
counter. This helps avoid accidental refcounter overflows that
might otherwise lead to use-after-free situations. (A minimal
sketch of the difference follows the sign-offs below.)
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
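For readers new to the refcount_t API: unlike a raw atomic_t, a refcount_t
saturates once it reaches its ceiling instead of wrapping around (and the
in-kernel implementation WARNs on misuse), so an over-incremented counter can
no longer wrap to zero and free an object that is still referenced. The
following single-threaded userspace sketch is an illustration only, not the
kernel implementation in include/linux/refcount.h (which is built on atomic
operations); it models the saturation idea for the calls this patch uses,
plus refcount_inc():

/*
 * Minimal userspace sketch of the saturation idea behind refcount_t.
 * NOT the kernel implementation: the real refcount_t is atomic and
 * WARNs on misuse.  Single-threaded, for illustration only.
 */
#include <stdio.h>
#include <limits.h>

struct refcount { unsigned int val; };

#define REFCOUNT_SATURATED UINT_MAX

static void refcount_set(struct refcount *r, unsigned int n)
{
	r->val = n;
}

static unsigned int refcount_read(const struct refcount *r)
{
	return r->val;
}

static void refcount_inc(struct refcount *r)
{
	/* An atomic_t would silently wrap UINT_MAX -> 0 here; a
	 * saturating counter sticks at the ceiling and merely leaks. */
	if (r->val != REFCOUNT_SATURATED)
		r->val++;
}

static int refcount_dec_and_test(struct refcount *r)
{
	/* A saturated counter is never allowed to reach zero, so an
	 * over-referenced object is leaked instead of freed early.
	 * (The real API would also WARN on a 0 -> -1 underflow.) */
	if (r->val == REFCOUNT_SATURATED)
		return 0;
	return --r->val == 0;	/* true on the 1 -> 0 transition */
}

int main(void)
{
	struct refcount ref;

	refcount_set(&ref, 1);				/* object created	*/
	refcount_inc(&ref);				/* second user appears	*/
	printf("refs  = %u\n", refcount_read(&ref));	/* prints 2		*/
	printf("free? = %d\n", refcount_dec_and_test(&ref));	/* prints 0	*/
	printf("free? = %d\n", refcount_dec_and_test(&ref));	/* prints 1	*/
	return 0;
}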
Diffstat (limited to 'net')
-rw-r--r--   net/core/skbuff.c   10
1 file changed, 5 insertions, 5 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 45dc6620dd74..659dfc0494c5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);
 
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		 * This test would have no chance to be true for the clone,
 		 * while here, branch prediction will be good.
 		 */
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
 			goto fastpath;
 		break;
 
@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 		break;
 	}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
 	kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		return NULL;
 
 	if (skb->fclone == SKB_FCLONE_ORIG &&
-	    atomic_read(&fclones->fclone_ref) == 1) {
+	    refcount_read(&fclones->fclone_ref) == 1) {
 		n = &fclones->skb2;
-		atomic_set(&fclones->fclone_ref, 2);
+		refcount_set(&fclones->fclone_ref, 2);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
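A note on the skb_clone() hunk: the counter is updated with
refcount_set(&fclones->fclone_ref, 2) rather than an increment. The preceding
refcount_read(...) == 1 check means the caller holds the only reference to the
fclone pair, so no other context can be manipulating the counter concurrently
and a plain store suffices; this mirrors the fastpath reasoning in the
kfree_skbmem() comment above.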