author     Jakub Kicinski <kuba@kernel.org>  2023-08-02 18:44:59 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2023-08-02 18:44:59 -0700
commit     72c1a28473fb132f08a2a447dd1425a472696cd9 (patch)
tree       ad9f5d1ccdab0aea0327a88651f8789910baef0c
parent     49c467dca39df9a3674854969cc5a8eb7170682d (diff)
parent     37dfe5b8ddeb7c0bb97e3643dcfd8f9f1421ad25 (diff)
Merge branch 'net-extend-alloc_skb_with_frags-max-size'
Eric Dumazet says:

====================
net: extend alloc_skb_with_frags() max size

alloc_skb_with_frags(), while being able to use high order allocations,
limits the payload size to PAGE_SIZE * MAX_SKB_FRAGS.

Reviewing Tahsin Erdogan's patch [1], it was clear to me that we need to
remove this limitation.

[1] https://lore.kernel.org/netdev/20230731230736.109216-1-trdgn@amazon.com/

v2: Addressed Willem's feedback on the first patch.
====================

Link: https://lore.kernel.org/r/20230801205254.400094-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  drivers/net/tap.c       |  4
-rw-r--r--  drivers/net/tun.c       |  4
-rw-r--r--  net/core/skbuff.c       | 56
-rw-r--r--  net/packet/af_packet.c  |  4
4 files changed, 34 insertions(+), 34 deletions(-)
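For scale, assuming the common defaults of 4 KiB pages, PAGE_ALLOC_COSTLY_ORDER = 3 and MAX_SKB_FRAGS = 17 (all three are build/arch dependent, so treat these numbers as illustrative), the cap on the frag payload grows roughly eightfold:

/* Old cap: PAGE_SIZE * MAX_SKB_FRAGS
 *        = 4096 * 17                              =  69632 bytes (~68 KiB)
 * New cap: (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) * MAX_SKB_FRAGS
 *        = (4096 << 3) * 17 = 32768 * 17          = 557056 bytes (~544 KiB)
 */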
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 9137fb8c1c42..01574b9d410f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -614,8 +614,10 @@ static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
         if (prepad + len < PAGE_SIZE || !linear)
                 linear = len;

+        if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+                linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                   err, 0);
+                                   err, PAGE_ALLOC_COSTLY_ORDER);
         if (!skb)
                 return NULL;

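The same two-line clamp is repeated verbatim in tun.c and af_packet.c below: any payload that the frag array can no longer hold is folded back into the linear part, so sock_alloc_send_pskb() is never asked for more frag space than alloc_skb_with_frags() can provide. A rough user-space sketch of the idea, with a hypothetical helper and constants standing in for the kernel's PAGE_SIZE, MAX_SKB_FRAGS and PAGE_ALLOC_COSTLY_ORDER (and assuming linear <= len, as the callers guarantee):

#include <stddef.h>

#define SK_PAGE_SIZE    4096UL  /* assumed stand-in for PAGE_SIZE */
#define SK_MAX_FRAGS    17UL    /* assumed stand-in for MAX_SKB_FRAGS */
#define SK_COSTLY_ORDER 3       /* assumed stand-in for PAGE_ALLOC_COSTLY_ORDER */

/* If the frag array cannot hold all of the non-linear payload, fold the
 * excess into the linear part so the overall length still fits. */
static size_t clamp_linear(size_t len, size_t linear)
{
        size_t frag_cap = SK_MAX_FRAGS * (SK_PAGE_SIZE << SK_COSTLY_ORDER);

        if (len - linear > frag_cap)
                linear = len - frag_cap;
        return linear;
}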
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d75456adc62a..8a48431e8c5b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1526,8 +1526,10 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
         if (prepad + len < PAGE_SIZE || !linear)
                 linear = len;

+        if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+                linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                   &err, 0);
+                                   &err, PAGE_ALLOC_COSTLY_ORDER);
         if (!skb)
                 return ERR_PTR(err);

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a298992060e6..c6f98245582c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6204,7 +6204,7 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
  *
  * @header_len: size of linear part
  * @data_len: needed length in frags
- * @max_page_order: max page order desired.
+ * @order: max page order desired.
  * @errcode: pointer to error code if any
  * @gfp_mask: allocation mask
  *
@@ -6212,21 +6212,17 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
  */
 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
                                      unsigned long data_len,
-                                     int max_page_order,
+                                     int order,
                                      int *errcode,
                                      gfp_t gfp_mask)
 {
-        int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
         unsigned long chunk;
         struct sk_buff *skb;
         struct page *page;
-        int i;
+        int nr_frags = 0;

         *errcode = -EMSGSIZE;
-        /* Note this test could be relaxed, if we succeed to allocate
-         * high order pages...
-         */
-        if (npages > MAX_SKB_FRAGS)
+        if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
                 return NULL;

         *errcode = -ENOBUFS;
@@ -6234,34 +6230,32 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
         if (!skb)
                 return NULL;

-        skb->truesize += npages << PAGE_SHIFT;
-
-        for (i = 0; npages > 0; i++) {
-                int order = max_page_order;
-
-                while (order) {
-                        if (npages >= 1 << order) {
-                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
-                                                   __GFP_COMP |
-                                                   __GFP_NOWARN,
-                                                   order);
-                                if (page)
-                                        goto fill_page;
-                                /* Do not retry other high order allocations */
-                                order = 1;
-                                max_page_order = 0;
-                        }
+        while (data_len) {
+                if (nr_frags == MAX_SKB_FRAGS - 1)
+                        goto failure;
+                while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
                         order--;
+
+                if (order) {
+                        page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
+                                           __GFP_COMP |
+                                           __GFP_NOWARN,
+                                           order);
+                        if (!page) {
+                                order--;
+                                continue;
+                        }
+                } else {
+                        page = alloc_page(gfp_mask);
+                        if (!page)
+                                goto failure;
                 }
-                page = alloc_page(gfp_mask);
-                if (!page)
-                        goto failure;
-fill_page:
                 chunk = min_t(unsigned long, data_len,
                               PAGE_SIZE << order);
-                skb_fill_page_desc(skb, i, page, 0, chunk);
+                skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
+                nr_frags++;
+                skb->truesize += (PAGE_SIZE << order);
                 data_len -= chunk;
-                npages -= 1 << order;
         }
         return skb;

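The rewritten loop replaces the old page-count bookkeeping with a simple fallback: shrink the order until it no longer overshoots the remaining payload, retry one order lower when a high-order allocation fails, and give up only when even order-0 pages fail. A minimal user-space model of that strategy (malloc() standing in for alloc_pages(); names, page size and frag limit are invented for illustration, not kernel API):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ   4096UL        /* assumed page size */
#define MAX_FRAGS 17            /* assumed MAX_SKB_FRAGS stand-in */

/* Returns the number of frags used, or -1 on failure. */
static int fill_frags(size_t data_len, int order)
{
        int nr_frags = 0;

        while (data_len) {
                if (nr_frags == MAX_FRAGS - 1)
                        return -1;      /* would overflow the frag array */

                /* Don't use an order bigger than the remaining payload. */
                size_t aligned = (data_len + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
                while (order && aligned < (PAGE_SZ << order))
                        order--;

                void *page = malloc(PAGE_SZ << order);  /* alloc_pages() stand-in */
                if (!page) {
                        if (!order)
                                return -1;      /* even order-0 failed */
                        order--;                /* retry with smaller chunks */
                        continue;
                }

                size_t chunk = data_len < (PAGE_SZ << order) ?
                               data_len : (PAGE_SZ << order);
                printf("frag %d: order %d holds %zu bytes\n",
                       nr_frags, order, chunk);
                free(page);

                nr_frags++;
                data_len -= chunk;
        }
        return nr_frags;
}

int main(void)
{
        return fill_frags(100000, 3) < 0;       /* 3 order-3 frags + 1 tail frag */
}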
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8e3ddec4c3d5..3b77d255d22d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2927,8 +2927,10 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
         if (prepad + len < PAGE_SIZE || !linear)
                 linear = len;

+        if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+                linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                   err, 0);
+                                   err, PAGE_ALLOC_COSTLY_ORDER);
         if (!skb)
                 return NULL;

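A concrete walk-through of the clamp, again under the assumed defaults (4 KiB pages, costly order 3, 17 frags) and with a hypothetical 600000-byte write:

/* len          = 600000
 * linear       = 256                        (initial hint, example value)
 * frag_cap     = 17 * (4096 << 3) = 557056
 * len - linear = 599744 > frag_cap, so:
 * linear       = 600000 - 557056 = 42944
 */

The linear area absorbs the 42944 bytes the frag array cannot hold, and sock_alloc_send_pskb() is asked for exactly frag_cap bytes of frag space at PAGE_ALLOC_COSTLY_ORDER.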