From 0d2da4b595d03009db7dfb5ebf01c547b89b0ad8 Mon Sep 17 00:00:00 2001
From: Su Hui
Date: Wed, 31 May 2023 12:32:51 +0800
Subject: bpf/tests: Use struct_size()

Use struct_size() instead of hand-writing the size computation. This is
less verbose and more informative.

Signed-off-by: Su Hui
Signed-off-by: Daniel Borkmann
Acked-by: Yonghong Song
Link: https://lore.kernel.org/bpf/20230531043251.989312-1-suhui@nfschina.com
---
 lib/test_bpf.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ade9ac672adb..fa0833410ac1 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -15056,8 +15056,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 	int which, err;
 
 	/* Allocate the table of programs to be used for tall calls */
-	progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]),
-			GFP_KERNEL);
+	progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
 	if (!progs)
 		goto out_nomem;
 
-- cgit v1.2.3


From 7a113ff6355944283402fb617dc97122f68d5a41 Mon Sep 17 00:00:00 2001
From: Andrzej Hajda
Date: Fri, 2 Jun 2023 12:21:33 +0200
Subject: lib/ref_tracker: add unlocked leak print helper

To detect leaks reliably, the caller must be able to check both the
tracked counter and the leaks under the same lock. dir.lock is a
natural candidate for such a lock, and an unlocked print helper can be
called with that lock held. As a bonus, we can reuse this helper in
ref_tracker_dir_exit().

Signed-off-by: Andrzej Hajda
Reviewed-by: Andi Shyti
Reviewed-by: Eric Dumazet
Signed-off-by: Jakub Kicinski
---
 lib/ref_tracker.c | 66 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 38 insertions(+), 28 deletions(-)

(limited to 'lib')

diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index dc7b14aa3431..d4eb0929af8f 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -14,6 +14,38 @@ struct ref_tracker {
 	depot_stack_handle_t free_stack_handle;
 };
 
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+				  unsigned int display_limit)
+{
+	struct ref_tracker *tracker;
+	unsigned int i = 0;
+
+	lockdep_assert_held(&dir->lock);
+
+	list_for_each_entry(tracker, &dir->list, head) {
+		if (i < display_limit) {
+			pr_err("leaked reference.\n");
+			if (tracker->alloc_stack_handle)
+				stack_depot_print(tracker->alloc_stack_handle);
+			i++;
+		} else {
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(ref_tracker_dir_print_locked);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+			   unsigned int display_limit)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dir->lock, flags);
+	ref_tracker_dir_print_locked(dir, display_limit);
+	spin_unlock_irqrestore(&dir->lock, flags);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print);
+
 void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 {
 	struct ref_tracker *tracker, *n;
@@ -27,13 +59,13 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 		kfree(tracker);
 		dir->quarantine_avail++;
 	}
-	list_for_each_entry_safe(tracker, n, &dir->list, head) {
-		pr_err("leaked reference.\n");
-		if (tracker->alloc_stack_handle)
-			stack_depot_print(tracker->alloc_stack_handle);
+	if (!list_empty(&dir->list)) {
+		ref_tracker_dir_print_locked(dir, 16);
 		leak = true;
-		list_del(&tracker->head);
-		kfree(tracker);
+		list_for_each_entry_safe(tracker, n, &dir->list, head) {
+			list_del(&tracker->head);
+			kfree(tracker);
+		}
 	}
 	spin_unlock_irqrestore(&dir->lock, flags);
 	WARN_ON_ONCE(leak);
@@ -42,28 +74,6 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 }
 EXPORT_SYMBOL(ref_tracker_dir_exit);
 
-void ref_tracker_dir_print(struct ref_tracker_dir *dir,
-			   unsigned int display_limit)
-{
-	struct ref_tracker *tracker;
-	unsigned long flags;
-	unsigned int i = 0;
-
-	spin_lock_irqsave(&dir->lock, flags);
-	list_for_each_entry(tracker, &dir->list, head) {
-		if (i < display_limit) {
-			pr_err("leaked reference.\n");
-			if (tracker->alloc_stack_handle)
-				stack_depot_print(tracker->alloc_stack_handle);
-			i++;
-		} else {
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&dir->lock, flags);
-}
-EXPORT_SYMBOL(ref_tracker_dir_print);
-
 int ref_tracker_alloc(struct ref_tracker_dir *dir,
 		      struct ref_tracker **trackerp,
 		      gfp_t gfp)
-- cgit v1.2.3


From b6d7c0eb2dcbd238fa233a3a1737654e380e784a Mon Sep 17 00:00:00 2001
From: Andrzej Hajda
Date: Fri, 2 Jun 2023 12:21:34 +0200
Subject: lib/ref_tracker: improve printing stats

In case the library is tracking a busy subsystem, simply printing the
stack for every active reference will spam the log with long,
hard-to-read, redundant stack traces. To improve readability, the
following changes have been made:
- reports are printed per stack_handle - the log is more compact,
- a display name was added to ref_tracker_dir - it will differentiate
  multiple subsystems,
- the stack trace is printed indented, in the same printk call,
- info about dropped references is printed as well.

Signed-off-by: Andrzej Hajda
Reviewed-by: Andi Shyti
Reviewed-by: Eric Dumazet
Signed-off-by: Jakub Kicinski
---
 lib/ref_tracker.c      | 90 ++++++++++++++++++++++++++++++++++++++++++++------
 lib/test_ref_tracker.c |  2 +-
 2 files changed, 80 insertions(+), 12 deletions(-)

(limited to 'lib')

diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index d4eb0929af8f..2ffe79c90c17 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -1,11 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
+
+#define pr_fmt(fmt) "ref_tracker: " fmt
+
 #include <linux/export.h>
+#include <linux/list_sort.h>
 #include <linux/ref_tracker.h>
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
 #include <linux/stackdepot.h>
 
 #define REF_TRACKER_STACK_ENTRIES 16
+#define STACK_BUF_SIZE 1024
 
 struct ref_tracker {
 	struct list_head head;	/* anchor into dir->list or dir->quarantine */
@@ -14,24 +19,87 @@ struct ref_tracker {
 	depot_stack_handle_t free_stack_handle;
 };
 
-void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
-				  unsigned int display_limit)
+struct ref_tracker_dir_stats {
+	int total;
+	int count;
+	struct {
+		depot_stack_handle_t stack_handle;
+		unsigned int count;
+	} stacks[];
+};
+
+static struct ref_tracker_dir_stats *
+ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
 {
+	struct ref_tracker_dir_stats *stats;
 	struct ref_tracker *tracker;
-	unsigned int i = 0;
 
-	lockdep_assert_held(&dir->lock);
+	stats = kmalloc(struct_size(stats, stacks, limit),
+			GFP_NOWAIT | __GFP_NOWARN);
+	if (!stats)
+		return ERR_PTR(-ENOMEM);
+	stats->total = 0;
+	stats->count = 0;
 
 	list_for_each_entry(tracker, &dir->list, head) {
-		if (i < display_limit) {
-			pr_err("leaked reference.\n");
-			if (tracker->alloc_stack_handle)
-				stack_depot_print(tracker->alloc_stack_handle);
-			i++;
-		} else {
-			break;
+		depot_stack_handle_t stack = tracker->alloc_stack_handle;
+		int i;
+
+		++stats->total;
+		for (i = 0; i < stats->count; ++i)
+			if (stats->stacks[i].stack_handle == stack)
+				break;
+		if (i >= limit)
+			continue;
+		if (i >= stats->count) {
+			stats->stacks[i].stack_handle = stack;
+			stats->stacks[i].count = 0;
+			++stats->count;
 		}
+		++stats->stacks[i].count;
+	}
+
+	return stats;
+}
+
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+				  unsigned int display_limit)
+{
+	struct ref_tracker_dir_stats *stats;
+	unsigned int i = 0, skipped;
+	depot_stack_handle_t stack;
+	char *sbuf;
+
+	lockdep_assert_held(&dir->lock);
+
+	if (list_empty(&dir->list))
+		return;
+
+	stats = ref_tracker_get_stats(dir, display_limit);
+	if (IS_ERR(stats)) {
+		pr_err("%s@%pK: couldn't get stats, error %pe\n",
+		       dir->name, dir, stats);
+		return;
 	}
+
+	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);
+
+	for (i = 0, skipped = stats->total; i < stats->count; ++i) {
+		stack = stats->stacks[i].stack_handle;
+		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
+			sbuf[0] = 0;
+		pr_err("%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
+		       stats->stacks[i].count, stats->total, sbuf);
+		skipped -= stats->stacks[i].count;
+	}
+
+	if (skipped)
+		pr_err("%s@%pK skipped reports about %d/%d users.\n",
+		       dir->name, dir, skipped, stats->total);
+
+	kfree(sbuf);
+
+	kfree(stats);
 }
 EXPORT_SYMBOL(ref_tracker_dir_print_locked);

diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c
index 19d7dec70cc6..49970a7c96f3 100644
--- a/lib/test_ref_tracker.c
+++ b/lib/test_ref_tracker.c
@@ -64,7 +64,7 @@ static int __init test_ref_tracker_init(void)
 {
 	int i;
 
-	ref_tracker_dir_init(&ref_dir, 100);
+	ref_tracker_dir_init(&ref_dir, 100, "selftest");
 
 	timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0);
 	mod_timer(&test_ref_tracker_timer, jiffies + 1);
-- cgit v1.2.3


From 227c6c832303cec3941166d3335ecbccd980d615 Mon Sep 17 00:00:00 2001
From: Andrzej Hajda
Date: Fri, 2 Jun 2023 12:21:35 +0200
Subject: lib/ref_tracker: add printing to memory buffer

Similar to stack_(depot|trace)_snprint(), this patch adds a helper for
printing stats to a memory buffer. It will be helpful in the case of
debugfs.

Signed-off-by: Andrzej Hajda
Reviewed-by: Andi Shyti
Reviewed-by: Eric Dumazet
Signed-off-by: Jakub Kicinski
---
 lib/ref_tracker.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 48 insertions(+), 8 deletions(-)

(limited to 'lib')

diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index 2ffe79c90c17..cce4614b0794 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -62,8 +62,27 @@ ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
 	return stats;
 }
 
-void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
-				  unsigned int display_limit)
+struct ostream {
+	char *buf;
+	int size, used;
+};
+
\ +({ \ + struct ostream *_s = (stream); \ +\ + if (!_s->buf) { \ + pr_err(fmt, ##args); \ + } else { \ + int ret, len = _s->size - _s->used; \ + ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \ + _s->used += min(ret, len); \ + } \ +}) + +static void +__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir, + unsigned int display_limit, struct ostream *s) { struct ref_tracker_dir_stats *stats; unsigned int i = 0, skipped; @@ -77,8 +96,8 @@ void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, stats = ref_tracker_get_stats(dir, display_limit); if (IS_ERR(stats)) { - pr_err("%s@%pK: couldn't get stats, error %pe\n", - dir->name, dir, stats); + pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n", + dir->name, dir, stats); return; } @@ -88,19 +107,27 @@ void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, stack = stats->stacks[i].stack_handle; if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4)) sbuf[0] = 0; - pr_err("%s@%pK has %d/%d users at\n%s\n", dir->name, dir, - stats->stacks[i].count, stats->total, sbuf); + pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir, + stats->stacks[i].count, stats->total, sbuf); skipped -= stats->stacks[i].count; } if (skipped) - pr_err("%s@%pK skipped reports about %d/%d users.\n", - dir->name, dir, skipped, stats->total); + pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n", + dir->name, dir, skipped, stats->total); kfree(sbuf); kfree(stats); } + +void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, + unsigned int display_limit) +{ + struct ostream os = {}; + + __ref_tracker_dir_pr_ostream(dir, display_limit, &os); +} EXPORT_SYMBOL(ref_tracker_dir_print_locked); void ref_tracker_dir_print(struct ref_tracker_dir *dir, @@ -114,6 +141,19 @@ void ref_tracker_dir_print(struct ref_tracker_dir *dir, } EXPORT_SYMBOL(ref_tracker_dir_print); +int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size) +{ + struct ostream os = { .buf = buf, .size = size }; + unsigned long flags; + + spin_lock_irqsave(&dir->lock, flags); + __ref_tracker_dir_pr_ostream(dir, 16, &os); + spin_unlock_irqrestore(&dir->lock, flags); + + return os.used; +} +EXPORT_SYMBOL(ref_tracker_dir_snprint); + void ref_tracker_dir_exit(struct ref_tracker_dir *dir) { struct ref_tracker *tracker, *n; -- cgit v1.2.3 From acd8f0e5d72741bee715867e8185e3d57ca93703 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Fri, 2 Jun 2023 12:21:36 +0200 Subject: lib/ref_tracker: remove warnings in case of allocation failure Library can handle allocation failures. To avoid allocation warnings __GFP_NOWARN has been added everywhere. Moreover GFP_ATOMIC has been replaced with GFP_NOWAIT in case of stack allocation on tracker free call. 
Signed-off-by: Andrzej Hajda
Reviewed-by: Andi Shyti
Reviewed-by: Eric Dumazet
Signed-off-by: Jakub Kicinski
---
 lib/ref_tracker.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index cce4614b0794..cf5609b1ca79 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -189,7 +189,7 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir,
 	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
 	struct ref_tracker *tracker;
 	unsigned int nr_entries;
-	gfp_t gfp_mask = gfp;
+	gfp_t gfp_mask = gfp | __GFP_NOWARN;
 	unsigned long flags;
 
 	WARN_ON_ONCE(dir->dead);
@@ -237,7 +237,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
 		return -EEXIST;
 	}
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
-	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
+	stack_handle = stack_depot_save(entries, nr_entries,
+					GFP_NOWAIT | __GFP_NOWARN);
 
 	spin_lock_irqsave(&dir->lock, flags);
 	if (tracker->dead) {
-- cgit v1.2.3


From 8d2b2281aea90ab265733c3cda83b73a01ca352f Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Sun, 4 Jun 2023 16:28:58 +0300
Subject: mac_pton: Clean up the header inclusions

Since hex_to_bin() is provided by hex.h, there is no need to require
kernel.h. Replace the latter with the former and add the missing
export.h.

Signed-off-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20230604132858.6650-1-andriy.shevchenko@linux.intel.com
Signed-off-by: Paolo Abeni
---
 lib/net_utils.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/net_utils.c b/lib/net_utils.c
index c17201df3d08..42bb0473fb22 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -2,7 +2,8 @@
 #include <linux/string.h>
 #include <linux/if_ether.h>
 #include <linux/ctype.h>
-#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/hex.h>
 
 bool mac_pton(const char *s, u8 *mac)
 {
-- cgit v1.2.3


From f5f82cd18732d828bcd1ec308c4e8c55012e84b0 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Tue, 6 Jun 2023 14:08:50 +0100
Subject: Move netfs_extract_iter_to_sg() to lib/scatterlist.c

Move netfs_extract_iter_to_sg() to lib/scatterlist.c as it's going to be
used by more than just network filesystems (AF_ALG, for example).

Signed-off-by: David Howells
cc: Jeff Layton
cc: Steve French
cc: Shyam Prasad N
cc: Rohith Surabattula
cc: Jens Axboe
cc: Herbert Xu
cc: "David S. Miller"
cc: Eric Dumazet
cc: Jakub Kicinski
cc: Paolo Abeni
cc: Matthew Wilcox
cc: linux-crypto@vger.kernel.org
cc: linux-cachefs@redhat.com
cc: linux-cifs@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: netdev@vger.kernel.org
Signed-off-by: Paolo Abeni
---
 lib/scatterlist.c | 269 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 269 insertions(+)

(limited to 'lib')

diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d7519a8f308..e97d7060329e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -9,6 +9,8 @@
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
 #include <linux/kmemleak.h>
+#include <linux/bvec.h>
+#include <linux/uio.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -1095,3 +1097,270 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
 	return offset;
 }
 EXPORT_SYMBOL(sg_zero_buffer);
+
+/*
+ * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
+ * iterators, and add them to the scatterlist.
+ */
+static ssize_t extract_user_to_sg(struct iov_iter *iter,
+				  ssize_t maxsize,
+				  struct sg_table *sgtable,
+				  unsigned int sg_max,
+				  iov_iter_extraction_t extraction_flags)
+{
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	struct page **pages;
+	unsigned int npages;
+	ssize_t ret = 0, res;
+	size_t len, off;
+
+	/* We decant the page list into the tail of the scatterlist */
+	pages = (void *)sgtable->sgl +
+		array_size(sg_max, sizeof(struct scatterlist));
+	pages -= sg_max;
+
+	do {
+		res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
+					     extraction_flags, &off);
+		if (res < 0)
+			goto failed;
+
+		len = res;
+		maxsize -= len;
+		ret += len;
+		npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
+		sg_max -= npages;
+
+		for (; npages > 0; npages--) {
+			struct page *page = *pages;
+			size_t seg = min_t(size_t, PAGE_SIZE - off, len);
+
+			*pages++ = NULL;
+			sg_set_page(sg, page, seg, off);
+			sgtable->nents++;
+			sg++;
+			len -= seg;
+			off = 0;
+		}
+	} while (maxsize > 0 && sg_max > 0);
+
+	return ret;
+
+failed:
+	while (sgtable->nents > sgtable->orig_nents)
+		put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+	return res;
+}
+
+/*
+ * Extract up to sg_max pages from a BVEC-type iterator and add them to the
+ * scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_bvec_to_sg(struct iov_iter *iter,
+				  ssize_t maxsize,
+				  struct sg_table *sgtable,
+				  unsigned int sg_max,
+				  iov_iter_extraction_t extraction_flags)
+{
+	const struct bio_vec *bv = iter->bvec;
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	unsigned long start = iter->iov_offset;
+	unsigned int i;
+	ssize_t ret = 0;
+
+	for (i = 0; i < iter->nr_segs; i++) {
+		size_t off, len;
+
+		len = bv[i].bv_len;
+		if (start >= len) {
+			start -= len;
+			continue;
+		}
+
+		len = min_t(size_t, maxsize, len - start);
+		off = bv[i].bv_offset + start;
+
+		sg_set_page(sg, bv[i].bv_page, len, off);
+		sgtable->nents++;
+		sg++;
+		sg_max--;
+
+		ret += len;
+		maxsize -= len;
+		if (maxsize <= 0 || sg_max == 0)
+			break;
+		start = 0;
+	}
+
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/*
+ * Extract up to sg_max pages from a KVEC-type iterator and add them to the
+ * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or
+ * static buffers. The pages are not pinned.
+ */
+static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
+				  ssize_t maxsize,
+				  struct sg_table *sgtable,
+				  unsigned int sg_max,
+				  iov_iter_extraction_t extraction_flags)
+{
+	const struct kvec *kv = iter->kvec;
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	unsigned long start = iter->iov_offset;
+	unsigned int i;
+	ssize_t ret = 0;
+
+	for (i = 0; i < iter->nr_segs; i++) {
+		struct page *page;
+		unsigned long kaddr;
+		size_t off, len, seg;
+
+		len = kv[i].iov_len;
+		if (start >= len) {
+			start -= len;
+			continue;
+		}
+
+		kaddr = (unsigned long)kv[i].iov_base + start;
+		off = kaddr & ~PAGE_MASK;
+		len = min_t(size_t, maxsize, len - start);
+		kaddr &= PAGE_MASK;
+
+		maxsize -= len;
+		ret += len;
+		do {
+			seg = min_t(size_t, len, PAGE_SIZE - off);
+			if (is_vmalloc_or_module_addr((void *)kaddr))
+				page = vmalloc_to_page((void *)kaddr);
+			else
+				page = virt_to_page(kaddr);
+
+			sg_set_page(sg, page, len, off);
+			sgtable->nents++;
+			sg++;
+			sg_max--;
+
+			len -= seg;
+			kaddr += PAGE_SIZE;
+			off = 0;
+		} while (len > 0 && sg_max > 0);
+
+		if (maxsize <= 0 || sg_max == 0)
+			break;
+		start = 0;
+	}
+
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/*
+ * Extract up to sg_max folios from an XARRAY-type iterator and add them to
+ * the scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
+				    ssize_t maxsize,
+				    struct sg_table *sgtable,
+				    unsigned int sg_max,
+				    iov_iter_extraction_t extraction_flags)
+{
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	struct xarray *xa = iter->xarray;
+	struct folio *folio;
+	loff_t start = iter->xarray_start + iter->iov_offset;
+	pgoff_t index = start / PAGE_SIZE;
+	ssize_t ret = 0;
+	size_t offset, len;
+	XA_STATE(xas, xa, index);
+
+	rcu_read_lock();
+
+	xas_for_each(&xas, folio, ULONG_MAX) {
+		if (xas_retry(&xas, folio))
+			continue;
+		if (WARN_ON(xa_is_value(folio)))
+			break;
+		if (WARN_ON(folio_test_hugetlb(folio)))
+			break;
+
+		offset = offset_in_folio(folio, start);
+		len = min_t(size_t, maxsize, folio_size(folio) - offset);
+
+		sg_set_page(sg, folio_page(folio, 0), len, offset);
+		sgtable->nents++;
+		sg++;
+		sg_max--;
+
+		maxsize -= len;
+		ret += len;
+		if (maxsize <= 0 || sg_max == 0)
+			break;
+	}
+
+	rcu_read_unlock();
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/**
+ * extract_iter_to_sg - Extract pages from an iterator and add to an sglist
+ * @iter: The iterator to extract from
+ * @maxsize: The amount of iterator to copy
+ * @sgtable: The scatterlist table to fill in
+ * @sg_max: Maximum number of elements in @sgtable that may be filled
+ * @extraction_flags: Flags to qualify the request
+ *
+ * Extract the page fragments from the given amount of the source iterator and
+ * add them to a scatterlist that refers to all of those bits, to a maximum
+ * addition of @sg_max elements.
+ *
+ * The pages referred to by UBUF- and IOVEC-type iterators are extracted and
+ * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE-
+ * and DISCARD-type are not supported.
+ *
+ * No end mark is placed on the scatterlist; that's left to the caller.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * If successful, @sgtable->nents is updated to include the number of elements
+ * added and the number of bytes added is returned. @sgtable->orig_nents is
+ * left unaltered.
+ *
+ * The iov_iter_extract_mode() function should be used to query how cleanup
+ * should be performed.
+ */
+ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
+			   struct sg_table *sgtable, unsigned int sg_max,
+			   iov_iter_extraction_t extraction_flags)
+{
+	if (maxsize == 0)
+		return 0;
+
+	switch (iov_iter_type(iter)) {
+	case ITER_UBUF:
+	case ITER_IOVEC:
+		return extract_user_to_sg(iter, maxsize, sgtable, sg_max,
+					  extraction_flags);
+	case ITER_BVEC:
+		return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
+					  extraction_flags);
+	case ITER_KVEC:
+		return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
+					  extraction_flags);
+	case ITER_XARRAY:
+		return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
+					    extraction_flags);
+	default:
+		pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
+		WARN_ON_ONCE(1);
+		return -EIO;
+	}
+}
+EXPORT_SYMBOL_GPL(extract_iter_to_sg);
-- cgit v1.2.3


From 6f67fbf8192da80c4db01a1800c7fceaca9cf1f9 Mon Sep 17 00:00:00 2001
From: Jeremy Sowden
Date: Mon, 19 Jun 2023 20:06:57 +0100
Subject: lib/ts_bm: reset initial match offset for every block of text

The `shift` variable, which indicates the offset in the string at which
to start matching the pattern, is initialized to `bm->patlen - 1`, but
it is not reset when a new block is retrieved. This means the
implementation may start looking at later and later positions in each
successive block and miss occurrences of the pattern at the beginning.
E.g., consider an HTTP packet held in a non-linear skb, where the HTTP
request line occurs in the second block:

  [... 52 bytes of packet headers ...]
  GET /bmtest HTTP/1.1\r\nHost: www.example.com\r\n\r\n

and the pattern is "GET /bmtest". Once the first block comprising the
packet headers has been examined, `shift` will be pointing to somewhere
near the end of the block, and so when the second block is examined the
request line at the beginning will be missed. Reinitialize the variable
for each new block.

Fixes: 8082e4ed0a61 ("[LIB]: Boyer-Moore extension for textsearch infrastructure strike #2")
Link: https://bugzilla.netfilter.org/show_bug.cgi?id=1390
Signed-off-by: Jeremy Sowden
Signed-off-by: Pablo Neira Ayuso
---
 lib/ts_bm.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 1f2234221dd1..c8ecbf74ef29 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -60,10 +60,12 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
 	struct ts_bm *bm = ts_config_priv(conf);
 	unsigned int i, text_len, consumed = state->offset;
 	const u8 *text;
-	int shift = bm->patlen - 1, bs;
+	int bs;
 	const u8 icase = conf->flags & TS_IGNORECASE;
 
 	for (;;) {
+		int shift = bm->patlen - 1;
+
 		text_len = conf->get_next_block(consumed, &text, conf, state);
 
 		if (unlikely(text_len == 0))
-- cgit v1.2.3
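
A note on the struct_size() pattern from the first patch above: for a struct
ending in a flexible array member, struct_size(p, member, n) evaluates to
sizeof(*p) + n * sizeof(p->member[0]) but saturates at SIZE_MAX on overflow,
so a poisoned count makes the allocation fail instead of silently undersizing
it. A minimal sketch, using a hypothetical stand-in struct rather than the
real struct bpf_array:

#include <linux/overflow.h>
#include <linux/slab.h>

/* hypothetical table ending in a flexible array member */
struct prog_table {
	int cnt;
	void *ptrs[];
};

static struct prog_table *alloc_prog_table(int ntests)
{
	struct prog_table *progs;

	/*
	 * Open-coded equivalent, which can wrap on overflow:
	 *   sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0])
	 */
	progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
	return progs;	/* NULL on allocation failure or overflow */
}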
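
The four ref_tracker patches above change how leaks are reported, not how the
library is used. A minimal usage sketch, assuming the three-argument
ref_tracker_dir_init() introduced along with the directory display name; the
unfreed tracker is deliberate, so that ref_tracker_dir_exit() emits the new
per-stack "demo@... has N/M users at" report:

#include <linux/ref_tracker.h>
#include <linux/slab.h>

static struct ref_tracker_dir demo_dir;

static void ref_tracker_demo(void)
{
	struct ref_tracker *leaked = NULL, *freed = NULL;

	/* up to 8 freed trackers kept in quarantine; "demo" is the display name */
	ref_tracker_dir_init(&demo_dir, 8, "demo");

	ref_tracker_alloc(&demo_dir, &leaked, GFP_KERNEL);	/* never freed */
	ref_tracker_alloc(&demo_dir, &freed, GFP_KERNEL);
	ref_tracker_free(&demo_dir, &freed);

	/* reports the allocation stack of 'leaked' and warns once */
	ref_tracker_dir_exit(&demo_dir);
}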
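
The ts_bm bug above only bites when bm_find() is fed multiple blocks, as
skb_find_text() does for non-linear skbs. A minimal sketch of driving the
same Boyer-Moore matcher through the textsearch API over a single linear
buffer (buffer contents hypothetical):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/textsearch.h>

static int bm_demo(void)
{
	static const char pattern[] = "GET /bmtest";
	static const char haystack[] = "GET /bmtest HTTP/1.1\r\n";
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("bm", pattern, sizeof(pattern) - 1,
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/*
	 * Linear-buffer front end; skb_find_text() instead feeds skb
	 * fragments block by block through the same bm_find() loop.
	 */
	pos = textsearch_find_continuous(conf, &state,
					 haystack, sizeof(haystack) - 1);
	pr_info("match at %u\n", pos);	/* UINT_MAX means no match */

	textsearch_destroy(conf);
	return 0;
}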