author    Hugh Dickins <hughd@google.com>  2020-08-12 20:17:00 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-08-14 15:16:03 -0700
commit    a85ffd59bd362d6c2e456e7f523b091830cd5454 (patch)
tree      7efecba6fdd5c3d43ba037acf6f6be8f4426a41f
parent    b923f1247b72fc100b87792fd2129d026bb10e66 (diff)
dma-debug: fix debug_dma_assert_idle(), use rcu_read_lock()
Since commit 2a9127fcf229 ("mm: rewrite wait_on_page_bit_common() logic")
improved unlock_page(), it has become more noticeable how cow_user_page() in
a kernel with CONFIG_DMA_API_DEBUG=y can create and suffer from heavy
contention on DMA debug's radix_lock in debug_dma_assert_idle().

It is only doing a lookup: use rcu_read_lock() and rcu_read_unlock() instead;
though that does require the static ents[] to be moved onstack...

...but, hold on, isn't that radix_tree_gang_lookup() and loop doing quite the
wrong thing: searching CACHELINES_PER_PAGE entries for an exact match with
the first cacheline of the page in question?  radix_tree_gang_lookup() is the
right tool for the job, but we need nothing more than to check the first
entry it can find, reporting if that falls anywhere within the page.

(Is RCU safe here?  As safe as using the spinlock was.  The entries are never
freed, so don't need to be freed by RCU.  They may be reused, and there is a
faint chance of a race, with an offending entry reused while printing its
error info; but the spinlock did not prevent that either, and I agree that
it's not worth worrying about.)

[ Side note: this patch is a clear improvement to the status quo, but the
  next patch will be removing this debug function entirely.

  But just in case we decide we want to resurrect the debugging code some
  day, I'm first applying this improvement patch so that it doesn't get
  lost - Linus ]

Fixes: 3b7a6418c749 ("dma debug: account for cachelines and read-only mappings in overlap tracking")
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
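To make the containment check concrete, here is a minimal userspace sketch
(an illustration, not the kernel code: the sorted array and the
gang_lookup_first() and page_has_active_mapping() helpers are hypothetical
stand-ins for the dma_active_cacheline radix tree and
radix_tree_gang_lookup(), and CACHELINES_PER_PAGE is assumed to be 64, i.e. a
4096-byte page with 64-byte cachelines).  It shows why one lookup plus a
range check is enough, while the old exact-match test could miss an active
cacheline elsewhere in the page:

/*
 * Userspace sketch.  A sorted array models the radix tree keyed by
 * cacheline number; radix_tree_gang_lookup(root, results, first, 1)
 * is modelled as "find the first key >= first".
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINES_PER_PAGE 64ULL	/* assumed: 4096 / 64 */

/* Model of gang lookup: first active cacheline number >= first, or -1. */
static long long gang_lookup_first(const unsigned long long *active,
				   size_t n, unsigned long long first)
{
	for (size_t i = 0; i < n; i++)
		if (active[i] >= first)
			return (long long)active[i];
	return -1;
}

/* New logic: one lookup, then check the hit lands inside this page. */
static bool page_has_active_mapping(const unsigned long long *active,
				    size_t n, unsigned long long pfn)
{
	unsigned long long cln = pfn * CACHELINES_PER_PAGE;
	long long hit = gang_lookup_first(active, n, cln);

	return hit >= 0 && (unsigned long long)hit < cln + CACHELINES_PER_PAGE;
}

int main(void)
{
	/* Active DMA cachelines: one at offset 7 within page 2. */
	const unsigned long long active[] = { 5, 2 * CACHELINES_PER_PAGE + 7 };

	printf("page 2 busy: %d\n", page_has_active_mapping(active, 2, 2));
	printf("page 3 busy: %d\n", page_has_active_mapping(active, 2, 3));
	return 0;
}

Page 2's active cacheline is number 135, not the page's first cacheline
(128), so the old "ent_cln == cln" test would have reported the page idle;
the range check correctly reports it busy, and page 3 idle.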
-rw-r--r--  kernel/dma/debug.c  27
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index f7f807fb6ebb..6f4f4b9d2d03 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -565,11 +565,8 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
  */
 void debug_dma_assert_idle(struct page *page)
 {
-	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
-	struct dma_debug_entry *entry = NULL;
-	void **results = (void **) &ents;
-	unsigned int nents, i;
-	unsigned long flags;
+	struct dma_debug_entry *entry;
+	unsigned long pfn;
 	phys_addr_t cln;
 
 	if (dma_debug_disabled())
@@ -578,20 +575,14 @@ void debug_dma_assert_idle(struct page *page)
 	if (!page)
 		return;
 
-	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
-	spin_lock_irqsave(&radix_lock, flags);
-	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
-				       CACHELINES_PER_PAGE);
-	for (i = 0; i < nents; i++) {
-		phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+	pfn = page_to_pfn(page);
+	cln = (phys_addr_t) pfn << CACHELINE_PER_PAGE_SHIFT;
 
-		if (ent_cln == cln) {
-			entry = ents[i];
-			break;
-		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
-			break;
-	}
-	spin_unlock_irqrestore(&radix_lock, flags);
+	rcu_read_lock();
+	if (!radix_tree_gang_lookup(&dma_active_cacheline, (void **) &entry,
+				    cln, 1) || entry->pfn != pfn)
+		entry = NULL;
+	rcu_read_unlock();
 
 	if (!entry)
 		return;
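For the locking side of the change, the reader pattern the patch adopts looks
like the following userspace sketch, written against liburcu (an assumption
made so it can run outside the kernel; the kernel uses its own RCU and a
radix tree, and dma-debug entries are pooled and never freed, so it needs no
reclamation step at all -- the synchronize_rcu()/free() pair below just shows
the general writer-side pattern).  The point is that readers take no lock,
which is what removes the radix_lock contention seen from cow_user_page():

/* Build with: cc -o rcu_sketch rcu_sketch.c -lurcu  (requires liburcu) */
#define _LGPL_SOURCE
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_ent {
	unsigned long pfn;
};

static struct dma_ent *active_ent;	/* shared, RCU-protected pointer */

/* Lock-free reader, analogous to the patched debug_dma_assert_idle(). */
static void assert_idle(unsigned long pfn)
{
	struct dma_ent *ent;

	rcu_read_lock();
	ent = rcu_dereference(active_ent);
	if (ent && ent->pfn == pfn)
		printf("pfn %lu still has an active DMA mapping\n", pfn);
	rcu_read_unlock();
}

/* Writer: publish a new entry, wait out readers, then reclaim the old. */
static void map_page(unsigned long pfn)
{
	struct dma_ent *new_ent = malloc(sizeof(*new_ent));
	struct dma_ent *old = active_ent;

	new_ent->pfn = pfn;
	rcu_assign_pointer(active_ent, new_ent);
	synchronize_rcu();	/* no reader can still see 'old' */
	free(old);		/* kernel dma-debug skips this: entries are pooled */
}

int main(void)
{
	rcu_register_thread();
	map_page(42);
	assert_idle(42);	/* reports an active mapping */
	assert_idle(7);		/* silent: no entry for this pfn */
	rcu_unregister_thread();
	return 0;
}

As the commit message notes, the reused-entry race this leaves open (an entry
recycled while its error info is being printed) existed under the spinlock
too, so the RCU conversion gives up nothing.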