author      Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-15 12:31:37 +0200
committer   Willy Tarreau <w@1wt.eu>                   2012-02-11 15:38:10 +0100
commit      8669a4b665030d64d19e33e036b1a3e77202b67e (patch)
tree        cc712bba869803f2638733a1055c7df8cd256d46
parent      3db7e32f7521df6f9b64a9b04abb00dd11ca5735 (diff)
x86, mm: Add __get_user_pages_fast()
Introduce a gup_fast() variant which is usable from IRQ/NMI context.

[ WT: this one is only needed for next patch ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Nick Piggin <npiggin@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Willy Tarreau <w@1wt.eu>
-rw-r--r--    arch/x86/mm/gup.c      56
-rw-r--r--    include/linux/mm.h      6
2 files changed, 62 insertions, 0 deletions
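
For context, a minimal sketch of how a caller might use the new helper from a context that cannot fault or sleep, such as an NMI handler. The helper name peek_user_u64_atomic(), the KM_IRQ0 kmap slot and the alignment assumption are illustrative choices for this sketch only, not part of the commit:

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Illustrative only: read one u64 from user memory in a context that
 * must not fault or sleep.  Unlike get_user_pages_fast(), the __ variant
 * never falls back to the sleeping slow path; it simply returns fewer
 * pages than requested, so the caller has to tolerate a miss.  Assumes
 * 'addr' is 8-byte aligned so the read cannot cross a page boundary.
 */
static u64 peek_user_u64_atomic(unsigned long addr)
{
        struct page *page;
        void *kaddr;
        u64 val;

        if (__get_user_pages_fast(addr, 1, 0, &page) != 1)
                return 0;                       /* page not present right now */

        kaddr = kmap_atomic(page, KM_IRQ0);     /* atomic mapping, no sleeping */
        val = *(u64 *)(kaddr + (addr & ~PAGE_MASK));
        kunmap_atomic(kaddr, KM_IRQ0);

        put_page(page);                         /* drop the reference GUP took */
        return val;
}

The put_page() is needed because the fast GUP path takes a reference on every page it returns; the caller, not GUP, decides when to drop it.
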
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 73e6ce1597b2..5e5ea8be1d56 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -219,6 +219,62 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
        return 1;
}

+/*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                          struct page **pages)
+{
+        struct mm_struct *mm = current->mm;
+        unsigned long addr, len, end;
+        unsigned long next;
+        unsigned long flags;
+        pgd_t *pgdp;
+        int nr = 0;
+
+        start &= PAGE_MASK;
+        addr = start;
+        len = (unsigned long) nr_pages << PAGE_SHIFT;
+        end = start + len;
+        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+                                (void __user *)start, len)))
+                return 0;
+
+        /*
+         * XXX: batch / limit 'nr', to avoid large irq off latency
+         * needs some instrumenting to determine the common sizes used by
+         * important workloads (eg. DB2), and whether limiting the batch size
+         * will decrease performance.
+         *
+         * It seems like we're in the clear for the moment. Direct-IO is
+         * the main guy that batches up lots of get_user_pages, and even
+         * they are limited to 64-at-a-time which is not so many.
+         */
+        /*
+         * This doesn't prevent pagetable teardown, but does prevent
+         * the pagetables and pages from being freed on x86.
+         *
+         * So long as we atomically load page table pointers versus teardown
+         * (which we do on x86, with the above PAE exception), we can follow the
+         * address down to the page and take a ref on it.
+         */
+        local_irq_save(flags);
+        pgdp = pgd_offset(mm, addr);
+        do {
+                pgd_t pgd = *pgdp;
+
+                next = pgd_addr_end(addr, end);
+                if (pgd_none(pgd))
+                        break;
+                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+                        break;
+        } while (pgdp++, addr = next, addr != end);
+        local_irq_restore(flags);
+
+        return nr;
+}
+
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c20007caa48d..4d34c2016155 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -850,6 +850,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);

/*
+ * doesn't attempt to fault and will return short.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                          struct page **pages);
+
+/*
* A callback you can register to apply pressure to ageable caches.
*
* 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should