author | Nick Piggin <npiggin@suse.de> | 2006-03-22 00:08:05 -0800
---|---|---
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-22 07:53:57 -0800
commit | 8dfcc9ba27e2ed257e5de9539f7f03e57c2c0e33 (patch) |
tree | aecaeb6a0b33c23f79dfcd2418e4a3881a29f2e2 |
parent | 8e7a9aae91101916b86de07fafe3272ea8dc1f10 (diff) |
download | linux-8dfcc9ba27e2ed257e5de9539f7f03e57c2c0e33.tar.gz linux-8dfcc9ba27e2ed257e5de9539f7f03e57c2c0e33.tar.bz2 linux-8dfcc9ba27e2ed257e5de9539f7f03e57c2c0e33.zip |
[PATCH] mm: split highorder pages
Add an explicit mm call to split higher-order pages into individual pages.
This should help avoid bugs and make the code's intention more explicit.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: David Howells <dhowells@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Zankel <chris@zankel.net>
Signed-off-by: Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/arm/mm/consistent.c | 4
-rw-r--r-- | arch/frv/mm/dma-alloc.c | 4
-rw-r--r-- | arch/mips/mm/init.c | 5
-rw-r--r-- | arch/ppc/kernel/dma-mapping.c | 4
-rw-r--r-- | arch/sh/mm/consistent.c | 3
-rw-r--r-- | arch/xtensa/mm/pgtable.c | 10
-rw-r--r-- | include/linux/mm.h | 6
-rw-r--r-- | mm/memory.c | 4
-rw-r--r-- | mm/page_alloc.c | 22
9 files changed, 41 insertions, 21 deletions
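
For context, the caller-side pattern this patch replaces can be sketched as follows. This is a minimal, hypothetical illustration using kernel APIs of this era (alloc_pages(), set_page_count(), page_address()); the wrapper function names are invented for the example and do not appear in the patch:

```c
#include <linux/mm.h>
#include <linux/gfp.h>

/* Before: arch code open-coded the refcount fixup so each sub-page of a
 * non-compound high-order allocation could later be freed on its own
 * with __free_page(). Tail pages start out with a zero refcount. */
static void *alloc_split_old(gfp_t gfp, unsigned int order)
{
	struct page *page = alloc_pages(gfp, order);
	int i;

	if (!page)
		return NULL;
	for (i = 1; i < (1 << order); i++)
		set_page_count(page + i, 1);
	return page_address(page);
}

/* After: split_page() states the intent explicitly and gives mm a single
 * place to sanity-check the operation. */
static void *alloc_split_new(gfp_t gfp, unsigned int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	split_page(page, order);
	return page_address(page);
}
```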
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index c2ee18d2075e..8a1bfcd50087 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -223,6 +223,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	pte = consistent_pte[idx] + off;
 	c->vm_pages = page;
 
+	split_page(page, order);
+
 	/*
 	 * Set the "dma handle"
 	 */
@@ -231,7 +233,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 
 	do {
 		BUG_ON(!pte_none(*pte));
-		set_page_count(page, 1);
 		/*
 		 * x86 does not mark the pages reserved...
 		 */
@@ -250,7 +251,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	 * Free the otherwise unused pages.
 	 */
 	while (page < end) {
-		set_page_count(page, 1);
 		__free_page(page);
 		page++;
 	}
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index 342823aad758..636b2f8b5d98 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -115,9 +115,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 	 */
 	if (order > 0) {
 		struct page *rpage = virt_to_page(page);
-
-		for (i = 1; i < (1 << order); i++)
-			set_page_count(rpage + i, 1);
+		split_page(rpage, order);
 	}
 
 	err = 0;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 0ff9a348b843..a140da9732db 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -54,7 +54,8 @@ unsigned long empty_zero_page, zero_page_mask;
  */
 unsigned long setup_zero_pages(void)
 {
-	unsigned long order, size;
+	unsigned int order;
+	unsigned long size;
 	struct page *page;
 
 	if (cpu_has_vce)
@@ -67,9 +68,9 @@ unsigned long setup_zero_pages(void)
 		panic("Oh boy, that early out of memory?");
 
 	page = virt_to_page(empty_zero_page);
+	split_page(page, order);
 	while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
 		SetPageReserved(page);
-		set_page_count(page, 1);
 		page++;
 	}
 
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
index 685fd0defe23..61465ec88bc7 100644
--- a/arch/ppc/kernel/dma-mapping.c
+++ b/arch/ppc/kernel/dma-mapping.c
@@ -223,6 +223,8 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
 		struct page *end = page + (1 << order);
 
+		split_page(page, order);
+
 		/*
 		 * Set the "dma handle"
 		 */
@@ -231,7 +233,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 
 		do {
 			BUG_ON(!pte_none(*pte));
-			set_page_count(page, 1);
 			SetPageReserved(page);
 			set_pte_at(&init_mm, vaddr, pte,
 				   mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
@@ -244,7 +245,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		 * Free the otherwise unused pages.
 		 */
 		while (page < end) {
-			set_page_count(page, 1);
 			__free_page(page);
 			page++;
 		}
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index df3a9e452cc5..ee73e30263af 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -23,6 +23,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
+	split_page(page, order);
 
 	ret = page_address(page);
 	*handle = virt_to_phys(ret);
@@ -37,8 +38,6 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 	end = page + (1 << order);
 
 	while (++page < end) {
-		set_page_count(page, 1);
-
 		/* Free any unused pages */
 		if (page >= free) {
 			__free_page(page);
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
index cbc56aedf13e..7d28914d11cb 100644
--- a/arch/xtensa/mm/pgtable.c
+++ b/arch/xtensa/mm/pgtable.c
@@ -21,13 +21,9 @@ pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
 
 	if (likely(p)) {
-		struct page *page;
+		split_page(virt_to_page(p), COLOR_ORDER);
 
 		for (i = 0; i < COLOR_SIZE; i++) {
-			page = virt_to_page(p);
-
-			set_page_count(page, 1);
-
 			if (ADDR_COLOR(p) == color)
 				pte = p;
 			else
@@ -55,9 +51,9 @@ struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
 
 	if (likely(p)) {
-		for (i = 0; i < PAGE_ORDER; i++) {
-			set_page_count(p, 1);
+		split_page(p, COLOR_ORDER);
 
+		for (i = 0; i < PAGE_ORDER; i++) {
 			if (PADDR_COLOR(page_address(p)) == color)
 				page = p;
 			else
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9bbddf228cd9..e67980654c49 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -328,6 +328,12 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
+#ifdef CONFIG_MMU
+void split_page(struct page *page, unsigned int order);
+#else
+static inline void split_page(struct page *page, unsigned int order) {}
+#endif
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
diff --git a/mm/memory.c b/mm/memory.c
index 85e80a57db29..6af555c1c42a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1221,9 +1221,7 @@ out:
  * The page has to be a nice clean _individual_ kernel allocation.
  * If you allocate a compound page, you need to have marked it as
  * such (__GFP_COMP), or manually just split the page up yourself
- * (which is mainly an issue of doing "set_page_count(page, 1)" for
- * each sub-page, and then freeing them one by one when you free
- * them rather than freeing it as a compound page).
+ * (see split_page()).
  *
  * NOTE! Traditionally this was done with "remap_pfn_range()" which
  * took an arbitrary page protection parameter. This doesn't allow
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 102919851353..fc65e87368b3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -752,6 +752,28 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
+#ifdef CONFIG_MMU
+/*
+ * split_page takes a non-compound higher-order page, and splits it into
+ * n (1<<order) sub-pages: page[0..n]
+ * Each sub-page must be freed individually.
+ *
+ * Note: this is probably too low level an operation for use in drivers.
+ * Please consult with lkml before using this in your driver.
+ */
+void split_page(struct page *page, unsigned int order)
+{
+	int i;
+
+	BUG_ON(PageCompound(page));
+	BUG_ON(!page_count(page));
+	for (i = 1; i < (1 << order); i++) {
+		BUG_ON(page_count(page + i));
+		set_page_count(page + i, 1);
+	}
+}
+#endif
+
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
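
One usage note, implied by the "Each sub-page must be freed individually" comment above rather than stated in the patch: after split_page(), the block must not be returned to the allocator as a single order-N unit. A hypothetical cleanup path (the function name is invented for illustration):

```c
/* Release a previously split high-order block one page at a time;
 * __free_pages(page, order) would be wrong once split_page() has run. */
static void free_split_range(struct page *page, unsigned int order)
{
	struct page *end = page + (1 << order);

	while (page < end)
		__free_page(page++);
}
```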