author    David Gibson <david@gibson.dropbear.id.au>  2007-05-08 12:46:49 +1000
committer Paul Mackerras <paulus@samba.org>  2007-05-08 13:40:31 +1000
commit    d1953c8888ef034b912ee33bc2ea2cce6a414402 (patch)
tree      525e581603a2dd8622b821304440b6ce14b535ae /arch
parent    00c2ae35bd50664bcd841becc6efceef8aa5d074 (diff)
[POWERPC] Remove use of 4level-fixup.h for ppc32
For 32-bit systems, powerpc still relies on the 4level-fixup.h hack, to pretend that the generic pagetable handling stuff is 3-levels rather than 4.  This patch removes this, instead using the newer pgtable-nopmd.h to handle the elision of both the pud and pmd pagetable levels (ppc32 pagetables are actually 2 levels).  This removes a little extraneous code, and makes it more easily compared to the 64-bit pagetable code.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
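For readers unfamiliar with the folding headers the commit message refers to: pgtable-nopmd.h (which in turn pulls in pgtable-nopud.h) makes the pud and pmd levels compile away by reinterpreting the entry of the level above. The following is a paraphrased sketch from memory of the 2007-era asm-generic headers, not a verbatim copy:

/* Folded pud (cf. include/asm-generic/pgtable-nopud.h): a pud_t is just
 * a wrapper around the pgd entry, and pud_offset() is a cast rather
 * than another level of table walk. */
typedef struct { pgd_t pgd; } pud_t;

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd;
}

/* Folded pmd (cf. include/asm-generic/pgtable-nopmd.h): likewise, a
 * pmd_t wraps the already-folded pud entry. */
typedef struct { pud_t pud; } pmd_t;

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud;
}

With both folds in place, a generic four-level walk (pgd, pud, pmd, pte) collapses to the two levels ppc32 actually has, which is why the hunks below can add pud_* calls without changing behaviour.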
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c |  4
-rw-r--r--  arch/powerpc/mm/pgtable_32.c       | 28
2 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 48f3d13a3de5..6656d47841d0 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -306,13 +306,15 @@ EXPORT_SYMBOL(__dma_free_coherent);
 static int __init dma_alloc_init(void)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	int ret = 0;
 
 	do {
 		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
+		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
 		if (!pmd) {
 			printk(KERN_ERR "%s: no pmd tables\n", __func__);
 			ret = -ENOMEM;
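Because the pud level is folded on ppc32, the pud_alloc() added above never allocates anything: the generic helper sees pgd_none() as always false and simply hands back the pgd slot. A hedged illustration of what the new sequence reduces to, reusing the variable names from the hunk above:

	/* Illustration only, assuming the folded headers sketched earlier:
	 * pud_alloc() degenerates to a cast of the pgd entry ... */
	pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);	/* effectively (pud_t *)pgd */
	pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);	/* still does the real work */
	/* ... so the pair behaves like the old pmd_alloc(&init_mm, pgd, ...) call. */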
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bca560374927..d8232b7a08f7 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -261,7 +261,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_offset(pgd_offset_k(va), va);
+	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	pg = pte_alloc_kernel(pd, va);
 	if (pg != 0) {
@@ -354,23 +354,27 @@ int
 get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t	*pgd;
+	pud_t	*pud;
 	pmd_t	*pmd;
 	pte_t	*pte;
 	int	retval = 0;
 
 	pgd = pgd_offset(mm, addr & PAGE_MASK);
 	if (pgd) {
-		pmd = pmd_offset(pgd, addr & PAGE_MASK);
-		if (pmd_present(*pmd)) {
-			pte = pte_offset_map(pmd, addr & PAGE_MASK);
-			if (pte) {
-				retval = 1;
-				*ptep = pte;
-				if (pmdp)
-					*pmdp = pmd;
-				/* XXX caller needs to do pte_unmap, yuck */
-			}
-		}
+		pud = pud_offset(pgd, addr & PAGE_MASK);
+		if (pud && pud_present(*pud)) {
+			pmd = pmd_offset(pud, addr & PAGE_MASK);
+			if (pmd_present(*pmd)) {
+				pte = pte_offset_map(pmd, addr & PAGE_MASK);
+				if (pte) {
+					retval = 1;
+					*ptep = pte;
+					if (pmdp)
+						*pmdp = pmd;
+					/* XXX caller needs to do pte_unmap, yuck */
+				}
+			}
+		}
 	}
 	return(retval);
 }
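As the XXX comment notes, get_pteptr() returns with the PTE still mapped, so any caller has to pair a successful lookup with pte_unmap(). A minimal hypothetical caller sketch; the wrapper function and the use of pte_pfn() are illustrative, not part of this patch:

/* Hypothetical caller, for illustration only. */
static unsigned long example_lookup_pfn(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;
	pmd_t *pmd;
	unsigned long pfn = 0;

	if (get_pteptr(mm, addr, &pte, &pmd)) {
		pfn = pte_pfn(*pte);	/* use the entry while it is mapped */
		pte_unmap(pte);		/* then drop the mapping, per the XXX above */
	}
	return pfn;
}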