Diffstat (limited to 'arch/ppc/mm/init.c')
-rw-r--r--  arch/ppc/mm/init.c  31
1 file changed, 13 insertions, 18 deletions
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 363c157e3617..6164a2b34733 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -96,9 +96,6 @@ extern struct task_struct *current_set[NR_CPUS];
 char *klimit = _end;
 struct mem_pieces phys_avail;
 
-extern char *sysmap;
-extern unsigned long sysmap_size;
-
 /*
  * this tells the system to map all of ram with the segregs
  * (i.e. page tables) instead of the bats.
@@ -442,12 +439,6 @@ void __init mem_init(void)
 	if (agp_special_page)
 		SetPageReserved(virt_to_page(agp_special_page));
 #endif
-	if ( sysmap )
-		for (addr = (unsigned long)sysmap;
-		     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
-		     addr += PAGE_SIZE)
-			SetPageReserved(virt_to_page(addr));
-
 	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
 	     addr += PAGE_SIZE) {
 		if (!PageReserved(virt_to_page(addr)))
@@ -469,7 +460,6 @@ void __init mem_init(void)
 			struct page *page = mem_map + pfn;
 
 			ClearPageReserved(page);
-			set_bit(PG_highmem, &page->flags);
 			set_page_count(page, 1);
 			__free_page(page);
 			totalhigh_pages++;
@@ -483,9 +473,7 @@ void __init mem_init(void)
 	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
 	       initpages<< (PAGE_SHIFT-10),
 	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
-	if (sysmap)
-		printk("System.map loaded at 0x%08x for debugger, size: %ld bytes\n",
-			(unsigned int)sysmap, sysmap_size);
+
 #ifdef CONFIG_PPC_PMAC
 	if (agp_special_page)
 		printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
@@ -535,9 +523,6 @@ set_phys_avail(unsigned long total_memory)
 	if (rtas_data)
 		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
 #endif
-	/* remove the sysmap pages from the available memory */
-	if (sysmap)
-		mem_pieces_remove(&phys_avail, __pa(sysmap), sysmap_size, 1);
 #ifdef CONFIG_PPC_PMAC
 	/* Because of some uninorth weirdness, we need a page of
 	 * memory as high as possible (it must be outside of the
@@ -621,9 +606,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page)
 		    && !test_bit(PG_arch_1, &page->flags)) {
-			if (vma->vm_mm == current->active_mm)
+			if (vma->vm_mm == current->active_mm) {
+#ifdef CONFIG_8xx
+				/* On 8xx, cache control instructions (particularly
+				 * "dcbst" from flush_dcache_icache) fault as write
+				 * operation if there is an unpopulated TLB entry
+				 * for the address in question. To workaround that,
+				 * we invalidate the TLB here, thus avoiding dcbst
+				 * misbehaviour.
+				 */
+				_tlbie(address);
+#endif
 				__flush_dcache_icache((void *) address);
-			else
+			} else
 				flush_dcache_icache_page(page);
 			set_bit(PG_arch_1, &page->flags);
 		}
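
For readability, the resulting flush path in update_mmu_cache() after the last hunk is sketched below. This is a reconstruction from the hunk context above, not a quote of the full file: _tlbie(), __flush_dcache_icache() and flush_dcache_icache_page() are ppc kernel internals assumed to exist as used in the diff, and the surrounding function body is omitted.

    /* Sketch of the update_mmu_cache() flush logic after this patch,
     * reconstructed from the diff context (not verbatim file contents).
     * Pages not yet marked coherent via PG_arch_1 get their caches
     * flushed here, then the flag is set so the flush runs only once.
     */
    struct page *page = pfn_to_page(pfn);

    if (!PageReserved(page)
        && !test_bit(PG_arch_1, &page->flags)) {
            if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
                    /* Per the comment in the hunk above: on 8xx, "dcbst"
                     * from flush_dcache_icache faults as a write if there
                     * is an unpopulated TLB entry for the address, so the
                     * TLB is invalidated first to avoid that misbehaviour.
                     */
                    _tlbie(address);
#endif
                    /* Mapping belongs to the current context: flush by
                     * virtual address. */
                    __flush_dcache_icache((void *) address);
            } else
                    /* Other context: flush via the struct page instead. */
                    flush_dcache_icache_page(page);
            set_bit(PG_arch_1, &page->flags);
    }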