Diffstat (limited to 'arch/powerpc/kernel')
48 files changed, 1705 insertions, 1000 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 92673b43858d..1308a86e9070 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -17,6 +17,7 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog +CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog ifdef CONFIG_DYNAMIC_FTRACE # dynamic ftrace setup. @@ -102,6 +103,10 @@ endif obj-$(CONFIG_PPC64) += $(obj64-y) +ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),) +obj-y += ppc_save_regs.o +endif + extra-$(CONFIG_PPC_FPU) += fpu.o extra-$(CONFIG_PPC64) += entry_64.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 75c5dd0138fd..9937fe44555f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -23,9 +23,6 @@ #include <linux/mm.h> #include <linux/suspend.h> #include <linux/hrtimer.h> -#ifdef CONFIG_KVM -#include <linux/kvm_host.h> -#endif #ifdef CONFIG_PPC64 #include <linux/time.h> #include <linux/hardirq.h> @@ -51,6 +48,9 @@ #ifdef CONFIG_PPC_ISERIES #include <asm/iseries/alpaca.h> #endif +#ifdef CONFIG_KVM +#include <asm/kvm_44x.h> +#endif #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) #include "head_booke.h" @@ -60,6 +60,7 @@ int main(void) { DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(MM, offsetof(struct task_struct, mm)); + DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); #ifdef CONFIG_PPC64 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); #else @@ -306,6 +307,7 @@ int main(void) DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); + DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime)); DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); @@ -355,12 +357,10 @@ int main(void) DEFINE(PTE_SIZE, sizeof(pte_t)); #ifdef CONFIG_KVM - DEFINE(TLBE_BYTES, sizeof(struct tlbe)); + DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe)); DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); - DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb)); - DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); @@ -378,6 +378,21 @@ int main(void) DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); #endif +#ifdef CONFIG_44x + DEFINE(PGD_T_LOG2, PGD_T_LOG2); + DEFINE(PTE_T_LOG2, PTE_T_LOG2); +#endif + +#ifdef CONFIG_KVM_EXIT_TIMING + DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, + arch.timing_exit.tv32.tbu)); + DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, + arch.timing_exit.tv32.tbl)); + DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, + arch.timing_last_enter.tv32.tbu)); + DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, + arch.timing_last_enter.tv32.tbl)); +#endif return 0; } diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 
7e8719504f39..923f87aff20a 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -19,6 +19,7 @@ #include <asm/oprofile_impl.h> #include <asm/cputable.h> #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ +#include <asm/mmu.h> struct cpu_spec* cur_cpu_spec = NULL; EXPORT_SYMBOL(cur_cpu_spec); @@ -94,6 +95,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER3 (630)", .cpu_features = CPU_FTRS_POWER3, .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -109,6 +111,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER3 (630+)", .cpu_features = CPU_FTRS_POWER3, .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -124,6 +127,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-II (northstar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -139,6 +143,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-III (pulsar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -154,6 +159,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-III (icestar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -169,6 +175,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-IV (sstar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -184,6 +191,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4 (gp)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -199,6 +207,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4+ (gq)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -215,6 +224,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -233,6 +243,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -251,6 +262,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -269,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, 
.dcache_bsize = 128, .num_pmcs = 8, @@ -287,6 +300,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -303,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5 (gr)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -323,6 +338,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -339,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -356,6 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .machine_check = machine_check_generic, @@ -369,6 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6 | PPC_FEATURE_POWER6_EXT, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -388,6 +407,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER6 (architected)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .machine_check = machine_check_generic, @@ -400,6 +420,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7 (architected)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .machine_check = machine_check_generic, @@ -412,6 +433,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7 (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -434,6 +456,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_PPC64 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_SMT, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 4, @@ -449,6 +472,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "PA6T", .cpu_features = CPU_FTRS_PA6T, .cpu_user_features = COMMON_USER_PA6T, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 6, @@ -466,6 +490,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4 (compatible)", .cpu_features = CPU_FTRS_COMPATIBLE, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -483,6 +508,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC601, .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, + 
.mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, @@ -494,6 +520,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "603", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -506,6 +533,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "603e", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -518,6 +546,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "603ev", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -530,6 +559,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 2, @@ -543,6 +573,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604e", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -556,6 +587,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604r", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -569,6 +601,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604ev", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -582,6 +615,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "740/750", .cpu_features = CPU_FTRS_740_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -595,6 +629,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -608,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -622,6 +658,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -636,6 +673,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -650,6 +688,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CL", .cpu_features = CPU_FTRS_750CL, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 
MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -664,6 +703,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "745/755", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -678,6 +718,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX1, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -692,6 +733,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX2, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -706,6 +748,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -720,6 +763,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750GX", .cpu_features = CPU_FTRS_750GX, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -734,6 +778,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "740/750", .cpu_features = CPU_FTRS_740, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -749,6 +794,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7400_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -764,6 +810,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -779,6 +826,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -794,6 +842,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7450_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -811,6 +860,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7450_21, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -828,6 +878,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7450_23, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -845,6 +896,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features 
= CPU_FTRS_7455_1, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -862,6 +914,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7455_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -879,6 +932,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7455, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -896,6 +950,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -913,6 +968,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -929,6 +985,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -946,6 +1003,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7447A, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -963,6 +1021,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7448, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -979,6 +1038,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "82xx", .cpu_features = CPU_FTRS_82XX, .cpu_user_features = COMMON_USER, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -991,6 +1051,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "G2_LE", .cpu_features = CPU_FTRS_G2_LE, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1003,6 +1064,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e300c1", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1015,6 +1077,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e300c2", .cpu_features = CPU_FTRS_E300C2, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1027,6 +1090,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = 
"e300c3", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1041,6 +1105,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e300c4", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1056,6 +1121,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "(generic PPC)", .cpu_features = CPU_FTRS_CLASSIC32, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, @@ -1071,6 +1137,7 @@ static struct cpu_spec __initdata cpu_specs[] = { * if the 8xx code is there.... */ .cpu_features = CPU_FTRS_8XX, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_8xx, .icache_bsize = 16, .dcache_bsize = 16, .platform = "ppc823", @@ -1083,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "403GC", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, @@ -1095,6 +1163,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, @@ -1106,6 +1175,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "403G ??", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, @@ -1118,6 +1188,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1130,6 +1201,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1142,6 +1214,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1154,6 +1227,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1166,6 +1240,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1178,6 +1253,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | 
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1190,6 +1266,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1202,6 +1279,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1213,6 +1291,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "405LP", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1225,6 +1304,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1237,6 +1317,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1249,6 +1330,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1261,6 +1343,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1273,6 +1356,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1286,6 +1370,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1298,6 +1383,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1312,6 +1398,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GR Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1323,6 +1410,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EP Rev. 
A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, @@ -1335,6 +1423,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GR Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1346,6 +1435,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EP Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, @@ -1358,6 +1448,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EP Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, @@ -1370,6 +1461,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GRX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440grx, @@ -1382,6 +1474,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EPX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440epx, @@ -1394,6 +1487,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GP Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1405,6 +1499,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GP Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1416,6 +1511,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1428,6 +1524,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1440,6 +1537,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1452,6 +1550,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. F", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1464,6 +1563,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440SP Rev. 
A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1475,6 +1575,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440SPe Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, @@ -1487,6 +1588,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440SPe Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, @@ -1499,6 +1601,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440 in Virtex-5 FXT", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440x5, @@ -1509,8 +1612,9 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_mask = 0xffff0002, .pvr_value = 0x13020002, .cpu_name = "460EX", - .cpu_features = CPU_FTRS_44X, + .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460ex, @@ -1521,8 +1625,9 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_mask = 0xffff0002, .pvr_value = 0x13020000, .cpu_name = "460GT", - .cpu_features = CPU_FTRS_44X, + .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460gt, @@ -1535,6 +1640,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "(generic 44x PPC)", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1551,6 +1657,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, + .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", @@ -1565,6 +1672,7 @@ static struct cpu_spec __initdata cpu_specs[] = { PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_UNIFIED_CACHE, + .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", @@ -1577,6 +1685,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, + .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", @@ -1591,6 +1700,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -1608,6 +1718,7 @@ static struct cpu_spec __initdata cpu_specs[] = { PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_HAS_EFP_DOUBLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -1622,6 +1733,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = 
"e500mc", .cpu_features = CPU_FTRS_E500MC, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, @@ -1638,6 +1750,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_e500, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 3a6eaa876ee1..1c5c8a6fc129 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev, { } +#ifdef CONFIG_NOT_COHERENT_CACHE +static inline void dma_direct_sync_sg(struct device *dev, + struct scatterlist *sgl, int nents, + enum dma_data_direction direction) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); +} + +static inline void dma_direct_sync_single_range(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + __dma_sync(bus_to_virt(dma_handle+offset), size, direction); +} +#endif + struct dma_mapping_ops dma_direct_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, @@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = { .dma_supported = dma_direct_dma_supported, .map_page = dma_direct_map_page, .unmap_page = dma_direct_unmap_page, +#ifdef CONFIG_NOT_COHERENT_CACHE + .sync_single_range_for_cpu = dma_direct_sync_single_range, + .sync_single_range_for_device = dma_direct_sync_single_range, + .sync_sg_for_cpu = dma_direct_sync_sg, + .sync_sg_for_device = dma_direct_sync_sg, +#endif }; EXPORT_SYMBOL(dma_direct_ops); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 7ecc0d1855c3..6f7eb7e00c79 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -1162,39 +1162,17 @@ machine_check_in_rtas: #ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) - stwu r1,-48(r1) - stw r3, 12(r1) - stw r4, 16(r1) - stw r5, 20(r1) - stw r6, 24(r1) - mflr r3 - stw r7, 28(r1) - mfcr r5 - stw r8, 32(r1) - stw r9, 36(r1) - stw r10,40(r1) - stw r3, 44(r1) - stw r5, 8(r1) - subi r3, r3, MCOUNT_INSN_SIZE - .globl mcount_call -mcount_call: - bl ftrace_stub - nop - lwz r6, 8(r1) - lwz r0, 44(r1) - lwz r3, 12(r1) + /* + * It is required that _mcount on PPC32 must preserve the + * link register. But we have r0 to play with. We use r0 + * to push the return address back to the caller of mcount + * into the ctr register, restore the link register and + * then jump back using the ctr register. 
+ */ + mflr r0 mtctr r0 - lwz r4, 16(r1) - mtcr r6 - lwz r5, 20(r1) - lwz r6, 24(r1) - lwz r0, 52(r1) - lwz r7, 28(r1) - lwz r8, 32(r1) + lwz r0, 4(r1) mtlr r0 - lwz r9, 36(r1) - lwz r10,40(r1) - addi r1, r1, 48 bctr _GLOBAL(ftrace_caller) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index e0bcf9354286..383ed6eb0085 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -894,18 +894,6 @@ _GLOBAL(enter_prom) #ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) - /* Taken from output of objdump from lib64/glibc */ - mflr r3 - stdu r1, -112(r1) - std r3, 128(r1) - subi r3, r3, MCOUNT_INSN_SIZE - .globl mcount_call -mcount_call: - bl ftrace_stub - nop - ld r0, 128(r1) - mtlr r0 - addi r1, r1, 112 blr _GLOBAL(ftrace_caller) diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index f4b006ed0ab1..5355244c99ff 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -9,22 +9,30 @@ #include <linux/spinlock.h> #include <linux/hardirq.h> +#include <linux/uaccess.h> +#include <linux/module.h> #include <linux/ftrace.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/list.h> #include <asm/cacheflush.h> +#include <asm/code-patching.h> #include <asm/ftrace.h> +#if 0 +#define DEBUGP printk +#else +#define DEBUGP(fmt , ...) do { } while (0) +#endif -static unsigned int ftrace_nop = 0x60000000; +static unsigned int ftrace_nop = PPC_NOP_INSTR; #ifdef CONFIG_PPC32 # define GET_ADDR(addr) addr #else /* PowerPC64's functions are data that points to the functions */ -# define GET_ADDR(addr) *(unsigned long *)addr +# define GET_ADDR(addr) (*(unsigned long *)addr) #endif @@ -33,12 +41,12 @@ static unsigned int ftrace_calc_offset(long ip, long addr) return (int)(addr - ip); } -unsigned char *ftrace_nop_replace(void) +static unsigned char *ftrace_nop_replace(void) { return (char *)&ftrace_nop; } -unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) { static unsigned int op; @@ -68,49 +76,422 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) # define _ASM_PTR " .long " #endif -int +static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned char *new_code) { - unsigned replaced; - unsigned old = *(unsigned *)old_code; - unsigned new = *(unsigned *)new_code; - int faulted = 0; + unsigned char replaced[MCOUNT_INSN_SIZE]; /* * Note: Due to modules and __init, code can * disappear and change, we need to protect against faulting - * as well as code changing. + * as well as code changing. We do this by using the + * probe_kernel_* functions. * * No real locking needed, this code is run through - * kstop_machine. + * kstop_machine, or before SMP starts. 
*/ - asm volatile ( - "1: lwz %1, 0(%2)\n" - " cmpw %1, %5\n" - " bne 2f\n" - " stwu %3, 0(%2)\n" - "2:\n" - ".section .fixup, \"ax\"\n" - "3: li %0, 1\n" - " b 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - _ASM_ALIGN "\n" - _ASM_PTR "1b, 3b\n" - ".previous" - : "=r"(faulted), "=r"(replaced) - : "r"(ip), "r"(new), - "0"(faulted), "r"(old) - : "memory"); - - if (replaced != old && replaced != new) - faulted = 2; - - if (!faulted) - flush_icache_range(ip, ip + 8); - - return faulted; + + /* read the text we want to modify */ + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* Make sure it is what we expect it to be */ + if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) + return -EINVAL; + + /* replace the text with the new text */ + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) + return -EPERM; + + flush_icache_range(ip, ip + 8); + + return 0; +} + +/* + * Helper functions that are the same for both PPC64 and PPC32. + */ +static int test_24bit_addr(unsigned long ip, unsigned long addr) +{ + + /* use the create_branch to verify that this offset can be branched */ + return create_branch((unsigned int *)ip, addr, 0); +} + +static int is_bl_op(unsigned int op) +{ + return (op & 0xfc000003) == 0x48000001; +} + +static unsigned long find_bl_target(unsigned long ip, unsigned int op) +{ + static int offset; + + offset = (op & 0x03fffffc); + /* make it signed */ + if (offset & 0x02000000) + offset |= 0xfe000000; + + return ip + (long)offset; +} + +#ifdef CONFIG_PPC64 +static int +__ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op; + unsigned int jmp[5]; + unsigned long ptr; + unsigned long ip = rec->ip; + unsigned long tramp; + int offset; + + /* read where this goes */ + if (probe_kernel_read(&op, (void *)ip, sizeof(int))) + return -EFAULT; + + /* Make sure that that this is still a 24bit jump */ + if (!is_bl_op(op)) { + printk(KERN_ERR "Not expected bl: opcode is %x\n", op); + return -EINVAL; + } + + /* lets find where the pointer goes */ + tramp = find_bl_target(ip, op); + + /* + * On PPC64 the trampoline looks like: + * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high> + * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low> + * Where the bytes 2,3,6 and 7 make up the 32bit offset + * to the TOC that holds the pointer. + * to jump to. + * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1) + * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12) + * The actually address is 32 bytes from the offset + * into the TOC. 
+ * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) + */ + + DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); + + /* Find where the trampoline jumps to */ + if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { + printk(KERN_ERR "Failed to read %lx\n", tramp); + return -EFAULT; + } + + DEBUGP(" %08x %08x", jmp[0], jmp[1]); + + /* verify that this is what we expect it to be */ + if (((jmp[0] & 0xffff0000) != 0x3d820000) || + ((jmp[1] & 0xffff0000) != 0x398c0000) || + (jmp[2] != 0xf8410028) || + (jmp[3] != 0xe96c0020) || + (jmp[4] != 0xe84c0028)) { + printk(KERN_ERR "Not a trampoline\n"); + return -EINVAL; + } + + offset = (unsigned)((unsigned short)jmp[0]) << 16 | + (unsigned)((unsigned short)jmp[1]); + + DEBUGP(" %x ", offset); + + /* get the address this jumps too */ + tramp = mod->arch.toc + offset + 32; + DEBUGP("toc: %lx", tramp); + + if (probe_kernel_read(jmp, (void *)tramp, 8)) { + printk(KERN_ERR "Failed to read %lx\n", tramp); + return -EFAULT; + } + + DEBUGP(" %08x %08x\n", jmp[0], jmp[1]); + + ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; + + /* This should match what was called */ + if (ptr != GET_ADDR(addr)) { + printk(KERN_ERR "addr does not match %lx\n", ptr); + return -EINVAL; + } + + /* + * We want to nop the line, but the next line is + * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) + * This needs to be turned to a nop too. + */ + if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) + return -EFAULT; + + if (op != 0xe8410028) { + printk(KERN_ERR "Next line is not ld! (%08x)\n", op); + return -EINVAL; + } + + /* + * Milton Miller pointed out that we can not blindly do nops. + * If a task was preempted when calling a trace function, + * the nops will remove the way to restore the TOC in r2 + * and the r2 TOC will get corrupted. 
+ */ + + /* + * Replace: + * bl <tramp> <==== will be replaced with "b 1f" + * ld r2,40(r1) + * 1: + */ + op = 0x48000008; /* b +8 */ + + if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) + return -EPERM; + + + flush_icache_range(ip, ip + 8); + + return 0; +} + +#else /* !PPC64 */ +static int +__ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op; + unsigned int jmp[4]; + unsigned long ip = rec->ip; + unsigned long tramp; + + if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* Make sure that that this is still a 24bit jump */ + if (!is_bl_op(op)) { + printk(KERN_ERR "Not expected bl: opcode is %x\n", op); + return -EINVAL; + } + + /* lets find where the pointer goes */ + tramp = find_bl_target(ip, op); + + /* + * On PPC32 the trampoline looks like: + * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha + * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l + * 0x7d, 0x69, 0x03, 0xa6 mtctr r11 + * 0x4e, 0x80, 0x04, 0x20 bctr + */ + + DEBUGP("ip:%lx jumps to %lx", ip, tramp); + + /* Find where the trampoline jumps to */ + if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { + printk(KERN_ERR "Failed to read %lx\n", tramp); + return -EFAULT; + } + + DEBUGP(" %08x %08x ", jmp[0], jmp[1]); + + /* verify that this is what we expect it to be */ + if (((jmp[0] & 0xffff0000) != 0x3d600000) || + ((jmp[1] & 0xffff0000) != 0x396b0000) || + (jmp[2] != 0x7d6903a6) || + (jmp[3] != 0x4e800420)) { + printk(KERN_ERR "Not a trampoline\n"); + return -EINVAL; + } + + tramp = (jmp[1] & 0xffff) | + ((jmp[0] & 0xffff) << 16); + if (tramp & 0x8000) + tramp -= 0x10000; + + DEBUGP(" %x ", tramp); + + if (tramp != addr) { + printk(KERN_ERR + "Trampoline location %08lx does not match addr\n", + tramp); + return -EINVAL; + } + + op = PPC_NOP_INSTR; + + if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) + return -EPERM; + + flush_icache_range(ip, ip + 8); + + return 0; +} +#endif /* PPC64 */ + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *old, *new; + unsigned long ip = rec->ip; + + /* + * If the calling address is more that 24 bits away, + * then we had to use a trampoline to make the call. + * Otherwise just update the call site. + */ + if (test_24bit_addr(ip, addr)) { + /* within range */ + old = ftrace_call_replace(ip, addr); + new = ftrace_nop_replace(); + return ftrace_modify_code(ip, old, new); + } + + /* + * Out of range jumps are called from modules. + * We should either already have a pointer to the module + * or it has been passed in. 
+ */ + if (!rec->arch.mod) { + if (!mod) { + printk(KERN_ERR "No module loaded addr=%lx\n", + addr); + return -EFAULT; + } + rec->arch.mod = mod; + } else if (mod) { + if (mod != rec->arch.mod) { + printk(KERN_ERR + "Record mod %p not equal to passed in mod %p\n", + rec->arch.mod, mod); + return -EINVAL; + } + /* nothing to do if mod == rec->arch.mod */ + } else + mod = rec->arch.mod; + + return __ftrace_make_nop(mod, rec, addr); + +} + +#ifdef CONFIG_PPC64 +static int +__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op[2]; + unsigned long ip = rec->ip; + + /* read where this goes */ + if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) + return -EFAULT; + + /* + * It should be pointing to two nops or + * b +8; ld r2,40(r1) + */ + if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && + ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) { + printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); + return -EINVAL; + } + + /* If we never set up a trampoline to ftrace_caller, then bail */ + if (!rec->arch.mod->arch.tramp) { + printk(KERN_ERR "No ftrace trampoline\n"); + return -EINVAL; + } + + /* create the branch to the trampoline */ + op[0] = create_branch((unsigned int *)ip, + rec->arch.mod->arch.tramp, BRANCH_SET_LINK); + if (!op[0]) { + printk(KERN_ERR "REL24 out of range!\n"); + return -EINVAL; + } + + /* ld r2,40(r1) */ + op[1] = 0xe8410028; + + DEBUGP("write to %lx\n", rec->ip); + + if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) + return -EPERM; + + flush_icache_range(ip, ip + 8); + + return 0; +} +#else +static int +__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op; + unsigned long ip = rec->ip; + + /* read where this goes */ + if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* It should be pointing to a nop */ + if (op != PPC_NOP_INSTR) { + printk(KERN_ERR "Expected NOP but have %x\n", op); + return -EINVAL; + } + + /* If we never set up a trampoline to ftrace_caller, then bail */ + if (!rec->arch.mod->arch.tramp) { + printk(KERN_ERR "No ftrace trampoline\n"); + return -EINVAL; + } + + /* create the branch to the trampoline */ + op = create_branch((unsigned int *)ip, + rec->arch.mod->arch.tramp, BRANCH_SET_LINK); + if (!op) { + printk(KERN_ERR "REL24 out of range!\n"); + return -EINVAL; + } + + DEBUGP("write to %lx\n", rec->ip); + + if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) + return -EPERM; + + flush_icache_range(ip, ip + 8); + + return 0; +} +#endif /* CONFIG_PPC64 */ + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *old, *new; + unsigned long ip = rec->ip; + + /* + * If the calling address is more that 24 bits away, + * then we had to use a trampoline to make the call. + * Otherwise just update the call site. + */ + if (test_24bit_addr(ip, addr)) { + /* within range */ + old = ftrace_nop_replace(); + new = ftrace_call_replace(ip, addr); + return ftrace_modify_code(ip, old, new); + } + + /* + * Out of range jumps are called from modules. + * Being that we are converting from nop, it had better + * already have a module defined. 
+ */ + if (!rec->arch.mod) { + printk(KERN_ERR "No module loaded\n"); + return -EINVAL; + } + + return __ftrace_make_call(rec, addr); } int ftrace_update_ftrace_func(ftrace_func_t func) @@ -128,10 +509,10 @@ int ftrace_update_ftrace_func(ftrace_func_t func) int __init ftrace_dyn_arch_init(void *data) { - /* This is running in kstop_machine */ + /* caller expects data to be zero */ + unsigned long *p = data; - ftrace_mcount_set(data); + *p = 0; return 0; } - diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 0c326823c6d4..a1c4cfd25ded 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -31,6 +31,7 @@ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> +#include <asm/bug.h> /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ #define LOAD_BAT(n, reg, RA, RB) \ @@ -182,7 +183,8 @@ __after_mmu_off: bl reloc_offset mr r26,r3 addis r4,r3,KERNELBASE@h /* current address of _start */ - cmpwi 0,r4,0 /* are we already running at 0? */ + lis r5,PHYSICAL_START@h + cmplw 0,r4,r5 /* already running at PHYSICAL_START? */ bne relocate_kernel /* * we now have the 1st 16M of ram mapped with the bats. @@ -810,13 +812,13 @@ giveup_altivec: /* * This code is jumped to from the startup code to copy - * the kernel image to physical address 0. + * the kernel image to physical address PHYSICAL_START. */ relocate_kernel: addis r9,r26,klimit@ha /* fetch klimit */ lwz r25,klimit@l(r9) addis r25,r25,-KERNELBASE@h - li r3,0 /* Destination base address */ + lis r3,PHYSICAL_START@h /* Destination base address */ li r6,0 /* Destination offset */ li r5,0x4000 /* # bytes of memory to copy */ bl copy_and_flush /* copy the first 0x4000 bytes */ @@ -989,12 +991,12 @@ load_up_mmu: LOAD_BAT(1,r3,r4,r5) LOAD_BAT(2,r3,r4,r5) LOAD_BAT(3,r3,r4,r5) -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION LOAD_BAT(4,r3,r4,r5) LOAD_BAT(5,r3,r4,r5) LOAD_BAT(6,r3,r4,r5) LOAD_BAT(7,r3,r4,r5) -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr /* @@ -1070,9 +1072,14 @@ start_here: RFI /* + * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next); + * * Set up the segment registers for a new context. */ -_ENTRY(set_context) +_ENTRY(switch_mmu_context) + lwz r3,MMCONTEXTID(r4) + cmpwi cr0,r3,0 + blt- 4f mulli r3,r3,897 /* multiply context by skew factor */ rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ addis r3,r3,0x6000 /* Set Ks, Ku bits */ @@ -1083,6 +1090,7 @@ _ENTRY(set_context) /* Context switch the PTE pointer for the Abatron BDI2000. * The PGDIR is passed as second argument. */ + lwz r4,MM_PGD(r4) lis r5, KERNELBASE@h lwz r5, 0xf0(r5) stw r4, 0x4(r5) @@ -1098,6 +1106,9 @@ _ENTRY(set_context) sync isync blr +4: trap + EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0 + blr /* * An undocumented "feature" of 604e requires that the v bit @@ -1131,7 +1142,7 @@ clear_bats: mtspr SPRN_IBAT2L,r10 mtspr SPRN_IBAT3U,r10 mtspr SPRN_IBAT3L,r10 -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION /* Here's a tweak: at this point, CPU setup have * not been called yet, so HIGH_BAT_EN may not be * set in HID0 for the 745x processors. However, it @@ -1154,7 +1165,7 @@ BEGIN_FTR_SECTION mtspr SPRN_IBAT6L,r10 mtspr SPRN_IBAT7U,r10 mtspr SPRN_IBAT7L,r10 -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr flush_tlbs: @@ -1178,11 +1189,11 @@ mmu_off: /* * Use the first pair of BAT registers to map the 1st 16MB - * of RAM to KERNELBASE. 
From this point on we can't safely + * of RAM to PAGE_OFFSET. From this point on we can't safely * call OF any more. */ initial_bats: - lis r11,KERNELBASE@h + lis r11,PAGE_OFFSET@h mfspr r9,SPRN_PVR rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ cmpwi 0,r9,1 diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index f3a1ea9d7fe4..b56fecc93a16 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -69,6 +69,17 @@ _ENTRY(_start); li r24,0 /* CPU number */ /* + * In case the firmware didn't do it, we apply some workarounds + * that are good for all 440 core variants here + */ + mfspr r3,SPRN_CCR0 + rlwinm r3,r3,0,0,27 /* disable icache prefetch */ + isync + mtspr SPRN_CCR0,r3 + isync + sync + +/* * Set up the initial MMU state * * We are still executing code at the virtual address @@ -391,12 +402,14 @@ interrupt_base: rlwimi r13,r12,10,30,30 /* Load the PTE */ - rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ + /* Compute pgdir/pmd offset */ + rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 lwzx r11, r12, r11 /* Get pgd/pmd entry */ rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ beq 2f /* Bail if no table */ - rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ + /* Compute pte address */ + rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 lwz r11, 0(r12) /* Get high word of pte entry */ lwz r12, 4(r12) /* Get low word of pte entry */ @@ -485,12 +498,14 @@ tlb_44x_patch_hwater_D: /* Make up the required permissions */ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC - rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ + /* Compute pgdir/pmd offset */ + rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 lwzx r11, r12, r11 /* Get pgd/pmd entry */ rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ beq 2f /* Bail if no table */ - rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ + /* Compute pte address */ + rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 lwz r11, 0(r12) /* Get high word of pte entry */ lwz r12, 4(r12) /* Get low word of pte entry */ @@ -554,15 +569,16 @@ tlb_44x_patch_hwater_I: */ finish_tlb_load: /* Combine RPN & ERPN an write WS 0 */ - rlwimi r11,r12,0,0,19 + rlwimi r11,r12,0,0,31-PAGE_SHIFT tlbwe r11,r13,PPC44x_TLB_XLAT /* * Create WS1. This is the faulting address (EPN), * page size, and valid flag. */ - li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K - rlwimi r10,r11,0,20,31 /* Insert valid and page size*/ + li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE + /* Insert valid and page size */ + rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */ /* And WS 2 */ @@ -634,12 +650,12 @@ _GLOBAL(set_context) * goes at the beginning of the data segment, which is page-aligned. */ .data - .align 12 + .align PAGE_SHIFT .globl sdata sdata: .globl empty_zero_page empty_zero_page: - .space 4096 + .space PAGE_SIZE /* * To support >32-bit physical addresses, we use an 8KB pgdir. diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 590304c24dad..11b549acc034 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -92,6 +92,7 @@ _ENTRY(_start); * if needed */ +_ENTRY(__early_start) /* 1. 
Find the index of the entry we're executing in */ bl invstr /* Find our address */ invstr: mflr r6 /* Make it accessible */ @@ -235,36 +236,40 @@ skpinv: addi r6,r6,1 /* Increment */ tlbivax 0,r9 TLBSYNC +/* The mapping only needs to be cache-coherent on SMP */ +#ifdef CONFIG_SMP +#define M_IF_SMP MAS2_M +#else +#define M_IF_SMP 0 +#endif + /* 6. Setup KERNELBASE mapping in TLB1[0] */ lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ mtspr SPRN_MAS0,r6 lis r6,(MAS1_VALID|MAS1_IPROT)@h ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l mtspr SPRN_MAS1,r6 - li r7,0 - lis r6,PAGE_OFFSET@h - ori r6,r6,PAGE_OFFSET@l - rlwimi r6,r7,0,20,31 + lis r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h + ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l mtspr SPRN_MAS2,r6 mtspr SPRN_MAS3,r8 tlbwe /* 7. Jump to KERNELBASE mapping */ - lis r6,KERNELBASE@h - ori r6,r6,KERNELBASE@l - rlwimi r6,r7,0,20,31 + lis r6,(KERNELBASE & ~0xfff)@h + ori r6,r6,(KERNELBASE & ~0xfff)@l lis r7,MSR_KERNEL@h ori r7,r7,MSR_KERNEL@l bl 1f /* Find our address */ 1: mflr r9 rlwimi r6,r9,0,20,31 - addi r6,r6,24 + addi r6,r6,(2f - 1b) mtspr SPRN_SRR0,r6 mtspr SPRN_SRR1,r7 rfi /* start execution out of TLB1[0] entry */ /* 8. Clear out the temp mapping */ - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ +2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ mtspr SPRN_MAS0,r7 tlbre @@ -344,6 +349,15 @@ skpinv: addi r6,r6,1 /* Increment */ mtspr SPRN_DBSR,r2 #endif +#ifdef CONFIG_SMP + /* Check to see if we're the second processor, and jump + * to the secondary_start code if so + */ + mfspr r24,SPRN_PIR + cmpwi r24,0 + bne __secondary_start +#endif + /* * This is where the main kernel code starts. */ @@ -685,12 +699,13 @@ interrupt_base: /* SPE Floating Point Data */ #ifdef CONFIG_SPE EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); -#else - EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) -#endif /* CONFIG_SPE */ /* SPE Floating Point Round */ + EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE) +#else + EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE) +#endif /* CONFIG_SPE */ /* Performance Monitor */ EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) @@ -735,6 +750,9 @@ finish_tlb_load: #else rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ #endif +#ifdef CONFIG_SMP + ori r12, r12, MAS2_M +#endif mtspr SPRN_MAS2, r12 li r10, (_PAGE_HWEXEC | _PAGE_PRESENT) @@ -746,15 +764,15 @@ finish_tlb_load: iseleq r12, r12, r10 #ifdef CONFIG_PTE_64BIT -2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ + rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ mtspr SPRN_MAS3, r12 -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION srwi r10, r13, 8 /* grab RPN[8:31] */ mtspr SPRN_MAS7, r10 -END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) #else -2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ + rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ mtspr SPRN_MAS3, r11 #endif #ifdef CONFIG_E200 @@ -1037,6 +1055,63 @@ _GLOBAL(flush_dcache_L1) blr +#ifdef CONFIG_SMP +/* When we get here, r24 needs to hold the CPU # */ + .globl __secondary_start +__secondary_start: + lis r3,__secondary_hold_acknowledge@h + ori r3,r3,__secondary_hold_acknowledge@l + stw 
r24,0(r3) + + li r3,0 + mr r4,r24 /* Why? */ + bl call_setup_cpu + + lis r3,tlbcam_index@ha + lwz r3,tlbcam_index@l(r3) + mtctr r3 + li r26,0 /* r26 safe? */ + + /* Load each CAM entry */ +1: mr r3,r26 + bl loadcam_entry + addi r26,r26,1 + bdnz 1b + + /* get current_thread_info and current */ + lis r1,secondary_ti@ha + lwz r1,secondary_ti@l(r1) + lwz r2,TI_TASK(r1) + + /* stack */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r0,0 + stw r0,0(r1) + + /* ptr to current thread */ + addi r4,r2,THREAD /* address of our thread_struct */ + mtspr SPRN_SPRG3,r4 + + /* Setup the defaults for TLB entries */ + li r4,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l + mtspr SPRN_MAS4,r4 + + /* Jump to start_secondary */ + lis r4,MSR_KERNEL@h + ori r4,r4,MSR_KERNEL@l + lis r3,start_secondary@h + ori r3,r3,start_secondary@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + sync + rfi + sync + + .globl __secondary_hold_acknowledge +__secondary_hold_acknowledge: + .long -1 +#endif + /* * We put a few things here that have to be page-aligned. This stuff * goes at the beginning of the data segment, which is page-aligned. diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 64299d28f364..6e3f62493659 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -47,7 +47,7 @@ #include <asm/abs_addr.h> static struct device ibmebus_bus_device = { /* fake "parent" device */ - .bus_id = "ibmebus", + .init_name = "ibmebus", }; struct bus_type ibmebus_bus_type; @@ -231,6 +231,7 @@ void ibmebus_free_irq(u32 ist, void *dev_id) unsigned int irq = irq_find_mapping(NULL, ist); free_irq(irq, dev_id); + irq_dispose_mapping(irq); } EXPORT_SYMBOL(ibmebus_free_irq); diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 31982d05d81a..88d9c1d5e5fb 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -69,10 +69,15 @@ void cpu_idle(void) smp_mb(); local_irq_disable(); + /* Don't trace irqs off for idle */ + stop_critical_timings(); + /* check again after disabling irqs */ if (!need_resched() && !cpu_should_die()) ppc_md.power_save(); + start_critical_timings(); + local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c index 4c85b8d56478..688b329800bd 100644 --- a/arch/powerpc/kernel/init_task.c +++ b/arch/powerpc/kernel/init_task.c @@ -7,7 +7,6 @@ #include <linux/mqueue.h> #include <asm/uaccess.h> -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index ac222d0ab12e..23b8b5e36f98 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map) mask = map; } if (irq_desc[irq].chip->set_affinity) - irq_desc[irq].chip->set_affinity(irq, mask); + irq_desc[irq].chip->set_affinity(irq, &mask); else if (irq_desc[irq].action && !(warned++)) printk("Cannot set affinity for irq %i\n", irq); } diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index ac2a21f45c75..b3abebb7ee64 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -13,13 +13,17 @@ #include <linux/reboot.h> #include <linux/threads.h> #include <linux/lmb.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/prom.h> +#include <asm/sections.h> 
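Illustration only, not part of the patch: the idle.c hunk above brackets ppc_md.power_save() with stop_critical_timings()/start_critical_timings() so the irqsoff latency tracer does not charge time spent sleeping in the power-save routine as an interrupts-off critical section. A minimal C sketch of that pattern, with a hypothetical do_low_power_wait() standing in for the platform power_save hook:

#include <linux/irqflags.h>	/* local_irq_*(), *_critical_timings() */
#include <linux/sched.h>	/* need_resched() */

extern void do_low_power_wait(void);	/* hypothetical sleep primitive */

static void idle_step(void)
{
	local_irq_disable();

	/* Don't let the irqsoff tracer account the time we spend asleep */
	stop_critical_timings();

	/* Re-check with interrupts hard-disabled before really sleeping */
	if (!need_resched())
		do_low_power_wait();

	start_critical_timings();
	local_irq_enable();
}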
void machine_crash_shutdown(struct pt_regs *regs) { if (ppc_md.machine_crash_shutdown) ppc_md.machine_crash_shutdown(regs); + else + default_machine_crash_shutdown(regs); } /* @@ -31,11 +35,8 @@ int machine_kexec_prepare(struct kimage *image) { if (ppc_md.machine_kexec_prepare) return ppc_md.machine_kexec_prepare(image); - /* - * Fail if platform doesn't provide its own machine_kexec_prepare - * implementation. - */ - return -ENOSYS; + else + return default_machine_kexec_prepare(image); } void machine_kexec_cleanup(struct kimage *image) @@ -52,13 +53,11 @@ void machine_kexec(struct kimage *image) { if (ppc_md.machine_kexec) ppc_md.machine_kexec(image); - else { - /* - * Fall back to normal restart if platform doesn't provide - * its own kexec function, and user insist to kexec... - */ - machine_restart(NULL); - } + else + default_machine_kexec(image); + + /* Fall back to normal restart if we're still alive. */ + machine_restart(NULL); for(;;); } @@ -118,3 +117,71 @@ int overlaps_crashkernel(unsigned long start, unsigned long size) { return (start + size) > crashk_res.start && start <= crashk_res.end; } + +/* Values we need to export to the second kernel via the device tree. */ +static unsigned long kernel_end; +static unsigned long crashk_size; + +static struct property kernel_end_prop = { + .name = "linux,kernel-end", + .length = sizeof(unsigned long), + .value = &kernel_end, +}; + +static struct property crashk_base_prop = { + .name = "linux,crashkernel-base", + .length = sizeof(unsigned long), + .value = &crashk_res.start, +}; + +static struct property crashk_size_prop = { + .name = "linux,crashkernel-size", + .length = sizeof(unsigned long), + .value = &crashk_size, +}; + +static void __init export_crashk_values(struct device_node *node) +{ + struct property *prop; + + /* There might be existing crash kernel properties, but we can't + * be sure what's in them, so remove them. */ + prop = of_find_property(node, "linux,crashkernel-base", NULL); + if (prop) + prom_remove_property(node, prop); + + prop = of_find_property(node, "linux,crashkernel-size", NULL); + if (prop) + prom_remove_property(node, prop); + + if (crashk_res.start != 0) { + prom_add_property(node, &crashk_base_prop); + crashk_size = crashk_res.end - crashk_res.start + 1; + prom_add_property(node, &crashk_size_prop); + } +} + +static int __init kexec_setup(void) +{ + struct device_node *node; + struct property *prop; + + node = of_find_node_by_path("/chosen"); + if (!node) + return -ENOENT; + + /* remove any stale properties so ours can be found */ + prop = of_find_property(node, kernel_end_prop.name, NULL); + if (prop) + prom_remove_property(node, prop); + + /* information needed by userspace when using default_machine_kexec */ + kernel_end = __pa(_end); + prom_add_property(node, &kernel_end_prop); + + export_crashk_values(node); + + of_node_put(node); + return 0; +} +late_initcall(kexec_setup); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 3c4ca046e854..49e705fcee6d 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -289,7 +289,7 @@ void default_machine_kexec(struct kimage *image) } /* Values we need to export to the second kernel via the device tree. 
*/ -static unsigned long htab_base, kernel_end; +static unsigned long htab_base; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -303,25 +303,20 @@ static struct property htab_size_prop = { .value = &htab_size_bytes, }; -static struct property kernel_end_prop = { - .name = "linux,kernel-end", - .length = sizeof(unsigned long), - .value = &kernel_end, -}; - -static void __init export_htab_values(void) +static int __init export_htab_values(void) { struct device_node *node; struct property *prop; + /* On machines with no htab htab_address is NULL */ + if (!htab_address) + return -ENODEV; + node = of_find_node_by_path("/chosen"); if (!node) - return; + return -ENODEV; /* remove any stale propertys so ours can be found */ - prop = of_find_property(node, kernel_end_prop.name, NULL); - if (prop) - prom_remove_property(node, prop); prop = of_find_property(node, htab_base_prop.name, NULL); if (prop) prom_remove_property(node, prop); @@ -329,68 +324,11 @@ static void __init export_htab_values(void) if (prop) prom_remove_property(node, prop); - /* information needed by userspace when using default_machine_kexec */ - kernel_end = __pa(_end); - prom_add_property(node, &kernel_end_prop); - - /* On machines with no htab htab_address is NULL */ - if (NULL == htab_address) - goto out; - htab_base = __pa(htab_address); prom_add_property(node, &htab_base_prop); prom_add_property(node, &htab_size_prop); - out: - of_node_put(node); -} - -static struct property crashk_base_prop = { - .name = "linux,crashkernel-base", - .length = sizeof(unsigned long), - .value = &crashk_res.start, -}; - -static unsigned long crashk_size; - -static struct property crashk_size_prop = { - .name = "linux,crashkernel-size", - .length = sizeof(unsigned long), - .value = &crashk_size, -}; - -static void __init export_crashk_values(void) -{ - struct device_node *node; - struct property *prop; - - node = of_find_node_by_path("/chosen"); - if (!node) - return; - - /* There might be existing crash kernel properties, but we can't - * be sure what's in them, so remove them. 
*/ - prop = of_find_property(node, "linux,crashkernel-base", NULL); - if (prop) - prom_remove_property(node, prop); - - prop = of_find_property(node, "linux,crashkernel-size", NULL); - if (prop) - prom_remove_property(node, prop); - - if (crashk_res.start != 0) { - prom_add_property(node, &crashk_base_prop); - crashk_size = crashk_res.end - crashk_res.start + 1; - prom_add_property(node, &crashk_size_prop); - } - of_node_put(node); -} - -static int __init kexec_setup(void) -{ - export_htab_values(); - export_crashk_values(); return 0; } -__initcall(kexec_setup); +late_initcall(export_htab_values); diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index bdc8b0e860e5..15f28e0de78d 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -29,6 +29,7 @@ #include <asm/asm-offsets.h> #include <asm/processor.h> #include <asm/kexec.h> +#include <asm/bug.h> .text @@ -271,228 +272,6 @@ _GLOBAL(real_writeb) #endif /* CONFIG_40x */ -/* - * Flush MMU TLB - */ -#ifndef CONFIG_FSL_BOOKE -_GLOBAL(_tlbil_all) -_GLOBAL(_tlbil_pid) -#endif -_GLOBAL(_tlbia) -#if defined(CONFIG_40x) - sync /* Flush to memory before changing mapping */ - tlbia - isync /* Flush shadow TLB */ -#elif defined(CONFIG_44x) - li r3,0 - sync - - /* Load high watermark */ - lis r4,tlb_44x_hwater@ha - lwz r5,tlb_44x_hwater@l(r4) - -1: tlbwe r3,r3,PPC44x_TLB_PAGEID - addi r3,r3,1 - cmpw 0,r3,r5 - ble 1b - - isync -#elif defined(CONFIG_FSL_BOOKE) - /* Invalidate all entries in TLB0 */ - li r3, 0x04 - tlbivax 0,3 - /* Invalidate all entries in TLB1 */ - li r3, 0x0c - tlbivax 0,3 - msync -#ifdef CONFIG_SMP - tlbsync -#endif /* CONFIG_SMP */ -#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ -#if defined(CONFIG_SMP) - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) - lwz r8,TI_CPU(r8) - oris r8,r8,10 - mfmsr r10 - SYNC - rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ - rlwinm r0,r0,0,28,26 /* clear DR */ - mtmsr r0 - SYNC_601 - isync - lis r9,mmu_hash_lock@h - ori r9,r9,mmu_hash_lock@l - tophys(r9,r9) -10: lwarx r7,0,r9 - cmpwi 0,r7,0 - bne- 10b - stwcx. r8,0,r9 - bne- 10b - sync - tlbia - sync - TLBSYNC - li r0,0 - stw r0,0(r9) /* clear mmu_hash_lock */ - mtmsr r10 - SYNC_601 - isync -#else /* CONFIG_SMP */ - sync - tlbia - sync -#endif /* CONFIG_SMP */ -#endif /* ! defined(CONFIG_40x) */ - blr - -/* - * Flush MMU TLB for a particular address - */ -#ifndef CONFIG_FSL_BOOKE -_GLOBAL(_tlbil_va) -#endif -_GLOBAL(_tlbie) -#if defined(CONFIG_40x) - /* We run the search with interrupts disabled because we have to change - * the PID and I don't want to preempt when that happens. - */ - mfmsr r5 - mfspr r6,SPRN_PID - wrteei 0 - mtspr SPRN_PID,r4 - tlbsx. r3, 0, r3 - mtspr SPRN_PID,r6 - wrtee r5 - bne 10f - sync - /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear. - * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate - * the TLB entry. */ - tlbwe r3, r3, TLB_TAG - isync -10: - -#elif defined(CONFIG_44x) - mfspr r5,SPRN_MMUCR - rlwimi r5,r4,0,24,31 /* Set TID */ - - /* We have to run the search with interrupts disabled, even critical - * and debug interrupts (in fact the only critical exceptions we have - * are debug and machine check). Otherwise an interrupt which causes - * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */ - mfmsr r4 - lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha - addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l - andc r6,r4,r6 - mtmsr r6 - mtspr SPRN_MMUCR,r5 - tlbsx. 
r3, 0, r3 - mtmsr r4 - bne 10f - sync - /* There are only 64 TLB entries, so r3 < 64, - * which means bit 22, is clear. Since 22 is - * the V bit in the TLB_PAGEID, loading this - * value will invalidate the TLB entry. - */ - tlbwe r3, r3, PPC44x_TLB_PAGEID - isync -10: -#elif defined(CONFIG_FSL_BOOKE) - rlwinm r4, r3, 0, 0, 19 - ori r5, r4, 0x08 /* TLBSEL = 1 */ - tlbivax 0, r4 - tlbivax 0, r5 - msync -#if defined(CONFIG_SMP) - tlbsync -#endif /* CONFIG_SMP */ -#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ -#if defined(CONFIG_SMP) - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) - lwz r8,TI_CPU(r8) - oris r8,r8,11 - mfmsr r10 - SYNC - rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ - rlwinm r0,r0,0,28,26 /* clear DR */ - mtmsr r0 - SYNC_601 - isync - lis r9,mmu_hash_lock@h - ori r9,r9,mmu_hash_lock@l - tophys(r9,r9) -10: lwarx r7,0,r9 - cmpwi 0,r7,0 - bne- 10b - stwcx. r8,0,r9 - bne- 10b - eieio - tlbie r3 - sync - TLBSYNC - li r0,0 - stw r0,0(r9) /* clear mmu_hash_lock */ - mtmsr r10 - SYNC_601 - isync -#else /* CONFIG_SMP */ - tlbie r3 - sync -#endif /* CONFIG_SMP */ -#endif /* ! CONFIG_40x */ - blr - -#if defined(CONFIG_FSL_BOOKE) -/* - * Flush MMU TLB, but only on the local processor (no broadcast) - */ -_GLOBAL(_tlbil_all) -#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ - MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) - li r3,(MMUCSR0_TLBFI)@l - mtspr SPRN_MMUCSR0, r3 -1: - mfspr r3,SPRN_MMUCSR0 - andi. r3,r3,MMUCSR0_TLBFI@l - bne 1b - blr - -/* - * Flush MMU TLB for a particular process id, but only on the local processor - * (no broadcast) - */ -_GLOBAL(_tlbil_pid) -/* we currently do an invalidate all since we don't have per pid invalidate */ - li r3,(MMUCSR0_TLBFI)@l - mtspr SPRN_MMUCSR0, r3 -1: - mfspr r3,SPRN_MMUCSR0 - andi. r3,r3,MMUCSR0_TLBFI@l - bne 1b - msync - isync - blr - -/* - * Flush MMU TLB for a particular address, but only on the local processor - * (no broadcast) - */ -_GLOBAL(_tlbil_va) - slwi r4,r4,16 - mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ - tlbsx 0,r3 - mfspr r4,SPRN_MAS1 /* check valid */ - andis. r3,r4,MAS1_VALID@h - beqlr - rlwinm r4,r4,0,1,31 - mtspr SPRN_MAS1,r4 - tlbwe - msync - isync - blr -#endif /* CONFIG_FSL_BOOKE */ - /* * Flush instruction cache. 
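Illustration only, not from the kernel sources: the _tlbia/_tlbie/_tlbil_* assembly removed above is being moved out of misc_32.S by this series. As a rough C-level sketch of what the FSL BookE local flash-invalidate (_tlbil_all) does, assuming the SPRN_MMUCSR0 and MMUCSR0_TLBnFI definitions from <asm/reg_booke.h>:

#include <asm/reg.h>		/* mtspr(), mfspr() */
#include <asm/reg_booke.h>	/* SPRN_MMUCSR0, MMUCSR0_TLBnFI */
#include <asm/processor.h>	/* cpu_relax() */

#define TLBFI_ALL	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)

/* Flash-invalidate every TLB array on the local core, then spin until
 * the hardware clears the flash-invalidate bits to signal completion. */
static inline void fsl_booke_local_tlb_invalidate_all(void)
{
	mtspr(SPRN_MMUCSR0, TLBFI_ALL);
	while (mfspr(SPRN_MMUCSR0) & TLBFI_ALL)
		cpu_relax();
}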
@@ -647,8 +426,8 @@ _GLOBAL(__flush_dcache_icache) BEGIN_FTR_SECTION blr END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) - rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ + rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ + li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ @@ -688,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) rlwinm r0,r10,0,28,26 /* clear DR */ mtmsr r0 isync - rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ + rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ + li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ @@ -713,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) * void clear_pages(void *page, int order) ; */ _GLOBAL(clear_pages) - li r0,4096/L1_CACHE_BYTES + li r0,PAGE_SIZE/L1_CACHE_BYTES slw r0,r0,r4 mtctr r0 #ifdef CONFIG_8xx @@ -771,7 +550,7 @@ _GLOBAL(copy_page) dcbt r5,r4 li r11,L1_CACHE_BYTES+4 #endif /* MAX_COPY_PREFETCH */ - li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH + li r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH crclr 4*cr0+eq 2: mtctr r0 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 7ff292475269..43e7e3a7f130 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -78,6 +78,12 @@ int module_finalize(const Elf_Ehdr *hdr, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); + sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup"); + if (sect != NULL) + do_feature_fixups(cur_cpu_spec->mmu_features, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); + #ifdef CONFIG_PPC64 sect = find_section(hdr, sechdrs, "__fw_ftr_fixup"); if (sect != NULL) diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 2df91a03462a..f832773fc28e 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -22,6 +22,7 @@ #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> +#include <linux/ftrace.h> #include <linux/cache.h> #include <linux/bug.h> #include <linux/sort.h> @@ -53,6 +54,9 @@ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) r_addend = rela[i].r_addend; } +#ifdef CONFIG_DYNAMIC_FTRACE + _count_relocs++; /* add one for ftrace_caller */ +#endif return _count_relocs; } @@ -306,5 +310,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, return -ENOEXEC; } } +#ifdef CONFIG_DYNAMIC_FTRACE + module->arch.tramp = + do_plt_call(module->module_core, + (unsigned long)ftrace_caller, + sechdrs, module); +#endif return 0; } diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 1af2377e4992..8992b031a7b6 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -20,6 +20,7 @@ #include <linux/moduleloader.h> #include <linux/err.h> #include <linux/vmalloc.h> +#include <linux/ftrace.h> #include <linux/bug.h> #include <asm/module.h> #include <asm/firmware.h> @@ -163,6 +164,11 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, } } +#ifdef CONFIG_DYNAMIC_FTRACE + /* make the trampoline to the ftrace_caller */ + relocs++; +#endif + DEBUGP("Looks like a total of %lu stubs, max\n", relocs); return relocs * sizeof(struct ppc64_stub_entry); } @@ -441,5 +447,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, } } +#ifdef CONFIG_DYNAMIC_FTRACE + me->arch.toc 
= my_r2(sechdrs, me); + me->arch.tramp = stub_for_addr(sechdrs, + (unsigned long)ftrace_caller, + me); +#endif + return 0; } diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c index f3c9cae01dd5..fa983a59c4ce 100644 --- a/arch/powerpc/kernel/of_device.c +++ b/arch/powerpc/kernel/of_device.c @@ -14,7 +14,6 @@ static void of_device_make_bus_id(struct of_device *dev) { static atomic_t bus_no_reg_magic; struct device_node *node = dev->node; - char *name = dev->dev.bus_id; const u32 *reg; u64 addr; int magic; @@ -27,14 +26,12 @@ static void of_device_make_bus_id(struct of_device *dev) reg = of_get_property(node, "dcr-reg", NULL); if (reg) { #ifdef CONFIG_PPC_DCR_NATIVE - snprintf(name, BUS_ID_SIZE, "d%x.%s", - *reg, node->name); + dev_set_name(&dev->dev, "d%x.%s", *reg, node->name); #else /* CONFIG_PPC_DCR_NATIVE */ addr = of_translate_dcr_address(node, *reg, NULL); if (addr != OF_BAD_ADDR) { - snprintf(name, BUS_ID_SIZE, - "D%llx.%s", (unsigned long long)addr, - node->name); + dev_set_name(&dev->dev, "D%llx.%s", + (unsigned long long)addr, node->name); return; } #endif /* !CONFIG_PPC_DCR_NATIVE */ @@ -48,9 +45,8 @@ static void of_device_make_bus_id(struct of_device *dev) if (reg) { addr = of_translate_address(node, reg); if (addr != OF_BAD_ADDR) { - snprintf(name, BUS_ID_SIZE, - "%llx.%s", (unsigned long long)addr, - node->name); + dev_set_name(&dev->dev, "%llx.%s", + (unsigned long long)addr, node->name); return; } } @@ -60,7 +56,7 @@ static void of_device_make_bus_id(struct of_device *dev) * counter (and pray...) */ magic = atomic_add_return(1, &bus_no_reg_magic); - snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1); + dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1); } struct of_device *of_device_alloc(struct device_node *np, @@ -80,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np, dev->dev.archdata.of_node = np; if (bus_id) - strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); + dev_set_name(&dev->dev, bus_id); else of_device_make_bus_id(dev); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 48a347133f41..c744b327bcab 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -37,6 +37,7 @@ struct lppaca lppaca[] = { .end_of_quantum = 0xfffffffffffffffful, .slb_count = 64, .vmxregs_in_use = 0, + .page_ins = 0, }, }; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index f36936d9fda3..2538030954d8 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -37,13 +37,7 @@ #include <asm/machdep.h> #include <asm/ppc-pci.h> #include <asm/firmware.h> - -#ifdef DEBUG -#include <asm/udbg.h> -#define DBG(fmt...) printk(fmt) -#else -#define DBG(fmt...) 
-#endif +#include <asm/eeh.h> static DEFINE_SPINLOCK(hose_spinlock); @@ -53,8 +47,9 @@ static int global_phb_number; /* Global phb counter */ /* ISA Memory physical address */ resource_size_t isa_mem_base; -/* Default PCI flags is 0 */ -unsigned int ppc_pci_flags; +/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ +unsigned int ppc_pci_flags = 0; + static struct dma_mapping_ops *pci_dma_ops; @@ -165,8 +160,6 @@ EXPORT_SYMBOL(pci_domain_nr); */ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) { - if (!have_of) - return NULL; while(node) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) @@ -208,26 +201,6 @@ char __devinit *pcibios_setup(char *str) return str; } -void __devinit pcibios_setup_new_device(struct pci_dev *dev) -{ - struct dev_archdata *sd = &dev->dev.archdata; - - sd->of_node = pci_device_to_OF_node(dev); - - DBG("PCI: device %s OF node: %s\n", pci_name(dev), - sd->of_node ? sd->of_node->full_name : "<none>"); - - sd->dma_ops = pci_dma_ops; -#ifdef CONFIG_PPC32 - sd->dma_data = (void *)PCI_DRAM_OFFSET; -#endif - set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); - - if (ppc_md.pci_dma_dev_setup) - ppc_md.pci_dma_dev_setup(dev); -} -EXPORT_SYMBOL(pcibios_setup_new_device); - /* * Reads the interrupt pin to determine if interrupt is use by card. * If the interrupt is used, then gets the interrupt line from the @@ -252,7 +225,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev) return -1; #endif - DBG("Try to map irq for %s...\n", pci_name(pci_dev)); + pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); #ifdef DEBUG memset(&oirq, 0xff, sizeof(oirq)); @@ -276,26 +249,26 @@ int pci_read_irq_line(struct pci_dev *pci_dev) line == 0xff || line == 0) { return -1; } - DBG(" -> no map ! Using line %d (pin %d) from PCI config\n", - line, pin); + pr_debug(" No map ! Using line %d (pin %d) from PCI config\n", + line, pin); virq = irq_create_mapping(NULL, line); if (virq != NO_IRQ) set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); } else { - DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n", - oirq.size, oirq.specifier[0], oirq.specifier[1], + pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) 
on %s\n", + oirq.size, oirq.specifier[0], oirq.specifier[1], oirq.controller->full_name); virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); } if(virq == NO_IRQ) { - DBG(" -> failed to map !\n"); + pr_debug(" Failed to map !\n"); return -1; } - DBG(" -> mapped to linux irq %d\n", virq); + pr_debug(" Mapped to linux irq %d\n", virq); pci_dev->irq = virq; @@ -397,13 +370,10 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, } /* XXX would be nice to have a way to ask for write-through */ - prot |= _PAGE_NO_CACHE; if (write_combine) - prot &= ~_PAGE_GUARDED; + return pgprot_noncached_wc(prot); else - prot |= _PAGE_GUARDED; - - return __pgprot(prot); + return pgprot_noncached(prot); } /* @@ -414,19 +384,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, - pgprot_t protection) + pgprot_t prot) { struct pci_dev *pdev = NULL; struct resource *found = NULL; - unsigned long prot = pgprot_val(protection); resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; int i; if (page_is_ram(pfn)) - return __pgprot(prot); - - prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; + return prot; + prot = pgprot_noncached(prot); for_each_pci_dev(pdev) { for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &pdev->resource[i]; @@ -447,14 +415,14 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, } if (found) { if (found->flags & IORESOURCE_PREFETCH) - prot &= ~_PAGE_GUARDED; + prot = pgprot_noncached_wc(prot); pci_dev_put(pdev); } - DBG("non-PCI map for %llx, prot: %lx\n", - (unsigned long long)offset, prot); + pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", + (unsigned long long)offset, pgprot_val(prot)); - return __pgprot(prot); + return prot; } @@ -610,8 +578,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus, pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); vma->vm_pgoff = offset >> PAGE_SHIFT; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); @@ -853,15 +820,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, int pci_proc_domain(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); -#ifdef CONFIG_PPC64 - return hose->buid != 0; -#else + if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS)) return 0; if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0) return hose->global_number != 0; return 1; -#endif } void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, @@ -1083,27 +1047,50 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) } } -static void __devinit __pcibios_fixup_bus(struct pci_bus *bus) +void __devinit pcibios_setup_bus_self(struct pci_bus *bus) { - struct pci_dev *dev = bus->self; - - pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB"); - - /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for - * now differently between 32 and 64 bits. - */ - if (dev != NULL) + /* Fix up the bus resources for P2P bridges */ + if (bus->self != NULL) pcibios_fixup_bridge(bus); - /* Additional setup that is different between 32 and 64 bits for now */ - pcibios_do_bus_setup(bus); - - /* Platform specific bus fixups */ + /* Platform specific bus fixups. 
This is currently only used + * by fsl_pci and I'm hoping to get rid of it at some point + */ if (ppc_md.pcibios_fixup_bus) ppc_md.pcibios_fixup_bus(bus); - /* Read default IRQs and fixup if necessary */ + /* Setup bus DMA mappings */ + if (ppc_md.pci_dma_bus_setup) + ppc_md.pci_dma_bus_setup(bus); +} + +void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) +{ + struct pci_dev *dev; + + pr_debug("PCI: Fixup bus devices %d (%s)\n", + bus->number, bus->self ? pci_name(bus->self) : "PHB"); + list_for_each_entry(dev, &bus->devices, bus_list) { + struct dev_archdata *sd = &dev->dev.archdata; + + /* Setup OF node pointer in archdata */ + sd->of_node = pci_device_to_OF_node(dev); + + /* Fixup NUMA node as it may not be setup yet by the generic + * code and is needed by the DMA init + */ + set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); + + /* Hook up default DMA ops */ + sd->dma_ops = pci_dma_ops; + sd->dma_data = (void *)PCI_DRAM_OFFSET; + + /* Additional platform DMA/iommu setup */ + if (ppc_md.pci_dma_dev_setup) + ppc_md.pci_dma_dev_setup(dev); + + /* Read default IRQs and fixup if necessary */ pci_read_irq_line(dev); if (ppc_md.pci_irq_fixup) ppc_md.pci_irq_fixup(dev); @@ -1113,22 +1100,19 @@ static void __devinit __pcibios_fixup_bus(struct pci_bus *bus) void __devinit pcibios_fixup_bus(struct pci_bus *bus) { /* When called from the generic PCI probe, read PCI<->PCI bridge - * bases before proceeding + * bases. This is -not- called when generating the PCI tree from + * the OF device-tree. */ if (bus->self != NULL) pci_read_bridge_bases(bus); - __pcibios_fixup_bus(bus); -} -EXPORT_SYMBOL(pcibios_fixup_bus); -/* When building a bus from the OF tree rather than probing, we need a - * slightly different version of the fixup which doesn't read the - * bridge bases using config space accesses - */ -void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus) -{ - __pcibios_fixup_bus(bus); + /* Now fixup the bus bus */ + pcibios_setup_bus_self(bus); + + /* Now fixup devices on that bus */ + pcibios_setup_bus_devices(bus); } +EXPORT_SYMBOL(pcibios_fixup_bus); static int skip_isa_ioresource_align(struct pci_dev *dev) { @@ -1198,10 +1182,10 @@ static int __init reparent_resources(struct resource *parent, *pp = NULL; for (p = res->child; p != NULL; p = p->sibling) { p->parent = res; - DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n", - p->name, - (unsigned long long)p->start, - (unsigned long long)p->end, res->name); + pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", + p->name, + (unsigned long long)p->start, + (unsigned long long)p->end, res->name); } return 0; } @@ -1245,9 +1229,12 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) int i; struct resource *res, *pr; + pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", + pci_domain_nr(bus), bus->number); + for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { if ((res = bus->resource[i]) == NULL || !res->flags - || res->start > res->end) + || res->start > res->end || res->parent) continue; if (bus->parent == NULL) pr = (res->flags & IORESOURCE_IO) ? @@ -1271,14 +1258,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) } } - DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " - "[0x%x], parent %p (%s)\n", - bus->self ? pci_name(bus->self) : "PHB", - bus->number, i, - (unsigned long long)res->start, - (unsigned long long)res->end, - (unsigned int)res->flags, - pr, (pr && pr->name) ? 
pr->name : "nil"); + pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " + "[0x%x], parent %p (%s)\n", + bus->self ? pci_name(bus->self) : "PHB", + bus->number, i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags, + pr, (pr && pr->name) ? pr->name : "nil"); if (pr && !(pr->flags & IORESOURCE_UNSET)) { if (request_resource(pr, res) == 0) @@ -1305,11 +1292,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) { struct resource *pr, *r = &dev->resource[idx]; - DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", - pci_name(dev), idx, - (unsigned long long)r->start, - (unsigned long long)r->end, - (unsigned int)r->flags); + pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", + pci_name(dev), idx, + (unsigned long long)r->start, + (unsigned long long)r->end, + (unsigned int)r->flags); pr = pci_find_parent_resource(dev, r); if (!pr || (pr->flags & IORESOURCE_UNSET) || @@ -1317,10 +1304,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) printk(KERN_WARNING "PCI: Cannot allocate resource region %d" " of device %s, will remap\n", idx, pci_name(dev)); if (pr) - DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr, - (unsigned long long)pr->start, - (unsigned long long)pr->end, - (unsigned int)pr->flags); + pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", + pr, + (unsigned long long)pr->start, + (unsigned long long)pr->end, + (unsigned int)pr->flags); /* We'll assign a new address later */ r->flags |= IORESOURCE_UNSET; r->end -= r->start; @@ -1358,7 +1346,8 @@ static void __init pcibios_allocate_resources(int pass) * but keep it unregistered. */ u32 reg; - DBG("PCI: Switching off ROM of %s\n", pci_name(dev)); + pr_debug("PCI: Switching off ROM of %s\n", + pci_name(dev)); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_read_config_dword(dev, dev->rom_base_reg, ®); pci_write_config_dword(dev, dev->rom_base_reg, @@ -1383,7 +1372,7 @@ void __init pcibios_resource_survey(void) } if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) { - DBG("PCI: Assigning unassigned resouces...\n"); + pr_debug("PCI: Assigning unassigned resouces...\n"); pci_assign_unassigned_resources(); } @@ -1393,9 +1382,11 @@ void __init pcibios_resource_survey(void) } #ifdef CONFIG_HOTPLUG -/* This is used by the pSeries hotplug driver to allocate resource + +/* This is used by the PCI hotplug driver to allocate resource * of newly plugged busses. We can try to consolidate with the - * rest of the code later, for now, keep it as-is + * rest of the code later, for now, keep it as-is as our main + * resource allocation function doesn't deal with sub-trees yet. 
*/ void __devinit pcibios_claim_one_bus(struct pci_bus *bus) { @@ -1410,6 +1401,14 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus) if (r->parent || !r->start || !r->flags) continue; + + pr_debug("PCI: Claiming %s: " + "Resource %d: %016llx..%016llx [%x]\n", + pci_name(dev), i, + (unsigned long long)r->start, + (unsigned long long)r->end, + (unsigned int)r->flags); + pci_claim_resource(dev, i); } } @@ -1418,6 +1417,31 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus) pcibios_claim_one_bus(child_bus); } EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); + + +/* pcibios_finish_adding_to_bus + * + * This is to be called by the hotplug code after devices have been + * added to a bus, this include calling it for a PHB that is just + * being added + */ +void pcibios_finish_adding_to_bus(struct pci_bus *bus) +{ + pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); + + /* Allocate bus and devices resources */ + pcibios_allocate_bus_resources(bus); + pcibios_claim_one_bus(bus); + + /* Add new devices to global lists. Register in proc, sysfs. */ + pci_bus_add_devices(bus); + + /* Fixup EEH */ + eeh_add_device_tree_late(bus); +} +EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); + #endif /* CONFIG_HOTPLUG */ int pcibios_enable_device(struct pci_dev *dev, int mask) @@ -1428,3 +1452,61 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) return pci_enable_resources(dev, mask); } + +void __devinit pcibios_setup_phb_resources(struct pci_controller *hose) +{ + struct pci_bus *bus = hose->bus; + struct resource *res; + int i; + + /* Hookup PHB IO resource */ + bus->resource[0] = res = &hose->io_resource; + + if (!res->flags) { + printk(KERN_WARNING "PCI: I/O resource not set for host" + " bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); +#ifdef CONFIG_PPC32 + /* Workaround for lack of IO resource only on 32-bit */ + res->start = (unsigned long)hose->io_base_virt - isa_io_base; + res->end = res->start + IO_SPACE_LIMIT; + res->flags = IORESOURCE_IO; +#endif /* CONFIG_PPC32 */ + } + + pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + + /* Hookup PHB Memory resources */ + for (i = 0; i < 3; ++i) { + res = &hose->mem_resources[i]; + if (!res->flags) { + if (i > 0) + continue; + printk(KERN_ERR "PCI: Memory resource 0 not set for " + "host bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); +#ifdef CONFIG_PPC32 + /* Workaround for lack of MEM resource only on 32-bit */ + res->start = hose->pci_mem_offset; + res->end = (resource_size_t)-1LL; + res->flags = IORESOURCE_MEM; +#endif /* CONFIG_PPC32 */ + } + bus->resource[i+1] = res; + + pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + } + + pr_debug("PCI: PHB MEM offset = %016llx\n", + (unsigned long long)hose->pci_mem_offset); + pr_debug("PCI: PHB IO offset = %08lx\n", + (unsigned long)hose->io_base_virt - _IO_BASE); + +} + diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 131b1dfa68c6..132cd80afa21 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -26,12 +26,6 @@ #undef DEBUG -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - unsigned long isa_io_base = 0; unsigned long pci_dram_offset = 0; int pcibios_assign_bus_offset = 1; @@ -272,17 +266,14 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) { struct device_node *parent, *np; - if (!have_of) - return NULL; - - DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); + pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); parent = scan_OF_for_pci_bus(bus); if (parent == NULL) return NULL; - DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>"); + pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); np = scan_OF_for_pci_dev(parent, devfn); of_node_put(parent); - DBG(" result is %s\n", np ? np->full_name : "<NULL>"); + pr_debug(" result is %s\n", np ? np->full_name : "<NULL>"); /* XXX most callers don't release the returned node * mostly because ppc64 doesn't increase the refcount, @@ -315,8 +306,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) struct pci_controller* hose; struct pci_dev* dev = NULL; - if (!have_of) - return -ENODEV; /* Make sure it's really a PCI device */ hose = pci_find_hose_for_OF_device(node); if (!hose || !hose->dn) @@ -379,10 +368,41 @@ void pcibios_make_OF_bus_map(void) } #endif /* CONFIG_PPC_OF */ +static void __devinit pcibios_scan_phb(struct pci_controller *hose) +{ + struct pci_bus *bus; + struct device_node *node = hose->dn; + unsigned long io_offset; + struct resource *res = &hose->io_resource; + + pr_debug("PCI: Scanning PHB %s\n", + node ? node->full_name : "<NO NAME>"); + + /* Create an empty bus for the toplevel */ + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); + if (bus == NULL) { + printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", + hose->global_number); + return; + } + bus->secondary = hose->first_busno; + hose->bus = bus; + + /* Fixup IO space offset */ + io_offset = (unsigned long)hose->io_base_virt - isa_io_base; + res->start = (res->start + io_offset) & 0xffffffffu; + res->end = (res->end + io_offset) & 0xffffffffu; + + /* Wire up PHB bus resources */ + pcibios_setup_phb_resources(hose); + + /* Scan children */ + hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); +} + static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; - struct pci_bus *bus; int next_busno = 0; printk(KERN_INFO "PCI: Probing PCI hardware\n"); @@ -395,12 +415,8 @@ static int __init pcibios_init(void) if (pci_assign_all_buses) hose->first_busno = next_busno; hose->last_busno = 0xff; - bus = pci_scan_bus_parented(hose->parent, hose->first_busno, - hose->ops, hose); - if (bus) { - pci_bus_add_devices(bus); - hose->last_busno = bus->subordinate; - } + pcibios_scan_phb(hose); + pci_bus_add_devices(hose->bus); if (pci_assign_all_buses || next_busno <= hose->last_busno) next_busno = hose->last_busno + pcibios_assign_bus_offset; } @@ -410,7 +426,7 @@ static int __init pcibios_init(void) * numbers vs. kernel bus numbers since we may have to * remap them. 
*/ - if (pci_assign_all_buses && have_of) + if (pci_assign_all_buses) pcibios_make_OF_bus_map(); /* Call common code to handle resource allocation */ @@ -425,54 +441,6 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); -void __devinit pcibios_do_bus_setup(struct pci_bus *bus) -{ - struct pci_controller *hose = (struct pci_controller *) bus->sysdata; - unsigned long io_offset; - struct resource *res; - int i; - struct pci_dev *dev; - - /* Hookup PHB resources */ - io_offset = (unsigned long)hose->io_base_virt - isa_io_base; - if (bus->parent == NULL) { - /* This is a host bridge - fill in its resources */ - hose->bus = bus; - - bus->resource[0] = res = &hose->io_resource; - if (!res->flags) { - if (io_offset) - printk(KERN_ERR "I/O resource not set for host" - " bridge %d\n", hose->global_number); - res->start = 0; - res->end = IO_SPACE_LIMIT; - res->flags = IORESOURCE_IO; - } - res->start = (res->start + io_offset) & 0xffffffffu; - res->end = (res->end + io_offset) & 0xffffffffu; - - for (i = 0; i < 3; ++i) { - res = &hose->mem_resources[i]; - if (!res->flags) { - if (i > 0) - continue; - printk(KERN_ERR "Memory resource not set for " - "host bridge %d\n", hose->global_number); - res->start = hose->pci_mem_offset; - res->end = ~0U; - res->flags = IORESOURCE_MEM; - } - bus->resource[i+1] = res; - } - } - - if (ppc_md.pci_dma_bus_setup) - ppc_md.pci_dma_bus_setup(bus); - - list_for_each_entry(dev, &bus->devices, bus_list) - pcibios_setup_new_device(dev); -} - /* the next one is stolen from the alpha port... */ void __init pcibios_update_irq(struct pci_dev *dev, int irq) diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 3502b9101e6b..39fadc6e1492 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -32,13 +32,6 @@ #include <asm/machdep.h> #include <asm/ppc-pci.h> -#ifdef DEBUG -#include <asm/udbg.h> -#define DBG(fmt...) printk(fmt) -#else -#define DBG(fmt...) -#endif - unsigned long pci_probe_only = 1; /* pci_io_base -- the base address from which io bars are offsets. 
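Usage sketch, not part of the patch: the pcibios_finish_adding_to_bus() helper added to pci-common.c earlier in this patch is intended for PHB hotplug paths roughly like the one below. setup_new_phb_accessors() is hypothetical (it stands in for the platform code that fills in hose->ops and the bridge ranges); the other calls are existing powerpc PCI interfaces, with scan_phb() being the 64-bit scan routine shown further down in pci_64.c.

#include <linux/pci.h>
#include <asm/prom.h>		/* struct device_node */
#include <asm/pci-bridge.h>	/* struct pci_controller, pcibios_alloc_controller() */
#include <asm/ppc-pci.h>	/* scan_phb() */

extern void setup_new_phb_accessors(struct pci_controller *hose);	/* hypothetical */

static int add_hotplugged_phb(struct device_node *dn)
{
	struct pci_controller *hose;

	hose = pcibios_alloc_controller(dn);
	if (!hose)
		return -ENOMEM;

	setup_new_phb_accessors(hose);

	/* Probe the devices below the new host bridge */
	scan_phb(hose);

	/* Allocate/claim resources, register the devices, hook up EEH */
	pcibios_finish_adding_to_bus(hose->bus);
	return 0;
}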
@@ -102,7 +95,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) addrs = of_get_property(node, "assigned-addresses", &proplen); if (!addrs) return; - DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); + pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs); for (; proplen >= 20; proplen -= 20, addrs += 5) { flags = pci_parse_of_flags(addrs[0]); if (!flags) @@ -112,8 +105,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) if (!size) continue; i = addrs[0] & 0xff; - DBG(" base: %llx, size: %llx, i: %x\n", - (unsigned long long)base, (unsigned long long)size, i); + pr_debug(" base: %llx, size: %llx, i: %x\n", + (unsigned long long)base, + (unsigned long long)size, i); if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; @@ -144,7 +138,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, if (type == NULL) type = ""; - DBG(" create device, devfn: %x, type: %s\n", devfn, type); + pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); dev->bus = bus; dev->sysdata = node; @@ -165,8 +159,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, dev->class = get_int_prop(node, "class-code", 0); dev->revision = get_int_prop(node, "revision-id", 0); - DBG(" class: 0x%x\n", dev->class); - DBG(" revision: 0x%x\n", dev->revision); + pr_debug(" class: 0x%x\n", dev->class); + pr_debug(" revision: 0x%x\n", dev->revision); dev->current_state = 4; /* unknown power state */ dev->error_state = pci_channel_io_normal; @@ -187,7 +181,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, pci_parse_of_addrs(node, dev); - DBG(" adding to system ...\n"); + pr_debug(" adding to system ...\n"); pci_device_add(dev, bus); @@ -195,19 +189,20 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, } EXPORT_SYMBOL(of_create_pci_dev); -void __devinit of_scan_bus(struct device_node *node, - struct pci_bus *bus) +static void __devinit __of_scan_bus(struct device_node *node, + struct pci_bus *bus, int rescan_existing) { struct device_node *child; const u32 *reg; int reglen, devfn; struct pci_dev *dev; - DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); + pr_debug("of_scan_bus(%s) bus no %d... \n", + node->full_name, bus->number); /* Scan direct children */ for_each_child_of_node(node, child) { - DBG(" * %s\n", child->full_name); + pr_debug(" * %s\n", child->full_name); reg = of_get_property(child, "reg", ®len); if (reg == NULL || reglen < 20) continue; @@ -217,11 +212,15 @@ void __devinit of_scan_bus(struct device_node *node, dev = of_create_pci_dev(child, bus, devfn); if (!dev) continue; - DBG(" dev header type: %x\n", dev->hdr_type); + pr_debug(" dev header type: %x\n", dev->hdr_type); } - /* Ally all fixups */ - pcibios_fixup_of_probed_bus(bus); + /* Apply all fixups necessary. 
We don't fixup the bus "self" + * for an existing bridge that is being rescanned + */ + if (!rescan_existing) + pcibios_setup_bus_self(bus); + pcibios_setup_bus_devices(bus); /* Now scan child busses */ list_for_each_entry(dev, &bus->devices, bus_list) { @@ -233,7 +232,20 @@ void __devinit of_scan_bus(struct device_node *node, } } } -EXPORT_SYMBOL(of_scan_bus); + +void __devinit of_scan_bus(struct device_node *node, + struct pci_bus *bus) +{ + __of_scan_bus(node, bus, 0); +} +EXPORT_SYMBOL_GPL(of_scan_bus); + +void __devinit of_rescan_bus(struct device_node *node, + struct pci_bus *bus) +{ + __of_scan_bus(node, bus, 1); +} +EXPORT_SYMBOL_GPL(of_rescan_bus); void __devinit of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev) @@ -245,7 +257,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, unsigned int flags; u64 size; - DBG("of_scan_pci_bridge(%s)\n", node->full_name); + pr_debug("of_scan_pci_bridge(%s)\n", node->full_name); /* parse bus-range property */ busrange = of_get_property(node, "bus-range", &len); @@ -309,12 +321,12 @@ void __devinit of_scan_pci_bridge(struct device_node *node, } sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), bus->number); - DBG(" bus name: %s\n", bus->name); + pr_debug(" bus name: %s\n", bus->name); mode = PCI_PROBE_NORMAL; if (ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); - DBG(" probe mode: %d\n", mode); + pr_debug(" probe mode: %d\n", mode); if (mode == PCI_PROBE_DEVTREE) of_scan_bus(node, bus); @@ -327,9 +339,10 @@ void __devinit scan_phb(struct pci_controller *hose) { struct pci_bus *bus; struct device_node *node = hose->dn; - int i, mode; + int mode; - DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); + pr_debug("PCI: Scanning PHB %s\n", + node ? node->full_name : "<NO NAME>"); /* Create an empty bus for the toplevel */ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); @@ -345,26 +358,13 @@ void __devinit scan_phb(struct pci_controller *hose) pcibios_map_io_space(bus); /* Wire up PHB bus resources */ - DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n", - hose->io_resource.start, hose->io_resource.end, - hose->io_resource.flags); - bus->resource[0] = &hose->io_resource; - for (i = 0; i < 3; ++i) { - DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i, - hose->mem_resources[i].start, - hose->mem_resources[i].end, - hose->mem_resources[i].flags); - bus->resource[i+1] = &hose->mem_resources[i]; - } - DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset); - DBG("PCI: PHB IO offset = %08lx\n", - (unsigned long)hose->io_base_virt - _IO_BASE); + pcibios_setup_phb_resources(hose); /* Get probe mode and perform scan */ mode = PCI_PROBE_NORMAL; if (node && ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); - DBG(" probe mode: %d\n", mode); + pr_debug(" probe mode: %d\n", mode); if (mode == PCI_PROBE_DEVTREE) { bus->subordinate = hose->last_busno; of_scan_bus(node, bus); @@ -380,7 +380,7 @@ static int __init pcibios_init(void) printk(KERN_INFO "PCI: Probing PCI hardware\n"); - /* For now, override phys_mem_access_prot. If we need it, + /* For now, override phys_mem_access_prot. 
If we need it,g * later, we may move that initialization to each ppc_md */ ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; @@ -388,6 +388,11 @@ static int __init pcibios_init(void) if (pci_probe_only) ppc_pci_flags |= PPC_PCI_PROBE_ONLY; + /* On ppc64, we always enable PCI domains and we keep domain 0 + * backward compatible in /proc for video cards + */ + ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0; + /* Scan all of the recorded PCI controllers. */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { scan_phb(hose); @@ -422,8 +427,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus) if (bus->self) { struct resource *res = bus->resource[0]; - DBG("IO unmapping for PCI-PCI bridge %s\n", - pci_name(bus->self)); + pr_debug("IO unmapping for PCI-PCI bridge %s\n", + pci_name(bus->self)); __flush_hash_table_range(&init_mm, res->start + _IO_BASE, res->end + _IO_BASE + 1); @@ -437,8 +442,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus) if (hose->io_base_alloc == 0) return 0; - DBG("IO unmapping for PHB %s\n", hose->dn->full_name); - DBG(" alloc=0x%p\n", hose->io_base_alloc); + pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name); + pr_debug(" alloc=0x%p\n", hose->io_base_alloc); /* This is a PHB, we fully unmap the IO area */ vunmap(hose->io_base_alloc); @@ -463,11 +468,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) * thus HPTEs will be faulted in when needed */ if (bus->self) { - DBG("IO mapping for PCI-PCI bridge %s\n", - pci_name(bus->self)); - DBG(" virt=0x%016lx...0x%016lx\n", - bus->resource[0]->start + _IO_BASE, - bus->resource[0]->end + _IO_BASE); + pr_debug("IO mapping for PCI-PCI bridge %s\n", + pci_name(bus->self)); + pr_debug(" virt=0x%016lx...0x%016lx\n", + bus->resource[0]->start + _IO_BASE, + bus->resource[0]->end + _IO_BASE); return 0; } @@ -496,11 +501,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) hose->io_base_virt = (void __iomem *)(area->addr + hose->io_base_phys - phys_page); - DBG("IO mapping for PHB %s\n", hose->dn->full_name); - DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n", - hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); - DBG(" size=0x%016lx (alloc=0x%016lx)\n", - hose->pci_io_size, size_page); + pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); + pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n", + hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); + pr_debug(" size=0x%016lx (alloc=0x%016lx)\n", + hose->pci_io_size, size_page); /* Establish the mapping */ if (__ioremap_at(phys_page, area->addr, size_page, @@ -512,24 +517,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) hose->io_resource.start += io_virt_offset; hose->io_resource.end += io_virt_offset; - DBG(" hose->io_resource=0x%016lx...0x%016lx\n", - hose->io_resource.start, hose->io_resource.end); + pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n", + hose->io_resource.start, hose->io_resource.end); return 0; } EXPORT_SYMBOL_GPL(pcibios_map_io_space); -void __devinit pcibios_do_bus_setup(struct pci_bus *bus) -{ - struct pci_dev *dev; - - if (ppc_md.pci_dma_bus_setup) - ppc_md.pci_dma_bus_setup(bus); - - list_for_each_entry(dev, &bus->devices, bus_list) - pcibios_setup_new_device(dev); -} - unsigned long pci_address_to_pio(phys_addr_t address) { struct pci_controller *hose, *tmp; diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 260089dccfb0..dcec1325d340 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -116,12 
+116,6 @@ EXPORT_SYMBOL(giveup_spe); #ifndef CONFIG_PPC64 EXPORT_SYMBOL(flush_instruction_cache); -EXPORT_SYMBOL(flush_tlb_kernel_range); -EXPORT_SYMBOL(flush_tlb_page); -EXPORT_SYMBOL(_tlbie); -#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE) -EXPORT_SYMBOL(_tlbil_va); -#endif #endif EXPORT_SYMBOL(__flush_icache_range); EXPORT_SYMBOL(flush_dcache_range); @@ -174,8 +168,7 @@ EXPORT_SYMBOL(cacheable_memcpy); #endif #ifdef CONFIG_PPC32 -EXPORT_SYMBOL(next_mmu_context); -EXPORT_SYMBOL(set_context); +EXPORT_SYMBOL(switch_mmu_context); #endif #ifdef CONFIG_PPC_STD_MMU_32 diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S new file mode 100644 index 000000000000..5113bd2285e1 --- /dev/null +++ b/arch/powerpc/kernel/ppc_save_regs.S @@ -0,0 +1,74 @@ +/* + * Copyright (C) 1996 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE: assert(sizeof(buf) > 23 * sizeof(long)) + */ +#include <asm/processor.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +/* + * Grab the register values as they are now. + * This won't do a particularily good job because we really + * want our caller's caller's registers, and our caller has + * already executed its prologue. + * ToDo: We could reach back into the caller's save area to do + * a better job of representing the caller's state (note that + * that will be different for 32-bit and 64-bit, because of the + * different ABIs, though). + */ +_GLOBAL(ppc_save_regs) + PPC_STL r0,0*SZL(r3) + PPC_STL r2,2*SZL(r3) + PPC_STL r3,3*SZL(r3) + PPC_STL r4,4*SZL(r3) + PPC_STL r5,5*SZL(r3) + PPC_STL r6,6*SZL(r3) + PPC_STL r7,7*SZL(r3) + PPC_STL r8,8*SZL(r3) + PPC_STL r9,9*SZL(r3) + PPC_STL r10,10*SZL(r3) + PPC_STL r11,11*SZL(r3) + PPC_STL r12,12*SZL(r3) + PPC_STL r13,13*SZL(r3) + PPC_STL r14,14*SZL(r3) + PPC_STL r15,15*SZL(r3) + PPC_STL r16,16*SZL(r3) + PPC_STL r17,17*SZL(r3) + PPC_STL r18,18*SZL(r3) + PPC_STL r19,19*SZL(r3) + PPC_STL r20,20*SZL(r3) + PPC_STL r21,21*SZL(r3) + PPC_STL r22,22*SZL(r3) + PPC_STL r23,23*SZL(r3) + PPC_STL r24,24*SZL(r3) + PPC_STL r25,25*SZL(r3) + PPC_STL r26,26*SZL(r3) + PPC_STL r27,27*SZL(r3) + PPC_STL r28,28*SZL(r3) + PPC_STL r29,29*SZL(r3) + PPC_STL r30,30*SZL(r3) + PPC_STL r31,31*SZL(r3) + /* go up one stack frame for SP */ + PPC_LL r4,0(r1) + PPC_STL r4,1*SZL(r3) + /* get caller's LR */ + PPC_LL r0,LRSAVE(r4) + PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3) + mfmsr r0 + PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3) + mfctr r0 + PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3) + mfxer r0 + PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3) + mfcr r0 + PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3) + li r0,0 + PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3) + blr diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 957bded0020d..fb7049c054c0 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -33,6 +33,7 @@ #include <linux/mqueue.h> #include <linux/hardirq.h> #include <linux/utsname.h> +#include <linux/kernel_stat.h> #include <asm/pgtable.h> #include <asm/uaccess.h> @@ -467,6 +468,8 @@ static struct regbit { {MSR_VEC, "VEC"}, {MSR_VSX, "VSX"}, {MSR_ME, "ME"}, + {MSR_CE, "CE"}, + {MSR_DE, "DE"}, {MSR_IR, "IR"}, {MSR_DR, "DR"}, {0, NULL} @@ -998,7 +1001,7 @@ unsigned long get_wchan(struct 
task_struct *p) return 0; } -static int kstack_depth_to_print = 64; +static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; void show_stack(struct task_struct *tsk, unsigned long *stack) { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 3a2dc7e6586a..6f73c739f1e2 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1160,6 +1160,8 @@ static inline void __init phyp_dump_reserve_mem(void) {} void __init early_init_devtree(void *params) { + unsigned long limit; + DBG(" -> early_init_devtree(%p)\n", params); /* Setup flat device-tree pointer */ @@ -1200,7 +1202,19 @@ void __init early_init_devtree(void *params) early_reserve_mem(); phyp_dump_reserve_mem(); - lmb_enforce_memory_limit(memory_limit); + limit = memory_limit; + if (! limit) { + unsigned long memsize; + + /* Ensure that total memory size is page-aligned, because + * otherwise mark_bootmem() gets upset. */ + lmb_analyze(); + memsize = lmb_phys_mem_size(); + if ((memsize & PAGE_MASK) != memsize) + limit = memsize & PAGE_MASK; + } + lmb_enforce_memory_limit(limit); + lmb_analyze(); DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); @@ -1271,6 +1285,37 @@ struct device_node *of_find_node_by_phandle(phandle handle) EXPORT_SYMBOL(of_find_node_by_phandle); /** + * of_find_next_cache_node - Find a node's subsidiary cache + * @np: node of type "cpu" or "cache" + * + * Returns a node pointer with refcount incremented, use + * of_node_put() on it when done. Caller should hold a reference + * to np. + */ +struct device_node *of_find_next_cache_node(struct device_node *np) +{ + struct device_node *child; + const phandle *handle; + + handle = of_get_property(np, "l2-cache", NULL); + if (!handle) + handle = of_get_property(np, "next-level-cache", NULL); + + if (handle) + return of_find_node_by_phandle(*handle); + + /* OF on pmac has nodes instead of properties named "l2-cache" + * beneath CPU nodes. 
+ */ + if (!strcmp(np->type, "cpu")) + for_each_child_of_node(np, child) + if (!strcmp(child->type, "cache")) + return child; + + return NULL; +} + +/** * of_find_all_nodes - Get next node in global list * @prev: Previous node or NULL to start iteration * of_node_put() will be called on it diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index a11d68976dc8..8c1335566089 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -734,10 +734,7 @@ void of_irq_map_init(unsigned int flags) if (flags & OF_IMAP_NO_PHANDLE) { struct device_node *np; - for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) { - if (of_get_property(np, "interrupt-controller", NULL) - == NULL) - continue; + for_each_node_with_property(np, "interrupt-controller") { /* Skip /chosen/interrupt-controller */ if (strcmp(np->name, "chosen") == 0) continue; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 1f8505c23548..fdfe14c4bdef 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -566,6 +566,32 @@ int rtas_get_sensor(int sensor, int index, int *state) } EXPORT_SYMBOL(rtas_get_sensor); +bool rtas_indicator_present(int token, int *maxindex) +{ + int proplen, count, i; + const struct indicator_elem { + u32 token; + u32 maxindex; + } *indicators; + + indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen); + if (!indicators) + return false; + + count = proplen / sizeof(struct indicator_elem); + + for (i = 0; i < count; i++) { + if (indicators[i].token != token) + continue; + if (maxindex) + *maxindex = indicators[i].maxindex; + return true; + } + + return false; +} +EXPORT_SYMBOL(rtas_indicator_present); + int rtas_set_indicator(int indicator, int index, int new_value) { int token = rtas_token("set-indicator"); diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 589a2797eac2..8869001ab5d7 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -301,51 +301,3 @@ void __init find_and_init_phbs(void) #endif /* CONFIG_PPC32 */ } } - -/* RPA-specific bits for removing PHBs */ -int pcibios_remove_root_bus(struct pci_controller *phb) -{ - struct pci_bus *b = phb->bus; - struct resource *res; - int rc, i; - - res = b->resource[0]; - if (!res->flags) { - printk(KERN_ERR "%s: no IO resource for PHB %s\n", __func__, - b->name); - return 1; - } - - rc = pcibios_unmap_io_space(b); - if (rc) { - printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", - __func__, b->name); - return 1; - } - - if (release_resource(res)) { - printk(KERN_ERR "%s: failed to release IO on bus %s\n", - __func__, b->name); - return 1; - } - - for (i = 1; i < 3; ++i) { - res = b->resource[i]; - if (!res->flags && i == 0) { - printk(KERN_ERR "%s: no MEM resource for PHB %s\n", - __func__, b->name); - return 1; - } - if (res->flags && release_resource(res)) { - printk(KERN_ERR - "%s: failed to release IO %d on bus %s\n", - __func__, i, b->name); - return 1; - } - } - - pcibios_free_controller(phb); - - return 0; -} -EXPORT_SYMBOL(pcibios_remove_root_bus); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index c1a27626a940..9e1ca745d8f0 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -38,6 +38,7 @@ #include <asm/time.h> #include <asm/serial.h> #include <asm/udbg.h> +#include <asm/mmu_context.h> #include "setup.h" @@ -49,12 +50,12 @@ int boot_cpuid; EXPORT_SYMBOL_GPL(boot_cpuid); int boot_cpuid_phys; +int smp_hw_index[NR_CPUS]; + 
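The new rtas_indicator_present() helper above answers "does firmware advertise this indicator token, and up to which index?" by scanning the rtas-indicators property, so callers no longer open-code the property walk. A minimal, hypothetical sketch of a caller follows; the token value, my_indicator_probe(), and the pr_info message are invented for illustration, while rtas_indicator_present() and rtas_set_indicator() are the interfaces shown in this hunk:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/rtas.h>

#define MY_IND_TOKEN	9007	/* hypothetical indicator token */

static int my_indicator_probe(void)
{
	int max_index;

	/* Bail out unless "rtas-indicators" lists this token */
	if (!rtas_indicator_present(MY_IND_TOKEN, &max_index))
		return -ENODEV;

	pr_info("indicator %d present, max index %d\n",
		MY_IND_TOKEN, max_index);

	/* Drive index 0 of the indicator; the state value is device
	 * specific, 1 is used here purely as an example. */
	return rtas_set_indicator(MY_IND_TOKEN, 0, 1);
}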
unsigned long ISA_DMA_THRESHOLD; unsigned int DMA_MODE_READ; unsigned int DMA_MODE_WRITE; -int have_of = 1; - #ifdef CONFIG_VGA_CONSOLE unsigned long vgacon_remap_base; EXPORT_SYMBOL(vgacon_remap_base); @@ -97,6 +98,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) PTRRELOC(&__start___ftr_fixup), PTRRELOC(&__stop___ftr_fixup)); + do_feature_fixups(spec->mmu_features, + PTRRELOC(&__start___mmu_ftr_fixup), + PTRRELOC(&__stop___mmu_ftr_fixup)); + do_lwsync_fixups(spec->cpu_features, PTRRELOC(&__start___lwsync_fixup), PTRRELOC(&__stop___lwsync_fixup)); @@ -121,6 +126,8 @@ notrace void __init machine_init(unsigned long dt_ptr) probe_machine(); + setup_kdump_trampoline(); + #ifdef CONFIG_6xx if (cpu_has_feature(CPU_FTR_CAN_DOZE) || cpu_has_feature(CPU_FTR_CAN_NAP)) @@ -326,4 +333,8 @@ void __init setup_arch(char **cmdline_p) if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); paging_init(); + + /* Initialize the MMU context management stuff */ + mmu_context_init(); + } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 169d74cef157..d8bd2161e738 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -70,7 +70,6 @@ #define DBG(fmt...) #endif -int have_of = 1; int boot_cpuid = 0; u64 ppc64_pft_size; @@ -362,6 +361,8 @@ void __init setup_system(void) */ do_feature_fixups(cur_cpu_spec->cpu_features, &__start___ftr_fixup, &__stop___ftr_fixup); + do_feature_fixups(cur_cpu_spec->mmu_features, + &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup); do_feature_fixups(powerpc_firmware_features, &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); do_lwsync_fixups(cur_cpu_spec->cpu_features, @@ -606,8 +607,6 @@ void __init setup_per_cpu_areas(void) for_each_possible_cpu(i) { ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); - if (!ptr) - panic("Cannot allocate cpu data for CPU %d\n", i); paca[i].data_offset = ptr - __per_cpu_start; memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c index bc892e69b4f7..a5e54526403d 100644 --- a/arch/powerpc/kernel/smp-tbsync.c +++ b/arch/powerpc/kernel/smp-tbsync.c @@ -113,7 +113,7 @@ void __devinit smp_generic_give_timebase(void) { int i, score, score2, old, min=0, max=5000, offset=1000; - printk("Synchronizing timebase\n"); + pr_debug("Software timebase sync\n"); /* if this fails then this kernel won't work anyway... */ tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL ); @@ -123,13 +123,13 @@ void __devinit smp_generic_give_timebase(void) while (!tbsync->ack) barrier(); - printk("Got ack\n"); + pr_debug("Got ack\n"); /* binary search */ for (old = -1; old != offset ; offset = (min+max) / 2) { score = start_contest(kSetAndTest, offset, NUM_ITER); - printk("score %d, offset %d\n", score, offset ); + pr_debug("score %d, offset %d\n", score, offset ); if( score > 0 ) max = offset; @@ -140,8 +140,8 @@ void __devinit smp_generic_give_timebase(void) score = start_contest(kSetAndTest, min, NUM_ITER); score2 = start_contest(kSetAndTest, max, NUM_ITER); - printk("Min %d (score %d), Max %d (score %d)\n", - min, score, max, score2); + pr_debug("Min %d (score %d), Max %d (score %d)\n", + min, score, max, score2); score = abs(score); score2 = abs(score2); offset = (score < score2) ? 
min : max; @@ -155,7 +155,7 @@ void __devinit smp_generic_give_timebase(void) if (score2 <= score || score2 < 20) break; } - printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); + pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); /* exiting */ tbsync->cmd = kExit; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index ff9f7010097d..65484b2200b3 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -57,16 +57,11 @@ #define DBG(fmt...) #endif -int smp_hw_index[NR_CPUS]; struct thread_info *secondary_ti; -cpumask_t cpu_possible_map = CPU_MASK_NONE; -cpumask_t cpu_online_map = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); @@ -123,6 +118,65 @@ void smp_message_recv(int msg) } } +static irqreturn_t call_function_action(int irq, void *data) +{ + generic_smp_call_function_interrupt(); + return IRQ_HANDLED; +} + +static irqreturn_t reschedule_action(int irq, void *data) +{ + /* we just need the return path side effect of checking need_resched */ + return IRQ_HANDLED; +} + +static irqreturn_t call_function_single_action(int irq, void *data) +{ + generic_smp_call_function_single_interrupt(); + return IRQ_HANDLED; +} + +static irqreturn_t debug_ipi_action(int irq, void *data) +{ + smp_message_recv(PPC_MSG_DEBUGGER_BREAK); + return IRQ_HANDLED; +} + +static irq_handler_t smp_ipi_action[] = { + [PPC_MSG_CALL_FUNCTION] = call_function_action, + [PPC_MSG_RESCHEDULE] = reschedule_action, + [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action, + [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action, +}; + +const char *smp_ipi_name[] = { + [PPC_MSG_CALL_FUNCTION] = "ipi call function", + [PPC_MSG_RESCHEDULE] = "ipi reschedule", + [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single", + [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger", +}; + +/* optional function to request ipi, for controllers with >= 4 ipis */ +int smp_request_message_ipi(int virq, int msg) +{ + int err; + + if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) { + return -EINVAL; + } +#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC) + if (msg == PPC_MSG_DEBUGGER_BREAK) { + return 1; + } +#endif + err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU, + smp_ipi_name[msg], 0); + WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", + virq, smp_ipi_name[msg], err); + + return err; +} + void smp_send_reschedule(int cpu) { if (likely(smp_ops)) @@ -408,8 +462,7 @@ out: static struct device_node *cpu_to_l2cache(int cpu) { struct device_node *np; - const phandle *php; - phandle ph; + struct device_node *cache; if (!cpu_present(cpu)) return NULL; @@ -418,13 +471,11 @@ static struct device_node *cpu_to_l2cache(int cpu) if (np == NULL) return NULL; - php = of_get_property(np, "l2-cache", NULL); - if (php == NULL) - return NULL; - ph = *php; + cache = of_find_next_cache_node(np); + of_node_put(np); - return of_find_node_by_phandle(ph); + return cache; } /* Activate a secondary processor. 
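The IPI plumbing added to smp.c above lets interrupt controllers with four or more hardware IPIs give each message its own interrupt instead of multiplexing everything through smp_message_recv(). A hypothetical backend hook might wire them up as below; ipi_virq[] and my_pic_request_ipis() are invented for illustration, while smp_request_message_ipi() and the PPC_MSG_* numbers are the ones introduced in this patch:

#include <asm/smp.h>

static int ipi_virq[4];	/* virqs mapped earlier by the PIC driver */

static void my_pic_request_ipis(void)
{
	smp_request_message_ipi(ipi_virq[0], PPC_MSG_CALL_FUNCTION);
	smp_request_message_ipi(ipi_virq[1], PPC_MSG_RESCHEDULE);
	smp_request_message_ipi(ipi_virq[2], PPC_MSG_CALL_FUNC_SINGLE);
	/* Returns > 0 (and skips the request) when neither CONFIG_DEBUGGER
	 * nor CONFIG_KEXEC is enabled. */
	smp_request_message_ipi(ipi_virq[3], PPC_MSG_DEBUGGER_BREAK);
}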
*/ diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index 77b7b34b5955..560c96119501 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c @@ -34,6 +34,6 @@ void save_processor_state(void) void restore_processor_state(void) { #ifdef CONFIG_PPC32 - set_context(current->active_mm->context.id, current->active_mm->pgd); + switch_mmu_context(NULL, current->active_mm); #endif } diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index 77fc76607ab2..b47d8ceffb52 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -5,7 +5,7 @@ #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> - +#include <asm/mmu.h> /* * Structure for storing CPU registers on the save area. @@ -279,7 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mtibatl 3,r4 #endif -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION li r4,0 mtspr SPRN_DBAT4U,r4 mtspr SPRN_DBAT4L,r4 @@ -297,7 +297,7 @@ BEGIN_FTR_SECTION mtspr SPRN_IBAT6L,r4 mtspr SPRN_IBAT7U,r4 mtspr SPRN_IBAT7L,r4 -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) /* Flush all TLBs */ lis r4,0x1000 diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 20885a38237a..0c64f10087b9 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -566,7 +566,6 @@ static bool cache_is_unified(struct device_node *np) static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level) { - const phandle *next_cache_phandle; struct device_node *next_cache; struct cache_desc *new, **end; @@ -591,11 +590,7 @@ static struct cache_desc * __cpuinit create_cache_index_info(struct device_node while (*end) end = &(*end)->next; - next_cache_phandle = of_get_property(np, "l2-cache", NULL); - if (!next_cache_phandle) - goto out; - - next_cache = of_find_node_by_phandle(*next_cache_phandle); + next_cache = of_find_next_cache_node(np); if (!next_cache) goto out; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index e2ee66b5831d..c9564031a2a9 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -164,8 +164,6 @@ static u64 tb_to_ns_scale __read_mostly; static unsigned tb_to_ns_shift __read_mostly; static unsigned long boot_tb __read_mostly; -static struct gettimeofday_struct do_gtod; - extern struct timezone sys_tz; static long timezone_offset; @@ -258,8 +256,10 @@ void account_system_vtime(struct task_struct *tsk) delta += sys_time; get_paca()->system_time = 0; } - account_system_time(tsk, 0, delta); - account_system_time_scaled(tsk, deltascaled); + if (in_irq() || idle_task(smp_processor_id()) != tsk) + account_system_time(tsk, 0, delta, deltascaled); + else + account_idle_time(delta); per_cpu(cputime_last_delta, smp_processor_id()) = delta; per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; local_irq_restore(flags); @@ -277,10 +277,8 @@ void account_process_tick(struct task_struct *tsk, int user_tick) utime = get_paca()->user_time; get_paca()->user_time = 0; - account_user_time(tsk, utime); - utimescaled = cputime_to_scaled(utime); - account_user_time_scaled(tsk, utimescaled); + account_user_time(tsk, utime, utimescaled); } /* @@ -340,8 +338,12 @@ void calculate_steal_time(void) tb = mftb(); purr = mfspr(SPRN_PURR); stolen = (tb - pme->tb) - (purr - pme->purr); - if (stolen > 0) - account_steal_time(current, stolen); + if (stolen > 0) { + if 
(idle_task(smp_processor_id()) != current) + account_steal_time(stolen); + else + account_idle_time(stolen); + } pme->tb = tb; pme->purr = purr; } @@ -415,31 +417,9 @@ void udelay(unsigned long usecs) } EXPORT_SYMBOL(udelay); - -/* - * There are two copies of tb_to_xs and stamp_xsec so that no - * lock is needed to access and use these values in - * do_gettimeofday. We alternate the copies and as long as a - * reasonable time elapses between changes, there will never - * be inconsistent values. ntpd has a minimum of one minute - * between updates. - */ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, u64 new_tb_to_xs) { - unsigned temp_idx; - struct gettimeofday_vars *temp_varp; - - temp_idx = (do_gtod.var_idx == 0); - temp_varp = &do_gtod.vars[temp_idx]; - - temp_varp->tb_to_xs = new_tb_to_xs; - temp_varp->tb_orig_stamp = new_tb_stamp; - temp_varp->stamp_xsec = new_stamp_xsec; - smp_mb(); - do_gtod.varp = temp_varp; - do_gtod.var_idx = temp_idx; - /* * tb_update_count is used to allow the userspace gettimeofday code * to assure itself that it sees a consistent view of the tb_to_xs and @@ -456,6 +436,7 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, vdso_data->tb_to_xs = new_tb_to_xs; vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; + vdso_data->stamp_xtime = xtime; smp_wmb(); ++(vdso_data->tb_update_count); } @@ -514,9 +495,7 @@ static int __init iSeries_tb_recal(void) tb_ticks_per_sec = new_tb_ticks_per_sec; calc_cputime_factors(); div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); - do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; tb_to_xs = divres.result_low; - do_gtod.varp->tb_to_xs = tb_to_xs; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; vdso_data->tb_to_xs = tb_to_xs; } @@ -869,7 +848,7 @@ static void register_decrementer_clockevent(int cpu) struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; *dec = decrementer_clockevent; - dec->cpumask = cpumask_of_cpu(cpu); + dec->cpumask = cpumask_of(cpu); printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", dec->name, dec->mult, dec->shift, cpu); @@ -988,15 +967,6 @@ void __init time_init(void) sys_tz.tz_dsttime = 0; } - do_gtod.varp = &do_gtod.vars[0]; - do_gtod.var_idx = 0; - do_gtod.varp->tb_orig_stamp = tb_last_jiffy; - __get_cpu_var(last_jiffy) = tb_last_jiffy; - do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; - do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; - do_gtod.varp->tb_to_xs = tb_to_xs; - do_gtod.tb_to_us = tb_to_us; - vdso_data->tb_orig_stamp = tb_last_jiffy; vdso_data->tb_update_count = 0; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f5def6cf5cd6..5457e9575685 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1160,37 +1160,85 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address, #ifdef CONFIG_SPE void SPEFloatingPointException(struct pt_regs *regs) { + extern int do_spe_mathemu(struct pt_regs *regs); unsigned long spefscr; int fpexc_mode; int code = 0; + int err; + + preempt_disable(); + if (regs->msr & MSR_SPE) + giveup_spe(current); + preempt_enable(); spefscr = current->thread.spefscr; fpexc_mode = current->thread.fpexc_mode; - /* Hardware does not neccessarily set sticky - * underflow/overflow/invalid flags */ if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { code = FPE_FLTOVF; - spefscr |= SPEFSCR_FOVFS; } else if ((spefscr & 
SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { code = FPE_FLTUND; - spefscr |= SPEFSCR_FUNFS; } else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) code = FPE_FLTDIV; else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { code = FPE_FLTINV; - spefscr |= SPEFSCR_FINVS; } else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) code = FPE_FLTRES; - current->thread.spefscr = spefscr; + err = do_spe_mathemu(regs); + if (err == 0) { + regs->nip += 4; /* skip emulated instruction */ + emulate_single_step(regs); + return; + } + + if (err == -EFAULT) { + /* got an error reading the instruction */ + _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); + } else if (err == -EINVAL) { + /* didn't recognize the instruction */ + printk(KERN_ERR "unrecognized spe instruction " + "in %s at %lx\n", current->comm, regs->nip); + } else { + _exception(SIGFPE, regs, code, regs->nip); + } - _exception(SIGFPE, regs, code, regs->nip); return; } + +void SPEFloatingPointRoundException(struct pt_regs *regs) +{ + extern int speround_handler(struct pt_regs *regs); + int err; + + preempt_disable(); + if (regs->msr & MSR_SPE) + giveup_spe(current); + preempt_enable(); + + regs->nip -= 4; + err = speround_handler(regs); + if (err == 0) { + regs->nip += 4; /* skip emulated instruction */ + emulate_single_step(regs); + return; + } + + if (err == -EFAULT) { + /* got an error reading the instruction */ + _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); + } else if (err == -EINVAL) { + /* didn't recognize the instruction */ + printk(KERN_ERR "unrecognized spe instruction " + "in %s at %lx\n", current->comm, regs->nip); + } else { + _exception(SIGFPE, regs, 0, regs->nip); + return; + } +} #endif /* diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 65639a43e644..ad06d5c75b15 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) * This is called from binfmt_elf, we create the special vma for the * vDSO and insert it into the mm struct tree */ -int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; struct page **vdso_pagelist; @@ -567,6 +566,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, do_feature_fixups(cur_cpu_spec->cpu_features, start64, start64 + size64); + start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64); + if (start64) + do_feature_fixups(cur_cpu_spec->mmu_features, + start64, start64 + size64); + start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64); if (start64) do_feature_fixups(powerpc_firmware_features, @@ -583,6 +587,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, do_feature_fixups(cur_cpu_spec->cpu_features, start32, start32 + size32); + start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32); + if (start32) + do_feature_fixups(cur_cpu_spec->mmu_features, + start32, start32 + size32); + #ifdef CONFIG_PPC64 start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32); if (start32) diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 72ca26df457e..ee038d4bf252 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -16,6 +16,13 @@ #include <asm/asm-offsets.h> #include <asm/unistd.h> +/* Offset for the low 32-bit part of a field of 
long type */ +#ifdef CONFIG_PPC64 +#define LOPART 4 +#else +#define LOPART 0 +#endif + .text /* * Exact prototype of gettimeofday @@ -90,101 +97,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) mflr r12 /* r12 saves lr */ .cfi_register lr,r12 - mr r10,r3 /* r10 saves id */ mr r11,r4 /* r11 saves tp */ bl __get_datapage@local /* get data page */ mr r9,r3 /* datapage ptr in r9 */ - beq cr1,50f /* if monotonic -> jump there */ - - /* - * CLOCK_REALTIME - */ - - bl __do_get_xsec@local /* get xsec from tb & kernel */ - bne- 98f /* out of line -> do syscall */ - - /* seconds are xsec >> 20 */ - rlwinm r5,r4,12,20,31 - rlwimi r5,r3,12,0,19 - stw r5,TSPC32_TV_SEC(r11) - /* get remaining xsec and convert to nsec. we scale - * up remaining xsec by 12 bits and get the top 32 bits - * of the multiplication, then we multiply by 1000 - */ - rlwinm r5,r4,12,0,19 - lis r6,1000000@h - ori r6,r6,1000000@l - mulhwu r5,r5,r6 - mulli r5,r5,1000 - stw r5,TSPC32_TV_NSEC(r11) - mtlr r12 - crclr cr0*4+so - li r3,0 - blr +50: bl __do_get_tspec@local /* get sec/nsec from tb & kernel */ + bne cr1,80f /* not monotonic -> all done */ /* * CLOCK_MONOTONIC */ -50: bl __do_get_xsec@local /* get xsec from tb & kernel */ - bne- 98f /* out of line -> do syscall */ - - /* seconds are xsec >> 20 */ - rlwinm r6,r4,12,20,31 - rlwimi r6,r3,12,0,19 - - /* get remaining xsec and convert to nsec. we scale - * up remaining xsec by 12 bits and get the top 32 bits - * of the multiplication, then we multiply by 1000 - */ - rlwinm r7,r4,12,0,19 - lis r5,1000000@h - ori r5,r5,1000000@l - mulhwu r7,r7,r5 - mulli r7,r7,1000 - /* now we must fixup using wall to monotonic. We need to snapshot * that value and do the counter trick again. Fortunately, we still * have the counter value in r8 that was returned by __do_get_xsec. - * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5 - * can be used + * At this point, r3,r4 contain our sec/nsec values, r5 and r6 + * can be used, r7 contains NSEC_PER_SEC. */ - lwz r3,WTOM_CLOCK_SEC(r9) - lwz r4,WTOM_CLOCK_NSEC(r9) + lwz r5,WTOM_CLOCK_SEC(r9) + lwz r6,WTOM_CLOCK_NSEC(r9) - /* We now have our result in r3,r4. We create a fake dependency - * on that result and re-check the counter + /* We now have our offset in r5,r6. We create a fake dependency + * on that value and re-check the counter */ - or r5,r4,r3 - xor r0,r5,r5 + or r0,r6,r5 + xor r0,r0,r0 add r9,r9,r0 -#ifdef CONFIG_PPC64 - lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) -#else - lwz r0,(CFG_TB_UPDATE_COUNT)(r9) -#endif + lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9) cmpl cr0,r8,r0 /* check if updated */ bne- 50b - /* Calculate and store result. Note that this mimmics the C code, + /* Calculate and store result. Note that this mimics the C code, * which may cause funny results if nsec goes negative... is that * possible at all ? */ - add r3,r3,r6 - add r4,r4,r7 - lis r5,NSEC_PER_SEC@h - ori r5,r5,NSEC_PER_SEC@l - cmpl cr0,r4,r5 - cmpli cr1,r4,0 + add r3,r3,r5 + add r4,r4,r6 + cmpw cr0,r4,r7 + cmpwi cr1,r4,0 blt 1f - subf r4,r5,r4 + subf r4,r7,r4 addi r3,r3,1 -1: bge cr1,1f +1: bge cr1,80f addi r3,r3,-1 - add r4,r4,r5 -1: stw r3,TSPC32_TV_SEC(r11) + add r4,r4,r7 + +80: stw r3,TSPC32_TV_SEC(r11) stw r4,TSPC32_TV_NSEC(r11) mtlr r12 @@ -195,10 +154,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) /* * syscall fallback */ -98: - mtlr r12 - mr r3,r10 - mr r4,r11 99: li r0,__NR_clock_gettime sc @@ -254,11 +209,7 @@ __do_get_xsec: /* Check for update count & load values. 
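Both the old __do_get_xsec and the new __do_get_tspec guard their reads with the same lockless scheme: the kernel bumps vdso_data->tb_update_count to an odd value before changing the timekeeping fields and back to even afterwards, and the vDSO retries whenever the count is odd or has moved between the two reads. A simplified C model of that reader loop follows; the field names are illustrative rather than the exact vdso_data layout, and the real assembly orders the loads with an artificial data dependency (the xor/add sequences) rather than relying on volatile:

#include <stdint.h>

struct vdso_like_data {
	volatile uint32_t update_count;	/* low word of tb_update_count */
	volatile uint64_t stamp_sec;
	volatile uint64_t stamp_nsec;
};

static void read_stable(const struct vdso_like_data *d,
			uint64_t *sec, uint64_t *nsec)
{
	uint32_t count;

	do {
		do {
			count = d->update_count;
		} while (count & 1);		/* writer in progress, spin */
		*sec = d->stamp_sec;		/* snapshot the values */
		*nsec = d->stamp_nsec;
	} while (count != d->update_count);	/* writer ran, retry */
}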
We use the low * order 32 bits of the update count */ -#ifdef CONFIG_PPC64 -1: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9) -#else -1: lwz r8,(CFG_TB_UPDATE_COUNT)(r9) -#endif +1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9) andi. r0,r8,1 /* pending update ? loop */ bne- 1b xor r0,r8,r8 /* create dependency */ @@ -305,11 +256,7 @@ __do_get_xsec: or r6,r4,r3 xor r0,r6,r6 add r9,r9,r0 -#ifdef CONFIG_PPC64 - lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) -#else - lwz r0,(CFG_TB_UPDATE_COUNT)(r9) -#endif + lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9) cmpl cr0,r8,r0 /* check if updated */ bne- 1b @@ -322,3 +269,98 @@ __do_get_xsec: */ 3: blr .cfi_endproc + +/* + * This is the core of clock_gettime(), it returns the current + * time in seconds and nanoseconds in r3 and r4. + * It expects the datapage ptr in r9 and doesn't clobber it. + * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7. + * On return, r8 contains the counter value that can be reused. + * This clobbers cr0 but not any other cr field. + */ +__do_get_tspec: + .cfi_startproc + /* Check for update count & load values. We use the low + * order 32 bits of the update count + */ +1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9) + andi. r0,r8,1 /* pending update ? loop */ + bne- 1b + xor r0,r8,r8 /* create dependency */ + add r9,r9,r0 + + /* Load orig stamp (offset to TB) */ + lwz r5,CFG_TB_ORIG_STAMP(r9) + lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) + + /* Get a stable TB value */ +2: mftbu r3 + mftbl r4 + mftbu r0 + cmpl cr0,r3,r0 + bne- 2b + + /* Subtract tb orig stamp and shift left 12 bits. + */ + subfc r7,r6,r4 + subfe r0,r5,r3 + slwi r0,r0,12 + rlwimi. r0,r7,12,20,31 + slwi r7,r7,12 + + /* Load scale factor & do multiplication */ + lwz r5,CFG_TB_TO_XS(r9) /* load values */ + lwz r6,(CFG_TB_TO_XS+4)(r9) + mulhwu r3,r7,r6 + mullw r10,r7,r5 + mulhwu r4,r7,r5 + addc r10,r3,r10 + li r3,0 + + beq+ 4f /* skip high part computation if 0 */ + mulhwu r3,r0,r5 + mullw r7,r0,r5 + mulhwu r5,r0,r6 + mullw r6,r0,r6 + adde r4,r4,r7 + addze r3,r3 + addc r4,r4,r5 + addze r3,r3 + addc r10,r10,r6 + +4: addze r4,r4 /* add in carry */ + lis r7,NSEC_PER_SEC@h + ori r7,r7,NSEC_PER_SEC@l + mulhwu r4,r4,r7 /* convert to nanoseconds */ + + /* At this point, we have seconds & nanoseconds since the xtime + * stamp in r3+CA and r4. Load & add the xtime stamp. + */ +#ifdef CONFIG_PPC64 + lwz r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9) + lwz r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9) +#else + lwz r5,STAMP_XTIME+TSPC32_TV_SEC(r9) + lwz r6,STAMP_XTIME+TSPC32_TV_NSEC(r9) +#endif + add r4,r4,r6 + adde r3,r3,r5 + + /* We now have our result in r3,r4. We create a fake dependency + * on that result and re-check the counter + */ + or r6,r4,r3 + xor r0,r6,r6 + add r9,r9,r0 + lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9) + cmpl cr0,r8,r0 /* check if updated */ + bne- 1b + + /* check for nanosecond overflow and adjust if necessary */ + cmpw r4,r7 + bltlr /* all done if no overflow */ + subf r4,r7,r4 /* adjust if overflow */ + addi r3,r3,1 + + blr + .cfi_endproc diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index be3b6a41dc09..904ef1360dd7 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -34,6 +34,9 @@ SECTIONS __ftr_fixup : { *(__ftr_fixup) } . = ALIGN(8); + __mmu_ftr_fixup : { *(__mmu_ftr_fixup) } + + . 
= ALIGN(8); __lwsync_fixup : { *(__lwsync_fixup) } #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index c6401f9e37f1..262cd5857a56 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -75,90 +75,49 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) mflr r12 /* r12 saves lr */ .cfi_register lr,r12 - mr r10,r3 /* r10 saves id */ mr r11,r4 /* r11 saves tp */ bl V_LOCAL_FUNC(__get_datapage) /* get data page */ - beq cr1,50f /* if monotonic -> jump there */ - - /* - * CLOCK_REALTIME - */ - - bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ - - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ - ori r7,r7,16960 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ - std r5,TSPC64_TV_SEC(r11) /* store sec in tv */ - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / - * XSEC_PER_SEC - */ - rldicl r0,r0,44,20 - mulli r0,r0,1000 /* nsec = usec * 1000 */ - std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */ - - mtlr r12 - crclr cr0*4+so - li r3,0 - blr +50: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */ + bne cr1,80f /* if not monotonic, all done */ /* * CLOCK_MONOTONIC */ -50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ - - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ - ori r7,r7,16960 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / - * XSEC_PER_SEC - */ - rldicl r6,r0,44,20 - mulli r6,r6,1000 /* nsec = usec * 1000 */ - /* now we must fixup using wall to monotonic. We need to snapshot * that value and do the counter trick again. Fortunately, we still - * have the counter value in r8 that was returned by __do_get_xsec. - * At this point, r5,r6 contain our sec/nsec values. - * can be used + * have the counter value in r8 that was returned by __do_get_tspec. + * At this point, r4,r5 contain our sec/nsec values. */ - lwa r4,WTOM_CLOCK_SEC(r3) - lwa r7,WTOM_CLOCK_NSEC(r3) + lwa r6,WTOM_CLOCK_SEC(r3) + lwa r9,WTOM_CLOCK_NSEC(r3) - /* We now have our result in r4,r7. We create a fake dependency + /* We now have our result in r6,r9. We create a fake dependency * on that result and re-check the counter */ - or r9,r4,r7 - xor r0,r9,r9 + or r0,r6,r9 + xor r0,r0,r0 add r3,r3,r0 ld r0,CFG_TB_UPDATE_COUNT(r3) cmpld cr0,r0,r8 /* check if updated */ bne- 50b - /* Calculate and store result. Note that this mimmics the C code, - * which may cause funny results if nsec goes negative... is that - * possible at all ? + /* Add wall->monotonic offset and check for overflow or underflow. 
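The arithmetic in __do_get_tspec is a fixed-point conversion: tb_to_xs is the existing scale factor that turns timebase ticks into 2^-20-second units with a 64-bit binary fraction, so shifting the tick delta left by 12 before the high multiply yields seconds in 32.32 fixed point (the "units of 2^-32 seconds" mentioned in the 64-bit version), and one further multiply by NSEC_PER_SEC converts the fraction to nanoseconds. A sketch of the same computation in C, assuming that interpretation of tb_to_xs and a compiler that provides unsigned __int128:

#include <stdint.h>

/* High 64 bits of a 64x64-bit multiply, like the mulhdu instruction */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

/* Convert a timebase delta to (sec, nsec) the way __do_get_tspec does */
static void tb_delta_to_timespec(uint64_t tb_delta, uint64_t tb_to_xs,
				 uint32_t *sec, uint32_t *nsec)
{
	/* (delta << 12) * tb_to_xs, keeping the high half: 32.32 seconds */
	uint64_t t = mulhdu(tb_delta << 12, tb_to_xs);

	*sec = (uint32_t)(t >> 32);
	/* fraction in 2^-32 s units, times 1e9, back down by 2^32 */
	*nsec = (uint32_t)(((uint64_t)(uint32_t)t * 1000000000ULL) >> 32);
}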
*/ - add r4,r4,r5 - add r7,r7,r6 - lis r9,NSEC_PER_SEC@h - ori r9,r9,NSEC_PER_SEC@l - cmpl cr0,r7,r9 - cmpli cr1,r7,0 + add r4,r4,r6 + add r5,r5,r9 + cmpd cr0,r5,r7 + cmpdi cr1,r5,0 blt 1f - subf r7,r9,r7 + subf r5,r7,r5 addi r4,r4,1 -1: bge cr1,1f +1: bge cr1,80f addi r4,r4,-1 - add r7,r7,r9 -1: std r4,TSPC64_TV_SEC(r11) - std r7,TSPC64_TV_NSEC(r11) + add r5,r5,r7 + +80: std r4,TSPC64_TV_SEC(r11) + std r5,TSPC64_TV_NSEC(r11) mtlr r12 crclr cr0*4+so @@ -168,10 +127,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) /* * syscall fallback */ -98: - mtlr r12 - mr r3,r10 - mr r4,r11 99: li r0,__NR_clock_gettime sc @@ -253,3 +208,59 @@ V_FUNCTION_BEGIN(__do_get_xsec) blr .cfi_endproc V_FUNCTION_END(__do_get_xsec) + +/* + * This is the core of clock_gettime(), it returns the current + * time in seconds and nanoseconds in r4 and r5. + * It expects the datapage ptr in r3 and doesn't clobber it. + * It clobbers r0 and r6 and returns NSEC_PER_SEC in r7. + * On return, r8 contains the counter value that can be reused. + * This clobbers cr0 but not any other cr field. + */ +V_FUNCTION_BEGIN(__do_get_tspec) + .cfi_startproc + /* check for update count & load values */ +1: ld r8,CFG_TB_UPDATE_COUNT(r3) + andi. r0,r8,1 /* pending update ? loop */ + bne- 1b + xor r0,r8,r8 /* create dependency */ + add r3,r3,r0 + + /* Get TB & offset it. We use the MFTB macro which will generate + * workaround code for Cell. + */ + MFTB(r7) + ld r9,CFG_TB_ORIG_STAMP(r3) + subf r7,r9,r7 + + /* Scale result */ + ld r5,CFG_TB_TO_XS(r3) + sldi r7,r7,12 /* compute time since stamp_xtime */ + mulhdu r6,r7,r5 /* in units of 2^-32 seconds */ + + /* Add stamp since epoch */ + ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3) + ld r5,STAMP_XTIME+TSPC64_TV_NSEC(r3) + or r0,r4,r5 + or r0,r0,r6 + xor r0,r0,r0 + add r3,r3,r0 + ld r0,CFG_TB_UPDATE_COUNT(r3) + cmpld r0,r8 /* check if updated */ + bne- 1b /* reload if so */ + + /* convert to seconds & nanoseconds and add to stamp */ + lis r7,NSEC_PER_SEC@h + ori r7,r7,NSEC_PER_SEC@l + mulhwu r0,r6,r7 /* compute nanoseconds and */ + srdi r6,r6,32 /* seconds since stamp_xtime */ + clrldi r0,r0,32 + add r5,r5,r0 /* add nanoseconds together */ + cmpd r5,r7 /* overflow? */ + add r4,r4,r6 + bltlr /* all done if no overflow */ + subf r5,r7,r5 /* if overflow, adjust */ + addi r4,r4,1 + blr + .cfi_endproc +V_FUNCTION_END(__do_get_tspec) diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index d0b2526dd38d..0e615404e247 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -35,6 +35,9 @@ SECTIONS __ftr_fixup : { *(__ftr_fixup) } . = ALIGN(8); + __mmu_ftr_fixup : { *(__mmu_ftr_fixup) } + + . = ALIGN(8); __lwsync_fixup : { *(__lwsync_fixup) } . 
= ALIGN(8); diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index a11e6bc59b30..94aa7b011b27 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -41,9 +41,9 @@ static struct bus_type vio_bus_type; static struct vio_dev vio_bus_device = { /* fake "parent" device */ - .name = vio_bus_device.dev.bus_id, + .name = "vio", .type = "", - .dev.bus_id = "vio", + .dev.init_name = "vio", .dev.bus = &vio_bus_type, }; @@ -1216,7 +1216,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) viodev->irq = irq_of_parse_and_map(of_node, 0); - snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); + dev_set_name(&viodev->dev, "%x", *unit_address); viodev->name = of_node->name; viodev->type = of_node->type; viodev->unit_address = *unit_address; @@ -1243,7 +1243,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) /* register with generic device framework */ if (device_register(&viodev->dev)) { printk(KERN_ERR "%s: failed to register device %s\n", - __func__, viodev->dev.bus_id); + __func__, dev_name(&viodev->dev)); /* XXX free TCE table */ kfree(viodev); return NULL; @@ -1400,13 +1400,13 @@ static struct vio_dev *vio_find_name(const char *name) struct vio_dev *vio_find_node(struct device_node *vnode) { const uint32_t *unit_address; - char kobj_name[BUS_ID_SIZE]; + char kobj_name[20]; /* construct the kobject name from the device node */ unit_address = of_get_property(vnode, "reg", NULL); if (!unit_address) return NULL; - snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); + snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); return vio_find_name(kobj_name); } diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 2412c056baa4..47bf15cd2c9e 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -152,6 +152,12 @@ SECTIONS __stop___ftr_fixup = .; } . = ALIGN(8); + __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) { + __start___mmu_ftr_fixup = .; + *(__mmu_ftr_fixup) + __stop___mmu_ftr_fixup = .; + } + . = ALIGN(8); __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { __start___lwsync_fixup = .; *(__lwsync_fixup) |
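The __mmu_ftr_fixup sections added to the kernel and vDSO linker scripts hold one record per BEGIN_MMU_FTR_SECTION / END_MMU_FTR_SECTION_IFSET(bit) pair, and the extra do_feature_fixups(..., mmu_features, ...) calls walk those records at boot so that MMU-feature-specific code (such as the high-BAT sequence in swsusp_32.S) is disabled on CPUs that lack the feature. A deliberately simplified C model of that walk is sketched below; the entry layout is illustrative, not the exact fixup entry used by the real patching code, which can also splice in an alternative instruction sequence instead of plain nops:

#include <stdint.h>
#include <stddef.h>

#define PPC_NOP	0x60000000u	/* ori 0,0,0 */

struct mmu_ftr_fixup {
	uint64_t mask;		/* feature bits this code cares about */
	uint64_t value;		/* required setting of those bits */
	uint32_t *start;	/* first instruction of the section */
	uint32_t *end;		/* one past the last instruction */
};

static void apply_mmu_fixups(uint64_t cur_features,
			     struct mmu_ftr_fixup *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if ((cur_features & tbl[i].mask) == tbl[i].value)
			continue;	/* feature present, keep the code */
		for (uint32_t *p = tbl[i].start; p < tbl[i].end; p++)
			*p = PPC_NOP;	/* feature absent, nop it out */
	}
}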