author     Michael Ellerman <michael@ellerman.id.au>   2005-12-04 18:39:15 +1100
committer  Paul Mackerras <paulus@samba.org>           2006-01-09 14:51:50 +1100
commit     51fae6de24da57bc6cdaa1b253595c3513ecbf2d (patch)
tree       27df7f07270fb79eae44783331faeb3af3d313ab
parent     cd0ca2ce4b2f4a5132e7e230be8a510755c20870 (diff)
[PATCH] powerpc: Add a is_kernel_addr() macro
There's a bunch of code that compares an address with KERNELBASE to see if it's a
"kernel address", i.e. >= KERNELBASE. The proper test is actually to compare with
PAGE_OFFSET, since we're going to change KERNELBASE soon. So replace all of them
with an is_kernel_addr() macro that does that.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
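For readers outside the kernel tree, here is a minimal standalone C sketch of the idea. The PAGE_OFFSET and KERNELBASE values below are invented for illustration only (they are not the kernel's real layout); the sketch just shows why the "is this a kernel address?" test must compare against the base of the kernel linear mapping (PAGE_OFFSET) rather than KERNELBASE once the two values differ.

```c
/* Standalone illustration only: the addresses below are invented, not the kernel's. */
#include <stdio.h>

#define PAGE_OFFSET 0xc000000000000000UL  /* start of the kernel linear mapping (example value) */
#define KERNELBASE  0xc000000002000000UL  /* kernel text base once it moves above PAGE_OFFSET (example value) */

/* Mirrors the macro this patch adds to include/asm-powerpc/page.h. */
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)

int main(void)
{
	unsigned long addrs[] = {
		0x0000000010000000UL,	/* user address */
		0xc000000001000000UL,	/* kernel linear-mapping address below KERNELBASE */
		0xd000000000000000UL,	/* higher kernel address (e.g. vmalloc-style) */
	};

	for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		/* The old "addr >= KERNELBASE" test misclassifies the second
		 * address as a user address once KERNELBASE != PAGE_OFFSET;
		 * is_kernel_addr() classifies all three correctly. */
		printf("%#018lx  is_kernel_addr=%d  old KERNELBASE test=%d\n",
		       addrs[i], is_kernel_addr(addrs[i]),
		       (int)(addrs[i] >= KERNELBASE));
	}
	return 0;
}
```

Any C99 compiler will build and run this; in the kernel itself the macro simply replaces the open-coded comparisons, exactly as the hunks below show.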
-rw-r--r--  arch/powerpc/kernel/prom_init.c            2
-rw-r--r--  arch/powerpc/kernel/setup-common.c         2
-rw-r--r--  arch/powerpc/mm/slb.c                      6
-rw-r--r--  arch/powerpc/mm/stab.c                     6
-rw-r--r--  arch/powerpc/mm/tlb_64.c                   2
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c    4
-rw-r--r--  arch/powerpc/oprofile/op_model_rs64.c      3
-rw-r--r--  arch/powerpc/xmon/xmon.c                   4
-rw-r--r--  include/asm-powerpc/page.h                 6
9 files changed, 20 insertions, 15 deletions
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index bcdc209dca85..369e1a6cdd40 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1994,7 +1994,7 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
if (r3 && r4 && r4 != 0xdeadbeef) {
unsigned long val;
- RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
+ RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
val = RELOC(prom_initrd_start);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index a6d8aebf2bc6..d5c52fae023a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -319,7 +319,7 @@ void __init check_for_initrd(void)
/* If we were passed an initrd, set the ROOT_DEV properly if the values
* look sensible. If not, clear initrd reference.
*/
- if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+ if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
initrd_end > initrd_start)
ROOT_DEV = Root_RAM0;
else
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 60e852f2f8e5..a47b273600ec 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -134,14 +134,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
else
unmapped_base = TASK_UNMAPPED_BASE_USER64;
- if (pc >= KERNELBASE)
+ if (is_kernel_addr(pc))
return;
slb_allocate(pc);
if (GET_ESID(pc) == GET_ESID(stack))
return;
- if (stack >= KERNELBASE)
+ if (is_kernel_addr(stack))
return;
slb_allocate(stack);
@@ -149,7 +149,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
|| (GET_ESID(stack) == GET_ESID(unmapped_base)))
return;
- if (unmapped_base >= KERNELBASE)
+ if (is_kernel_addr(unmapped_base))
return;
slb_allocate(unmapped_base);
}
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 51e7951414e5..a18dab0d6b12 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
unsigned long offset;
/* Kernel or user address? */
- if (ea >= KERNELBASE) {
+ if (is_kernel_addr(ea)) {
vsid = get_kernel_vsid(ea);
} else {
if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
- if (ea < KERNELBASE) {
+ if (!is_kernel_addr(ea)) {
offset = __get_cpu_var(stab_cache_ptr);
if (offset < NR_STAB_CACHE_ENTRIES)
__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
entry++, ste++) {
unsigned long ea;
ea = ste->esid_data & ESID_MASK;
- if (ea < KERNELBASE) {
+ if (!is_kernel_addr(ea)) {
ste->esid_data = 0;
}
}
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 859d29a0cac5..bb3afb6e6317 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -168,7 +168,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
batch->mm = mm;
batch->psize = psize;
}
- if (addr < KERNELBASE) {
+ if (!is_kernel_addr(addr)) {
vsid = get_vsid(mm->context.id, addr);
WARN_ON(vsid == 0);
} else
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index a3401b46f3ba..659a021da0c7 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -252,7 +252,7 @@ static unsigned long get_pc(struct pt_regs *regs)
return (unsigned long)__va(pc);
/* Not sure where we were */
- if (pc < KERNELBASE)
+ if (!is_kernel_addr(pc))
/* function descriptor madness */
return *((unsigned long *)kernel_unknown_bucket);
@@ -264,7 +264,7 @@ static int get_kernel(unsigned long pc)
int is_kernel;
if (!mmcra_has_sihv) {
- is_kernel = (pc >= KERNELBASE);
+ is_kernel = is_kernel_addr(pc);
} else {
unsigned long mmcra = mfspr(SPRN_MMCRA);
is_kernel = ((mmcra & MMCRA_SIPR) == 0);
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index e010b85996e8..5c909ee609fe 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -178,7 +178,6 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
int val;
int i;
unsigned long pc = mfspr(SPRN_SIAR);
- int is_kernel = (pc >= KERNELBASE);
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
@@ -187,7 +186,7 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
val = ctr_read(i);
if (val < 0) {
if (ctr[i].enabled) {
- oprofile_add_pc(pc, is_kernel, i);
+ oprofile_add_pc(pc, is_kernel_addr(pc), i);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 465b75c5647e..22612ed5379c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1013,7 +1013,7 @@ static long check_bp_loc(unsigned long addr)
unsigned int instr;
addr &= ~3;
- if (addr < KERNELBASE) {
+ if (!is_kernel_addr(addr)) {
printf("Breakpoints may only be placed at kernel addresses\n");
return 0;
}
@@ -1064,7 +1064,7 @@ bpt_cmds(void)
dabr.address = 0;
dabr.enabled = 0;
if (scanhex(&dabr.address)) {
- if (dabr.address < KERNELBASE) {
+ if (!is_kernel_addr(dabr.address)) {
printf(badaddr);
break;
}
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index 18c1e5ee81a3..94905ba2cf41 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -86,6 +86,12 @@
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
+/*
+ * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
+ * "kernelness", use is_kernel_addr() - it should do what you want.
+ */
+#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+
#ifndef __ASSEMBLY__
#undef STRICT_MM_TYPECHECKS