summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorAnton Blanchard <anton@samba.org>2007-03-20 14:29:54 -0500
committerPaul Mackerras <paulus@samba.org>2007-03-22 15:01:43 +1100
commit4f5fa2fb1259f506d20e8af447117ec3ec426a53 (patch)
tree71de5f42287ddb2899d1ec0a1bec54dcc346bdbb /arch/powerpc
parentb4aea36b7956eeebfc56314ce0944db1441255ce (diff)
downloadlinux-stable-4f5fa2fb1259f506d20e8af447117ec3ec426a53.tar.gz
linux-stable-4f5fa2fb1259f506d20e8af447117ec3ec426a53.tar.bz2
linux-stable-4f5fa2fb1259f506d20e8af447117ec3ec426a53.zip
[POWERPC] Bypass hcall stats until cpu features have run
I noticed that we execute hcalls before cpu feature code has run (eg for setting up the bolted kernel region). This means that we may be executing code that is not appropriate for the processor we have. Create an unconditional branch that we nop out all the time to fix this.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- arch/powerpc/platforms/pseries/hvCall.S | 7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 1501b0a9e749..c1427b3634ec 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -30,9 +30,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PURR);
/*
* postcall is performed immediately before function return which
- * allows liberal use of volatile registers.
+ * allows liberal use of volatile registers. We branch around this
+ * in early init (eg when populating the MMU hashtable) by using an
+ * unconditional cpu feature.
*/
#define HCALL_INST_POSTCALL \
+BEGIN_FTR_SECTION; \
+ b 1f; \
+END_FTR_SECTION(0, 1); \
ld r4,STK_PARM(r3)(r1); /* validate opcode */ \
cmpldi cr7,r4,MAX_HCALL_OPCODE; \
bgt- cr7,1f; \