author    Grant Grundler <grundler@parisc-linux.org>  2005-10-21 22:46:48 -0400
committer Kyle McMartin <kyle@parisc-linux.org>       2005-10-21 22:46:48 -0400
commit    413059f28e9949d9ad2d04d1070c63169798176e (patch)
tree      c4609bc151c3f88f7e8f2128e81c32356f842063 /arch/parisc
parent    3499495205a676d85fcc2f3c28e35ec9b43c47e3 (diff)
[PARISC] Replace uses of __LP64__ with CONFIG_64BIT
2.6.12-rc4-pa3 s/__LP64__/CONFIG_64BIT/ and fixup config.h usage

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
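__LP64__ is predefined by the compiler whenever it targets an ABI with 64-bit
longs and pointers, while CONFIG_64BIT is set by Kconfig when the kernel is
configured 64-bit. Testing the config symbol keeps the 32/64-bit decision with
the kernel configuration rather than the toolchain. A minimal sketch of the
pattern applied throughout the diff below (illustrative only, not lifted from
the patch):

	/* before: test the compiler's ABI macro */
	#ifdef __LP64__
		ldd	0(%r26), %r1	/* 64-bit load */
	#else
		ldw	0(%r26), %r1	/* 32-bit load */
	#endif

	/* after: test the kernel config symbol instead */
	#ifdef CONFIG_64BIT
		ldd	0(%r26), %r1
	#else
		ldw	0(%r26), %r1
	#endif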
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/kernel/entry.S          |  98
-rw-r--r--  arch/parisc/kernel/head.S           |  26
-rw-r--r--  arch/parisc/kernel/pacache.S        |  30
-rw-r--r--  arch/parisc/kernel/real2.S          |  16
-rw-r--r--  arch/parisc/kernel/smp.c            |   2
-rw-r--r--  arch/parisc/kernel/syscall.S        |  37
-rw-r--r--  arch/parisc/kernel/syscall_table.S  |   4
7 files changed, 108 insertions(+), 105 deletions(-)
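Unlike __LP64__, CONFIG_64BIT is only defined where the configuration header
is visible to the preprocessor; that is the "config.h usage" half of the
patch. pacache.S, real2.S and syscall.S gain an explicit
#include <linux/config.h>, and head.S and smp.c switch from <linux/autoconf.h>
to the <linux/config.h> wrapper. A hedged sketch of the resulting idiom for a
parisc .S file, mirroring the syscall.S hunk below:

	#include <linux/config.h>	/* make CONFIG_* symbols visible to cpp */
	#include <asm/assembly.h>

	#ifdef CONFIG_64BIT
		.level	2.0w		/* wide (64-bit) assembly */
	#else
		.level	1.1		/* narrow (32-bit) assembly */
	#endif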
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ec04e0ad77fa..0ca49710d95e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -37,7 +37,7 @@
#include <asm/unistd.h>
#include <asm/thread_info.h>
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#define CMPIB cmpib,*
#define CMPB cmpb,*
#define COND(x) *x
@@ -217,7 +217,7 @@
va = r8 /* virtual address for which the trap occurred */
spc = r24 /* space for which the trap occurred */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* itlb miss interruption handler (parisc 1.1 - 32 bit)
@@ -239,7 +239,7 @@
.macro itlb_20 code
mfctl %pcsq, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b itlb_miss_20w
#else
b itlb_miss_20
@@ -249,7 +249,7 @@
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* naitlb miss interruption handler (parisc 1.1 - 32 bit)
*
@@ -286,7 +286,7 @@
.macro naitlb_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b itlb_miss_20w
#else
b itlb_miss_20
@@ -299,7 +299,7 @@
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* dtlb miss interruption handler (parisc 1.1 - 32 bit)
*/
@@ -321,7 +321,7 @@
.macro dtlb_20 code
mfctl %isr, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b dtlb_miss_20w
#else
b dtlb_miss_20
@@ -331,7 +331,7 @@
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
.macro nadtlb_11 code
@@ -349,7 +349,7 @@
.macro nadtlb_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b nadtlb_miss_20w
#else
b nadtlb_miss_20
@@ -359,7 +359,7 @@
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* dirty bit trap interruption handler (parisc 1.1 - 32 bit)
*/
@@ -381,7 +381,7 @@
.macro dbit_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b dbit_trap_20w
#else
b dbit_trap_20
@@ -394,7 +394,7 @@
/* The following are simple 32 vs 64 bit instruction
* abstractions for the macros */
.macro EXTR reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u \reg1,32+\start,\length,\reg2
#else
extrw,u \reg1,\start,\length,\reg2
@@ -402,7 +402,7 @@
.endm
.macro DEP reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depd \reg1,32+\start,\length,\reg2
#else
depw \reg1,\start,\length,\reg2
@@ -410,7 +410,7 @@
.endm
.macro DEPI val,start,length,reg
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi \val,32+\start,\length,\reg
#else
depwi \val,\start,\length,\reg
@@ -421,7 +421,7 @@
* fault. We have to extract this and place it in the va,
* zeroing the corresponding bits in the space register */
.macro space_adjust spc,va,tmp
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u \spc,63,SPACEID_SHIFT,\tmp
depd %r0,63,SPACEID_SHIFT,\spc
depd \tmp,31,SPACEID_SHIFT,\va
@@ -479,7 +479,7 @@
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
copy \pmd,%r9
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
shld %r9,PxD_VALUE_SHIFT,\pmd
#else
shlw %r9,PxD_VALUE_SHIFT,\pmd
@@ -610,7 +610,7 @@
.macro do_alias spc,tmp,tmp1,va,pte,prot,fault
cmpib,COND(<>),n 0,\spc,\fault
ldil L%(TMPALIAS_MAP_START),\tmp
-#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
+#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
/* on LP64, ldi will sign extend into the upper 32 bits,
* which is behaviour we don't want */
depdi 0,31,32,\tmp
@@ -624,7 +624,7 @@
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.
*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u,*= \va,41,1,%r0
#else
extrw,u,= \va,9,1,%r0
@@ -691,7 +691,7 @@ fault_vector_20:
def 30
def 31
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.export fault_vector_11
@@ -764,7 +764,7 @@ __kernel_thread:
copy %r30, %r1
ldo PT_SZ_ALGN(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Yo, function pointers in wide mode are little structs... -PB */
ldd 24(%r26), %r2
STREG %r2, PT_GR27(%r1) /* Store child's %dp */
@@ -780,7 +780,7 @@ __kernel_thread:
or %r26, %r24, %r26 /* will have kernel mappings. */
ldi 1, %r25 /* stack_start, signals kernel thread */
stw %r0, -52(%r30) /* user_tid */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_fork, %r2
@@ -809,7 +809,7 @@ ret_from_kernel_thread:
LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
LDREG TASK_PT_GR25(%r1), %r26
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
LDREG TASK_PT_GR27(%r1), %r27
LDREG TASK_PT_GR22(%r1), %r22
#endif
@@ -817,7 +817,7 @@ ret_from_kernel_thread:
ble 0(%sr7, %r1)
copy %r31, %r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
loadgp /* Thread could have been in a module */
#endif
@@ -838,7 +838,7 @@ __execve:
STREG %r26, PT_GR26(%r16)
STREG %r25, PT_GR25(%r16)
STREG %r24, PT_GR24(%r16)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL sys_execve, %r2
@@ -916,7 +916,7 @@ syscall_exit_rfi:
STREG %r19,PT_IAOQ1(%r16)
LDREG PT_PSW(%r16),%r19
load32 USER_PSW_MASK,%r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
load32 USER_PSW_HI_MASK,%r20
depd %r20,31,32,%r1
#endif
@@ -960,7 +960,7 @@ intr_return:
/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
** irq_stat[] is defined using ____cacheline_aligned.
*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
shld %r1, 6, %r20
#else
shlw %r1, 5, %r20
@@ -1018,7 +1018,7 @@ intr_restore:
.import do_softirq,code
intr_do_softirq:
BL do_softirq,%r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
nop
@@ -1036,7 +1036,7 @@ intr_do_resched:
CMPIB= 0,%r20,intr_restore /* backward */
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1069,7 +1069,7 @@ intr_do_signal:
copy %r0, %r24 /* unsigned long in_syscall */
copy %r16, %r25 /* struct pt_regs *regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1093,7 +1093,7 @@ intr_extint:
mfctl %cr31,%r1
copy %r30,%r17
/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi 0,63,15,%r17
#else
depi 0,31,15,%r17
@@ -1120,7 +1120,7 @@ intr_extint:
ldil L%intr_return, %r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1164,7 +1164,7 @@ intr_save:
mfctl %cr21, %r17 /* ior */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/*
* If the interrupted code was running with W bit off (32 bit),
* clear the b bits (bits 0 & 1) in the ior.
@@ -1199,7 +1199,7 @@ skip_save_ior:
loadgp
copy %r29, %r25 /* arg1 is pt_regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1237,7 +1237,7 @@ skip_save_ior:
spc = r24 /* space for which the trap occurred */
ptp = r25 /* page directory/page table pointer */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
dtlb_miss_20w:
space_adjust spc,va,t0
@@ -1528,7 +1528,7 @@ nadtlb_probe_check:
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
itlb_miss_20w:
/*
@@ -1595,7 +1595,7 @@ itlb_miss_20:
#endif
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
dbit_trap_20w:
space_adjust spc,va,t0
@@ -1804,7 +1804,7 @@ sys_fork_wrapper:
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1854,7 +1854,7 @@ sys_clone_wrapper:
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1876,7 +1876,7 @@ sys_vfork_wrapper:
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1904,7 +1904,7 @@ sys_vfork_wrapper:
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL \execve,%r2
@@ -1930,7 +1930,7 @@ error_\execve:
sys_execve_wrapper:
execve_wrapper sys_execve
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.export sys32_execve_wrapper
.import sys32_execve
@@ -1944,7 +1944,7 @@ sys_rt_sigreturn_wrapper:
ldo TASK_REGS(%r26),%r26 /* get pt regs */
/* Don't save regs, we are going to restore them from sigcontext. */
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
BL sys_rt_sigreturn,%r2
ldo -16(%r30),%r29 /* Reference param save area */
@@ -1975,7 +1975,7 @@ sys_sigaltstack_wrapper:
ldo TASK_REGS(%r1),%r24 /* get pt regs */
LDREG TASK_PT_GR30(%r24),%r24
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
b,l do_sigaltstack,%r2
ldo -16(%r30),%r29 /* Reference param save area */
@@ -1989,7 +1989,7 @@ sys_sigaltstack_wrapper:
bv %r0(%r2)
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.export sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
/* Get the user stack pointer */
@@ -2013,7 +2013,7 @@ sys_rt_sigsuspend_wrapper:
reg_save %r24
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
b,l sys_rt_sigsuspend,%r2
ldo -16(%r30),%r29 /* Reference param save area */
@@ -2086,7 +2086,7 @@ syscall_check_bh:
ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
shld %r26, 6, %r20
#else
shlw %r26, 5, %r20
@@ -2151,7 +2151,7 @@ syscall_restore:
depi 3,31,2,%r31 /* ensure return to user mode. */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
@@ -2247,7 +2247,7 @@ syscall_do_softirq:
.import schedule,code
syscall_do_resched:
BL schedule,%r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
nop
@@ -2267,7 +2267,7 @@ syscall_do_signal:
ldi 1, %r24 /* unsigned long in_syscall */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_signal,%r2
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 2b8738576ec2..0b47afc20690 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -12,7 +12,7 @@
* Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
*/
-#include <linux/autoconf.h> /* for CONFIG_SMP */
+#include <linux/config.h> /* for CONFIG_SMP */
#include <asm/asm-offsets.h>
#include <asm/psw.h>
@@ -36,10 +36,10 @@ boot_args:
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
.export stext
.export _stext,data /* Kernel wants it this way! */
_stext:
@@ -76,7 +76,7 @@ $bss_loop:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
@@ -99,7 +99,7 @@ $bss_loop:
stw %r3,0(%r4)
ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -170,7 +170,7 @@ common_stext:
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
tophys_r1 %sp
/* Save the rfi target address */
@@ -233,7 +233,7 @@ stext_pdc_ret:
* following short sequence of instructions can determine this
* (without being illegal on a PA1.1 machine).
*/
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
@@ -246,7 +246,7 @@ stext_pdc_ret:
$is_pa20:
.level LEVEL /* restore 1.1 || 2.0w */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
load32 PA(fault_vector_20),%r10
$install_iva:
@@ -284,7 +284,7 @@ aligned_rfi:
.import smp_init_current_idle_task,data
.import smp_callin,code
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
smp_callin_rtn:
.proc
.callinfo
@@ -292,7 +292,7 @@ smp_callin_rtn:
nop
nop
.procend
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
@@ -327,7 +327,7 @@ smp_slave_stext:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Setup PDCE_PROC entry */
copy %arg0,%r3
#else
@@ -344,7 +344,7 @@ smp_slave_stext:
.procend
#endif /* CONFIG_SMP */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.data
.align 4
@@ -354,4 +354,4 @@ smp_slave_stext:
.size $global$,4
$global$:
.word 0
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 71ade44c4618..4a7d9c8903f4 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -26,7 +26,7 @@
* can be used.
*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#define ADDIB addib,*
#define CMPB cmpb,*
#define ANDCM andcm,*
@@ -40,6 +40,8 @@
.level 2.0
#endif
+#include <linux/config.h>
+
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
@@ -294,7 +296,7 @@ copy_user_page_asm:
.callinfo NO_CALLS
.entry
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
* GCC probably can do this just as well.
@@ -454,7 +456,7 @@ copy_user_page_asm:
sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
@@ -541,7 +543,7 @@ __clear_user_page_asm:
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
#endif
@@ -558,7 +560,7 @@ __clear_user_page_asm:
pdtlb 0(%r28)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
/* PREFETCH (Write) has not (yet) been proven to help here */
@@ -583,7 +585,7 @@ __clear_user_page_asm:
ADDIB> -1, %r1, 1b
ldo 128(%r28), %r28
-#else /* ! __LP64 */
+#else /* ! CONFIG_64BIT */
ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
@@ -606,7 +608,7 @@ __clear_user_page_asm:
stw %r0, 60(%r28)
ADDIB> -1, %r1, 1b
ldo 64(%r28), %r28
-#endif /* __LP64 */
+#endif /* CONFIG_64BIT */
bv %r0(%r2)
nop
@@ -624,7 +626,7 @@ flush_kernel_dcache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -668,7 +670,7 @@ flush_user_dcache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1,63-PAGE_SHIFT,1, %r25
#else
depwi,z 1,31-PAGE_SHIFT,1, %r25
@@ -712,7 +714,7 @@ flush_user_icache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -757,7 +759,7 @@ purge_kernel_dcache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -805,7 +807,7 @@ flush_alias_page:
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,12, %r28 /* Clear any offset bits */
@@ -822,7 +824,7 @@ flush_alias_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r29
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r29
@@ -933,7 +935,7 @@ flush_kernel_icache_page:
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 2310fc1b06a9..085bff7c569d 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -7,6 +7,8 @@
* Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
*
*/
+#include <linux/config.h>
+
#include <asm/psw.h>
#include <asm/assembly.h>
@@ -20,7 +22,7 @@ real32_stack:
real64_stack:
.block 8192
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
# define REG_SZ 8
#else
# define REG_SZ 4
@@ -50,7 +52,7 @@ save_cr_end:
real32_call_asm:
STREG %rp, -RP_OFFSET(%sp) /* save RP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
callee_save
ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
STREG %r27, -1*REG_SZ(%sp)
@@ -77,7 +79,7 @@ real32_call_asm:
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
rsm PSW_SM_W, %r0 /* go narrow */
#endif
@@ -85,7 +87,7 @@ real32_call_asm:
bv 0(%r31)
nop
ric_ret:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ssm PSW_SM_W, %r0 /* go wide */
#endif
/* restore CRs before going virtual in case we page fault */
@@ -97,7 +99,7 @@ ric_ret:
tovirt_r1 %sp
LDREG -REG_SZ(%sp), %sp /* restore SP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
LDREG -1*REG_SZ(%sp), %r27
LDREG -2*REG_SZ(%sp), %r29
ldo -2*REG_SZ(%sp), %sp
@@ -212,7 +214,7 @@ rfi_r2v_1:
bv 0(%r2)
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
@@ -290,7 +292,7 @@ pc_in_user_space:
** comparing function pointers.
*/
__canonicalize_funcptr_for_compare:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index bcc7e83f5142..5db3be4e2704 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -18,7 +18,7 @@
*/
#undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */
-#include <linux/autoconf.h>
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/spinlock.h>
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 8c7a7185cd3b..b29b76b42bb7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -6,6 +6,7 @@
* thanks to Philipp Rumpf, Mike Shaver and various others
* sorry about the wall, puffin..
*/
+#include <linux/config.h> /* for CONFIG_SMP */
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
@@ -22,15 +23,13 @@
*/
#define KILL_INSN break 0,0
-#include <linux/config.h> /* for CONFIG_SMP */
-
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.level 2.0w
#else
.level 1.1
#endif
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.macro fixup_branch,lbl
b \lbl
.endm
@@ -103,7 +102,7 @@ linux_gateway_entry:
mfsp %sr7,%r1 /* save user sr7 */
mtsp %r1,%sr3 /* and store it in sr3 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
* since we don't support wide userland processes. We could
* also save the current SM other than in r0 and restore it on
@@ -155,7 +154,7 @@ linux_gateway_entry:
STREG %r19, TASK_PT_GR19(%r1)
LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
#if 0
xor %r19,%r2,%r2 /* clear bottom bit */
@@ -186,7 +185,7 @@ linux_gateway_entry:
loadgp
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
copy %r19,%r2 /* W bit back to r2 */
#else
@@ -205,7 +204,7 @@ linux_gateway_entry:
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
@@ -321,7 +320,7 @@ tracesys_next:
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
LDREG TASK_PT_GR23(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
LDREG TASK_PT_GR22(%r1), %r22
LDREG TASK_PT_GR21(%r1), %r21
ldo -16(%r30),%r29 /* Reference param save area */
@@ -350,7 +349,7 @@ tracesys_next:
tracesys_exit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl syscall_trace, %r2
@@ -371,7 +370,7 @@ tracesys_exit:
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG 0(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl syscall_trace, %r2
@@ -404,7 +403,7 @@ lws_start:
gate .+8, %r0
depi 3, 31, 2, %r31 /* Ensure we return to userspace */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* FIXME: If we are a 64-bit kernel just
* turn this on unconditionally.
*/
@@ -440,7 +439,7 @@ lws_exit_nosys:
/* Fall through: Return to userspace */
lws_exit:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
@@ -486,7 +485,7 @@ lws_exit:
/* ELF64 Process entry path */
lws_compare_and_swap64:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b,n lws_compare_and_swap
#else
/* If we are not a 64-bit kernel, then we don't
@@ -497,7 +496,7 @@ lws_compare_and_swap64:
/* ELF32 Process entry path */
lws_compare_and_swap32:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Clip all the input registers */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
@@ -608,7 +607,7 @@ cas_action:
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
.section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Pad the address calculation */
.word 0,(2b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
@@ -619,7 +618,7 @@ cas_action:
.previous
.section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Pad the address calculation */
.word 0,(1b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
@@ -638,7 +637,7 @@ end_linux_gateway_page:
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* FIXME: The code will always be on the gateway page
and thus it will be on the first 4k, the
assembler seems to think that the final
@@ -666,7 +665,7 @@ lws_table:
sys_call_table:
#include "syscall_table.S"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.align 4096
.export sys_call_table64
.Lsys_call_table64:
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 065ab809659d..32cbc0489324 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -35,7 +35,7 @@
#undef ENTRY_UHOH
#undef ENTRY_COMP
#undef ENTRY_OURS
-#if defined(__LP64__) && !defined(SYSCALL_TABLE_64BIT)
+#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
* narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
* implementation is required on wide palinux. Use ENTRY_COMP where
@@ -46,7 +46,7 @@
#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
#define ENTRY_OURS(_name_) .dword parisc_##_name_
#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
-#elif defined(__LP64__) && defined(SYSCALL_TABLE_64BIT)
+#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys_##_name_
#define ENTRY_UHOH(_name_) .dword sys_##_name_