author	Sam Ravnborg <sam@ravnborg.org>	2007-10-18 00:04:25 -0700
committer	Kyle McMartin <kyle@shortfin.cabal.ca>	2007-10-18 00:59:12 -0700
commit	be1b3d8cb141c0705d61af2e2372d72ff16c7d04 (patch)
tree	37ba423c1929ebd9c102390aac9c9398dde8fb45 /arch/parisc
parent	e9a03990d90ac5006f37f3ff7a6b87966d208697 (diff)
[PARISC] Beautify parisc vmlinux.lds.S
Introduce a consistent layout of vmlinux. The same layout has been introduced for most architectures.

At the same time, move a few label definitions inside the curly brackets so they are assigned the correct starting address. Before, an ld-inserted alignment would have caused the label to point before the actual start of the section.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
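As an illustration of the label problem the message describes, here are two alternative linker-script fragments, a minimal sketch with hypothetical section and label names (.foo, __start_foo) not taken from this patch:

    /* Old pattern: the assignment runs before ld aligns the section,
     * so the label can land in the alignment padding, before the
     * section's real start address. */
    __start_foo = .;
    .foo ALIGN(16) : { *(.foo) }
    __stop_foo = .;

    /* New pattern: an assignment inside the braces is evaluated once
     * the section's start address is fixed, so the label always
     * equals ADDR(.foo). */
    .foo ALIGN(16) : {
            __start_foo = .;
            *(.foo)
            __stop_foo = .;
    }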
Diffstat (limited to 'arch/parisc')
-rw-r--r--	arch/parisc/kernel/vmlinux.lds.S | 319
1 file changed, 185 insertions(+), 134 deletions(-)
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index ee7a16eb6fdd..d6951bb5a9c9 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -46,168 +46,219 @@ jiffies = jiffies_64;
#endif
SECTIONS
{
- . = KERNEL_BINARY_TEXT_START;
-
- _text = .; /* Text and read-only data */
- .text ALIGN(16) : {
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- *(.text.do_softirq)
- *(.text.sys_exit)
- *(.text.do_sigaltstack)
- *(.text.do_fork)
- *(.text.*)
- *(.fixup)
- *(.lock.text) /* out-of-line lock text */
- *(.gnu.warning)
+ . = KERNEL_BINARY_TEXT_START;
+ _text = .; /* Text and read-only data */
+ .text ALIGN(16) : {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.text.do_softirq)
+ *(.text.sys_exit)
+ *(.text.do_sigaltstack)
+ *(.text.do_fork)
+ *(.text.*)
+ *(.fixup)
+ *(.lock.text) /* out-of-line lock text */
+ *(.gnu.warning)
} = 0
- _etext = .; /* End of text section */
+ /* End of text section */
+ _etext = .;
- RODATA
-
- BUG_TABLE
-
- /* writeable */
- . = ALIGN(ASM_PAGE_SIZE); /* Make sure this is page aligned so
- that we can properly leave these
- as writable */
- data_start = .;
-
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
+ RODATA
+ BUG_TABLE
+ /* writeable */
+ /* Make sure this is page aligned so
+ * that we can properly leave these
+ * as writable
+ */
+ . = ALIGN(ASM_PAGE_SIZE);
+ data_start = .;
+ . = ALIGN(16);
+ /* Exception table */
+ __ex_table : {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
- NOTES
+ NOTES
- __start___unwind = .; /* unwind info */
- .PARISC.unwind : { *(.PARISC.unwind) }
- __stop___unwind = .;
+ /* unwind info */
+ .PARISC.unwind : {
+ __start___unwind = .;
+ *(.PARISC.unwind)
+ __stop___unwind = .;
+ }
- /* rarely changed data like cpu maps */
- . = ALIGN(16);
- .data.read_mostly : { *(.data.read_mostly) }
+ /* rarely changed data like cpu maps */
+ . = ALIGN(16);
+ .data.read_mostly : {
+ *(.data.read_mostly)
+ }
- . = ALIGN(L1_CACHE_BYTES);
- .data : { /* Data */
- DATA_DATA
- CONSTRUCTORS
+ . = ALIGN(L1_CACHE_BYTES);
+ /* Data */
+ .data : {
+ DATA_DATA
+ CONSTRUCTORS
}
- . = ALIGN(L1_CACHE_BYTES);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+ . = ALIGN(L1_CACHE_BYTES);
+ .data.cacheline_aligned : {
+ *(.data.cacheline_aligned)
+ }
- /* PA-RISC locks requires 16-byte alignment */
- . = ALIGN(16);
- .data.lock_aligned : { *(.data.lock_aligned) }
+ /* PA-RISC locks requires 16-byte alignment */
+ . = ALIGN(16);
+ .data.lock_aligned : {
+ *(.data.lock_aligned)
+ }
- . = ALIGN(ASM_PAGE_SIZE);
- /* nosave data is really only used for software suspend...it's here
- * just in case we ever implement it */
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(ASM_PAGE_SIZE);
- __nosave_end = .;
+ /* nosave data is really only used for software suspend...it's here
+ * just in case we ever implement it
+ */
+ . = ALIGN(ASM_PAGE_SIZE);
+ __nosave_begin = .;
+ .data_nosave : {
+ *(.data.nosave)
+ }
+ . = ALIGN(ASM_PAGE_SIZE);
+ __nosave_end = .;
- _edata = .; /* End of data section */
+ /* End of data section */
+ _edata = .;
- __bss_start = .; /* BSS */
- /* page table entries need to be PAGE_SIZE aligned */
- . = ALIGN(ASM_PAGE_SIZE);
- .data.vmpages : {
- *(.data.vm0.pmd)
- *(.data.vm0.pgd)
- *(.data.vm0.pte)
+ /* BSS */
+ __bss_start = .;
+ /* page table entries need to be PAGE_SIZE aligned */
+ . = ALIGN(ASM_PAGE_SIZE);
+ .data.vmpages : {
+ *(.data.vm0.pmd)
+ *(.data.vm0.pgd)
+ *(.data.vm0.pte)
+ }
+ .bss : {
+ *(.bss)
+ *(COMMON)
}
- .bss : { *(.bss) *(COMMON) }
- __bss_stop = .;
+ __bss_stop = .;
- /* assembler code expects init_task to be 16k aligned */
- . = ALIGN(16384); /* init_task */
- .data.init_task : { *(.data.init_task) }
+ /* assembler code expects init_task to be 16k aligned */
+ . = ALIGN(16384);
+ /* init_task */
+ .data.init_task : {
+ *(.data.init_task)
+ }
- /* The interrupt stack is currently partially coded, but not yet
- * implemented */
- . = ALIGN(16384);
- init_istack : { *(init_istack) }
+ /* The interrupt stack is currently partially coded, but not yet
+ * implemented
+ */
+ . = ALIGN(16384);
+ init_istack : {
+ *(init_istack)
+ }
#ifdef CONFIG_64BIT
- . = ALIGN(16); /* Linkage tables */
- .opd : { *(.opd) } PROVIDE (__gp = .);
- .plt : { *(.plt) }
- .dlt : { *(.dlt) }
+ . = ALIGN(16);
+ /* Linkage tables */
+ .opd : {
+ *(.opd)
+ } PROVIDE (__gp = .);
+ .plt : {
+ *(.plt)
+ }
+ .dlt : {
+ *(.dlt)
+ }
#endif
- /* reserve space for interrupt stack by aligning __init* to 16k */
- . = ALIGN(16384);
- __init_begin = .;
- .init.text : {
- _sinittext = .;
- *(.init.text)
- _einittext = .;
- }
- .init.data : { *(.init.data) }
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
- SECURITY_INIT
- /* alternate instruction replacement. This is a mechanism x86 uses
- * to detect the CPU type and replace generic instruction sequences
- * with CPU specific ones. We don't currently do this in PA, but
- * it seems like a good idea... */
- . = ALIGN(4);
- __alt_instructions = .;
- .altinstructions : { *(.altinstructions) }
- __alt_instructions_end = .;
- .altinstr_replacement : { *(.altinstr_replacement) }
- /* .exit.text is discard at runtime, not link time, to deal with references
- from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
+ /* reserve space for interrupt stack by aligning __init* to 16k */
+ . = ALIGN(16384);
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : {
+ *(.init.data)
+ }
+ . = ALIGN(16);
+ .init.setup : {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
+ .initcall.init : {
+ __initcall_start = .;
+ INITCALLS
+ __initcall_end = .;
+ }
+ .con_initcall.init : {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
+ SECURITY_INIT
+
+ /* alternate instruction replacement. This is a mechanism x86 uses
+ * to detect the CPU type and replace generic instruction sequences
+ * with CPU specific ones. We don't currently do this in PA, but
+ * it seems like a good idea...
+ */
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
+ /* .exit.text is discard at runtime, not link time, to deal with references
+ * from .altinstructions and .eh_frame
+ */
+ .exit.text : {
+ *(.exit.text)
+ }
+ .exit.data : {
+ *(.exit.data)
+ }
#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(ASM_PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
+ . = ALIGN(ASM_PAGE_SIZE);
+ .init.ramfs : {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ }
#endif
- PERCPU(ASM_PAGE_SIZE)
- . = ALIGN(ASM_PAGE_SIZE);
- __init_end = .;
- /* freed after init ends here */
-
- _end = . ;
-
+ PERCPU(ASM_PAGE_SIZE)
+ . = ALIGN(ASM_PAGE_SIZE);
+ __init_end = .;
+ /* freed after init ends here */
+ _end = . ;
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.exitcall.exit)
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exitcall.exit)
#ifdef CONFIG_64BIT
- /* temporary hack until binutils is fixed to not emit these
- for static binaries */
- *(.interp)
- *(.dynsym)
- *(.dynstr)
- *(.dynamic)
- *(.hash)
- *(.gnu.hash)
+ /* temporary hack until binutils is fixed to not emit these
+ * for static binaries
+ */
+ *(.interp)
+ *(.dynsym)
+ *(.dynstr)
+ *(.dynamic)
+ *(.hash)
+ *(.gnu.hash)
#endif
}
- STABS_DEBUG
- .note 0 : { *(.note) }
-
+ STABS_DEBUG
+ .note 0 : { *(.note) }
}
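To make the alignment effect concrete, a worked example with assumed values (the address 0x100a and the name .bar are purely illustrative, not from this patch):

    /* Suppose the location counter is at 0x100a here. */
    __start_bar = .;             /* label = 0x100a, i.e. in the padding */
    .bar ALIGN(16) : { *(.bar) } /* ld pads 6 bytes; ADDR(.bar) = 0x1010 */

    /* With the assignment moved inside the braces: */
    .bar ALIGN(16) : {
            __start_bar = .;     /* label = 0x1010 = ADDR(.bar) */
            *(.bar)
    }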