/* linux/arch/ia64/kernel/vmlinux.lds.S */

#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm-generic/vmlinux.lds.h>
#define IVT_TEXT \
VMLINUX_SYMBOL(__start_ivt_text) = .; \
*(.text.ivt) \
VMLINUX_SYMBOL(__end_ivt_text) = .;
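/*
 * A sketch (not part of this script) of how the bracketing symbols
 * defined by IVT_TEXT are consumed elsewhere in the kernel, e.g. by
 * the unwinder to recognize addresses inside the interrupt vector
 * table:
 *
 *	extern char __start_ivt_text[], __end_ivt_text[];
 *
 *	static inline int
 *	in_ivt_text (unsigned long addr)
 *	{
 *		return (addr >= (unsigned long) __start_ivt_text
 *			&& addr < (unsigned long) __end_ivt_text);
 *	}
 */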
OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;
PHDRS {
code PT_LOAD;
percpu PT_LOAD;
data PT_LOAD;
note PT_NOTE;
unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}
SECTIONS
{
/* Sections to be discarded */
/DISCARD/ : {
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
*(.IA_64.unwind.exit.text)
*(.IA_64.unwind_info.exit.text)
}
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
phys_start = _start - LOAD_OFFSET;
code : { } :code
. = KERNEL_START;
_text = .;
_stext = .;
.text : AT(ADDR(.text) - LOAD_OFFSET)
{
IVT_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.linkonce.t*)
}
.text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
{ *(.text.head) }
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
.text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
{ *(.text.lock) }
#endif
_etext = .;
/* Read-only data */
NOTES :code :note /* put .notes in text and mark in PT_NOTE */
code_continues : {} :code /* switch back to regular program... */
/* Exception table */
. = ALIGN(16);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
{
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
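/*
 * Consumer sketch (condensed from arch/ia64/mm/extable.c of this era;
 * details assumed): the entries between __start___ex_table and
 * __stop___ex_table are sorted and binary-searched on the faulting IP,
 * with each field a 32-bit self-relative offset:
 *
 *	struct exception_table_entry { int addr; int cont; };
 *
 *	const struct exception_table_entry *
 *	search_extable (const struct exception_table_entry *first,
 *			const struct exception_table_entry *last,
 *			unsigned long ip)
 *	{
 *		const struct exception_table_entry *mid;
 *		unsigned long mid_ip;
 *
 *		while (first <= last) {
 *			mid = &first[(last - first) / 2];
 *			mid_ip = (u64) &mid->addr + mid->addr;
 *			if (mid_ip == ip)
 *				return mid;	// fixup found
 *			else if (mid_ip < ip)
 *				first = mid + 1;
 *			else
 *				last = mid - 1;
 *		}
 *		return NULL;
 *	}
 */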
/* MCA table */
. = ALIGN(16);
__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
{
__start___mca_table = .;
*(__mca_table)
__stop___mca_table = .;
}
/*
 * Patch list for the number of physical stacked registers (see the
 * sketch after this section).  ia64_phys_stacked_size_p8 used to be a
 * per-cpu variable even though every ia64 CPU so far has 96 stacked
 * physical registers, so reading it burned an extra cacheline access
 * on every syscall and kernel-exit path.  It is now handled with the
 * instruction-patching utility: cpu_init() obtains the register count
 * from PAL and patches two instructions in the kernel exit code, which
 * cannot execute before the patching is done.  This removes a memory
 * reference from the exit path (and a per-cpu access with psr.ic=0),
 * and the value 96 encoded in the image is a better safety net than
 * the zero the old per-cpu variable started with.
 */
.data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
{
__start___phys_stack_reg_patchlist = .;
*(.data.patch.phys_stack_reg)
__end___phys_stack_reg_patchlist = .;
}
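/*
 * Boot-time patching sketch (names from arch/ia64/kernel/patch.c; the
 * immediate-encoding details are elided): each patch-list entry is a
 * 32-bit self-relative offset to an instruction whose immediate is
 * rewritten with the register count reported by PAL:
 *
 *	s32 *offp = (s32 *) __start___phys_stack_reg_patchlist;
 *	s32 *end  = (s32 *) __end___phys_stack_reg_patchlist;
 *	u64 ip, mask, imm;	// mask/imm computed from the count; elided
 *
 *	while (offp < end) {
 *		ip = (u64) offp + *offp;	// self-relative target
 *		ia64_patch(ip, mask, imm);	// rewrite the immediate
 *		ia64_fc((void *) ip);		// flush the cache line
 *		++offp;
 *	}
 */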
/* Global data */
_data = .;
/* Unwind info & table: */
. = ALIGN(8);
.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
{ *(.IA_64.unwind_info*) }
.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
{
__start_unwind = .;
*(.IA_64.unwind*)
__end_unwind = .;
} :code :unwind
code_continues2 : {} : code
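/*
 * Usage sketch: arch/ia64/kernel/unwind.c registers this range as the
 * kernel's unwind table at boot, roughly (argument details assumed):
 *
 *	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START,
 *			  (unsigned long) __gp,
 *			  __start_unwind, __end_unwind);
 */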
RODATA
.opd : AT(ADDR(.opd) - LOAD_OFFSET)
{ *(.opd) }
/* Initialization code and data: */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
{
_sinittext = .;
INIT_TEXT
_einittext = .;
}
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
{ INIT_DATA }
#ifdef CONFIG_BLK_DEV_INITRD
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
{
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
}
#endif
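/*
 * Consumer sketch (simplified from init/initramfs.c of this era): the
 * bracketing symbols hand the built-in cpio archive to the unpack code
 * during boot:
 *
 *	char *err = unpack_to_rootfs(__initramfs_start,
 *				     __initramfs_end - __initramfs_start, 0);
 */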
. = ALIGN(16);
.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
{
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
{
__initcall_start = .;
INITCALLS
__initcall_end = .;
}
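/*
 * Consumer sketch (simplified from init/main.c): INITCALLS expands to
 * the level-ordered arrays of initcall function pointers, and boot
 * simply walks the whole range:
 *
 *	initcall_t *call;
 *
 *	for (call = __initcall_start; call < __initcall_end; call++)
 *		do_one_initcall(*call);
 */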
.data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
{
__start___vtop_patchlist = .;
*(.data.patch.vtop)
__end___vtop_patchlist = .;
}
/*
 * Patch list for the RSE-issue workaround (see the sketch after this
 * section).  An application that violates the architectural rules on
 * operation dependencies while the Register Stack Engine (RSE) is in a
 * specific state may take an illegal operation fault with invalid RSE
 * state, which can cascade into repeated illegal operation faults in
 * the OS interruption handlers and an eventual OS stack overflow.
 * Handlers that switch to the kernel backing store therefore run an
 * RSE_WORKAROUND sequence (inserted into SAVE_MIN_WITH_COVER and
 * SAVE_MIN_WITH_COVER_R19) that checks for invalid RSE state.  The
 * sequence hard-codes 96 stacked physical registers; this patch list
 * exists so the workaround can be disabled when that assumption does
 * not hold.  The same change moved the start of the RBS up to a mod32
 * boundary to avoid corner cases, and moved dispatch_illegal_op_fault
 * to the end of the ivt after it outgrew its old spot when built with
 * CONFIG_VIRT_CPU_ACCOUNTING=y.
 */
.data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
{
__start___rse_patchlist = .;
*(.data.patch.rse)
__end___rse_patchlist = .;
}
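/*
 * Decision sketch (modeled on the cpu_init() logic added with this
 * workaround; exact names assumed): when PAL reports a stacked
 * physical register count other than the hard-coded 96, the
 * workaround sequence is patched out:
 *
 *	if (num_phys_stacked != 96) {
 *		ia64_patch_rse((u64) __start___rse_patchlist,
 *			       (u64) __end___rse_patchlist);
 *		num_phys_stacked = 96;
 *	}
 */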
.data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
*(.data.patch.mckinley_e9)
__end___mckinley_e9_bundles = .;
}
#if defined(CONFIG_IA64_GENERIC)
/* Machine Vector */
. = ALIGN(16);
.machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
{
machvec_start = .;
*(.machvec)
machvec_end = .;
}
#endif
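/*
 * Consumer sketch (after arch/ia64/kernel/machvec.c): on generic
 * kernels, boot scans this table for the machine vector whose name
 * matches the platform name:
 *
 *	static struct ia64_machine_vector * __init
 *	lookup_machvec (const char *name)
 *	{
 *		struct ia64_machine_vector *mv;
 *
 *		for (mv = machvec_start; mv < machvec_end; ++mv)
 *			if (strcmp(mv->name, name) == 0)
 *				return mv;
 *		return NULL;
 *	}
 */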
. = ALIGN(8);
__con_initcall_start = .;
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }
__con_initcall_end = .;
__security_initcall_start = .;
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
{ *(.security_initcall.init) }
__security_initcall_end = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* The initial task and kernel stack */
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
{ *(.data.init_task) }
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
{ *(__special_page_section)
__start_gate_section = .;
*(.data.gate)
__stop_gate_section = .;
#ifdef CONFIG_XEN
. = ALIGN(PAGE_SIZE);
__xen_start_gate_section = .;
*(.data.gate.xen)
__xen_stop_gate_section = .;
#endif
}
. = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
* kernel data
*/
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
{ *(.data.read_mostly) }
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
{ *(.data.cacheline_aligned) }
/* Per-cpu data: */
percpu : { } :percpu
. = ALIGN(PERCPU_PAGE_SIZE);
__phys_per_cpu_start = .;
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
}
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
* into percpu page size
*/
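/*
 * Addressing sketch (per old include/asm-ia64/percpu.h; the macro
 * shape is an assumption): linking .data.percpu at PERCPU_ADDR means
 * each CPU sees its own copy through a pinned TLB mapping at that
 * fixed virtual address, while another CPU's copy is reached by adding
 * that CPU's offset:
 *
 *	#define per_cpu(var, cpu) \
 *		(*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 */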
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
{
#ifdef CONFIG_SMP
. = ALIGN(PERCPU_PAGE_SIZE);
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
DATA_DATA
*(.data1)
*(.gnu.linkonce.d*)
CONSTRUCTORS
}
. = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
.got : AT(ADDR(.got) - LOAD_OFFSET)
{ *(.got.plt) *(.got) }
__gp = ADDR(.got) + 0x200000;
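/*
 * The 0x200000 bias is half the +/-2MB reach of the 22-bit signed
 * immediate in the IA-64 "addl" instruction, so placing __gp 2MB past
 * the start of .got lets a single gp-relative addl cover roughly 4MB
 * of GOT plus small data.
 */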
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
.sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) *(.sdata1) *(.srdata) }
_edata = .;
__bss_start = .;
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
{ *(.sbss) *(.scommon) }
.bss : AT(ADDR(.bss) - LOAD_OFFSET)
{ *(.bss) *(COMMON) }
__bss_stop = .;
_end = .;
code : { } :code
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* These must appear regardless of . */
/DISCARD/ : { *(.comment) }
/DISCARD/ : { *(.note) }
}