/*
 * Linker script for the ia64 kernel image (vmlinux).
 *
 * Each output section is placed at a virtual address but loaded at a
 * physical address AT(vaddr - LOAD_OFFSET); phys_start below is derived
 * the same way from _start.  Section order matters: unwind exit sections
 * must be discarded before anything else is pulled in, and the gate page
 * must not share a page with other kernel data.
 */
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

/* Interrupt vector table text, bracketed by start/end symbols. */
#define IVT_TEXT					\
	VMLINUX_SYMBOL(__start_ivt_text) = .;		\
	*(.text.ivt)					\
	VMLINUX_SYMBOL(__end_ivt_text) = .;

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;
PHDRS {
  code   PT_LOAD;
  percpu PT_LOAD;
  data   PT_LOAD;
  note   PT_NOTE;
  unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */
}
SECTIONS
{
  /* unwind exit sections must be discarded before the rest of the
     sections get included. */
  /DISCARD/ : {
	*(.IA_64.unwind.exit.text)
	*(.IA_64.unwind_info.exit.text)
	*(.comment)
	*(.note)
  }

  v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
  phys_start = _start - LOAD_OFFSET;

  code : { } :code
  . = KERNEL_START;

  _text = .;
  _stext = .;

  .text : AT(ADDR(.text) - LOAD_OFFSET)
    {
	IVT_TEXT
	TEXT_TEXT
	SCHED_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.gnu.linkonce.t*)
    }
  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
	{ *(.text2) }
#ifdef CONFIG_SMP
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
	{ *(.text.lock) }
#endif
  _etext = .;

  /* Read-only data */

  NOTES :code :note	/* put .notes in text and mark in PT_NOTE */
  code_continues : {} :code	/* switch back to regular program... */

  EXCEPTION_TABLE(16)

  /* MCA table */
  . = ALIGN(16);
  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
	{
	  __start___mca_table = .;
	  *(__mca_table)
	  __stop___mca_table = .;
	}

  /* Bundles patched at boot to use the correct physical stack register. */
  .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
	{
	  __start___phys_stack_reg_patchlist = .;
	  *(.data.patch.phys_stack_reg)
	  __end___phys_stack_reg_patchlist = .;
	}

  /* Global data */
  _data = .;

  /* Unwind info & table: */
  . = ALIGN(8);
  .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
	{ *(.IA_64.unwind_info*) }
  .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
	{
	  __start_unwind = .;
	  *(.IA_64.unwind*)
	  __end_unwind = .;
	} :code :unwind
  code_continues2 : {} : code

  RODATA

  /* Official procedure descriptors (function pointers on ia64). */
  .opd : AT(ADDR(.opd) - LOAD_OFFSET)
	{ *(.opd) }

  /* Initialization code and data: */

  . = ALIGN(PAGE_SIZE);
  __init_begin = .;

  INIT_TEXT_SECTION(PAGE_SIZE)
  INIT_DATA_SECTION(16)

  /* Boot-time patch lists: virtual->physical address translation ... */
  .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
	{
	  __start___vtop_patchlist = .;
	  *(.data.patch.vtop)
	  __end___vtop_patchlist = .;
	}

  /* ... register stack engine handling ... */
  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
	{
	  __start___rse_patchlist = .;
	  *(.data.patch.rse)
	  __end___rse_patchlist = .;
	}

  /* ... and McKinley erratum 9 workaround bundles. */
  .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
	{
	  __start___mckinley_e9_bundles = .;
	  *(.data.patch.mckinley_e9)
	  __end___mckinley_e9_bundles = .;
	}

#if defined(CONFIG_PARAVIRT)
  . = ALIGN(16);
  .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
	{
	  __start_paravirt_bundles = .;
	  *(.paravirt_bundles)
	  __stop_paravirt_bundles = .;
	}
  . = ALIGN(16);
  .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
	{
	  __start_paravirt_insts = .;
	  *(.paravirt_insts)
	  __stop_paravirt_insts = .;
	}
  . = ALIGN(16);
  .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
	{
	  __start_paravirt_branches = .;
	  *(.paravirt_branches)
	  __stop_paravirt_branches = .;
	}
#endif

#if defined(CONFIG_IA64_GENERIC)
  /* Machine Vector */
  . = ALIGN(16);
  .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
	{
	  machvec_start = .;
	  *(.machvec)
	  machvec_end = .;
	}
#endif

  . = ALIGN(PAGE_SIZE);
  __init_end = .;

  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
	{
	  PAGE_ALIGNED_DATA(PAGE_SIZE)
	  . = ALIGN(PAGE_SIZE);
	  __start_gate_section = .;
	  *(.data.gate)
	  __stop_gate_section = .;
#ifdef CONFIG_XEN
	  . = ALIGN(PAGE_SIZE);
	  __xen_start_gate_section = .;
	  *(.data.gate.xen)
	  __xen_stop_gate_section = .;
#endif
	}
  . = ALIGN(PAGE_SIZE);		/* make sure the gate page doesn't expose
				 * kernel data
				 */

  /* Per-cpu data: */
  . = ALIGN(PERCPU_PAGE_SIZE);
  PERCPU_VADDR(PERCPU_ADDR, :percpu)
  __phys_per_cpu_start = __per_cpu_load;
  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
						 * into percpu page size
						 */

  data : { } :data
  .data : AT(ADDR(.data) - LOAD_OFFSET)
	{
#ifdef CONFIG_SMP
	  . = ALIGN(PERCPU_PAGE_SIZE);
	  __cpu0_per_cpu = .;
	  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
#endif
	  INIT_TASK_DATA(PAGE_SIZE)
	  CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
	  READ_MOSTLY_DATA(SMP_CACHE_BYTES)
	  DATA_DATA
	  *(.data1)
	  *(.gnu.linkonce.d*)
	  CONSTRUCTORS
	}

  . = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
  .got : AT(ADDR(.got) - LOAD_OFFSET)
	{ *(.got.plt) *(.got) }
  /* gp points into the middle of the GOT so 22-bit gp-relative offsets
   * reach the whole table.  */
  __gp = ADDR(.got) + 0x200000;
  /* We want the small data sections together, so single-instruction offsets
     can access them all, and initialized data all before uninitialized, so
     we can shorten the on-disk segment size. */
  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
	{ *(.sdata) *(.sdata1) *(.srdata) }
  _edata = .;

  BSS_SECTION(0, 0, 0)

  _end = .;

  code : { } :code

  STABS_DEBUG
  DWARF_DEBUG

  /* Default discards */
  DISCARDS
}