/*
 * Linker script for the ia64 kernel image (vmlinux).
 *
 * Lays out the kernel's PT_LOAD segments (code, per-cpu, data), the
 * PT_NOTE segment, and the ia64-specific PT_IA_64_UNWIND segment, and
 * defines the symbols (_text, _etext, _edata, _end, __gp, ...) that the
 * rest of the kernel relies on.  Shared section macros (TEXT_TEXT,
 * RODATA, BSS_SECTION, ...) come from <asm-generic/vmlinux.lds.h>.
 */
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;

PHDRS {
	code   PT_LOAD;
	percpu PT_LOAD;
	data   PT_LOAD;
	note   PT_NOTE;
	unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */
}

SECTIONS {
	/*
	 * unwind exit sections must be discarded before
	 * the rest of the sections get included.
	 */
	/DISCARD/ : {
		*(.IA_64.unwind.exit.text)
		*(.IA_64.unwind_info.exit.text)
		*(.comment)
		*(.note)
	}

	v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
	phys_start = _start - LOAD_OFFSET;

	code : {
	} :code
	. = KERNEL_START;

	_text = .;
	_stext = .;

	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		/* The interrupt vector table must come first. */
		__start_ivt_text = .;
		*(.text..ivt)
		__end_ivt_text = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.gnu.linkonce.t*)
	}

	.text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
		*(.text2)
	}

#ifdef CONFIG_SMP
	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
		*(.text..lock)
	}
#endif
	_etext = .;

	/*
	 * Read-only data
	 */
	NOTES :code :note	/* put .notes in text and mark in PT_NOTE */
	code_continues : {
	} : code		/* switch back to regular program... */

	EXCEPTION_TABLE(16)

	/* MCA table */
	. = ALIGN(16);
	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
		__start___mca_table = .;
		*(__mca_table)
		__stop___mca_table = .;
	}

	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
		__start___phys_stack_reg_patchlist = .;
		*(.data..patch.phys_stack_reg)
		__end___phys_stack_reg_patchlist = .;
	}

	/*
	 * Global data
	 */
	_data = .;

	/* Unwind info & table: */
	. = ALIGN(8);
	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
		*(.IA_64.unwind_info*)
	}
	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
		__start_unwind = .;
		*(.IA_64.unwind*)
		__end_unwind = .;
	} :code :unwind
	code_continues2 : {
	} : code

	RODATA

	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}

	/*
	 * Initialization code and data:
	 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;

	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)

	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
		__start___vtop_patchlist = .;
		*(.data..patch.vtop)
		__end___vtop_patchlist = .;
	}

	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
		__start___rse_patchlist = .;
		*(.data..patch.rse)
		__end___rse_patchlist = .;
	}

	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
		__start___mckinley_e9_bundles = .;
		*(.data..patch.mckinley_e9)
		__end___mckinley_e9_bundles = .;
	}

#if defined(CONFIG_IA64_GENERIC)
	/* Machine Vector */
	. = ALIGN(16);
	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
		machvec_start = .;
		*(.machvec)
		machvec_end = .;
	}
#endif

#ifdef CONFIG_SMP
	. = ALIGN(PERCPU_PAGE_SIZE);
	__cpu0_per_cpu = .;
	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
#endif

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
		. = ALIGN(PAGE_SIZE);
		__start_gate_section = .;
		*(.data..gate)
		__stop_gate_section = .;
	}
	/*
	 * make sure the gate page doesn't expose
	 * kernel data
	 */
	. = ALIGN(PAGE_SIZE);

	/* Per-cpu data: */
	. = ALIGN(PERCPU_PAGE_SIZE);
	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
	__phys_per_cpu_start = __per_cpu_load;
	/*
	 * ensure percpu data fits
	 * into percpu page size
	 */
	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;

	data : {
	} :data
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		_sdata = .;
		INIT_TASK_DATA(PAGE_SIZE)
		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
		DATA_DATA
		*(.data1)
		*(.gnu.linkonce.d*)
		CONSTRUCTORS
	}

	. = ALIGN(16);		/* gp must be 16-byte aligned for exc. table */
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		*(.got.plt)
		*(.got)
	}
	__gp = ADDR(.got) + 0x200000;

	/*
	 * We want the small data sections together,
	 * so single-instruction offsets can access
	 * them all, and initialized data all before
	 * uninitialized, so we can shorten the
	 * on-disk segment size.
	 */
	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
		*(.sdata)
		*(.sdata1)
		*(.srdata)
	}
	_edata = .;

	BSS_SECTION(0, 0, 0)

	_end = .;

	code : {
	} :code

	STABS_DEBUG
	DWARF_DEBUG

	/* Default discards */
	DISCARDS
}