linux/arch/ia64/kernel/vmlinux.lds.S

#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;

PHDRS {
        code   PT_LOAD;
        percpu PT_LOAD;
        data   PT_LOAD;
        note   PT_NOTE;
        unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}
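
/*
 * The PHDRS command declares the ELF program headers explicitly: three
 * PT_LOAD segments (code, percpu, data), a PT_NOTE header, and the
 * ia64 unwind header.  0x70000001 is the numeric value of
 * PT_IA_64_UNWIND; output sections opt into a header by appending
 * ":name" to their statement (see ":code :unwind" below).
 */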

SECTIONS {
        /*
         * unwind exit sections must be discarded before
         * the rest of the sections get included.
         */
        /DISCARD/ : {
                *(.IA_64.unwind.exit.text)
                *(.IA_64.unwind_info.exit.text)
                *(.comment)
                *(.note)
        }
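
        /*
         * ld hands each input section to the first statement whose
         * pattern matches it, so naming these sections in /DISCARD/
         * ahead of the *(.IA_64.unwind*) wildcards further down is
         * what actually keeps the exit-path unwind data out of the
         * image.
         */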

        v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
        phys_start = _start - LOAD_OFFSET;
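
        /*
         * The kernel is linked at a virtual address but loaded at a
         * physical one; LOAD_OFFSET is the difference.  Subtracting it
         * here, and in every AT() clause below, yields physical load
         * addresses, so ENTRY(phys_start) above gives the boot loader
         * a physical entry point.
         */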

        code : {
        } :code

        . = KERNEL_START;
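
        /*
         * The empty "code" output section exists only for its :code
         * suffix: it attaches the sections that follow to the code
         * PT_LOAD header (ld keeps assigning sections to the same
         * headers until told otherwise).  The location counter then
         * jumps to KERNEL_START, the kernel's linked virtual base.
         */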

        _text = .;
        _stext = .;

        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                __start_ivt_text = .;
                *(.text..ivt)
                __end_ivt_text = .;
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                *(.gnu.linkonce.t*)
        }
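
        /*
         * .text..ivt is the interruption vector table, which the
         * hardware requires to be 32KB-aligned; placing it first, at
         * KERNEL_START, satisfies that.  TEXT_TEXT and the
         * SCHED/LOCK/KPROBES_TEXT macros from
         * asm-generic/vmlinux.lds.h gather the corresponding kernel
         * text input sections.
         */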

        .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
                *(.text2)
        }

#ifdef CONFIG_SMP
        .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
                *(.text..lock)
        }
#endif

        _etext = .;

        /*
         * Read-only data
         */
        NOTES :code :note       /* put .notes in text and mark in PT_NOTE  */
        code_continues : {
        } : code                /* switch back to regular program...  */

        EXCEPTION_TABLE(16)
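
        /*
         * EXCEPTION_TABLE(16), from asm-generic/vmlinux.lds.h, emits
         * the 16-byte-aligned __ex_table section bracketed by
         * __start___ex_table/__stop___ex_table for the fault-fixup
         * lookup.
         */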

        /* MCA table */
        . = ALIGN(16);
        __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
                __start___mca_table = .;
                *(__mca_table)
                __stop___mca_table = .;
        }
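
        /*
         * The Machine Check Abort recovery code walks
         * __start___mca_table..__stop___mca_table to decide whether a
         * machine check hit a range it knows how to recover from.
         */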

        .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
                __start___phys_stack_reg_patchlist = .;
                *(.data..patch.phys_stack_reg)
                __end___phys_stack_reg_patchlist = .;
        }
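
        /*
         * The .data..patch.* sections hold lists of instruction
         * addresses, not instructions: arch/ia64/kernel/patch.c walks
         * each list at boot and rewrites the bundles it points at
         * (here, judging by the name, code that deals with the
         * physical stacked register file).
         */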

        /*
         * Global data
         */
        _data = .;

        /* Unwind info & table: */
        . = ALIGN(8);
        .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
                *(.IA_64.unwind_info*)
        }
        .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
                __start_unwind = .;
                *(.IA_64.unwind*)
                __end_unwind = .;
        } :code :unwind
        code_continues2 : {
        } : code
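
        /*
         * ":code :unwind" keeps the unwind table in the code segment
         * and also points the PT_IA_64_UNWIND program header at it, so
         * the unwinder can find the table from the ELF headers alone.
         * Program-header assignments are sticky, hence the empty
         * code_continues2 section to move subsequent sections back to
         * the plain code segment.
         */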

        RODATA

        .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
                *(.opd)
        }
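
        /*
         * .opd holds the "official procedure descriptors": an ia64
         * function pointer names an (entry address, gp) pair rather
         * than the code itself, and those pairs live here.
         */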

        /*
         * Initialization code and data:
         */
        . = ALIGN(PAGE_SIZE);
        __init_begin = .;

        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
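
        /*
         * Everything between __init_begin and __init_end is freed
         * once boot is complete, which is why both marks are
         * page-aligned.
         */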

        .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
                __start___vtop_patchlist = .;
                *(.data..patch.vtop)
                __end___vtop_patchlist = .;
        }

        .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
                __start___rse_patchlist = .;
                *(.data..patch.rse)
                __end___rse_patchlist = .;
        }

        .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
                __start___mckinley_e9_bundles = .;
                *(.data..patch.mckinley_e9)
                __end___mckinley_e9_bundles = .;
        }
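
        /*
         * Three more boot-time patch lists, consumed like the
         * phys_stack_reg list above (e.g. by ia64_patch_vtop() and
         * ia64_patch_mckinley_e9()): virtual-to-physical addressing
         * switches, a register stack engine fixup, and the McKinley
         * erratum 9 workaround bundles.  They sit inside the init
         * range because they are not needed after boot.
         */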

#if defined(CONFIG_PARAVIRT)
        . = ALIGN(16);
        .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
                __start_paravirt_bundles = .;
                *(.paravirt_bundles)
                __stop_paravirt_bundles = .;
        }
        . = ALIGN(16);
        .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
                __start_paravirt_insts = .;
                *(.paravirt_insts)
                __stop_paravirt_insts = .;
        }
        . = ALIGN(16);
        .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
                __start_paravirt_branches = .;
                *(.paravirt_branches)
                __stop_paravirt_branches = .;
        }
#endif
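
        /*
         * More patch lists, these for the ia64 paravirt_ops support:
         * bundles, instructions, and branches that get rewritten at
         * boot when running on a hypervisor such as Xen.
         */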

#if defined(CONFIG_IA64_GENERIC)
        /* Machine Vector */
        . = ALIGN(16);
        .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
                machvec_start = .;
                *(.machvec)
                machvec_end = .;
        }
#endif
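
        /*
         * A generic (CONFIG_IA64_GENERIC) kernel links in every
         * machine vector; at boot the one matching the running
         * platform is looked up by name in machvec_start..machvec_end.
         */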

#ifdef  CONFIG_SMP
        . = ALIGN(PERCPU_PAGE_SIZE);
        __cpu0_per_cpu = .;
        . = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
#endif
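
        /*
         * cpu0 runs on this statically reserved per-cpu page early in
         * boot.  It lies inside the init range, which suggests it is
         * meant to be reclaimed along with the rest of init memory
         * once the real per-cpu areas are set up.
         */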

        . = ALIGN(PAGE_SIZE);
        __init_end = .;

        .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
                PAGE_ALIGNED_DATA(PAGE_SIZE)
                . = ALIGN(PAGE_SIZE);
                __start_gate_section = .;
                *(.data..gate)
                __stop_gate_section = .;
        }
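
        /*
         * The gate section becomes the gate page that is mapped into
         * every user process (signal trampoline, fast syscall entry),
         * so it must not share a page with anything else.
         */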
        /*
         * make sure the gate page doesn't expose
         * kernel data
         */
        . = ALIGN(PAGE_SIZE);

        /* Per-cpu data: */
        . = ALIGN(PERCPU_PAGE_SIZE);
        PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
        __phys_per_cpu_start = __per_cpu_load;
        /*
         * ensure percpu data fits
         * into percpu page size
         */
        . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
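
        /*
         * PERCPU_VADDR links .data..percpu at the fixed virtual
         * address PERCPU_ADDR while loading it here.  The assignment
         * above doubles as the size check: if the per-cpu data ever
         * outgrew PERCPU_PAGE_SIZE, it would move the location
         * counter backwards and ld would refuse to link.
         */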

        data : {
        } :data
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                _sdata  =  .;
                INIT_TASK_DATA(PAGE_SIZE)
                CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
                READ_MOSTLY_DATA(SMP_CACHE_BYTES)
                DATA_DATA
                *(.data1)
                *(.gnu.linkonce.d*)
                CONSTRUCTORS
        }
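
        /*
         * The empty "data" section switches the remaining sections
         * over to the data PT_LOAD segment; .data itself collects the
         * usual initialized-data pieces via the asm-generic macros,
         * starting with the page-aligned init task.
         */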

        . = ALIGN(16);  /* gp must be 16-byte aligned for exc. table */
        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                *(.got.plt)
                *(.got)
        }
        __gp = ADDR(.got) + 0x200000;
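
        /*
         * gp-relative addressing has a 22-bit signed displacement,
         * i.e. +/-2MB around gp.  Placing __gp 2MB past the start of
         * .got centers that window, so the GOT and the small-data
         * sections below are all reachable with a single addl.
         */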

        /*
         * We want the small data sections together,
         * so single-instruction offsets can access
         * them all, and initialized data all before
         * uninitialized, so we can shorten the
         * on-disk segment size.
         */
        .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
                *(.sdata)
                *(.sdata1)
                *(.srdata)
        }

        _edata  =  .;

        BSS_SECTION(0, 0, 0)
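
        /*
         * BSS_SECTION(0, 0, 0) pulls in .sbss and .bss, bracketed by
         * __bss_start/__bss_stop; the zero arguments ask for no
         * alignment beyond the defaults.
         */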

        _end = .;

        code : {
        } :code

        STABS_DEBUG
        DWARF_DEBUG
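
        /*
         * STABS_DEBUG and DWARF_DEBUG keep the debug sections
         * non-allocated, and the empty "code" section above appears
         * to be there so that any remaining allocatable output lands
         * in the code segment.  DISCARDS below drops exit-time code
         * and data plus other sections the running kernel never needs.
         */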

        /* Default discards */
        DISCARDS
}