linux/arch/ia64/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
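/* On a 64-bit architecture, jiffies can simply alias the full 64-bit jiffies_64 counter. */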
jiffies = jiffies_64;

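/*
 * ELF program headers; the output sections below are assigned to these
 * segments with the ":code", ":percpu", ":data", ":note" and ":unwind"
 * annotations.
 */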
PHDRS {
        code   PT_LOAD;
        percpu PT_LOAD;
        data   PT_LOAD;
        note   PT_NOTE;
        unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}

SECTIONS {
        /*
         * unwind exit sections must be discarded before
         * the rest of the sections get included.
         */
        /DISCARD/ : {
                *(.IA_64.unwind.exit.text)
                *(.IA_64.unwind_info.exit.text)
                *(.comment)
                *(.note)
        }

        v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
        phys_start = _start - LOAD_OFFSET;

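        /*
         * Empty output sections such as "code : { } :code" exist only to
         * switch program headers: ld keeps assigning sections to the most
         * recently named segment.
         */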
        code : {
        } :code
        . = KERNEL_START;

        _text = .;
        _stext = .;

        .text : AT(ADDR(.text) - LOAD_OFFSET) {
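                /* The interrupt vector table (ivt.S) goes first. */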
                __start_ivt_text = .;
                *(.text..ivt)
                __end_ivt_text = .;
                TEXT_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                *(.gnu.linkonce.t*)
        }

        .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
                *(.text2)
        }

#ifdef CONFIG_SMP
        .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
                *(.text..lock)
        }
#endif
        _etext = .;

        /*
         * Read-only data
         */
        NOTES :code :note       /* put .notes in text and mark in PT_NOTE  */
        code_continues : {
        } : code               /* switch back to regular program...  */

        EXCEPTION_TABLE(16)

        /* MCA table */
        . = ALIGN(16);
        __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
                __start___mca_table = .;
                *(__mca_table)
                __stop___mca_table = .;
        }

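        /*
         * The .data..patch.* sections (here and below) record the addresses
         * of instruction bundles that are rewritten at boot time by
         * arch/ia64/kernel/patch.c.
         */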
        .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
                __start___phys_stack_reg_patchlist = .;
                *(.data..patch.phys_stack_reg)
                __end___phys_stack_reg_patchlist = .;
        }

        /*
         * Global data
         */
        _data = .;

        /* Unwind info & table: */
        . = ALIGN(8);
        .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
                *(.IA_64.unwind_info*)
        }
        .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
                __start_unwind = .;
                *(.IA_64.unwind*)
                __end_unwind = .;
        } :code :unwind
        code_continues2 : {
        } : code

        RODATA

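        /*
         * .opd holds the IA-64 function descriptors (entry address plus gp
         * value) that function pointers point to.
         */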
        .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
                __start_opd = .;
                *(.opd)
                __end_opd = .;
        }

        /*
         * Initialization code and data:
         */
        . = ALIGN(PAGE_SIZE);
        __init_begin = .;

        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)

        .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
                __start___vtop_patchlist = .;
                *(.data..patch.vtop)
                __end___vtop_patchlist = .;
        }

        .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
                __start___rse_patchlist = .;
                *(.data..patch.rse)
                __end___rse_patchlist = .;
        }

        .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
                __start___mckinley_e9_bundles = .;
                *(.data..patch.mckinley_e9)
                __end___mckinley_e9_bundles = .;
        }

#if defined(CONFIG_IA64_GENERIC)
        /* Machine Vector */
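        /*
         * A generic kernel links in every machine vector; the matching one
         * is selected by name at boot.
         */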
        . = ALIGN(16);
        .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
                machvec_start = .;
                *(.machvec)
                machvec_end = .;
        }
#endif

#ifdef  CONFIG_SMP
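        /*
         * Reserve a per-cpu page for cpu 0, used during early boot before
         * the real per-cpu areas are set up.
         */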
        . = ALIGN(PERCPU_PAGE_SIZE);
        __cpu0_per_cpu = .;
        . = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
#endif

        . = ALIGN(PAGE_SIZE);
        __init_end = .;

        .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
                PAGE_ALIGNED_DATA(PAGE_SIZE)
                . = ALIGN(PAGE_SIZE);
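                /*
                 * .data..gate holds the gate page (gate.S), which is
                 * mapped into user space.
                 */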
                __start_gate_section = .;
                *(.data..gate)
                __stop_gate_section = .;
        }
        /*
         * make sure the gate page doesn't expose
         * kernel data
         */
        . = ALIGN(PAGE_SIZE);

        /* Per-cpu data: */
        . = ALIGN(PERCPU_PAGE_SIZE);
        PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
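        /*
         * PERCPU_VADDR links the per-cpu data at the fixed virtual address
         * PERCPU_ADDR while loading it within the kernel image;
         * __per_cpu_load is the load address it defines.
         */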
        __phys_per_cpu_start = __per_cpu_load;
        /*
         * ensure percpu data fits
         * into percpu page size
         */
        . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;

        data : {
        } :data
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                _sdata  =  .;
                INIT_TASK_DATA(PAGE_SIZE)
                CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
                READ_MOSTLY_DATA(SMP_CACHE_BYTES)
                DATA_DATA
                *(.data1)
                *(.gnu.linkonce.d*)
                CONSTRUCTORS
        }

        BUG_TABLE

        . = ALIGN(16);  /* gp must be 16-byte aligned for exc. table */
        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                *(.got.plt)
                *(.got)
        }
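        /*
         * Bias gp by 2MB so the signed 22-bit gp-relative immediates reach
         * a 4MB window of short data around it.
         */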
        __gp = ADDR(.got) + 0x200000;

        /*
         * We want the small data sections together,
         * so single-instruction offsets can access
         * them all, and initialized data all before
         * uninitialized, so we can shorten the
         * on-disk segment size.
         */
        .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
                *(.sdata)
                *(.sdata1)
                *(.srdata)
        }
        _edata  =  .;

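        /* BSS_SECTION(sbss_align, bss_align, stop_align): no extra alignment requested */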
        BSS_SECTION(0, 0, 0)

        _end = .;

        code : {
        } :code

        STABS_DEBUG
        DWARF_DEBUG

        /* Default discards */
        DISCARDS
}