linux/include/asm-generic/vmlinux.lds.h
   1/*
   2 * Helper macros to support writing architecture specific
   3 * linker scripts.
   4 *
   5 * A minimal linker script has the following content:
   6 * [This is a sample; architectures may have special requirements]
   7 *
   8 * OUTPUT_FORMAT(...)
   9 * OUTPUT_ARCH(...)
  10 * ENTRY(...)
  11 * SECTIONS
  12 * {
  13 *      . = START;
  14 *      __init_begin = .;
  15 *      HEAD_TEXT_SECTION
  16 *      INIT_TEXT_SECTION(PAGE_SIZE)
  17 *      INIT_DATA_SECTION(...)
  18 *      PERCPU(PAGE_SIZE)
  19 *      __init_end = .;
  20 *
  21 *      _stext = .;
  22 *      TEXT_SECTION = 0
  23 *      _etext = .;
  24 *
  25 *      _sdata = .;
  26 *      RO_DATA_SECTION(PAGE_SIZE)
  27 *      RW_DATA_SECTION(...)
  28 *      _edata = .;
  29 *
  30 *      EXCEPTION_TABLE(...)
  31 *      NOTES
  32 *
  33 *      BSS_SECTION(0, 0, 0)
  34 *      _end = .;
  35 *
  36 *      STABS_DEBUG
  37 *      DWARF_DEBUG
  38 *
  39 *      DISCARDS                // must be the last
  40 * }
  41 *
  42 * [__init_begin, __init_end] is the init section that may be freed after init
  43 * [_stext, _etext] is the text section
  44 * [_sdata, _edata] is the data section
  45 *
  46 * Some of the included output sections have their own set of constants.
  47 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
  48 *               [__nosave_begin, __nosave_end] for the nosave data
  49 */
  50
  51#ifndef LOAD_OFFSET
  52#define LOAD_OFFSET 0
  53#endif
  54
  55#ifndef SYMBOL_PREFIX
  56#define VMLINUX_SYMBOL(sym) sym
  57#else
  58#define PASTE2(x,y) x##y
  59#define PASTE(x,y) PASTE2(x,y)
  60#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
  61#endif
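
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * on a toolchain that prefixes C symbols with an underscore, an arch can
 * define SYMBOL_PREFIX as the bare token _ so that
 *
 *      VMLINUX_SYMBOL(__initcall_start) = .;
 *
 * expands to ___initcall_start = .; and the linker-script symbol matches
 * the name the compiler emits for the corresponding C declaration.
 */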
  62
  63/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
  64#define ALIGN_FUNCTION()  . = ALIGN(8)
  65
  66/*
  67 * Align to a 32 byte boundary equal to the
  68 * alignment gcc 4.5 uses for a struct
  69 */
  70#define STRUCT_ALIGNMENT 32
  71#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
  72
  73/* The actual configuration determines whether the init/exit sections
  74 * are handled as text/data or whether they can be discarded (which
  75 * often happens at runtime)
  76 */
  77#ifdef CONFIG_HOTPLUG
  78#define DEV_KEEP(sec)    *(.dev##sec)
  79#define DEV_DISCARD(sec)
  80#else
  81#define DEV_KEEP(sec)
  82#define DEV_DISCARD(sec) *(.dev##sec)
  83#endif
  84
  85#ifdef CONFIG_HOTPLUG_CPU
  86#define CPU_KEEP(sec)    *(.cpu##sec)
  87#define CPU_DISCARD(sec)
  88#else
  89#define CPU_KEEP(sec)
  90#define CPU_DISCARD(sec) *(.cpu##sec)
  91#endif
  92
  93#if defined(CONFIG_MEMORY_HOTPLUG)
  94#define MEM_KEEP(sec)    *(.mem##sec)
  95#define MEM_DISCARD(sec)
  96#else
  97#define MEM_KEEP(sec)
  98#define MEM_DISCARD(sec) *(.mem##sec)
  99#endif
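
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * with CONFIG_HOTPLUG enabled, DEV_KEEP(init.data) expands to
 * *(.devinit.data) and DEV_DISCARD(init.data) to nothing, so device-init
 * data stays in the permanent .data section (see DATA_DATA below).
 * Without CONFIG_HOTPLUG the roles swap and the same input sections are
 * emitted via INIT_DATA into .init.data, which can be freed after boot.
 * CPU_* and MEM_* behave the same way for their respective configs.
 */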
 100
 101#ifdef CONFIG_FTRACE_MCOUNT_RECORD
 102#define MCOUNT_REC()    . = ALIGN(8);                           \
 103                        VMLINUX_SYMBOL(__start_mcount_loc) = .; \
 104                        *(__mcount_loc)                         \
 105                        VMLINUX_SYMBOL(__stop_mcount_loc) = .;
 106#else
 107#define MCOUNT_REC()
 108#endif
 109
 110#ifdef CONFIG_TRACE_BRANCH_PROFILING
 111#define LIKELY_PROFILE()        VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
 112                                *(_ftrace_annotated_branch)                           \
 113                                VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
 114#else
 115#define LIKELY_PROFILE()
 116#endif
 117
 118#ifdef CONFIG_PROFILE_ALL_BRANCHES
 119#define BRANCH_PROFILE()        VMLINUX_SYMBOL(__start_branch_profile) = .;   \
 120                                *(_ftrace_branch)                             \
 121                                VMLINUX_SYMBOL(__stop_branch_profile) = .;
 122#else
 123#define BRANCH_PROFILE()
 124#endif
 125
 126#ifdef CONFIG_EVENT_TRACING
 127#define FTRACE_EVENTS() . = ALIGN(8);                                   \
 128                        VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
 129                        *(_ftrace_events)                               \
 130                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
 131#else
 132#define FTRACE_EVENTS()
 133#endif
 134
 135#ifdef CONFIG_TRACING
 136#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
 137                         *(__trace_printk_fmt) /* trace_printk() format pointers */ \
 138                         VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
 139#else
 140#define TRACE_PRINTKS()
 141#endif
 142
 143#ifdef CONFIG_FTRACE_SYSCALLS
 144#define TRACE_SYSCALLS() . = ALIGN(8);                                  \
 145                         VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
 146                         *(__syscalls_metadata)                         \
 147                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 148#else
 149#define TRACE_SYSCALLS()
 150#endif
 151
 152
 153#define KERNEL_DTB()                                                    \
 154        STRUCT_ALIGN();                                                 \
 155        VMLINUX_SYMBOL(__dtb_start) = .;                                \
 156        *(.dtb.init.rodata)                                             \
 157        VMLINUX_SYMBOL(__dtb_end) = .;
 158
 159/* .data section */
 160#define DATA_DATA                                                       \
 161        *(.data)                                                        \
 162        *(.ref.data)                                                    \
 163        *(.data..shared_aligned) /* percpu related */                   \
 164        DEV_KEEP(init.data)                                             \
 165        DEV_KEEP(exit.data)                                             \
 166        CPU_KEEP(init.data)                                             \
 167        CPU_KEEP(exit.data)                                             \
 168        MEM_KEEP(init.data)                                             \
 169        MEM_KEEP(exit.data)                                             \
 170        STRUCT_ALIGN();                                                 \
 171        *(__tracepoints)                                                \
 172        /* implement dynamic printk debug */                            \
 173        . = ALIGN(8);                                                   \
 174        VMLINUX_SYMBOL(__start___verbose) = .;                          \
 175        *(__verbose)                                                    \
 176        VMLINUX_SYMBOL(__stop___verbose) = .;                           \
 177        LIKELY_PROFILE()                                                \
 178        BRANCH_PROFILE()                                                \
 179        TRACE_PRINTKS()
 180
 181/*
 182 * Data section helpers
 183 */
 184#define NOSAVE_DATA                                                     \
 185        . = ALIGN(PAGE_SIZE);                                           \
 186        VMLINUX_SYMBOL(__nosave_begin) = .;                             \
 187        *(.data..nosave)                                                \
 188        . = ALIGN(PAGE_SIZE);                                           \
 189        VMLINUX_SYMBOL(__nosave_end) = .;
 190
 191#define PAGE_ALIGNED_DATA(page_align)                                   \
 192        . = ALIGN(page_align);                                          \
 193        *(.data..page_aligned)
 194
 195#define READ_MOSTLY_DATA(align)                                         \
 196        . = ALIGN(align);                                               \
 197        *(.data..read_mostly)                                           \
 198        . = ALIGN(align);
 199
 200#define CACHELINE_ALIGNED_DATA(align)                                   \
 201        . = ALIGN(align);                                               \
 202        *(.data..cacheline_aligned)
 203
 204#define INIT_TASK_DATA(align)                                           \
 205        . = ALIGN(align);                                               \
 206        *(.data..init_task)
 207
 208/*
 209 * Read only Data
 210 */
 211#define RO_DATA_SECTION(align)                                          \
 212        . = ALIGN((align));                                             \
 213        .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
 214                VMLINUX_SYMBOL(__start_rodata) = .;                     \
 215                *(.rodata) *(.rodata.*)                                 \
 216                *(__vermagic)           /* Kernel version magic */      \
 217                . = ALIGN(8);                                           \
 218                VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
 219                *(__tracepoints_ptrs)   /* Tracepoints: pointer array */\
 220                VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;          \
 221                *(__markers_strings)    /* Markers: strings */          \
 222                *(__tracepoints_strings)/* Tracepoints: strings */      \
 223        }                                                               \
 224                                                                        \
 225        .rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {          \
 226                *(.rodata1)                                             \
 227        }                                                               \
 228                                                                        \
 229        BUG_TABLE                                                       \
 230                                                                        \
 231        JUMP_TABLE                                                      \
 232                                                                        \
 233        /* PCI quirks */                                                \
 234        .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
 235                VMLINUX_SYMBOL(__start_pci_fixups_early) = .;           \
 236                *(.pci_fixup_early)                                     \
 237                VMLINUX_SYMBOL(__end_pci_fixups_early) = .;             \
 238                VMLINUX_SYMBOL(__start_pci_fixups_header) = .;          \
 239                *(.pci_fixup_header)                                    \
 240                VMLINUX_SYMBOL(__end_pci_fixups_header) = .;            \
 241                VMLINUX_SYMBOL(__start_pci_fixups_final) = .;           \
 242                *(.pci_fixup_final)                                     \
 243                VMLINUX_SYMBOL(__end_pci_fixups_final) = .;             \
 244                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;          \
 245                *(.pci_fixup_enable)                                    \
 246                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;            \
 247                VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;          \
 248                *(.pci_fixup_resume)                                    \
 249                VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;            \
 250                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;    \
 251                *(.pci_fixup_resume_early)                              \
 252                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;      \
 253                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;         \
 254                *(.pci_fixup_suspend)                                   \
 255                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;           \
 256        }                                                               \
 257                                                                        \
 258        /* Built-in firmware blobs */                                   \
 259        .builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {      \
 260                VMLINUX_SYMBOL(__start_builtin_fw) = .;                 \
 261                *(.builtin_fw)                                          \
 262                VMLINUX_SYMBOL(__end_builtin_fw) = .;                   \
 263        }                                                               \
 264                                                                        \
 265        /* RapidIO route ops */                                         \
 266        .rio_ops        : AT(ADDR(.rio_ops) - LOAD_OFFSET) {            \
 267                VMLINUX_SYMBOL(__start_rio_switch_ops) = .;             \
 268                *(.rio_switch_ops)                                      \
 269                VMLINUX_SYMBOL(__end_rio_switch_ops) = .;               \
 270        }                                                               \
 271                                                                        \
 272        TRACEDATA                                                       \
 273                                                                        \
 274        /* Kernel symbol table: Normal symbols */                       \
 275        __ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {         \
 276                VMLINUX_SYMBOL(__start___ksymtab) = .;                  \
 277                *(__ksymtab)                                            \
 278                VMLINUX_SYMBOL(__stop___ksymtab) = .;                   \
 279        }                                                               \
 280                                                                        \
 281        /* Kernel symbol table: GPL-only symbols */                     \
 282        __ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {     \
 283                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;              \
 284                *(__ksymtab_gpl)                                        \
 285                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;               \
 286        }                                                               \
 287                                                                        \
 288        /* Kernel symbol table: Normal unused symbols */                \
 289        __ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {  \
 290                VMLINUX_SYMBOL(__start___ksymtab_unused) = .;           \
 291                *(__ksymtab_unused)                                     \
 292                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;            \
 293        }                                                               \
 294                                                                        \
 295        /* Kernel symbol table: GPL-only unused symbols */              \
 296        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
 297                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;       \
 298                *(__ksymtab_unused_gpl)                                 \
 299                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;        \
 300        }                                                               \
 301                                                                        \
 302        /* Kernel symbol table: GPL-future-only symbols */              \
 303        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
 304                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;       \
 305                *(__ksymtab_gpl_future)                                 \
 306                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;        \
 307        }                                                               \
 308                                                                        \
 309        /* Kernel symbol table: Normal symbols */                       \
 310        __kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {         \
 311                VMLINUX_SYMBOL(__start___kcrctab) = .;                  \
 312                *(__kcrctab)                                            \
 313                VMLINUX_SYMBOL(__stop___kcrctab) = .;                   \
 314        }                                                               \
 315                                                                        \
 316        /* Kernel symbol table: GPL-only symbols */                     \
 317        __kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {     \
 318                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;              \
 319                *(__kcrctab_gpl)                                        \
 320                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;               \
 321        }                                                               \
 322                                                                        \
 323        /* Kernel symbol table: Normal unused symbols */                \
 324        __kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {  \
 325                VMLINUX_SYMBOL(__start___kcrctab_unused) = .;           \
 326                *(__kcrctab_unused)                                     \
 327                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;            \
 328        }                                                               \
 329                                                                        \
 330        /* Kernel symbol table: GPL-only unused symbols */              \
 331        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
 332                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;       \
 333                *(__kcrctab_unused_gpl)                                 \
 334                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;        \
 335        }                                                               \
 336                                                                        \
 337        /* Kernel symbol table: GPL-future-only symbols */              \
 338        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
 339                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;       \
 340                *(__kcrctab_gpl_future)                                 \
 341                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;        \
 342        }                                                               \
 343                                                                        \
 344        /* Kernel symbol table: strings */                              \
 345        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
 346                *(__ksymtab_strings)                                    \
 347        }                                                               \
 348                                                                        \
 349        /* __*init sections */                                          \
 350        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {         \
 351                *(.ref.rodata)                                          \
 352                DEV_KEEP(init.rodata)                                   \
 353                DEV_KEEP(exit.rodata)                                   \
 354                CPU_KEEP(init.rodata)                                   \
 355                CPU_KEEP(exit.rodata)                                   \
 356                MEM_KEEP(init.rodata)                                   \
 357                MEM_KEEP(exit.rodata)                                   \
 358        }                                                               \
 359                                                                        \
 360        /* Built-in module parameters. */                               \
 361        __param : AT(ADDR(__param) - LOAD_OFFSET) {                     \
 362                VMLINUX_SYMBOL(__start___param) = .;                    \
 363                *(__param)                                              \
 364                VMLINUX_SYMBOL(__stop___param) = .;                     \
 365        }                                                               \
 366                                                                        \
 367        /* Built-in module versions. */                                 \
 368        __modver : AT(ADDR(__modver) - LOAD_OFFSET) {                   \
 369                VMLINUX_SYMBOL(__start___modver) = .;                   \
 370                *(__modver)                                             \
 371                VMLINUX_SYMBOL(__stop___modver) = .;                    \
 372                . = ALIGN((align));                                     \
 373                VMLINUX_SYMBOL(__end_rodata) = .;                       \
 374        }                                                               \
 375        . = ALIGN((align));
 376
 377/* RODATA & RO_DATA are provided for backward compatibility.
 378 * All archs are supposed to use RO_DATA(). */
 379#define RODATA          RO_DATA_SECTION(4096)
 380#define RO_DATA(align)  RO_DATA_SECTION(align)
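
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * the sample script at the top of this file emits the read-only data at
 * the start of the data area:
 *
 *      _sdata = .;
 *      RO_DATA_SECTION(PAGE_SIZE)
 *
 * Passing PAGE_SIZE aligns the start and end of the region to page
 * boundaries, so the region can, for example, be write-protected at
 * runtime.
 */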
 381
 382#define SECURITY_INIT                                                   \
 383        .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
 384                VMLINUX_SYMBOL(__security_initcall_start) = .;          \
 385                *(.security_initcall.init)                              \
 386                VMLINUX_SYMBOL(__security_initcall_end) = .;            \
 387        }
 388
 389/* .text section. Map to function alignment to avoid address changes
 390 * during the second ld pass when generating System.map */
 391#define TEXT_TEXT                                                       \
 392                ALIGN_FUNCTION();                                       \
 393                *(.text.hot)                                            \
 394                *(.text)                                                \
 395                *(.ref.text)                                            \
 396        DEV_KEEP(init.text)                                             \
 397        DEV_KEEP(exit.text)                                             \
 398        CPU_KEEP(init.text)                                             \
 399        CPU_KEEP(exit.text)                                             \
 400        MEM_KEEP(init.text)                                             \
 401        MEM_KEEP(exit.text)                                             \
 402                *(.text.unlikely)
 403
 404
 405/* sched.text is aligned to function alignment to ensure we have the same
 406 * address even at the second ld pass when generating System.map */
 407#define SCHED_TEXT                                                      \
 408                ALIGN_FUNCTION();                                       \
 409                VMLINUX_SYMBOL(__sched_text_start) = .;                 \
 410                *(.sched.text)                                          \
 411                VMLINUX_SYMBOL(__sched_text_end) = .;
 412
 413/* spinlock.text is aligned to function alignment to ensure we have the same
 414 * address even at the second ld pass when generating System.map */
 415#define LOCK_TEXT                                                       \
 416                ALIGN_FUNCTION();                                       \
 417                VMLINUX_SYMBOL(__lock_text_start) = .;                  \
 418                *(.spinlock.text)                                       \
 419                VMLINUX_SYMBOL(__lock_text_end) = .;
 420
 421#define KPROBES_TEXT                                                    \
 422                ALIGN_FUNCTION();                                       \
 423                VMLINUX_SYMBOL(__kprobes_text_start) = .;               \
 424                *(.kprobes.text)                                        \
 425                VMLINUX_SYMBOL(__kprobes_text_end) = .;
 426
 427#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 428#define IRQENTRY_TEXT                                                   \
 429                ALIGN_FUNCTION();                                       \
 430                VMLINUX_SYMBOL(__irqentry_text_start) = .;              \
 431                *(.irqentry.text)                                       \
 432                VMLINUX_SYMBOL(__irqentry_text_end) = .;
 433#else
 434#define IRQENTRY_TEXT
 435#endif
 436
 437/* Section used for early init (in .S files) */
 438#define HEAD_TEXT  *(.head.text)
 439
 440#define HEAD_TEXT_SECTION                                                       \
 441        .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {               \
 442                HEAD_TEXT                                               \
 443        }
 444
 445/*
 446 * Exception table
 447 */
 448#define EXCEPTION_TABLE(align)                                          \
 449        . = ALIGN(align);                                               \
 450        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {               \
 451                VMLINUX_SYMBOL(__start___ex_table) = .;                 \
 452                *(__ex_table)                                           \
 453                VMLINUX_SYMBOL(__stop___ex_table) = .;                  \
 454        }
 455
 456/*
 457 * Init task
 458 */
 459#define INIT_TASK_DATA_SECTION(align)                                   \
 460        . = ALIGN(align);                                               \
 461        .data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {  \
 462                INIT_TASK_DATA(align)                                   \
 463        }
 464
 465#ifdef CONFIG_CONSTRUCTORS
 466#define KERNEL_CTORS()  . = ALIGN(8);                      \
 467                        VMLINUX_SYMBOL(__ctors_start) = .; \
 468                        *(.ctors)                          \
 469                        VMLINUX_SYMBOL(__ctors_end) = .;
 470#else
 471#define KERNEL_CTORS()
 472#endif
 473
 474/* init and exit section handling */
 475#define INIT_DATA                                                       \
 476        *(.init.data)                                                   \
 477        DEV_DISCARD(init.data)                                          \
 478        CPU_DISCARD(init.data)                                          \
 479        MEM_DISCARD(init.data)                                          \
 480        KERNEL_CTORS()                                                  \
 481        *(.init.rodata)                                                 \
 482        MCOUNT_REC()                                                    \
 483        FTRACE_EVENTS()                                                 \
 484        TRACE_SYSCALLS()                                                \
 485        DEV_DISCARD(init.rodata)                                        \
 486        CPU_DISCARD(init.rodata)                                        \
 487        MEM_DISCARD(init.rodata)                                        \
 488        KERNEL_DTB()
 489
 490#define INIT_TEXT                                                       \
 491        *(.init.text)                                                   \
 492        DEV_DISCARD(init.text)                                          \
 493        CPU_DISCARD(init.text)                                          \
 494        MEM_DISCARD(init.text)
 495
 496#define EXIT_DATA                                                       \
 497        *(.exit.data)                                                   \
 498        DEV_DISCARD(exit.data)                                          \
 499        DEV_DISCARD(exit.rodata)                                        \
 500        CPU_DISCARD(exit.data)                                          \
 501        CPU_DISCARD(exit.rodata)                                        \
 502        MEM_DISCARD(exit.data)                                          \
 503        MEM_DISCARD(exit.rodata)
 504
 505#define EXIT_TEXT                                                       \
 506        *(.exit.text)                                                   \
 507        DEV_DISCARD(exit.text)                                          \
 508        CPU_DISCARD(exit.text)                                          \
 509        MEM_DISCARD(exit.text)
 510
 511#define EXIT_CALL                                                       \
 512        *(.exitcall.exit)
 513
 514/*
 515 * bss (Block Started by Symbol) - uninitialized data
 516 * zeroed during startup
 517 */
 518#define SBSS(sbss_align)                                                \
 519        . = ALIGN(sbss_align);                                          \
 520        .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {                         \
 521                *(.sbss)                                                \
 522                *(.scommon)                                             \
 523        }
 524
 525#define BSS(bss_align)                                                  \
 526        . = ALIGN(bss_align);                                           \
 527        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {                           \
 528                *(.bss..page_aligned)                                   \
 529                *(.dynbss)                                              \
 530                *(.bss)                                                 \
 531                *(COMMON)                                               \
 532        }
 533
 534/*
 535 * DWARF debug sections.
 536 * Symbols in the DWARF debugging sections are relative to
 537 * the beginning of the section so we begin them at 0.
 538 */
 539#define DWARF_DEBUG                                                     \
 540                /* DWARF 1 */                                           \
 541                .debug          0 : { *(.debug) }                       \
 542                .line           0 : { *(.line) }                        \
 543                /* GNU DWARF 1 extensions */                            \
 544                .debug_srcinfo  0 : { *(.debug_srcinfo) }               \
 545                .debug_sfnames  0 : { *(.debug_sfnames) }               \
 546                /* DWARF 1.1 and DWARF 2 */                             \
 547                .debug_aranges  0 : { *(.debug_aranges) }               \
 548                .debug_pubnames 0 : { *(.debug_pubnames) }              \
 549                /* DWARF 2 */                                           \
 550                .debug_info     0 : { *(.debug_info                     \
 551                                .gnu.linkonce.wi.*) }                   \
 552                .debug_abbrev   0 : { *(.debug_abbrev) }                \
 553                .debug_line     0 : { *(.debug_line) }                  \
 554                .debug_frame    0 : { *(.debug_frame) }                 \
 555                .debug_str      0 : { *(.debug_str) }                   \
 556                .debug_loc      0 : { *(.debug_loc) }                   \
 557                .debug_macinfo  0 : { *(.debug_macinfo) }               \
 558                /* SGI/MIPS DWARF 2 extensions */                       \
 559                .debug_weaknames 0 : { *(.debug_weaknames) }            \
 560                .debug_funcnames 0 : { *(.debug_funcnames) }            \
 561                .debug_typenames 0 : { *(.debug_typenames) }            \
 562                .debug_varnames  0 : { *(.debug_varnames) }             \
 563
 564                /* Stabs debugging sections.  */
 565#define STABS_DEBUG                                                     \
 566                .stab 0 : { *(.stab) }                                  \
 567                .stabstr 0 : { *(.stabstr) }                            \
 568                .stab.excl 0 : { *(.stab.excl) }                        \
 569                .stab.exclstr 0 : { *(.stab.exclstr) }                  \
 570                .stab.index 0 : { *(.stab.index) }                      \
 571                .stab.indexstr 0 : { *(.stab.indexstr) }                \
 572                .comment 0 : { *(.comment) }
 573
 574#ifdef CONFIG_GENERIC_BUG
 575#define BUG_TABLE                                                       \
 576        . = ALIGN(8);                                                   \
 577        __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {             \
 578                VMLINUX_SYMBOL(__start___bug_table) = .;                \
 579                *(__bug_table)                                          \
 580                VMLINUX_SYMBOL(__stop___bug_table) = .;                 \
 581        }
 582#else
 583#define BUG_TABLE
 584#endif
 585
 586#define JUMP_TABLE                                                      \
 587        . = ALIGN(8);                                                   \
 588        __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {           \
 589                VMLINUX_SYMBOL(__start___jump_table) = .;               \
 590                *(__jump_table)                                         \
 591                VMLINUX_SYMBOL(__stop___jump_table) = .;                \
 592        }
 593
 594#ifdef CONFIG_PM_TRACE
 595#define TRACEDATA                                                       \
 596        . = ALIGN(4);                                                   \
 597        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {               \
 598                VMLINUX_SYMBOL(__tracedata_start) = .;                  \
 599                *(.tracedata)                                           \
 600                VMLINUX_SYMBOL(__tracedata_end) = .;                    \
 601        }
 602#else
 603#define TRACEDATA
 604#endif
 605
 606#define NOTES                                                           \
 607        .notes : AT(ADDR(.notes) - LOAD_OFFSET) {                       \
 608                VMLINUX_SYMBOL(__start_notes) = .;                      \
 609                *(.note.*)                                              \
 610                VMLINUX_SYMBOL(__stop_notes) = .;                       \
 611        }
 612
 613#define INIT_SETUP(initsetup_align)                                     \
 614                . = ALIGN(initsetup_align);                             \
 615                VMLINUX_SYMBOL(__setup_start) = .;                      \
 616                *(.init.setup)                                          \
 617                VMLINUX_SYMBOL(__setup_end) = .;
 618
 619#define INITCALLS                                                       \
 620        *(.initcallearly.init)                                          \
 621        VMLINUX_SYMBOL(__early_initcall_end) = .;                       \
 622        *(.initcall0.init)                                              \
 623        *(.initcall0s.init)                                             \
 624        *(.initcall1.init)                                              \
 625        *(.initcall1s.init)                                             \
 626        *(.initcall2.init)                                              \
 627        *(.initcall2s.init)                                             \
 628        *(.initcall3.init)                                              \
 629        *(.initcall3s.init)                                             \
 630        *(.initcall4.init)                                              \
 631        *(.initcall4s.init)                                             \
 632        *(.initcall5.init)                                              \
 633        *(.initcall5s.init)                                             \
 634        *(.initcallrootfs.init)                                         \
 635        *(.initcall6.init)                                              \
 636        *(.initcall6s.init)                                             \
 637        *(.initcall7.init)                                              \
 638        *(.initcall7s.init)
 639
 640#define INIT_CALLS                                                      \
 641                VMLINUX_SYMBOL(__initcall_start) = .;                   \
 642                INITCALLS                                               \
 643                VMLINUX_SYMBOL(__initcall_end) = .;
 644
 645#define CON_INITCALL                                                    \
 646                VMLINUX_SYMBOL(__con_initcall_start) = .;               \
 647                *(.con_initcall.init)                                   \
 648                VMLINUX_SYMBOL(__con_initcall_end) = .;
 649
 650#define SECURITY_INITCALL                                               \
 651                VMLINUX_SYMBOL(__security_initcall_start) = .;          \
 652                *(.security_initcall.init)                              \
 653                VMLINUX_SYMBOL(__security_initcall_end) = .;
 654
 655#ifdef CONFIG_BLK_DEV_INITRD
 656#define INIT_RAM_FS                                                     \
 657        . = ALIGN(4);                                                   \
 658        VMLINUX_SYMBOL(__initramfs_start) = .;                          \
 659        *(.init.ramfs)                                                  \
 660        . = ALIGN(8);                                                   \
 661        *(.init.ramfs.info)
 662#else
 663#define INIT_RAM_FS
 664#endif
 665
 666/*
 667 * Default discarded sections.
 668 *
 669 * Some archs want to discard exit text/data at runtime rather than
 670 * link time due to cross-section references such as alt instructions,
 671 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 672 * section definitions so that such archs put those in earlier section
 673 * definitions.
 674 */
 675#define DISCARDS                                                        \
 676        /DISCARD/ : {                                                   \
 677        EXIT_TEXT                                                       \
 678        EXIT_DATA                                                       \
 679        EXIT_CALL                                                       \
 680        *(.discard)                                                     \
 681        *(.discard.*)                                                   \
 682        }
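
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * an architecture with no special needs simply ends its SECTIONS block
 * with
 *
 *      DISCARDS
 *
 * An architecture that must keep EXIT_TEXT/EXIT_DATA around until runtime
 * instead places those macros in an earlier output section of its own;
 * since the linker assigns each input section only once, the /DISCARD/
 * rule above then no longer matches them.
 */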
 683
 684/**
 685 * PERCPU_VADDR - define output section for percpu area
 686 * @vaddr: explicit base address (optional)
 687 * @phdr: destination PHDR (optional)
 688 *
 689 * Macro which expands to the output section for the percpu area.  If @vaddr
 690 * is not blank, it specifies an explicit base address and all percpu
 691 * symbols will be offset from the given address.  If blank, @vaddr
 692 * always equals @laddr + LOAD_OFFSET.
 693 *
 694 * @phdr defines the output PHDR to use if not blank.  Be warned that
 695 * output PHDR is sticky.  If @phdr is specified, the next output
 696 * section in the linker script will go there too.  @phdr should have
 697 * a leading colon.
 698 *
 699 * Note that this macro defines __per_cpu_load as an absolute symbol.
 700 * If there is no need to put the percpu section at a predetermined
 701 * address, use PERCPU().
 702 */
 703#define PERCPU_VADDR(vaddr, phdr)                                       \
 704        VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
 705        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)         \
 706                                - LOAD_OFFSET) {                        \
 707                VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
 708                *(.data..percpu..first)                                 \
 709                . = ALIGN(PAGE_SIZE);                                   \
 710                *(.data..percpu..page_aligned)                          \
 711                *(.data..percpu..readmostly)                            \
 712                *(.data..percpu)                                        \
 713                *(.data..percpu..shared_aligned)                        \
 714                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
 715        } phdr                                                          \
 716        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
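
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * an architecture that wants zero-based percpu symbols in a dedicated
 * program header could invoke, inside SECTIONS:
 *
 *      PERCPU_VADDR(0, :percpu)
 *
 * Because the output PHDR is sticky, the following output section should
 * name its own PHDR explicitly to leave the percpu segment again.
 */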
 717
 718/**
 719 * PERCPU - define output section for percpu area, simple version
 720 * @align: required alignment
 721 *
 722 * Aligns to @align and outputs the output section for the percpu area.
 723 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 724 * __per_cpu_start will be identical.
 725 *
 726 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 727 * that __per_cpu_load is defined as a relative symbol against
 728 * .data..percpu, which is required for the relocatable x86_32
 729 * configuration.
 730 */
 731#define PERCPU(align)                                                   \
 732        . = ALIGN(align);                                               \
 733        .data..percpu   : AT(ADDR(.data..percpu) - LOAD_OFFSET) {       \
 734                VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
 735                VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
 736                *(.data..percpu..first)                                 \
 737                . = ALIGN(PAGE_SIZE);                                   \
 738                *(.data..percpu..page_aligned)                          \
 739                *(.data..percpu..readmostly)                            \
 740                *(.data..percpu)                                        \
 741                *(.data..percpu..shared_aligned)                        \
 742                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
 743        }
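
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * architectures without special percpu placement requirements use the
 * simple form in the init area of their script, as in the sample at the
 * top of this file:
 *
 *      PERCPU(PAGE_SIZE)
 */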
 744
 745
 746/*
 747 * Definition of the high level *_SECTION macros
 748 * They will fit only a subset of the architectures
 749 */
 750
 751
 752/*
 753 * Writeable data.
 754 * All sections are combined in a single .data section.
 755 * The sections following CONSTRUCTORS are arranged so their
 756 * typical alignment matches.
 757 * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
 758 * the sections that have this restriction (or similar)
 759 * are located before the ones requiring PAGE_SIZE alignment.
 760 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 761 * matches the requirement of PAGE_ALIGNED_DATA.
 762 *
 763 * Use 0 as page_align if page_aligned data is not used. */
 764#define RW_DATA_SECTION(cacheline, pagealigned, inittask)               \
 765        . = ALIGN(PAGE_SIZE);                                           \
 766        .data : AT(ADDR(.data) - LOAD_OFFSET) {                         \
 767                INIT_TASK_DATA(inittask)                                \
 768                NOSAVE_DATA                                             \
 769                PAGE_ALIGNED_DATA(pagealigned)                          \
 770                CACHELINE_ALIGNED_DATA(cacheline)                       \
 771                READ_MOSTLY_DATA(cacheline)                             \
 772                DATA_DATA                                               \
 773                CONSTRUCTORS                                            \
 774        }
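
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * a typical invocation passes the cacheline size, the page size and the
 * init task alignment, e.g. (argument values are arch-specific):
 *
 *      RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * with 0 as the second argument on architectures that have no
 * page-aligned data, as noted above.
 */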
 775
 776#define INIT_TEXT_SECTION(inittext_align)                               \
 777        . = ALIGN(inittext_align);                                      \
 778        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {               \
 779                VMLINUX_SYMBOL(_sinittext) = .;                         \
 780                INIT_TEXT                                               \
 781                VMLINUX_SYMBOL(_einittext) = .;                         \
 782        }
 783
 784#define INIT_DATA_SECTION(initsetup_align)                              \
 785        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {               \
 786                INIT_DATA                                               \
 787                INIT_SETUP(initsetup_align)                             \
 788                INIT_CALLS                                              \
 789                CON_INITCALL                                            \
 790                SECURITY_INITCALL                                       \
 791                INIT_RAM_FS                                             \
 792        }
 793
 794#define BSS_SECTION(sbss_align, bss_align, stop_align)                  \
 795        . = ALIGN(sbss_align);                                          \
 796        VMLINUX_SYMBOL(__bss_start) = .;                                \
 797        SBSS(sbss_align)                                                \
 798        BSS(bss_align)                                                  \
 799        . = ALIGN(stop_align);                                          \
 800        VMLINUX_SYMBOL(__bss_stop) = .;
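
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * the sample script at the top of this file uses BSS_SECTION(0, 0, 0),
 * i.e. no extra alignment for .sbss, .bss or the closing __bss_stop
 * symbol; an architecture that wants a page-aligned bss could instead
 * pass PAGE_SIZE for the relevant arguments.
 */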
 801