linux/arch/blackfin/kernel/vmlinux.lds.S
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
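/* Alias _jiffies onto the low 32 bits of jiffies_64; this works because
 * Blackfin is little-endian, so the low word sits at the base address.
 */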
_jiffies = _jiffies_64;

SECTIONS
{
#ifdef CONFIG_RAMKERNEL
        . = CONFIG_BOOT_LOAD;
#else
        . = CONFIG_ROM_BASE;
#endif

        /* Neither the text, ro_data, nor bss sections need to be aligned,
         * so pack them back to back.
         */
        .text :
        {
                __text = .;
                _text = .;
                __stext = .;
                TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                CPUIDLE_TEXT
                LOCK_TEXT
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
                KPROBES_TEXT
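                /* ROM/XIP kernels keep init and exit text in ROM along with
                 * the rest of .text; there is no RAM behind it that could be
                 * reclaimed by freeing the init sections.
                 */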
#ifdef CONFIG_ROMKERNEL
                __sinittext = .;
                INIT_TEXT
                __einittext = .;
                EXIT_TEXT
#endif
                *(.text.*)
                *(.fixup)

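                /* Parts with no L1 instruction SRAM (L1_CODE_LENGTH == 0)
                 * fall back to keeping .l1.text code in plain .text.
                 */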
#if !L1_CODE_LENGTH
                *(.l1.text)
#endif
                __etext = .;
        }

        EXCEPTION_TABLE(4)
        NOTES

        /* Just in case the first read-only access is 32 bits wide */
        RO_DATA(4)
        __rodata_end = .;

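        /* For ROM kernels the image so far lives in ROM; .bss must run from
         * RAM, so restart the location counter at CONFIG_BOOT_LOAD while
         * AT(__rodata_end) keeps the load-address counter parked at the end
         * of the ROM image (.bss itself contributes no load data).
         */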
#ifdef CONFIG_ROMKERNEL
        . = CONFIG_BOOT_LOAD;
        .bss : AT(__rodata_end)
#else
        .bss :
#endif
        {
                . = ALIGN(4);
                ___bss_start = .;
                *(.bss .bss.*)
                *(COMMON)
#if !L1_DATA_A_LENGTH
                *(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.bss.B)
#endif
                . = ALIGN(4);
                ___bss_stop = .;
        }

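        /* For ROM kernels, .data runs in RAM right after .bss but is loaded
         * from ROM, at the load-address range following .bss's; early boot
         * code copies it into RAM before C code uses it.
         */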
#if defined(CONFIG_ROMKERNEL)
        .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
#else
        .data :
#endif
        {
                __sdata = .;
                /* This gets done first, so the glob doesn't suck it in */
                CACHELINE_ALIGNED_DATA(32)

#if !L1_DATA_A_LENGTH
                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)
                *(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.data.B)
#endif
#if !L2_LENGTH
                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)
                *(.l2.data)
#endif

                DATA_DATA
                CONSTRUCTORS

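                /* The initial task's stack and thread_info live here,
                 * aligned to THREAD_SIZE so the stack-pointer masking used
                 * to find thread_info works for the init task too.
                 */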
                INIT_TASK_DATA(THREAD_SIZE)

                __edata = .;
        }
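        /* Record where the .data payload sits in the load image; the ROM
         * kernel boot code uses these symbols to copy .data into RAM.
         */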
        __data_lma = LOADADDR(.data);
        __data_len = SIZEOF(.data);

        BUG_TABLE

        /* The init section should be last, so when we free it, it goes into
         * the general memory pool, and (hopefully) will decrease fragmentation
         * a tiny bit. The init section has a _requirement_ that it be
         * PAGE_SIZE aligned.
         */
        . = ALIGN(PAGE_SIZE);
        ___init_begin = .;

#ifdef CONFIG_RAMKERNEL
        INIT_TEXT_SECTION(PAGE_SIZE)

        /* We have to discard exit text and such at runtime, not link time, to
         * handle embedded cross-section references (alt instructions, bug
         * table, eh_frame, etc...).  We need all of our .text up front and
         * .data after it for PCREL call issues.
         */
        .exit.text :
        {
                EXIT_TEXT
        }

        . = ALIGN(16);
        INIT_DATA_SECTION(16)
        PERCPU_SECTION(32)

        .exit.data :
        {
                EXIT_DATA
        }

        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
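        /* ROM kernels keep init text in ROM (see .text above), but init
         * data still lives in RAM so it can be freed; its load image follows
         * the .data load image (plus a 32-byte gap) and is copied into RAM
         * at boot.
         */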
        .init.data : AT(__data_lma + __data_len + 32)
        {
                __sinitdata = .;
                INIT_DATA
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS

                . = ALIGN(PAGE_SIZE);
                ___per_cpu_load = .;
                PERCPU_INPUT(32)

                EXIT_DATA
                __einitdata = .;
        }
        __init_data_lma = LOADADDR(.init.data);
        __init_data_len = SIZEOF(.init.data);
        __init_data_end = .;

        .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
#endif
        {
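                /* The on-chip sections (.text_l1, .data_l1, .data_b_l1,
                 * .text_data_l2) are linked at their run addresses in L1/L2
                 * SRAM but placed back-to-back in the load image; early boot
                 * code copies each into on-chip memory using the *_lma and
                 * *_len symbols recorded after each section.
                 */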
                . = ALIGN(4);
                __stext_l1 = .;
                *(.l1.text.head)
                *(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                . = ALIGN(4);
                __etext_l1 = .;
        }
        __text_l1_lma = LOADADDR(.text_l1);
        __text_l1_len = SIZEOF(.text_l1);
        ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")

        .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
        {
                . = ALIGN(4);
                __sdata_l1 = .;
                *(.l1.data)
                __edata_l1 = .;

                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l1 = .;
                *(.l1.bss)
                . = ALIGN(4);
                __ebss_l1 = .;
        }
        __data_l1_lma = LOADADDR(.data_l1);
        __data_l1_len = SIZEOF(.data_l1);
        ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")

        .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
        {
                . = ALIGN(4);
                __sdata_b_l1 = .;
                *(.l1.data.B)
                __edata_b_l1 = .;

                . = ALIGN(4);
                __sbss_b_l1 = .;
                *(.l1.bss.B)
                . = ALIGN(4);
                __ebss_b_l1 = .;
        }
        __data_b_l1_lma = LOADADDR(.data_b_l1);
        __data_b_l1_len = SIZEOF(.data_b_l1);
        ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")

        .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
        {
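                /* Same scheme as the L1 sections: runs from on-chip L2 SRAM
                 * at L2_START, loaded right after the L1 data B image.
                 */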
                . = ALIGN(4);
                __stext_l2 = .;
                *(.l2.text)
                . = ALIGN(4);
                __etext_l2 = .;

                . = ALIGN(4);
                __sdata_l2 = .;
                *(.l2.data)
                __edata_l2 = .;

                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l2 = .;
                *(.l2.bss)
                . = ALIGN(4);
                __ebss_l2 = .;
        }
        __l2_lma = LOADADDR(.text_data_l2);
        __l2_len = SIZEOF(.text_data_l2);
        ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")

        /* Force trailing alignment of our init section so that when we
         * free our init memory, we don't leave behind a partial page.
         */
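        /* The location counter is currently in on-chip SRAM; move it back
         * into external memory (past the last load image for RAM kernels,
         * or past the init data for ROM kernels) before aligning, so that
         * ___init_end bounds everything the init-free code may reclaim.
         */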
#ifdef CONFIG_RAMKERNEL
        . = __l2_lma + __l2_len;
#else
        . = __init_data_end;
#endif
        . = ALIGN(PAGE_SIZE);
        ___init_end = .;

        __end = .;

        STABS_DEBUG

        DWARF_DEBUG

        DISCARDS
}