linux/arch/blackfin/kernel/vmlinux.lds.S
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
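/* Blackfin is little-endian, so the 32-bit _jiffies can simply alias the
 * low word of _jiffies_64.
 */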
_jiffies = _jiffies_64;

SECTIONS
{
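        /* A RAM kernel is loaded and run from external memory at
         * CONFIG_BOOT_LOAD; a ROM kernel executes its read-only sections
         * in place from flash starting at CONFIG_ROM_BASE.
         */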
#ifdef CONFIG_RAMKERNEL
        . = CONFIG_BOOT_LOAD;
#else
        . = CONFIG_ROM_BASE;
#endif

        /* Neither the text, ro_data, nor bss sections need to be aligned,
         * so pack them back to back.
         */
        .text :
        {
                __text = .;
                _text = .;
                __stext = .;
                TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                LOCK_TEXT
                IRQENTRY_TEXT
                KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
                __sinittext = .;
                INIT_TEXT
                __einittext = .;
                EXIT_TEXT
#endif
                *(.text.*)
                *(.fixup)

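                /* If L1_CODE_LENGTH is zero (no L1 instruction SRAM), keep
                 * the L1 code in the regular .text section instead.
                 */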
#if !L1_CODE_LENGTH
                *(.l1.text)
#endif
                __etext = .;
        }

        EXCEPTION_TABLE(4)
        NOTES

        /* Just in case the first read-only data access is a 32-bit access */
        RO_DATA(4)
        __rodata_end = .;

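        /* With a ROM kernel, everything up to here stays in flash; move back
         * to CONFIG_BOOT_LOAD so the writable sections get RAM addresses,
         * while AT() keeps their load addresses contiguous in the ROM image.
         */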
#ifdef CONFIG_ROMKERNEL
        . = CONFIG_BOOT_LOAD;
        .bss : AT(__rodata_end)
#else
        .bss :
#endif
        {
                . = ALIGN(4);
                ___bss_start = .;
                *(.bss .bss.*)
                *(COMMON)
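                /* Parts without the corresponding L1 data SRAM bank keep
                 * their L1 bss in the main .bss instead.
                 */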
#if !L1_DATA_A_LENGTH
                *(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.bss.B)
#endif
                . = ALIGN(4);
                ___bss_stop = .;
        }

#if defined(CONFIG_ROMKERNEL)
        .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
#else
        .data :
#endif
        {
                __sdata = .;
                /* This gets done first, so the glob doesn't suck it in */
                CACHELINE_ALIGNED_DATA(32)

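                /* Likewise, fold the L1/L2 data input sections into .data
                 * when the corresponding on-chip memories do not exist.
                 */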
#if !L1_DATA_A_LENGTH
                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)
                *(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.data.B)
#endif
#if !L2_LENGTH
                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)
                *(.l2.data)
#endif

                DATA_DATA
                CONSTRUCTORS

                INIT_TASK_DATA(THREAD_SIZE)

                __edata = .;
        }
        __data_lma = LOADADDR(.data);
        __data_len = SIZEOF(.data);

        /* The init section should be last, so when we free it, it goes into
         * the general memory pool, and (hopefully) will decrease fragmentation
         * a tiny bit. The init section has a _requirement_ that it be
         * PAGE_SIZE aligned
         */
        . = ALIGN(PAGE_SIZE);
        ___init_begin = .;

#ifdef CONFIG_RAMKERNEL
        INIT_TEXT_SECTION(PAGE_SIZE)

        /* We have to discard exit text and such at runtime, not link time, to
         * handle embedded cross-section references (alt instructions, bug
         * table, eh_frame, etc...).  We need all of our .text up front and
         * .data after it for PCREL call issues.
         */
        .exit.text :
        {
                EXIT_TEXT
        }

        . = ALIGN(16);
        INIT_DATA_SECTION(16)
        PERCPU_SECTION(32)

        .exit.data :
        {
                EXIT_DATA
        }

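        /* The on-chip sections that follow (.text_l1, .data_l1, .data_b_l1,
         * .text_data_l2) are linked at their SRAM run addresses but loaded
         * after the rest of the image; the __*_lma and __*_len symbols tell
         * the early boot code where to copy each one from.
         */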
        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
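        /* For a ROM kernel the init/exit text already went into .text above
         * (it executes in place), so only the init data needs to be staged
         * after the .data image and copied into RAM at boot.
         */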
        .init.data : AT(__data_lma + __data_len + 32)
        {
                __sinitdata = .;
                INIT_DATA
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS

                . = ALIGN(PAGE_SIZE);
                ___per_cpu_load = .;
                PERCPU_INPUT(32)

                EXIT_DATA
                __einitdata = .;
        }
        __init_data_lma = LOADADDR(.init.data);
        __init_data_len = SIZEOF(.init.data);
        __init_data_end = .;

        .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
#endif
        {
                . = ALIGN(4);
                __stext_l1 = .;
                *(.l1.text.head)
                *(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                . = ALIGN(4);
                __etext_l1 = .;
        }
        __text_l1_lma = LOADADDR(.text_l1);
        __text_l1_len = SIZEOF(.text_l1);
        ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")

        .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
        {
                . = ALIGN(4);
                __sdata_l1 = .;
                *(.l1.data)
                __edata_l1 = .;

                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l1 = .;
                *(.l1.bss)
                . = ALIGN(4);
                __ebss_l1 = .;
        }
        __data_l1_lma = LOADADDR(.data_l1);
        __data_l1_len = SIZEOF(.data_l1);
        ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")

        .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
        {
                . = ALIGN(4);
                __sdata_b_l1 = .;
                *(.l1.data.B)
                __edata_b_l1 = .;

                . = ALIGN(4);
                __sbss_b_l1 = .;
                *(.l1.bss.B)
                . = ALIGN(4);
                __ebss_b_l1 = .;
        }
        __data_b_l1_lma = LOADADDR(.data_b_l1);
        __data_b_l1_len = SIZEOF(.data_b_l1);
        ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")

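        /* Parts with on-chip L2 SRAM get a single combined text/data/bss
         * section there.
         */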
        .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
        {
                . = ALIGN(4);
                __stext_l2 = .;
                *(.l2.text)
                . = ALIGN(4);
                __etext_l2 = .;

                . = ALIGN(4);
                __sdata_l2 = .;
                *(.l2.data)
                __edata_l2 = .;

                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l2 = .;
                *(.l2.bss)
                . = ALIGN(4);
                __ebss_l2 = .;
        }
        __l2_lma = LOADADDR(.text_data_l2);
        __l2_len = SIZEOF(.text_data_l2);
        ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")

        /* Force trailing alignment of our init section so that when we
         * free our init memory, we don't leave behind a partial page.
         */
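        /* For a RAM kernel the load images of the SRAM sections above sit
         * inside the init region and are freed once copied, so step past
         * them; for a ROM kernel only .init.data occupies RAM here.
         */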
#ifdef CONFIG_RAMKERNEL
        . = __l2_lma + __l2_len;
#else
        . = __init_data_end;
#endif
        . = ALIGN(PAGE_SIZE);
        ___init_end = .;

        __end = .;

        STABS_DEBUG

        DWARF_DEBUG

        DISCARDS
}