linux/arch/blackfin/kernel/vmlinux.lds.S
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

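/* The Blackfin toolchain prefixes C symbols with an underscore, so have the
 * generic linker script macros emit symbols with the extra '_' as well.
 */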
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
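/* Blackfin is little-endian, so the low 32 bits of jiffies_64 sit at the
 * lower address; alias jiffies to that word.
 */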
_jiffies = _jiffies_64;

SECTIONS
{
        . = CONFIG_BOOT_LOAD;
        /* Neither the text, ro_data nor bss sections need to be aligned,
         * so pack them back to back.
         */
        .text :
        {
                __text = .;
                _text = .;
                __stext = .;
                TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                LOCK_TEXT
                IRQENTRY_TEXT
                KPROBES_TEXT
                *(.text.*)
                *(.fixup)

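                /* Parts with no L1 instruction SRAM keep the .l1.text input
                 * sections here in external memory instead.
                 */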
#if !L1_CODE_LENGTH
                *(.l1.text)
#endif

                . = ALIGN(16);
                ___start___ex_table = .;
                *(__ex_table)
                ___stop___ex_table = .;

                __etext = .;
        }

        NOTES

        /* Just in case the first read-only access is a 32-bit access */
        RO_DATA(4)

        .bss :
        {
                . = ALIGN(4);
                ___bss_start = .;
                *(.bss .bss.*)
                *(COMMON)
#if !L1_DATA_A_LENGTH
                *(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.bss.B)
#endif
                . = ALIGN(4);
                ___bss_stop = .;
        }

        .data :
        {
                __sdata = .;
                /* This gets done first, so the glob doesn't suck it in */
                CACHELINE_ALIGNED_DATA(32)

#if !L1_DATA_A_LENGTH
                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)
                *(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.data.B)
#endif
#if !L2_LENGTH
                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)
                *(.l2.data)
#endif

                DATA_DATA
                CONSTRUCTORS

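                /* The init task's stack/thread_info lives in .data, aligned
                 * to THREAD_SIZE so the thread_info can be found at the
                 * bottom of the stack.
                 */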
                INIT_TASK_DATA(THREAD_SIZE)

                __edata = .;
        }

        /* The init section should be last, so when we free it, it goes into
         * the general memory pool, and (hopefully) will decrease fragmentation
         * a tiny bit. The init section has a _requirement_ that it be
         * PAGE_SIZE aligned
         */
        . = ALIGN(PAGE_SIZE);
        ___init_begin = .;

        INIT_TEXT_SECTION(PAGE_SIZE)
        . = ALIGN(16);
        INIT_DATA_SECTION(16)
        PERCPU(4)

        /* we have to discard exit text and such at runtime, not link time, to
         * handle embedded cross-section references (alt instructions, bug
         * table, eh_frame, etc...)
         */
        .exit.text :
        {
                EXIT_TEXT
        }
        .exit.data :
        {
                EXIT_DATA
        }

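        /* The L1 SRAM sections below are linked at their on-chip addresses
         * (VMA) but loaded back to back after the init sections (LMA); early
         * boot code copies them into L1 before they are used.
         */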
        __l1_lma_start = .;

        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
        {
                . = ALIGN(4);
                __stext_l1 = .;
                *(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                . = ALIGN(4);
                __etext_l1 = .;
        }
        ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")

        .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
        {
                . = ALIGN(4);
                __sdata_l1 = .;
                *(.l1.data)
                __edata_l1 = .;

                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l1 = .;
                *(.l1.bss)
                . = ALIGN(4);
                __ebss_l1 = .;
        }
        ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")

        .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
        {
                . = ALIGN(4);
                __sdata_b_l1 = .;
                *(.l1.data.B)
                __edata_b_l1 = .;

                . = ALIGN(4);
                __sbss_b_l1 = .;
                *(.l1.bss.B)
                . = ALIGN(4);
                __ebss_b_l1 = .;
        }
        ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")

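        /* L2 SRAM gets the same treatment: linked at L2_START but loaded
         * right after the L1 images, then copied into place at boot.
         */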
        __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);

        .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
        {
                . = ALIGN(4);
                __stext_l2 = .;
                *(.l2.text)
                . = ALIGN(4);
                __etext_l2 = .;

                . = ALIGN(4);
                __sdata_l2 = .;
                *(.l2.data)
                __edata_l2 = .;

                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l2 = .;
                *(.l2.bss)
                . = ALIGN(4);
                __ebss_l2 = .;
        }
        ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")

        /* Force trailing alignment of our init section so that when we
         * free our init memory, we don't leave behind a partial page.
         */
        . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
        . = ALIGN(PAGE_SIZE);
        ___init_end = .;

        __end = .;

        STABS_DEBUG

        DWARF_DEBUG

        DISCARDS
}