linux/arch/arm64/kernel/vmlinux.lds.S
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include "image.h"

/* .exit.text needed in case of alternative patching */
#define ARM_EXIT_KEEP(x)        x
#define ARM_EXIT_DISCARD(x)

OUTPUT_ARCH(aarch64)
ENTRY(_text)

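/* arm64 is 64-bit only, so jiffies can simply alias the full jiffies_64 counter. */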
jiffies = jiffies_64;

#define HYPERVISOR_TEXT                                 \
        /*                                              \
         * Align to 4 KB so that                        \
         * a) the HYP vector table is at its minimum    \
         *    alignment of 2048 bytes                   \
         * b) the HYP init code will not cross a page   \
         *    boundary if its size does not exceed      \
         *    4 KB (see related ASSERT() below)         \
         */                                             \
        . = ALIGN(SZ_4K);                               \
        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;     \
        *(.hyp.idmap.text)                              \
        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;       \
        VMLINUX_SYMBOL(__hyp_text_start) = .;           \
        *(.hyp.text)                                    \
        VMLINUX_SYMBOL(__hyp_text_end) = .;

#define IDMAP_TEXT                                      \
        . = ALIGN(SZ_4K);                               \
        VMLINUX_SYMBOL(__idmap_text_start) = .;         \
        *(.idmap.text)                                  \
        VMLINUX_SYMBOL(__idmap_text_end) = .;

#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT                                  \
        . = ALIGN(SZ_4K);                               \
        VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
        *(.hibernate_exit.text)                         \
        VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
#else
#define HIBERNATE_TEXT
#endif

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. 'stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING    \
        .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif

#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN                   SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN                   SZ_64K
#endif

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name.  There is no documented
         * order of matching.
         */
        /DISCARD/ : {
                ARM_EXIT_DISCARD(EXIT_TEXT)
                ARM_EXIT_DISCARD(EXIT_DATA)
                EXIT_CALL
                *(.discard)
                *(.discard.*)
                *(.interp .dynamic)
                *(.dynsym .dynstr .hash)
        }

        . = KIMAGE_VADDR + TEXT_OFFSET;

        .head.text : {
                _text = .;
                HEAD_TEXT
        }
        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                        __exception_text_start = .;
                        *(.exception.text)
                        __exception_text_end = .;
                        IRQENTRY_TEXT
                        SOFTIRQENTRY_TEXT
                        ENTRY_TEXT
                        TEXT_TEXT
                        SCHED_TEXT
                        CPUIDLE_TEXT
                        LOCK_TEXT
                        KPROBES_TEXT
                        HYPERVISOR_TEXT
                        IDMAP_TEXT
                        HIBERNATE_TEXT
                        *(.fixup)
                        *(.gnu.warning)
                . = ALIGN(16);
                *(.got)                 /* Global offset table          */
        }

        . = ALIGN(SEGMENT_ALIGN);
        _etext = .;                     /* End of text section */

        RO_DATA(PAGE_SIZE)              /* everything from this point to     */
        EXCEPTION_TABLE(8)              /* __init_begin will be marked RO NX */
        NOTES

        . = ALIGN(SEGMENT_ALIGN);
        __init_begin = .;

        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }

        .init.data : {
                INIT_DATA
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
                *(.init.rodata.* .init.bss)     /* from the EFI stub */
        }
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }

        PERCPU_SECTION(L1_CACHE_BYTES)

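        /*
         * Alternative instruction patching: each entry in .altinstructions
         * describes an original instruction sequence and the replacement
         * held in .altinstr_replacement, which is patched in at boot when
         * the corresponding CPU capability is detected.
         */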
        . = ALIGN(4);
        .altinstructions : {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }
        .altinstr_replacement : {
                *(.altinstr_replacement)
        }
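        /*
         * RELA relocations emitted when the kernel is built relocatable
         * (CONFIG_RELOCATABLE); __rela_offset and __rela_size below let
         * the early boot code apply them before running from the final,
         * possibly randomized, virtual address.
         */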
        .rela : ALIGN(8) {
                *(.rela .rela*)
        }

        __rela_offset   = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
        __rela_size     = SIZEOF(.rela);

        . = ALIGN(SEGMENT_ALIGN);
        __init_end = .;

        _data = .;
        _sdata = .;
        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

        /*
         * Data written with the MMU off but read with the MMU on requires
         * cache lines to be invalidated, discarding up to a Cache Writeback
         * Granule (CWG) of data from the cache. Keep the section that
         * requires this type of maintenance to be in its own Cache Writeback
         * Granule (CWG) area so the cache maintenance operations don't
         * interfere with adjacent data.
         */
        .mmuoff.data.write : ALIGN(SZ_2K) {
                __mmuoff_data_start = .;
                *(.mmuoff.data.write)
        }
        . = ALIGN(SZ_2K);
        .mmuoff.data.read : {
                *(.mmuoff.data.read)
                __mmuoff_data_end = .;
        }

        PECOFF_EDATA_PADDING
        _edata = .;

        BSS_SECTION(0, 0, 0)

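        /*
         * Reserve room for the initial ID map and swapper page tables
         * right after .bss; the early boot code zeroes and populates
         * them before the MMU is enabled.
         */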
        . = ALIGN(PAGE_SIZE);
        idmap_pg_dir = .;
        . += IDMAP_DIR_SIZE;
        swapper_pg_dir = .;
        . += SWAPPER_DIR_SIZE;

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        reserved_ttbr0 = .;
        . += RESERVED_TTBR0_SIZE;
#endif

        _end = .;

        STABS_DEBUG

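        /* HEAD_SYMBOLS (see image.h) provides the symbols used to fill in the Image header fields. */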
        HEAD_SYMBOLS
}

/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
        <= SZ_4K, "Hibernate exit text too big or misaligned")
#endif

/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")