linux/arch/arm64/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include "image.h"

/* .exit.text needed in case of alternative patching */
#define ARM_EXIT_KEEP(x)        x
#define ARM_EXIT_DISCARD(x)

OUTPUT_ARCH(aarch64)
ENTRY(_text)

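/*
 * arm64 is a 64-bit architecture, so jiffies can simply alias the
 * 64-bit jiffies_64 counter.
 */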
jiffies = jiffies_64;

#define HYPERVISOR_TEXT                                 \
        /*                                              \
         * Align to 4 KB so that                        \
         * a) the HYP vector table is at its minimum    \
         *    alignment of 2048 bytes                   \
         * b) the HYP init code will not cross a page   \
         *    boundary if its size does not exceed      \
         *    4 KB (see related ASSERT() below)         \
         */                                             \
        . = ALIGN(SZ_4K);                               \
        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;     \
        *(.hyp.idmap.text)                              \
        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;       \
        VMLINUX_SYMBOL(__hyp_text_start) = .;           \
        *(.hyp.text)                                    \
        VMLINUX_SYMBOL(__hyp_text_end) = .;

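/*
 * Code that runs via the identity mapping is grouped into .idmap.text
 * and must fit in a single 4 KB page (see the related ASSERT() below).
 */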
#define IDMAP_TEXT                                      \
        . = ALIGN(SZ_4K);                               \
        VMLINUX_SYMBOL(__idmap_text_start) = .;         \
        *(.idmap.text)                                  \
        VMLINUX_SYMBOL(__idmap_text_end) = .;

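/*
 * The hibernate exit code is copied out and executed from a single page
 * during resume, so it must not cross a page boundary (see the related
 * ASSERT() below).
 */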
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT                                  \
        . = ALIGN(SZ_4K);                               \
        VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
        *(.hibernate_exit.text)                         \
        VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
#else
#define HIBERNATE_TEXT
#endif

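/*
 * With CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI), the exception entry trampoline
 * is padded to whole pages so it can be mapped on its own in the
 * trampoline page tables while the rest of the kernel text stays
 * unmapped at EL0.
 */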
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT                                      \
        . = ALIGN(PAGE_SIZE);                           \
        VMLINUX_SYMBOL(__entry_tramp_text_start) = .;   \
        *(.entry.tramp.text)                            \
        . = ALIGN(PAGE_SIZE);                           \
        VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
#else
#define TRAMP_TEXT
#endif

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. 'stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING    \
        .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name.  There is no documented
         * order of matching.
         */
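        /*
         * Dynamic linking sections produced by the linker are never used
         * at runtime and are discarded here.
         */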
        /DISCARD/ : {
                ARM_EXIT_DISCARD(EXIT_TEXT)
                ARM_EXIT_DISCARD(EXIT_DATA)
                EXIT_CALL
                *(.discard)
                *(.discard.*)
                *(.interp .dynamic)
                *(.dynsym .dynstr .hash)
        }

        . = KIMAGE_VADDR + TEXT_OFFSET;

        .head.text : {
                _text = .;
                HEAD_TEXT
        }
        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                        __exception_text_start = .;
                        *(.exception.text)
                        __exception_text_end = .;
                        IRQENTRY_TEXT
                        SOFTIRQENTRY_TEXT
                        ENTRY_TEXT
                        TEXT_TEXT
                        SCHED_TEXT
                        CPUIDLE_TEXT
                        LOCK_TEXT
                        KPROBES_TEXT
                        HYPERVISOR_TEXT
                        IDMAP_TEXT
                        HIBERNATE_TEXT
                        TRAMP_TEXT
                        *(.fixup)
                        *(.gnu.warning)
                . = ALIGN(16);
                *(.got)                 /* Global offset table          */
        }

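        /*
         * Segment boundaries are aligned to SEGMENT_ALIGN so that text,
         * rodata, init and data can each be mapped with their own
         * permissions.
         */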
        . = ALIGN(SEGMENT_ALIGN);
        _etext = .;                     /* End of text section */

        RO_DATA(PAGE_SIZE)              /* everything from this point to     */
        EXCEPTION_TABLE(8)              /* __init_begin will be marked RO NX */
        NOTES

        . = ALIGN(SEGMENT_ALIGN);
        __init_begin = .;
        __inittext_begin = .;

        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }

        . = ALIGN(4);
        .altinstructions : {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }
        .altinstr_replacement : {
                *(.altinstr_replacement)
        }

        . = ALIGN(PAGE_SIZE);
        __inittext_end = .;
        __initdata_begin = .;

        .init.data : {
                INIT_DATA
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
                *(.init.rodata.* .init.bss)     /* from the EFI stub */
        }
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }

        PERCPU_SECTION(L1_CACHE_BYTES)

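        /*
         * RELA relocations emitted for the relocatable (KASLR-capable)
         * kernel. __rela_offset and __rela_size are consumed by the early
         * relocation code in head.S.
         */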
        .rela : ALIGN(8) {
                *(.rela .rela*)
        }

        __rela_offset   = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
        __rela_size     = SIZEOF(.rela);

        . = ALIGN(SEGMENT_ALIGN);
        __initdata_end = .;
        __init_end = .;

        _data = .;
        _sdata = .;
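        /*
         * Main read-write data: .data plus the cacheline-aligned,
         * read-mostly and page-aligned data, with the init task stack
         * aligned to THREAD_ALIGN.
         */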
        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

        /*
         * Data written with the MMU off but read with the MMU on requires
         * cache lines to be invalidated, discarding up to a Cache Writeback
         * Granule (CWG) of data from the cache. Keep the section that
         * requires this type of maintenance to be in its own Cache Writeback
         * Granule (CWG) area so the cache maintenance operations don't
         * interfere with adjacent data.
         */
        .mmuoff.data.write : ALIGN(SZ_2K) {
                __mmuoff_data_start = .;
                *(.mmuoff.data.write)
        }
        . = ALIGN(SZ_2K);
        .mmuoff.data.read : {
                *(.mmuoff.data.read)
                __mmuoff_data_end = .;
        }

        PECOFF_EDATA_PADDING
        __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
        _edata = .;

        BSS_SECTION(0, 0, 0)

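        /*
         * Statically reserved storage for the initial page tables set up
         * early in boot: the identity map, the optional KPTI trampoline
         * tables, the reserved TTBR0 region used by SW TTBR0 PAN, and the
         * swapper (kernel) page tables.
         */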
        . = ALIGN(PAGE_SIZE);
        idmap_pg_dir = .;
        . += IDMAP_DIR_SIZE;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        tramp_pg_dir = .;
        . += PAGE_SIZE;
#endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        reserved_ttbr0 = .;
        . += RESERVED_TTBR0_SIZE;
#endif
        swapper_pg_dir = .;
        . += SWAPPER_DIR_SIZE;
        swapper_pg_end = .;

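        /*
         * __pecoff_data_rawsize above covers only the initialized data
         * present in the image file; __pecoff_data_size also includes BSS
         * and the reserved page tables. Both are referenced by the PE/COFF
         * image header used when booting via EFI.
         */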
        __pecoff_data_size = ABSOLUTE(. - __initdata_begin);
        _end = .;

        STABS_DEBUG

        HEAD_SYMBOLS
}

/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
        <= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
        "Entry trampoline text too big")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")