linux/arch/arm64/include/asm/assembler.h
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */
        .macro  disable_irq
        msr     daifset, #2
        .endm

        .macro  enable_irq
        msr     daifclr, #2
        .endm
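
/*
 * In the DAIF immediates above, bit 1 (#2) masks only IRQs, leaving
 * FIQ, SError and debug masking untouched; the debug macros below use
 * bit 3 (#8). A typical (illustrative) pairing:
 *
 *      disable_irq                     // mask IRQs
 *      ...                             // critical section
 *      enable_irq                      // unmask IRQs
 */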

/*
 * Enable and disable debug exceptions.
 */
        .macro  disable_dbg
        msr     daifset, #8
        .endm

        .macro  enable_dbg
        msr     daifclr, #8
        .endm

        .macro  disable_step_tsk, flgs, tmp
        tbz     \flgs, #TIF_SINGLESTEP, 9990f
        mrs     \tmp, mdscr_el1
        bic     \tmp, \tmp, #1
        msr     mdscr_el1, \tmp
        isb     // Synchronise with enable_dbg
9990:
        .endm

        .macro  enable_step_tsk, flgs, tmp
        tbz     \flgs, #TIF_SINGLESTEP, 9990f
        disable_dbg
        mrs     \tmp, mdscr_el1
        orr     \tmp, \tmp, #1
        msr     mdscr_el1, \tmp
9990:
        .endm
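
/*
 * Usage sketch (illustrative; the register choice and the tsk/TI_FLAGS
 * idiom are assumptions in the style of entry.S): pass the task's
 * thread flags plus a scratch register, e.g.
 *
 *      ldr     x19, [tsk, #TI_FLAGS]
 *      disable_step_tsk x19, x20       // clear MDSCR_EL1.SS if TIF_SINGLESTEP
 */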

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
        .macro  enable_dbg_and_irq
        msr     daifclr, #(8 | 2)
        .endm

/*
 * SMP data memory barrier
 */
        .macro  smp_dmb, opt
        dmb     \opt
        .endm

/*
 * Emit an entry into the exception table
 */
        .macro          _asm_extable, from, to
        .pushsection    __ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
        .endm

#define USER(l, x...)                           \
9999:   x;                                      \
        _asm_extable    9999b, l
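
/*
 * Illustrative use of USER() (a sketch; the fixup label and registers
 * are hypothetical): a fault taken on the wrapped instruction is
 * redirected via the exception table to the supplied label.
 *
 * USER(9f, ldrb w4, [x1])              // faulting load resumes at 9f
 *      ...
 * 9:   mov     x0, #-EFAULT            // fixup path
 */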

/*
 * Register aliases.
 */
lr      .req    x30             // link register

/*
 * Vector entry
 */
        .macro  ventry  label
        .align  7
        b       \label
        .endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
        .macro  regs_to_64, rd, lbits, hbits
#else
        .macro  regs_to_64, rd, hbits, lbits
#endif
        orr     \rd, \lbits, \hbits, lsl #32
        .endm
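
/*
 * Illustrative use (register choice hypothetical): combine a 64-bit
 * value passed as two 32-bit halves, e.g. from a compat syscall. On
 * little endian this computes x0 = (x3 << 32) | x2; on big endian the
 * argument order is swapped to match.
 *
 *      regs_to_64 x0, x2, x3
 */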

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
        /*
         * @dst: destination register (64 bit wide)
         * @sym: name of the symbol
         * @tmp: optional scratch register to be used if <dst> == sp, which
         *       is not allowed in an adrp instruction
         */
        .macro  adr_l, dst, sym, tmp=
        .ifb    \tmp
        adrp    \dst, \sym
        add     \dst, \dst, :lo12:\sym
        .else
        adrp    \tmp, \sym
        add     \dst, \tmp, :lo12:\sym
        .endif
        .endm

        /*
         * @dst: destination register (32 or 64 bit wide)
         * @sym: name of the symbol
         * @tmp: optional 64-bit scratch register to be used if <dst> is a
         *       32-bit wide register, in which case it cannot be used to hold
         *       the address
         */
        .macro  ldr_l, dst, sym, tmp=
        .ifb    \tmp
        adrp    \dst, \sym
        ldr     \dst, [\dst, :lo12:\sym]
        .else
        adrp    \tmp, \sym
        ldr     \dst, [\tmp, :lo12:\sym]
        .endif
        .endm

        /*
         * @src: source register (32 or 64 bit wide)
         * @sym: name of the symbol
         * @tmp: mandatory 64-bit scratch register used to calculate the
         *       address, since <src> must be preserved.
         */
        .macro  str_l, src, sym, tmp
        adrp    \tmp, \sym
        str     \src, [\tmp, :lo12:\sym]
        .endm
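
/*
 * Usage sketch for the three pseudo-ops (symbol names are hypothetical):
 * each expands to an adrp plus a :lo12: offset, giving +/- 4 GB reach
 * without a literal pool.
 *
 *      adr_l   x0, some_table          // x0 = &some_table
 *      ldr_l   x1, some_var            // 64-bit dst needs no tmp
 *      ldr_l   w2, some_flag, x3       // 32-bit dst needs a 64-bit tmp
 *      str_l   x4, some_var, x5        // tmp is always required for stores
 */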

        /*
         * @sym: The name of the per-cpu variable
         * @reg: Result of per_cpu(sym, smp_processor_id())
         * @tmp: scratch register
         */
        .macro this_cpu_ptr, sym, reg, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el1
        add     \reg, \reg, \tmp
        .endm
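
/*
 * A sketch of the computation (variable name hypothetical): TPIDR_EL1
 * holds this CPU's per-cpu offset, so the result is the symbol's base
 * address plus that offset.
 *
 *      this_cpu_ptr some_percpu_var, x0, x1    // x0 = this CPU's instance
 */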

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
        .macro  vma_vm_mm, rd, rn
        ldr     \rd, [\rn, #VMA_VM_MM]
        .endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
        .macro  mmid, rd, rn
        ldr     \rd, [\rn, #MM_CONTEXT_ID]
        .endm

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register.
 */
        .macro  dcache_line_size, reg, tmp
        mrs     \tmp, ctr_el0                   // read CTR
        ubfm    \tmp, \tmp, #16, #19            // cache line size encoding
        mov     \reg, #4                        // bytes per word
        lsl     \reg, \reg, \tmp                // actual cache line size
        .endm

/*
 * icache_line_size - get the minimum I-cache line size from the CTR register.
 */
        .macro  icache_line_size, reg, tmp
        mrs     \tmp, ctr_el0                   // read CTR
        and     \tmp, \tmp, #0xf                // cache line size encoding
        mov     \reg, #4                        // bytes per word
        lsl     \reg, \reg, \tmp                // actual cache line size
        .endm
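
/*
 * Worked example of the decodes above: CTR_EL0.DminLine (bits [19:16])
 * and CTR_EL0.IminLine (bits [3:0]) hold log2 of the line size in
 * 4-byte words, so with DminLine == 4 the D-cache line size comes out
 * as 4 << 4 == 64 bytes, the common case on ARMv8 implementations.
 */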

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
        .macro  tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
        ldr_l   \tmpreg, idmap_t0sz
        bfi     \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
        .endm

/*
 * Macro to perform data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *      op:             operation passed to the dc instruction
 *      domain:         domain used in the dsb instruction
 *      kaddr:          starting virtual address of the region
 *      size:           size of the region
 *      Corrupts:       kaddr, size, tmp1, tmp2
 */
        .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
        dcache_line_size \tmp1, \tmp2
        add     \size, \kaddr, \size
        sub     \tmp2, \tmp1, #1
        bic     \kaddr, \kaddr, \tmp2
9998:
        .if     (\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
        dc      \op, \kaddr
alternative_else
        dc      civac, \kaddr
alternative_endif
        .else
        dc      \op, \kaddr
        .endif
        add     \kaddr, \kaddr, \tmp1
        cmp     \kaddr, \size
        b.lo    9998b
        dsb     \domain
        .endm
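
/*
 * Illustrative invocation (registers hypothetical): clean and
 * invalidate the region [x0, x0 + x1) to the point of coherency, then
 * order the maintenance with a full-system dsb:
 *
 *      dcache_by_line_op civac, sy, x0, x1, x2, x3
 */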

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
        .macro  reset_pmuserenr_el0, tmpreg
        mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
        sbfx    \tmpreg, \tmpreg, #8, #4
        cmp     \tmpreg, #1                     // Skip if no PMU present
        b.lt    9000f
        msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
9000:
        .endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
        .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:   ldp     \t1, \t2, [\src]
        ldp     \t3, \t4, [\src, #16]
        ldp     \t5, \t6, [\src, #32]
        ldp     \t7, \t8, [\src, #48]
        add     \src, \src, #64
        stnp    \t1, \t2, [\dest]
        stnp    \t3, \t4, [\dest, #16]
        stnp    \t5, \t6, [\dest, #32]
        stnp    \t7, \t8, [\dest, #48]
        add     \dest, \dest, #64
        tst     \src, #(PAGE_SIZE - 1)
        b.ne    9998b
        .endm
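
/*
 * Usage sketch (registers hypothetical): both pointers must be
 * PAGE_SIZE aligned, since the loop copies 64 bytes per iteration and
 * stops only when src reaches the next page boundary:
 *
 *      copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */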

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)                    \
        .globl  __pi_##x;               \
        .type   __pi_##x, %function;    \
        .set    __pi_##x, x;            \
        .size   __pi_##x, . - x;        \
        ENDPROC(x)
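
/*
 * For example, ending a routine with ENDPIPROC(memcmp) rather than
 * ENDPROC(memcmp) additionally emits a __pi_memcmp alias of the same
 * size and type, which early boot code can call before the kernel
 * virtual mapping is active.
 */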

        /*
         * Emit a 64-bit absolute little endian symbol reference in a way that
         * ensures that it will be resolved at build time, even when building a
         * PIE binary. This requires cooperation from the linker script, which
         * must emit the lo32/hi32 halves individually.
         */
        .macro  le64sym, sym
        .long   \sym\()_lo32
        .long   \sym\()_hi32
        .endm

        /*
         * mov_q - move an immediate constant into a 64-bit register using
         *         between 2 and 4 movz/movk instructions (depending on the
         *         magnitude and sign of the operand)
         */
        .macro  mov_q, reg, val
        .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
        movz    \reg, :abs_g1_s:\val
        .else
        .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
        movz    \reg, :abs_g2_s:\val
        .else
        movz    \reg, :abs_g3:\val
        movk    \reg, :abs_g2_nc:\val
        .endif
        movk    \reg, :abs_g1_nc:\val
        .endif
        movk    \reg, :abs_g0_nc:\val
        .endm
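
/*
 * Worked example (constants illustrative): a value whose bits [63:31]
 * are all zero or all one takes the two-instruction path, e.g.
 *
 *      mov_q   x0, 0x12345678          // movz + movk
 *
 * while a full 64-bit constant such as 0xffff000012345678 falls through
 * to the four-instruction movz/movk sequence.
 */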

#endif  /* __ASM_ASSEMBLER_H */