linux/arch/arm64/include/asm/assembler.h
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */
        .macro  disable_irq
        msr     daifset, #2
        .endm

        .macro  enable_irq
        msr     daifclr, #2
        .endm
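
/*
 * Note (informational): the 4-bit immediate to daifset/daifclr maps to the
 * D, A, I and F bits in that order, so #2 above masks/unmasks only IRQs and
 * #8 below acts only on the debug mask.
 */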

/*
 * Enable and disable debug exceptions.
 */
        .macro  disable_dbg
        msr     daifset, #8
        .endm

        .macro  enable_dbg
        msr     daifclr, #8
        .endm

        .macro  disable_step_tsk, flgs, tmp
        tbz     \flgs, #TIF_SINGLESTEP, 9990f
        mrs     \tmp, mdscr_el1
        bic     \tmp, \tmp, #1
        msr     mdscr_el1, \tmp
        isb     // Synchronise with enable_dbg
9990:
        .endm

        .macro  enable_step_tsk, flgs, tmp
        tbz     \flgs, #TIF_SINGLESTEP, 9990f
        disable_dbg
        mrs     \tmp, mdscr_el1
        orr     \tmp, \tmp, #1
        msr     mdscr_el1, \tmp
9990:
        .endm
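
/*
 * Illustrative use only (the register choices, the 'tsk' alias and the
 * TI_FLAGS offset are assumptions, not part of this file): callers pass the
 * current thread's flags, e.g.
 *
 *      ldr     x1, [tsk, #TI_FLAGS]            // load thread_info flags
 *      disable_step_tsk x1, x2                 // clear MDSCR_EL1.SS if TIF_SINGLESTEP
 *      ...
 *      enable_step_tsk x1, x2                  // re-arm single-step on the way out
 */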

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
        .macro  enable_dbg_and_irq
        msr     daifclr, #(8 | 2)
        .endm

/*
 * SMP data memory barrier
 */
        .macro  smp_dmb, opt
        dmb     \opt
        .endm

/*
 * Emit an entry into the exception table
 */
        .macro          _asm_extable, from, to
        .pushsection    __ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
        .endm

#define USER(l, x...)                           \
9999:   x;                                      \
        _asm_extable    9999b, l
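
/*
 * Illustrative use only (the label and registers below are hypothetical):
 * wrap an instruction that may fault on a user address so the fault handler
 * branches to a local fixup label instead of oopsing, e.g.
 *
 * USER(9f, ldr w1, [x2])                       // extable entry: faulting insn -> 9f
 *      ...
 * 9:                                           // fixup code runs after a fault
 */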

/*
 * Register aliases.
 */
lr      .req    x30             // link register

/*
 * Vector entry
 */
        .macro  ventry  label
        .align  7
        b       \label
        .endm
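
/*
 * Illustrative use only (the label names are hypothetical): each ventry
 * expands to one 128-byte aligned slot of an exception vector table, e.g.
 *
 *      .align  11
 * vectors:
 *      ventry  el1_sync                        // synchronous exception from EL1
 *      ventry  el1_irq                         // IRQ from EL1
 *      ...
 */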

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
        .macro  regs_to_64, rd, lbits, hbits
#else
        .macro  regs_to_64, rd, hbits, lbits
#endif
        orr     \rd, \lbits, \hbits, lsl #32
        .endm
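
/*
 * Illustrative use only (register choices are hypothetical): a 64-bit value
 * handed over as two 32-bit halves can be reassembled with, e.g.
 *
 *      regs_to_64      x2, x0, x1      // LE: x2 = (x1 << 32) | x0
 *
 * assuming the upper 32 bits of x0 are already zero. On a big endian kernel
 * the parameter order is reversed, so the same line picks up the halves the
 * other way around.
 */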

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
        /*
         * @dst: destination register (64 bit wide)
         * @sym: name of the symbol
         * @tmp: optional scratch register to be used if <dst> == sp, which
         *       is not allowed in an adrp instruction
         */
        .macro  adr_l, dst, sym, tmp=
        .ifb    \tmp
        adrp    \dst, \sym
        add     \dst, \dst, :lo12:\sym
        .else
        adrp    \tmp, \sym
        add     \dst, \tmp, :lo12:\sym
        .endif
        .endm

        /*
         * @dst: destination register (32 or 64 bit wide)
         * @sym: name of the symbol
         * @tmp: optional 64-bit scratch register to be used if <dst> is a
         *       32-bit wide register, in which case it cannot be used to hold
         *       the address
         */
        .macro  ldr_l, dst, sym, tmp=
        .ifb    \tmp
        adrp    \dst, \sym
        ldr     \dst, [\dst, :lo12:\sym]
        .else
        adrp    \tmp, \sym
        ldr     \dst, [\tmp, :lo12:\sym]
        .endif
        .endm

        /*
         * @src: source register (32 or 64 bit wide)
         * @sym: name of the symbol
         * @tmp: mandatory 64-bit scratch register to calculate the address
         *       while <src> needs to be preserved.
         */
        .macro  str_l, src, sym, tmp
        adrp    \tmp, \sym
        str     \src, [\tmp, :lo12:\sym]
        .endm
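
        /*
         * Illustrative use only (the symbol 'some_var' and the registers are
         * hypothetical):
         *
         *      adr_l   x0, some_var            // x0 = address of some_var
         *      ldr_l   w1, some_var, x2        // w1 = some_var, x2 is scratch
         *      str_l   w1, some_var, x2        // some_var = w1, x2 is scratch
         */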

        /*
         * @sym: The name of the per-cpu variable
         * @reg: Result of per_cpu(sym, smp_processor_id())
         * @tmp: scratch register
         */
        .macro this_cpu_ptr, sym, reg, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el1
        add     \reg, \reg, \tmp
        .endm
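
        /*
         * Illustrative use only ('some_pcpu_var' and the registers are
         * hypothetical): x0 ends up pointing at this CPU's copy of the
         * variable, using TPIDR_EL1 as the per-cpu offset:
         *
         *      this_cpu_ptr    some_pcpu_var, x0, x1
         */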

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
        .macro  vma_vm_mm, rd, rn
        ldr     \rd, [\rn, #VMA_VM_MM]
        .endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
        .macro  mmid, rd, rn
        ldr     \rd, [\rn, #MM_CONTEXT_ID]
        .endm

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register.
 */
        .macro  dcache_line_size, reg, tmp
        mrs     \tmp, ctr_el0                   // read CTR
        ubfm    \tmp, \tmp, #16, #19            // cache line size encoding
        mov     \reg, #4                        // bytes per word
        lsl     \reg, \reg, \tmp                // actual cache line size
        .endm

/*
 * icache_line_size - get the minimum I-cache line size from the CTR register.
 */
        .macro  icache_line_size, reg, tmp
        mrs     \tmp, ctr_el0                   // read CTR
        and     \tmp, \tmp, #0xf                // cache line size encoding
        mov     \reg, #4                        // bytes per word
        lsl     \reg, \reg, \tmp                // actual cache line size
        .endm
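
/*
 * Worked example: CTR_EL0.DminLine (bits [19:16]) and IminLine (bits [3:0])
 * hold log2 of the line size in 4-byte words, so a field value of 4 gives
 * 4 << 4 = 64 bytes (the value 4 is just an example).
 */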

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
        .macro  tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
        ldr_l   \tmpreg, idmap_t0sz
        bfi     \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
        .endm

/*
 * Macro to perform a data cache maintenance operation on the interval
 * [kaddr, kaddr + size)
 *
 *      op:             operation passed to dc instruction
 *      domain:         domain used in dsb instruction
 *      kaddr:          starting virtual address of the region
 *      size:           size of the region
 *      Corrupts:       kaddr, size, tmp1, tmp2
 */
        .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
        dcache_line_size \tmp1, \tmp2
        add     \size, \kaddr, \size
        sub     \tmp2, \tmp1, #1
        bic     \kaddr, \kaddr, \tmp2
9998:   dc      \op, \kaddr
        add     \kaddr, \kaddr, \tmp1
        cmp     \kaddr, \size
        b.lo    9998b
        dsb     \domain
        .endm
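
/*
 * Illustrative use only (register choices are hypothetical): clean and
 * invalidate a buffer at x0 of length x1 to the point of coherency:
 *
 *      dcache_by_line_op civac, sy, x0, x1, x2, x3
 *
 * Note that x0 and x1 are corrupted by the loop, as documented above.
 */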

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
        .macro  reset_pmuserenr_el0, tmpreg
        mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
        sbfx    \tmpreg, \tmpreg, #8, #4
        cmp     \tmpreg, #1                     // Skip if no PMU present
        b.lt    9000f
        msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
9000:
        .endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
        .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:   ldp     \t1, \t2, [\src]
        ldp     \t3, \t4, [\src, #16]
        ldp     \t5, \t6, [\src, #32]
        ldp     \t7, \t8, [\src, #48]
        add     \src, \src, #64
        stnp    \t1, \t2, [\dest]
        stnp    \t3, \t4, [\dest, #16]
        stnp    \t5, \t6, [\dest, #32]
        stnp    \t7, \t8, [\dest, #48]
        add     \dest, \dest, #64
        tst     \src, #(PAGE_SIZE - 1)
        b.ne    9998b
        .endm
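
/*
 * Illustrative use only (register choices are hypothetical): \src must be
 * PAGE_SIZE aligned on entry, since the loop stops when it reaches the next
 * page boundary; both pointers end up advanced by PAGE_SIZE:
 *
 *      copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */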

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)                    \
        .globl  __pi_##x;               \
        .type   __pi_##x, %function;    \
        .set    __pi_##x, x;            \
        .size   __pi_##x, . - x;        \
        ENDPROC(x)
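
/*
 * Illustrative use only ('memset' stands in for any routine): ending a
 * function with ENDPIPROC(memset) rather than ENDPROC(memset) additionally
 * defines a __pi_memset alias for callers that run before the kernel mapping
 * is up:
 *
 * ENTRY(memset)
 *      ...
 * ENDPIPROC(memset)
 */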

        /*
         * Emit a 64-bit absolute little endian symbol reference in a way that
         * ensures that it will be resolved at build time, even when building a
         * PIE binary. This requires cooperation from the linker script, which
         * must emit the lo32/hi32 halves individually.
         */
        .macro  le64sym, sym
        .long   \sym\()_lo32
        .long   \sym\()_hi32
        .endm

        /*
         * mov_q - move an immediate constant into a 64-bit register using
         *         between 2 and 4 movz/movk instructions (depending on the
         *         magnitude and sign of the operand)
         */
        .macro  mov_q, reg, val
        .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
        movz    \reg, :abs_g1_s:\val
        .else
        .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
        movz    \reg, :abs_g2_s:\val
        .else
        movz    \reg, :abs_g3:\val
        movk    \reg, :abs_g2_nc:\val
        .endif
        movk    \reg, :abs_g1_nc:\val
        .endif
        movk    \reg, :abs_g0_nc:\val
        .endm
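
        /*
         * Illustrative use only (the registers and constants are arbitrary
         * examples):
         *
         *      mov_q   x0, 0x12345678          // 2 instructions: movz + movk
         *      mov_q   x1, 0xffff000012345678  // 4 instructions: movz + 3x movk
         */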

#endif  /* __ASM_ASSEMBLER_H */