linux/arch/arm64/include/asm/assembler.h
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */
        .macro  disable_irq
        msr     daifset, #2
        .endm

        .macro  enable_irq
        msr     daifclr, #2
        .endm

        .macro  save_and_disable_irq, flags
        mrs     \flags, daif
        msr     daifset, #2
        .endm

        .macro  restore_irq, flags
        msr     daif, \flags
        .endm
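
/*
 * Illustrative only (register choice is hypothetical, not part of this
 * header): save_and_disable_irq pairs with restore_irq around a section
 * that must run with IRQs masked, e.g.:
 *
 *      save_and_disable_irq x3         // x3 := DAIF, then mask IRQs
 *      ...                             // code that must not be interrupted
 *      restore_irq x3                  // restore the previous DAIF state
 */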

/*
 * Enable and disable debug exceptions.
 */
        .macro  disable_dbg
        msr     daifset, #8
        .endm

        .macro  enable_dbg
        msr     daifclr, #8
        .endm

        .macro  disable_step_tsk, flgs, tmp
        tbz     \flgs, #TIF_SINGLESTEP, 9990f
        mrs     \tmp, mdscr_el1
        bic     \tmp, \tmp, #1
        msr     mdscr_el1, \tmp
        isb     // Synchronise with enable_dbg
9990:
        .endm

        .macro  enable_step_tsk, flgs, tmp
        tbz     \flgs, #TIF_SINGLESTEP, 9990f
        disable_dbg
        mrs     \tmp, mdscr_el1
        orr     \tmp, \tmp, #1
        msr     mdscr_el1, \tmp
9990:
        .endm
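
/*
 * Illustrative only (register choices are hypothetical): callers load the
 * task's thread flags first and pass them in, e.g.:
 *
 *      ldr     x1, [tsk, #TSK_TI_FLAGS]        // assumes 'tsk' holds current
 *      disable_step_tsk x1, x2                 // clear MDSCR_EL1.SS if TIF_SINGLESTEP set
 */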

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
        .macro  enable_dbg_and_irq
        msr     daifclr, #(8 | 2)
        .endm

/*
 * SMP data memory barrier
 */
        .macro  smp_dmb, opt
        dmb     \opt
        .endm

/*
 * NOP sequence
 */
        .macro  nops, num
        .rept   \num
        nop
        .endr
        .endm

/*
 * Emit an entry into the exception table
 */
        .macro          _asm_extable, from, to
        .pushsection    __ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
        .endm

#define USER(l, x...)                           \
9999:   x;                                      \
        _asm_extable    9999b, l
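
/*
 * Illustrative only (label and operands are hypothetical): USER() tags a
 * single user-space access so that a fault at that instruction resumes at
 * the fixup label recorded in __ex_table, e.g.:
 *
 * USER(9f, strb   wzr, [x0], #1)       // if this store faults ...
 *      ...
 * 9:                                   // ... execution continues here
 */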

/*
 * Register aliases.
 */
lr      .req    x30             // link register

/*
 * Vector entry
 */
        .macro  ventry  label
        .align  7
        b       \label
        .endm
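
/*
 * Illustrative only (handler name is hypothetical): each exception vector
 * slot is 0x80 bytes, so an entry is just an aligned branch, e.g.:
 *
 *      ventry  handle_sync_exception
 */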

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
        .macro  regs_to_64, rd, lbits, hbits
#else
        .macro  regs_to_64, rd, hbits, lbits
#endif
        orr     \rd, \lbits, \hbits, lsl #32
        .endm
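
/*
 * Illustrative only (register choices are hypothetical): the caller passes
 * the two 32-bit halves in source order and the macro picks the low word
 * according to endianness, e.g.:
 *
 *      regs_to_64 x2, x0, x1   // LE: x2 = (x1 << 32) | x0
 *                              // BE: x2 = (x0 << 32) | x1
 */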

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 */
        /*
         * @dst: destination register (64 bit wide)
         * @sym: name of the symbol
         */
        .macro  adr_l, dst, sym
#ifndef MODULE
        adrp    \dst, \sym
        add     \dst, \dst, :lo12:\sym
#else
        movz    \dst, #:abs_g3:\sym
        movk    \dst, #:abs_g2_nc:\sym
        movk    \dst, #:abs_g1_nc:\sym
        movk    \dst, #:abs_g0_nc:\sym
#endif
        .endm
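
        /*
         * Illustrative only (symbol name is hypothetical): load the runtime
         * address of a symbol into a 64-bit register:
         *
         *      adr_l   x0, some_symbol         // x0 = &some_symbol
         */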

        /*
         * @dst: destination register (32 or 64 bit wide)
         * @sym: name of the symbol
         * @tmp: optional 64-bit scratch register to be used if <dst> is a
         *       32-bit wide register, in which case it cannot be used to hold
         *       the address
         */
        .macro  ldr_l, dst, sym, tmp=
#ifndef MODULE
        .ifb    \tmp
        adrp    \dst, \sym
        ldr     \dst, [\dst, :lo12:\sym]
        .else
        adrp    \tmp, \sym
        ldr     \dst, [\tmp, :lo12:\sym]
        .endif
#else
        .ifb    \tmp
        adr_l   \dst, \sym
        ldr     \dst, [\dst]
        .else
        adr_l   \tmp, \sym
        ldr     \dst, [\tmp]
        .endif
#endif
        .endm
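
        /*
         * Illustrative only (symbols and registers are hypothetical):
         *
         *      ldr_l   x0, some_u64_var        // 64-bit dst, no tmp needed
         *      ldr_l   w0, some_u32_var, x1    // 32-bit dst needs a 64-bit tmp
         */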

        /*
         * @src: source register (32 or 64 bit wide)
         * @sym: name of the symbol
         * @tmp: mandatory 64-bit scratch register to calculate the address
         *       while <src> needs to be preserved.
         */
        .macro  str_l, src, sym, tmp
#ifndef MODULE
        adrp    \tmp, \sym
        str     \src, [\tmp, :lo12:\sym]
#else
        adr_l   \tmp, \sym
        str     \src, [\tmp]
#endif
        .endm
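
        /*
         * Illustrative only (symbol and registers are hypothetical):
         *
         *      str_l   x0, some_u64_var, x1    // store x0; x1 is clobbered
         */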

        /*
         * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
         *       non-module code
         * @sym: The name of the per-cpu variable
         * @tmp: scratch register
         */
        .macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
        adrp    \tmp, \sym
        add     \dst, \tmp, #:lo12:\sym
#else
        adr_l   \dst, \sym
#endif
        mrs     \tmp, tpidr_el1
        add     \dst, \dst, \tmp
        .endm

        /*
         * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
         * @sym: The name of the per-cpu variable
         * @tmp: scratch register
         */
        .macro ldr_this_cpu dst, sym, tmp
        adr_l   \dst, \sym
        mrs     \tmp, tpidr_el1
        ldr     \dst, [\dst, \tmp]
        .endm
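
        /*
         * Illustrative only (per-cpu variable names are hypothetical):
         *
         *      adr_this_cpu x0, some_pcpu_var, x1      // x0 = this CPU's &some_pcpu_var
         *      ldr_this_cpu x0, some_pcpu_ptr, x1      // x0 = this CPU's some_pcpu_ptr value
         */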

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
        .macro  vma_vm_mm, rd, rn
        ldr     \rd, [\rn, #VMA_VM_MM]
        .endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
        .macro  mmid, rd, rn
        ldr     \rd, [\rn, #MM_CONTEXT_ID]
        .endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
        .macro  read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
        mrs     \reg, ctr_el0                   // read CTR
        nop
alternative_else
        ldr_l   \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
        .endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
        .macro  raw_dcache_line_size, reg, tmp
        mrs     \tmp, ctr_el0                   // read CTR
        ubfm    \tmp, \tmp, #16, #19            // cache line size encoding
        mov     \reg, #4                        // bytes per word
        lsl     \reg, \reg, \tmp                // actual cache line size
        .endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
        .macro  dcache_line_size, reg, tmp
        read_ctr        \tmp
        ubfm            \tmp, \tmp, #16, #19    // cache line size encoding
        mov             \reg, #4                // bytes per word
        lsl             \reg, \reg, \tmp        // actual cache line size
        .endm
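
/*
 * Worked example (value is illustrative): CTR_EL0.DminLine (bits [19:16])
 * is log2 of the number of 4-byte words in the smallest D-cache line, so
 * for DminLine == 4 the sequence above yields 4 << 4 = 64 bytes.
 */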

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
        .macro  raw_icache_line_size, reg, tmp
        mrs     \tmp, ctr_el0                   // read CTR
        and     \tmp, \tmp, #0xf                // cache line size encoding
        mov     \reg, #4                        // bytes per word
        lsl     \reg, \reg, \tmp                // actual cache line size
        .endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
        .macro  icache_line_size, reg, tmp
        read_ctr        \tmp
        and             \tmp, \tmp, #0xf        // cache line size encoding
        mov             \reg, #4                // bytes per word
        lsl             \reg, \reg, \tmp        // actual cache line size
        .endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
        .macro  tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
        ldr_l   \tmpreg, idmap_t0sz
        bfi     \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
        .endm

/*
 * Macro to perform data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *      op:             operation passed to dc instruction
 *      domain:         domain used in dsb instruction
 *      kaddr:          starting virtual address of the region
 *      size:           size of the region
 *      Corrupts:       kaddr, size, tmp1, tmp2
 */
        .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
        dcache_line_size \tmp1, \tmp2
        add     \size, \kaddr, \size
        sub     \tmp2, \tmp1, #1
        bic     \kaddr, \kaddr, \tmp2
9998:
        .if     (\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
        dc      \op, \kaddr
alternative_else
        dc      civac, \kaddr
alternative_endif
        .elseif (\op == cvap)
alternative_if ARM64_HAS_DCPOP
        sys 3, c7, c12, 1, \kaddr       // dc cvap
alternative_else
        dc      cvac, \kaddr
alternative_endif
        .else
        dc      \op, \kaddr
        .endif
        add     \kaddr, \kaddr, \tmp1
        cmp     \kaddr, \size
        b.lo    9998b
        dsb     \domain
        .endm
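
/*
 * Illustrative only (operation and registers chosen for the example):
 * clean+invalidate the x1 bytes starting at x0 to the PoC; note that x0,
 * x1 and both temporaries are corrupted:
 *
 *      dcache_by_line_op civac, sy, x0, x1, x2, x3
 */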

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
        .macro  reset_pmuserenr_el0, tmpreg
        mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
        sbfx    \tmpreg, \tmpreg, #8, #4
        cmp     \tmpreg, #1                     // Skip if no PMU present
        b.lt    9000f
        msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
9000:
        .endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
        .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:   ldp     \t1, \t2, [\src]
        ldp     \t3, \t4, [\src, #16]
        ldp     \t5, \t6, [\src, #32]
        ldp     \t7, \t8, [\src, #48]
        add     \src, \src, #64
        stnp    \t1, \t2, [\dest]
        stnp    \t3, \t4, [\dest, #16]
        stnp    \t5, \t6, [\dest, #32]
        stnp    \t7, \t8, [\dest, #48]
        add     \dest, \dest, #64
        tst     \src, #(PAGE_SIZE - 1)
        b.ne    9998b
        .endm
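
/*
 * Illustrative only (register allocation is hypothetical): both pointers
 * are assumed page aligned and are advanced by PAGE_SIZE, e.g.:
 *
 *      copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */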

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)                    \
        .globl  __pi_##x;               \
        .type   __pi_##x, %function;    \
        .set    __pi_##x, x;            \
        .size   __pi_##x, . - x;        \
        ENDPROC(x)
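
/*
 * Illustrative only (function name is hypothetical): used in place of
 * ENDPROC() so the routine also gets a __pi_ prefixed alias, e.g.:
 *
 * ENTRY(my_helper)
 *      ...
 * ENDPIPROC(my_helper)                 // also defines __pi_my_helper
 */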

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)                             \
        .pushsection "_kprobe_blacklist", "aw"; \
        .quad   x;                              \
        .popsection;
#else
#define NOKPROBE(x)
#endif
        /*
         * Emit a 64-bit absolute little endian symbol reference in a way that
         * ensures that it will be resolved at build time, even when building a
         * PIE binary. This requires cooperation from the linker script, which
         * must emit the lo32/hi32 halves individually.
         */
        .macro  le64sym, sym
        .long   \sym\()_lo32
        .long   \sym\()_hi32
        .endm

        /*
         * mov_q - move an immediate constant into a 64-bit register using
         *         between 2 and 4 movz/movk instructions (depending on the
         *         magnitude and sign of the operand)
         */
        .macro  mov_q, reg, val
        .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
        movz    \reg, :abs_g1_s:\val
        .else
        .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
        movz    \reg, :abs_g2_s:\val
        .else
        movz    \reg, :abs_g3:\val
        movk    \reg, :abs_g2_nc:\val
        .endif
        movk    \reg, :abs_g1_nc:\val
        .endif
        movk    \reg, :abs_g0_nc:\val
        .endm
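
        /*
         * Illustrative only (constants chosen for the example):
         *
         *      mov_q   x0, 0x0000000012345678  // expands to movz + movk
         *      mov_q   x1, 0xffff0000ffff0000  // needs movz + 3x movk
         */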

/*
 * Return the current thread_info.
 */
        .macro  get_thread_info, rd
        mrs     \rd, sp_el0
        .endm

/*
 * Errata workaround prior to TTBR0_EL1 update
 *
 *      val:    TTBR value with new BADDR, preserved
 *      tmp0:   temporary register, clobbered
 *      tmp1:   other temporary register, clobbered
 */
        .macro  pre_ttbr0_update_workaround, val, tmp0, tmp1
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
        mrs     \tmp0, ttbr0_el1
        mov     \tmp1, #FALKOR_RESERVED_ASID
        bfi     \tmp0, \tmp1, #48, #16          // reserved ASID + old BADDR
        msr     ttbr0_el1, \tmp0
        isb
        bfi     \tmp0, \val, #0, #48            // reserved ASID + new BADDR
        msr     ttbr0_el1, \tmp0
        isb
alternative_else_nop_endif
#endif
        .endm

/*
 * Errata workaround post TTBR0_EL1 update.
 */
        .macro  post_ttbr0_update_workaround
#ifdef CONFIG_CAVIUM_ERRATUM_27456
alternative_if ARM64_WORKAROUND_CAVIUM_27456
        ic      iallu
        dsb     nsh
        isb
alternative_else_nop_endif
#endif
        .endm

#endif  /* __ASM_ASSEMBLER_H */