/* linux/arch/arm/include/asm/assembler.h */
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 *  arch/arm/include/asm/assembler.h
   4 *
   5 *  Copyright (C) 1996-2000 Russell King
   6 *
   7 *  This file contains arm architecture specific defines
   8 *  for the different processors.
   9 *
  10 *  Do not include any C declarations in this file - it is included by
  11 *  assembler source.
  12 */
  13#ifndef __ASM_ASSEMBLER_H__
  14#define __ASM_ASSEMBLER_H__
  15
  16#ifndef __ASSEMBLY__
  17#error "Only include this from assembly code"
  18#endif
  19
  20#include <asm/ptrace.h>
  21#include <asm/opcodes-virt.h>
  22#include <asm/asm-offsets.h>
  23#include <asm/page.h>
  24#include <asm/thread_info.h>
  25#include <asm/uaccess-asm.h>
  26
/* Identity wrapper in assembly; presumably mirrors a C-side IOMEM()
 * cast so shared register definitions work here — confirm against the
 * C headers. */
#define IOMEM(x)        (x)
  28
/*
 * Endian independent macros for shifting bytes within registers.
 *
 * lspull/lspush name the shift directions used to combine two adjacent
 * words during misaligned word assembly; get_byte_N/put_byte_N give the
 * shift operation that moves memory byte N to/from the least
 * significant byte of a register, regardless of CPU endianness.
 */
#ifndef __ARMEB__
/* Little-endian: memory byte 0 is register bits [7:0] */
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
/* Big-endian: memory byte 0 is register bits [31:24] */
#define lspull          lsl
#define lspush          lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif
  55
/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it; on older cores the
 * wrapped code is simply omitted.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)    code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

/* Mask for a 12-bit immediate/offset field */
#define IMM12_MASK 0xfff
  88
/*
 * Enable and disable interrupts (IRQ mask only; FIQs are not touched)
 */
#if __LINUX_ARM_ARCH__ >= 6
        .macro  disable_irq_notrace
        cpsid   i                       @ mask IRQs
        .endm

        .macro  enable_irq_notrace
        cpsie   i                       @ unmask IRQs
        .endm
#else
        /*
         * Pre-v6 cores lack CPS; rewrite the CPSR control field instead.
         * This assumes SVC mode with FIQs enabled (see the comment on
         * save_and_disable_irqs below).
         */
        .macro  disable_irq_notrace
        msr     cpsr_c, #PSR_I_BIT | SVC_MODE
        .endm

        .macro  enable_irq_notrace
        msr     cpsr_c, #SVC_MODE
        .endm
#endif

/*
 * Pre-v7 cores have no dsb/isb instructions: provide them as the
 * equivalent CP15 c7 operations.  The value in r0 is not meaningful to
 * these operations, and \args merely swallows any option (e.g. "sy")
 * that a caller may pass; it is never referenced.
 */
#if __LINUX_ARM_ARCH__ < 7
        .macro  dsb, args
        mcr     p15, 0, r0, c7, c10, 4  @ CP15 data synchronization barrier
        .endm

        .macro  isb, args
        mcr     p15, 0, r0, c7, c5, 4   @ CP15 instruction synchronization barrier
        .endm
#endif
 119
        /*
         * Call trace_hardirqs_off() when CONFIG_TRACE_IRQFLAGS is set;
         * expands to nothing otherwise.  \save selects whether the
         * caller-clobbered registers (r0-r3, ip, lr) are preserved
         * around the call.
         */
        .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
        .if \save
        stmdb   sp!, {r0-r3, ip, lr}
        .endif
        bl      trace_hardirqs_off
        .if \save
        ldmia   sp!, {r0-r3, ip, lr}
        .endif
#endif
        .endm

        /*
         * As above, for trace_hardirqs_on().  \cond makes the call
         * conditional (used by restore_irqs).
         */
        .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
        /*
         * actually the registers should be pushed and pop'd conditionally, but
         * after bl the flags are certainly clobbered
         */
        .if \save
        stmdb   sp!, {r0-r3, ip, lr}
        .endif
        bl\cond trace_hardirqs_on
        .if \save
        ldmia   sp!, {r0-r3, ip, lr}
        .endif
#endif
        .endm

        /* Disable IRQs, then tell the tracer (so tracing runs with IRQs
         * already masked). */
        .macro disable_irq, save=1
        disable_irq_notrace
        asm_trace_hardirqs_off \save
        .endm

        /* Tell the tracer first, then enable IRQs. */
        .macro enable_irq
        asm_trace_hardirqs_on
        enable_irq_notrace
        .endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
        .macro  save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
        mrs     \oldcpsr, primask       @ v7-M has no CPSR; use PRIMASK
#else
        mrs     \oldcpsr, cpsr
#endif
        disable_irq
        .endm

        /* As save_and_disable_irqs, but without the IRQ-tracing hook. */
        .macro  save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
        mrs     \oldcpsr, primask
#else
        mrs     \oldcpsr, cpsr
#endif
        disable_irq_notrace
        .endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
        .macro  restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
        msr     primask, \oldcpsr
#else
        msr     cpsr_c, \oldcpsr        @ control field only (mode + masks)
#endif
        .endm

        /* Restore IRQ state; if this re-enables IRQs (PSR_I_BIT clear in
         * \oldcpsr, i.e. 'eq' after the tst), tell the tracer first. */
        .macro restore_irqs, oldcpsr
        tst     \oldcpsr, #PSR_I_BIT
        asm_trace_hardirqs_on cond=eq
        restore_irqs_notrace \oldcpsr
        .endm
 196
/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 *
 * One badr\c variant is generated per condition-code suffix.  On
 * Thumb-2 kernels bit 0 of the result is set so the value is a valid
 * interworking (Thumb) code address.
 */
        .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
        .macro  badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
        adr\c   \rd, \sym + 1           @ +1: set the Thumb bit
#else
        adr\c   \rd, \sym
#endif
        .endm
        .endr
 211
        /*
         * get_current - load the 'current' task pointer into \rd.
         */
        .macro  get_current, rd
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
        mrc     p15, 0, \rd, c13, c0, 3         @ get TPIDRURO register
#else
        /* derive it from the stack via thread_info->task */
        get_thread_info \rd
        ldr     \rd, [\rd, #TI_TASK]
#endif
        .endm

        /*
         * set_current - record \rn as the 'current' task.  A no-op
         * unless the current pointer is kept in TPIDRURO.
         */
        .macro  set_current, rn
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
        mcr     p15, 0, \rn, c13, c0, 3         @ set TPIDRURO register
#endif
        .endm

        /*
         * reload_current - reload TPIDRURO from this CPU's __entry_task
         * slot.  \t1 and \t2 are scratch registers.
         */
        .macro  reload_current, t1:req, t2:req
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
        adr_l   \t1, __entry_task               @ get __entry_task base address
        mrc     p15, 0, \t2, c13, c0, 4         @ get per-CPU offset
        ldr     \t1, [\t1, \t2]                 @ load variable
        mcr     p15, 0, \t1, c13, c0, 3         @ store in TPIDRURO
#endif
        .endm
 235
/*
 * Get current thread_info.
 */
        .macro  get_thread_info, rd
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* thread_info is the first member of struct task_struct */
        get_current \rd
#else
        /*
         * thread_info lives at the base of the THREAD_SIZE-aligned
         * kernel stack: round sp down by clearing the low
         * THREAD_SIZE_ORDER + PAGE_SHIFT bits (shift right then left).
         */
 ARM(   mov     \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT    )
 THUMB( mov     \rd, sp                 )
 THUMB( lsr     \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT       )
        mov     \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
#endif
        .endm
 250
/*
 * Increment/decrement the preempt count.
 *
 * \ti is the thread_info pointer, \tmp a scratch register.  All three
 * macros expand to nothing when CONFIG_PREEMPT_COUNT is disabled.
 */
#ifdef CONFIG_PREEMPT_COUNT
        .macro  inc_preempt_count, ti, tmp
        ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
        add     \tmp, \tmp, #1                  @ increment it
        str     \tmp, [\ti, #TI_PREEMPT]
        .endm

        .macro  dec_preempt_count, ti, tmp
        ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
        sub     \tmp, \tmp, #1                  @ decrement it
        str     \tmp, [\ti, #TI_PREEMPT]
        .endm

        /* As dec_preempt_count, loading thread_info into \ti first. */
        .macro  dec_preempt_count_ti, ti, tmp
        get_thread_info \ti
        dec_preempt_count \ti, \tmp
        .endm
#else
        .macro  inc_preempt_count, ti, tmp
        .endm

        .macro  dec_preempt_count, ti, tmp
        .endm

        .macro  dec_preempt_count_ti, ti, tmp
        .endm
#endif
 281
/*
 * USERL(l, x) - emit the user-space access instruction(s) x together
 * with an __ex_table entry directing any fault at x to the fixup
 * label l.  USER(x) uses the conventional default fixup label 9001f,
 * which the including file must provide.
 */
#define USERL(l, x...)                          \
9999:   x;                                      \
        .pushsection __ex_table,"a";            \
        .align  3;                              \
        .long   9999b,l;                        \
        .popsection

#define USER(x...)      USERL(9001f, x)
 290
/*
 * SMP/UP instruction alternatives.  On CONFIG_SMP kernels, ALT_SMP()
 * emits its instruction inline, and ALT_UP() records a 4-byte
 * replacement in .alt.smp.init (stored as an offset from the SMP
 * instruction) for patching in when the kernel boots on a UP system.
 * A 2-byte (narrow Thumb) UP replacement is padded with a nop; any
 * other size is a build error.  On !SMP kernels the UP code is simply
 * emitted directly.
 */
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)                                       \
9998:   instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)                                        \
        .pushsection ".alt.smp.init", "a"                       ;\
        .align  2                                               ;\
        .long   9998b - .                                       ;\
9997:   instr                                                   ;\
        .if . - 9997b == 2                                      ;\
                nop                                             ;\
        .endif                                                  ;\
        .if . - 9997b != 4                                      ;\
                .error "ALT_UP() content must assemble to exactly 4 bytes";\
        .endif                                                  ;\
        .popsection
#define ALT_UP_B(label)                                 \
        .pushsection ".alt.smp.init", "a"                       ;\
        .align  2                                               ;\
        .long   9998b - .                                       ;\
        W(b)    . + (label - 9998b)                                     ;\
        .popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
 322
/*
 * Instruction barrier: isb on v7+, the CP15 equivalent on v6, and
 * nothing at all on earlier architectures.
 */
        .macro  instr_sync
#if __LINUX_ARM_ARCH__ >= 7
        isb
#elif __LINUX_ARM_ARCH__ == 6
        mcr     p15, 0, r0, c7, c5, 4   @ CP15 instruction barrier (v6 has no isb)
#endif
        .endm
 333
/*
 * SMP data memory barrier.  Emits nothing on !SMP kernels; on SMP
 * kernels the barrier is replaced by a nop when booting on a UP system
 * (via the ALT_SMP/ALT_UP patching above).  \mode selects the "arm"
 * (narrow) encoding vs. the default W() wide encoding for Thumb-2, so
 * that the SMP and UP alternatives are the same size.
 */
        .macro  smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
        .ifeqs "\mode","arm"
        ALT_SMP(dmb     ish)
        .else
        ALT_SMP(W(dmb)  ish)
        .endif
#elif __LINUX_ARM_ARCH__ == 6
        ALT_SMP(mcr     p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
        .ifeqs "\mode","arm"
        ALT_UP(nop)
        .else
        ALT_UP(W(nop))
        .endif
#endif
        .endm
 357
#if defined(CONFIG_CPU_V7M)
        /*
         * setmode is used to assert to be in svc mode during boot. For v7-M
         * this is done in __v7m_setup, so setmode can be empty here.
         */
        .macro  setmode, mode, reg
        .endm
#elif defined(CONFIG_THUMB2_KERNEL)
        /* Thumb-2 msr has no immediate form, so go via \reg. */
        .macro  setmode, mode, reg
        mov     \reg, #\mode
        msr     cpsr_c, \reg
        .endm
#else
        /* ARM mode: immediate msr; \reg is accepted but unused. */
        .macro  setmode, mode, reg
        msr     cpsr_c, #\mode
        .endm
#endif
 375
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
        mrs     \reg , cpsr
        eor     \reg, \reg, #HYP_MODE           @ mode bits become 0 iff in HYP
        tst     \reg, #MODE_MASK                @ eq <=> currently in HYP mode
        bic     \reg , \reg , #MODE_MASK
        orr     \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(  orr     \reg , \reg , #PSR_T_BIT        )
        bne     1f                              @ not HYP: plain CPSR write below
        @ HYP mode: leave via an exception return so the mode change
        @ takes effect; also mask asynchronous aborts.
        orr     \reg, \reg, #PSR_A_BIT
        badr    lr, 2f
        msr     spsr_cxsf, \reg
        __MSR_ELR_HYP(14)
        __ERET
1:      msr     cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
 407
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 *
 * usracc emits \rept (1 or 2) unprivileged user-space accesses of
 * size \inc (1 or 4 bytes) through \ptr, advancing \ptr afterwards,
 * each with an __ex_table entry sending faults to \abort.
 */
#ifdef CONFIG_THUMB2_KERNEL

        /*
         * Thumb-2 worker: the access uses an immediate offset \off and
         * the pointer is advanced separately by the caller (usracc).
         */
        .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
        .if     \inc == 1
        \instr\()b\t\cond\().w \reg, [\ptr, #\off]
        .elseif \inc == 4
        \instr\t\cond\().w \reg, [\ptr, #\off]
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endm

        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort
        @ explicit IT instruction needed because of the label
        @ introduced by the USER macro
        .ifnc   \cond,al
        .if     \rept == 1
        itt     \cond
        .elseif \rept == 2
        ittt    \cond
        .else
        .error  "Unsupported rept macro argument"
        .endif
        .endif

        @ Slightly optimised to avoid incrementing the pointer twice
        usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
        .if     \rept == 2
        usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
        .endif

        add\cond \ptr, #\rept * \inc
        .endm

#else   /* !CONFIG_THUMB2_KERNEL */

        /* ARM mode: the accesses post-index \ptr by \inc directly. */
        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
        .rept   \rept
9999:
        .if     \inc == 1
        \instr\()b\t\cond \reg, [\ptr], #\inc
        .elseif \inc == 4
        \instr\t\cond \reg, [\ptr], #\inc
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endr
        .endm

#endif  /* CONFIG_THUMB2_KERNEL */

        /* Store \reg to user space at [\ptr], advancing \ptr. */
        .macro  strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  str, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm

        /* Load \reg from user space at [\ptr], advancing \ptr. */
        .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm
 480
/* Utility macro for declaring string literals: emits a NUL-terminated
 * string at \name with ELF object type and size attached. */
        .macro  string name:req, string
        .type \name , #object
\name:
        .asciz "\string"
        .size \name , . - \name
        .endm
 488
/*
 * Function-return macros: "ret" plus one variant per condition code.
 * On v6+ a return through lr uses bx for ARM/Thumb interworking;
 * pre-v6 builds (which may lack bx) and non-lr returns use mov pc.
 */
        .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
        .macro  ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
        mov\c   pc, \reg
#else
        .ifeqs  "\reg", "lr"
        bx\c    \reg
        .else
        mov\c   pc, \reg
        .endif
#endif
        .endm
        .endr

        /* "Wide" return: on Thumb-2 a nop pads the sequence to 4 bytes
         * so it matches the ARM encoding size. */
        .macro  ret.w, reg
        ret     \reg
#ifdef CONFIG_THUMB2_KERNEL
        nop
#endif
        .endm
 509
        /*
         * bug - emit a BUG() trap, recording \msg and \line in
         * __bug_table when CONFIG_DEBUG_BUGVERBOSE is enabled.
         */
        .macro  bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:      .inst   0xde02                  @ 16-bit Thumb BUG trap opcode
#else
1:      .inst   0xe7f001f2              @ 32-bit ARM BUG trap opcode
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
        .pushsection .rodata.str, "aMS", %progbits, 1
2:      .asciz  "\msg"
        .popsection
        .pushsection __bug_table, "aw"
        .align  2
        .word   1b, 2b                  @ trap address, message address
        .hword  \line
        .popsection
#endif
        .endm
 527
/*
 * _ASM_NOKPROBE(entry) - add entry to the kprobes blacklist section so
 * it cannot be probed.  Expands to nothing without CONFIG_KPROBES.
 */
#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)                            \
        .pushsection "_kprobe_blacklist", "aw" ;        \
        .balign 4 ;                                     \
        .long entry;                                    \
        .popsection
#else
#define _ASM_NOKPROBE(entry)
#endif
 537
        /*
         * __adldst_l - common worker for adr_l/ldr_l/str_l below:
         * perform \op (add/ldr/str) between \reg and the address of
         * \sym with unlimited range.  \tmp is a scratch register and
         * \c an optional condition-code suffix.
         *
         * The PC-relative offset \sym - .Lpc (with .Lpc set to the PC
         * value seen at the using instruction, i.e. including the
         * pipeline bias) is obtained from an out-of-line literal on
         * pre-v7, or built with a movw/movt pair on v7+.
         */
        .macro          __adldst_l, op, reg, sym, tmp, c
        .if             __LINUX_ARM_ARCH__ < 7
        ldr\c           \tmp, .La\@
        .subsection     1
        .align          2
.La\@:  .long           \sym - .Lpc\@
        .previous
        .else
        .ifnb           \c
 THUMB( ittt            \c                      )
        .endif
        movw\c          \tmp, #:lower16:\sym - .Lpc\@
        movt\c          \tmp, #:upper16:\sym - .Lpc\@
        .endif

#ifndef CONFIG_THUMB2_KERNEL
        .set            .Lpc\@, . + 8                   // PC bias
        .ifc            \op, add
        add\c           \reg, \tmp, pc
        .else
        \op\c           \reg, [pc, \tmp]
        .endif
#else
.Lb\@:  add\c           \tmp, \tmp, pc
        /*
         * In Thumb-2 builds, the PC bias depends on whether we are currently
         * emitting into a .arm or a .thumb section. The size of the add opcode
         * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
         * emitting in ARM mode, so let's use this to account for the bias.
         */
        .set            .Lpc\@, . + (. - .Lb\@)

        .ifnc           \op, add
        \op\c           \reg, [\tmp]
        .endif
#endif
        .endm
 575
 576        /*
 577         * mov_l - move a constant value or [relocated] address into a register
 578         */
 579        .macro          mov_l, dst:req, imm:req
 580        .if             __LINUX_ARM_ARCH__ < 7
 581        ldr             \dst, =\imm
 582        .else
 583        movw            \dst, #:lower16:\imm
 584        movt            \dst, #:upper16:\imm
 585        .endif
 586        .endm
 587
 588        /*
 589         * adr_l - adr pseudo-op with unlimited range
 590         *
 591         * @dst: destination register
 592         * @sym: name of the symbol
 593         * @cond: conditional opcode suffix
 594         */
 595        .macro          adr_l, dst:req, sym:req, cond
 596        __adldst_l      add, \dst, \sym, \dst, \cond
 597        .endm
 598
 599        /*
 600         * ldr_l - ldr <literal> pseudo-op with unlimited range
 601         *
 602         * @dst: destination register
 603         * @sym: name of the symbol
 604         * @cond: conditional opcode suffix
 605         */
 606        .macro          ldr_l, dst:req, sym:req, cond
 607        __adldst_l      ldr, \dst, \sym, \dst, \cond
 608        .endm
 609
 610        /*
 611         * str_l - str <literal> pseudo-op with unlimited range
 612         *
 613         * @src: source register
 614         * @sym: name of the symbol
 615         * @tmp: mandatory scratch register
 616         * @cond: conditional opcode suffix
 617         */
 618        .macro          str_l, src:req, sym:req, tmp:req, cond
 619        __adldst_l      str, \src, \sym, \tmp, \cond
 620        .endm
 621
 622        /*
 623         * rev_l - byte-swap a 32-bit value
 624         *
 625         * @val: source/destination register
 626         * @tmp: scratch register
 627         */
 628        .macro          rev_l, val:req, tmp:req
 629        .if             __LINUX_ARM_ARCH__ < 6
 630        eor             \tmp, \val, \val, ror #16
 631        bic             \tmp, \tmp, #0x00ff0000
 632        mov             \val, \val, ror #8
 633        eor             \val, \val, \tmp, lsr #8
 634        .else
 635        rev             \val, \val
 636        .endif
 637        .endm
 638
 639#endif /* __ASM_ASSEMBLER_H__ */
 640