/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define IOMEM(x)        (x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
#define lspull          lsl
#define lspush          lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif
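
/*
 * Usage sketch (illustrative, not part of the original header): the
 * get_byte_N and put_byte_N macros are used as the shifter operand of
 * a data-processing instruction, so the same code picks the right byte
 * lane on either endianness.  Registers here are arbitrary examples:
 *
 *      mov     r2, r1, get_byte_1      @ shift byte 1 of r1 down
 *      and     r2, r2, #255            @ mask off the other bytes
 *      orr     r0, r0, r3, put_byte_2  @ merge byte value r3 into lane 2
 */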

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)    code
#else
#define PLD(code...)
#endif
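
/*
 * Illustrative sketch (not from the original source): PLD() lets copy
 * loops prefetch ahead while assembling to nothing on pre-v5 CPUs:
 *
 *      PLD(    pld     [r1, #32]       )
 *      ldmia   r1!, {r3-r6}
 *      stmia   r0!, {r3-r6}
 */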

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set to
 * write-allocate (this would need further testing on XScale when WA is
 * used).
 *
 * On Feroceon, however, there is much to gain regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
        .macro  disable_irq_notrace
        cpsid   i
        .endm

        .macro  enable_irq_notrace
        cpsie   i
        .endm
#else
        .macro  disable_irq_notrace
        msr     cpsr_c, #PSR_I_BIT | SVC_MODE
        .endm

        .macro  enable_irq_notrace
        msr     cpsr_c, #SVC_MODE
        .endm
#endif

        .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
        .if \save
        stmdb   sp!, {r0-r3, ip, lr}
        .endif
        bl      trace_hardirqs_off
        .if \save
        ldmia   sp!, {r0-r3, ip, lr}
        .endif
#endif
        .endm

        .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
        /*
         * Ideally the registers would be pushed and popped conditionally,
         * but after the bl the flags are clobbered in any case.
         */
        .if \save
        stmdb   sp!, {r0-r3, ip, lr}
        .endif
        bl\cond trace_hardirqs_on
        .if \save
        ldmia   sp!, {r0-r3, ip, lr}
        .endif
#endif
        .endm

        .macro disable_irq, save=1
        disable_irq_notrace
        asm_trace_hardirqs_off \save
        .endm

        .macro enable_irq
        asm_trace_hardirqs_on
        enable_irq_notrace
        .endm
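
/*
 * Usage sketch (illustrative only): bracket a short critical section,
 * keeping the irq-tracing hooks informed when lockdep tracing is on:
 *
 *      disable_irq
 *      @ ... code that must not be interrupted ...
 *      enable_irq
 */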

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
        .macro  save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
        mrs     \oldcpsr, primask
#else
        mrs     \oldcpsr, cpsr
#endif
        disable_irq
        .endm

        .macro  save_and_disable_irqs_notrace, oldcpsr
        mrs     \oldcpsr, cpsr
        disable_irq_notrace
        .endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
        .macro  restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
        msr     primask, \oldcpsr
#else
        msr     cpsr_c, \oldcpsr
#endif
        .endm

        .macro restore_irqs, oldcpsr
        tst     \oldcpsr, #PSR_I_BIT
        asm_trace_hardirqs_on cond=eq
        restore_irqs_notrace \oldcpsr
        .endm
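
/*
 * Illustrative pairing (not from the original source): save the IRQ
 * state, do some work with IRQs masked, then put the state back:
 *
 *      save_and_disable_irqs r4
 *      @ ... IRQs masked here ...
 *      restore_irqs r4
 */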

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
        .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
        .macro  badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
        adr\c   \rd, \sym + 1
#else
        adr\c   \rd, \sym
#endif
        .endm
        .endr
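
/*
 * Sketch (illustrative only; "some_routine" is a hypothetical label):
 * badr keeps bit 0 of the address set on Thumb-2 kernels, so the result
 * is safe to use as a return target:
 *
 *      badr    lr, 1f          @ lr = 1f, with the Thumb bit if needed
 *      b       some_routine    @ routine returns via "ret lr" to 1f
 * 1:
 */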

/*
 * Get current thread_info.
 */
        .macro  get_thread_info, rd
 ARM(   mov     \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT    )
 THUMB( mov     \rd, sp                 )
 THUMB( lsr     \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT       )
        mov     \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
        .endm
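
/*
 * The macro above rounds sp down to the THREAD_SIZE boundary, where the
 * thread_info lives.  A hedged usage sketch (registers are examples):
 *
 *      get_thread_info r9
 *      ldr     r0, [r9, #TI_FLAGS]     @ fetch current thread flags
 */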

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
        .macro  inc_preempt_count, ti, tmp
        ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
        add     \tmp, \tmp, #1                  @ increment it
        str     \tmp, [\ti, #TI_PREEMPT]
        .endm

        .macro  dec_preempt_count, ti, tmp
        ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
        sub     \tmp, \tmp, #1                  @ decrement it
        str     \tmp, [\ti, #TI_PREEMPT]
        .endm

        .macro  dec_preempt_count_ti, ti, tmp
        get_thread_info \ti
        dec_preempt_count \ti, \tmp
        .endm
#else
        .macro  inc_preempt_count, ti, tmp
        .endm

        .macro  dec_preempt_count, ti, tmp
        .endm

        .macro  dec_preempt_count_ti, ti, tmp
        .endm
#endif
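
/*
 * Illustrative sketch (not from the original source): with \ti already
 * pointing at thread_info, a section can be made non-preemptible:
 *
 *      inc_preempt_count r7, r3
 *      @ ... preemption disabled ...
 *      dec_preempt_count r7, r3
 */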

#define USER(x...)                              \
9999:   x;                                      \
        .pushsection __ex_table,"a";            \
        .align  3;                              \
        .long   9999b,9001f;                    \
        .popsection
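
/*
 * Usage sketch (illustrative only): wrap a user-space access so a fault
 * branches to the local fixup label 9001, which the caller must define:
 *
 * USER(        ldrt    r0, [r1]        )       @ may fault
 *      ...
 * 9001:        mov     r0, #-EFAULT            @ fault handler
 */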

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)                                       \
9998:   instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)                                        \
        .pushsection ".alt.smp.init", "a"                       ;\
        .long   9998b                                           ;\
9997:   instr                                                   ;\
        .if . - 9997b == 2                                      ;\
                nop                                             ;\
        .endif                                                  ;\
        .if . - 9997b != 4                                      ;\
                .error "ALT_UP() content must assemble to exactly 4 bytes";\
        .endif                                                  ;\
        .popsection
#define ALT_UP_B(label)                                 \
        .equ    up_b_offset, label - 9998b                      ;\
        .pushsection ".alt.smp.init", "a"                       ;\
        .long   9998b                                           ;\
        W(b)    . + up_b_offset                                 ;\
        .popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
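
/*
 * Sketch (illustrative only): on SMP kernels the ALT_SMP() instruction
 * is emitted inline and ALT_UP() records a replacement that is patched
 * in when the kernel finds itself booting on a uniprocessor system:
 *
 *      ALT_SMP(dmb     ish)
 *      ALT_UP(nop)
 */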

/*
 * Instruction barrier
 */
        .macro  instr_sync
#if __LINUX_ARM_ARCH__ >= 7
        isb
#elif __LINUX_ARM_ARCH__ == 6
        mcr     p15, 0, r0, c7, c5, 4
#endif
        .endm

/*
 * SMP data memory barrier
 */
        .macro  smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
        .ifeqs "\mode","arm"
        ALT_SMP(dmb     ish)
        .else
        ALT_SMP(W(dmb)  ish)
        .endif
#elif __LINUX_ARM_ARCH__ == 6
        ALT_SMP(mcr     p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
        .ifeqs "\mode","arm"
        ALT_UP(nop)
        .else
        ALT_UP(W(nop))
        .endif
#endif
        .endm
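
/*
 * Usage sketch (illustrative only): callers pass "arm" when assembling
 * ARM-only code, so the barrier gets the right encoding width:
 *
 *      smp_dmb arm             @ in ARM-only code
 *      smp_dmb                 @ in code that may assemble as Thumb-2
 */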

#if defined(CONFIG_CPU_V7M)
        /*
         * setmode is used to assert that the CPU is in SVC mode during
         * boot.  For v7-M this is done in __v7m_setup, so setmode can be
         * empty here.
         */
        .macro  setmode, mode, reg
        .endm
#elif defined(CONFIG_THUMB2_KERNEL)
        .macro  setmode, mode, reg
        mov     \reg, #\mode
        msr     cpsr_c, \reg
        .endm
#else
        .macro  setmode, mode, reg
        msr     cpsr_c, #\mode
        .endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
        mrs     \reg, cpsr
        eor     \reg, \reg, #HYP_MODE
        tst     \reg, #MODE_MASK
        bic     \reg, \reg, #MODE_MASK
        orr     \reg, \reg, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(  orr     \reg, \reg, #PSR_T_BIT  )
        bne     1f
        orr     \reg, \reg, #PSR_A_BIT
        badr    lr, 2f
        msr     spsr_cxsf, \reg
        __MSR_ELR_HYP(14)
        __ERET
1:      msr     cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
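
/*
 * Illustrative sketch (not from the original source): typical boot-time
 * use, before any mode-dependent state is touched:
 *
 *      safe_svcmode_maskall r9 @ now in SVC mode, IRQs/FIQs masked
 */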

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

        .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
        .if     \inc == 1
        \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
        .elseif \inc == 4
        \instr\cond\()\t\().w \reg, [\ptr, #\off]
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endm

        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort
        @ explicit IT instruction needed because of the label
        @ introduced by the USER macro
        .ifnc   \cond,al
        .if     \rept == 1
        itt     \cond
        .elseif \rept == 2
        ittt    \cond
        .else
        .error  "Unsupported rept macro argument"
        .endif
        .endif

        @ Slightly optimised to avoid incrementing the pointer twice
        usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
        .if     \rept == 2
        usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
        .endif

        add\cond \ptr, #\rept * \inc
        .endm

#else   /* !CONFIG_THUMB2_KERNEL */

        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
        .rept   \rept
9999:
        .if     \inc == 1
        \instr\cond\()b\()\t \reg, [\ptr], #\inc
        .elseif \inc == 4
        \instr\cond\()\t \reg, [\ptr], #\inc
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endr
        .endm

#endif  /* CONFIG_THUMB2_KERNEL */

        .macro  strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  str, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm

        .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm
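
/*
 * Usage sketch (illustrative only): store one word to user space with a
 * post-increment, faulting through to the caller-defined 9001 label:
 *
 *      strusr  r3, r0, 4               @ *r0++ = r3, user-mode access
 *      ...
 * 9001:        mov     r0, #-EFAULT
 */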

/* Utility macro for declaring string literals */
        .macro  string name:req, string
        .type \name , #object
\name:
        .asciz "\string"
        .size \name , . - \name
        .endm
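
/*
 * Sketch (illustrative only; the symbol name is hypothetical): emits a
 * named, sized, NUL-terminated string object:
 *
 *      string  cpu_name_str, "generic armv7"
 */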

        .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
        adds    \tmp, \addr, #\size - 1
        sbcccs  \tmp, \tmp, \limit
        bcs     \bad
#endif
        .endm
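
/*
 * The adds/sbcccs pair above validates the whole range in two
 * instructions: if the add wraps, the subtract is skipped and carry
 * stays set; otherwise carry ends up set only when the last byte of
 * the access runs past \limit.  Hedged usage sketch (registers and
 * target label are examples):
 *
 *      check_uaccess r0, 4, r1, r2, __put_user_bad
 */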

        .macro  uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /*
         * Whenever we re-enter userspace, the domains should always be
         * set appropriately.
         */
        mov     \tmp, #DACR_UACCESS_DISABLE
        mcr     p15, 0, \tmp, c3, c0, 0         @ Set domain register
        .if     \isb
        instr_sync
        .endif
#endif
        .endm

        .macro  uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /*
         * Whenever we re-enter userspace, the domains should always be
         * set appropriately.
         */
        mov     \tmp, #DACR_UACCESS_ENABLE
        mcr     p15, 0, \tmp, c3, c0, 0
        .if     \isb
        instr_sync
        .endif
#endif
        .endm
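
/*
 * Usage sketch (illustrative only): open the smallest possible window
 * for user-space accesses under software PAN emulation:
 *
 *      uaccess_enable r3
 * USER(        ldrt    r0, [r1]        )
 *      uaccess_disable r3
 */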

        .macro  uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        mrc     p15, 0, \tmp, c3, c0, 0
        str     \tmp, [sp, #S_FRAME_SIZE]
#endif
        .endm

        .macro  uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        ldr     r0, [sp, #S_FRAME_SIZE]
        mcr     p15, 0, r0, c3, c0, 0
#endif
        .endm

        .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
        .macro  ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
        mov\c   pc, \reg
#else
        .ifeqs  "\reg", "lr"
        bx\c    \reg
        .else
        mov\c   pc, \reg
        .endif
#endif
        .endm
        .endr

        .macro  ret.w, reg
        ret     \reg
#ifdef CONFIG_THUMB2_KERNEL
        nop
#endif
        .endm
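
/*
 * Sketch (illustrative only): ret expands to the best return for the
 * target architecture, including conditional forms from the .irp list:
 *
 *      ret     lr              @ bx lr on v6+, mov pc, lr before that
 *      reteq   lr              @ conditional variant
 */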

#endif /* __ASM_ASSEMBLER_H__ */