linux/arch/arm/include/asm/assembler.h
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains ARM architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>

#define IOMEM(x)        (x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
#define pull            lsl
#define push            lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif

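/*
 * Usage sketch (illustrative, not from the original header): the
 * get_byte_N macros supply the shift operand, so the byte at memory
 * offset 1 of a word loaded into r0 can be extracted into r1 on
 * either endianness with:
 *
 *      mov     r1, r0, get_byte_1
 *      and     r1, r1, #255
 */
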
/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)    code
#else
#define PLD(code...)
#endif

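/*
 * Usage sketch (illustrative): prefetch ahead of the source pointer in
 * a copy loop; this assembles to nothing on pre-v5 cores:
 *
 *      PLD(    pld     [r1, #32]       )
 */
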
/*
 * This can be used to enable code that cacheline-aligns the destination
 * pointer when bulk-writing to memory.  Experiments on StrongARM and
 * XScale did not show this to be worthwhile when the cache is not set
 * to write-allocate (this would need further testing on XScale with
 * write-allocate enabled).
 *
 * On Feroceon, however, there is much to gain regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

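/*
 * Usage sketch (illustrative): wrap destination-alignment code in a
 * copy loop so that it is only assembled on Feroceon builds:
 *
 *      CALGN(  ands    ip, r0, #31     )
 *      CALGN(  rsb     ip, ip, #32     )
 */
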
/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
        .macro  disable_irq_notrace
        cpsid   i
        .endm

        .macro  enable_irq_notrace
        cpsie   i
        .endm
#else
        .macro  disable_irq_notrace
        msr     cpsr_c, #PSR_I_BIT | SVC_MODE
        .endm

        .macro  enable_irq_notrace
        msr     cpsr_c, #SVC_MODE
        .endm
#endif

        .macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
        stmdb   sp!, {r0-r3, ip, lr}
        bl      trace_hardirqs_off
        ldmia   sp!, {r0-r3, ip, lr}
#endif
        .endm

        .macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
        /*
         * Ideally the registers would be pushed and popped conditionally,
         * but the flags are certainly clobbered after the bl anyway.
         */
        stmdb   sp!, {r0-r3, ip, lr}
        bl\cond trace_hardirqs_on
        ldmia   sp!, {r0-r3, ip, lr}
#endif
        .endm

        .macro asm_trace_hardirqs_on
        asm_trace_hardirqs_on_cond al
        .endm

        .macro disable_irq
        disable_irq_notrace
        asm_trace_hardirqs_off
        .endm

        .macro enable_irq
        asm_trace_hardirqs_on
        enable_irq_notrace
        .endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
        .macro  save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
        mrs     \oldcpsr, primask
#else
        mrs     \oldcpsr, cpsr
#endif
        disable_irq
        .endm

        .macro  save_and_disable_irqs_notrace, oldcpsr
        mrs     \oldcpsr, cpsr
        disable_irq_notrace
        .endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
        .macro  restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
        msr     primask, \oldcpsr
#else
        msr     cpsr_c, \oldcpsr
#endif
        .endm

        .macro restore_irqs, oldcpsr
        tst     \oldcpsr, #PSR_I_BIT
        asm_trace_hardirqs_on_cond eq
        restore_irqs_notrace \oldcpsr
        .endm

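/*
 * Usage sketch (illustrative): bracket a critical section with the
 * helpers above, using r2 as the scratch register for the saved state:
 *
 *      save_and_disable_irqs r2
 *      @ ... code that must not be interrupted ...
 *      restore_irqs r2
 */
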
#define USER(x...)                              \
9999:   x;                                      \
        .pushsection __ex_table,"a";            \
        .align  3;                              \
        .long   9999b,9001f;                    \
        .popsection

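/*
 * Usage sketch (illustrative): wrap a user-space access so that a
 * fault redirects execution to a caller-provided 9001 fixup label:
 *
 * USER(        ldrt    r0, [r1]        )
 *      ...
 * 9001:        @ caller-provided fault fixup
 */
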
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)                                       \
9998:   instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)                                        \
        .pushsection ".alt.smp.init", "a"                       ;\
        .long   9998b                                           ;\
9997:   instr                                                   ;\
        .if . - 9997b != 4                                      ;\
                .error "ALT_UP() content must assemble to exactly 4 bytes";\
        .endif                                                  ;\
        .popsection
#define ALT_UP_B(label)                                 \
        .equ    up_b_offset, label - 9998b                      ;\
        .pushsection ".alt.smp.init", "a"                       ;\
        .long   9998b                                           ;\
        W(b)    . + up_b_offset                                 ;\
        .popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

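/*
 * Usage sketch (illustrative): emit a barrier on SMP kernels that the
 * boot code patches to a nop when it finds itself running on a
 * uniprocessor system (this is how smp_dmb below uses these macros):
 *
 *      ALT_SMP(dmb)
 *      ALT_UP(nop)
 */
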
/*
 * Instruction barrier
 */
        .macro  instr_sync
#if __LINUX_ARM_ARCH__ >= 7
        isb
#elif __LINUX_ARM_ARCH__ == 6
        mcr     p15, 0, r0, c7, c5, 4
#endif
        .endm

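/*
 * Usage sketch (illustrative, hypothetical CP15 write): flush the
 * pipeline so a system register update takes effect before the next
 * instruction executes:
 *
 *      mcr     p15, 0, r0, c1, c0, 0   @ e.g. an SCTLR update
 *      instr_sync
 */
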
/*
 * SMP data memory barrier
 */
        .macro  smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
        .ifeqs "\mode","arm"
        ALT_SMP(dmb)
        .else
        ALT_SMP(W(dmb))
        .endif
#elif __LINUX_ARM_ARCH__ == 6
        ALT_SMP(mcr     p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
        .ifeqs "\mode","arm"
        ALT_UP(nop)
        .else
        ALT_UP(W(nop))
        .endif
#endif
        .endm

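/*
 * Usage sketch (illustrative): order memory accesses around a lock
 * handover in ARM-encoded code; on UP kernels this patches to a nop:
 *
 *      smp_dmb arm
 */
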
#if defined(CONFIG_CPU_V7M)
        /*
         * setmode is used to assert that the CPU is in SVC mode during
         * boot. For v7-M this is done in __v7m_setup, so setmode can be
         * empty here.
         */
        .macro  setmode, mode, reg
        .endm
#elif defined(CONFIG_THUMB2_KERNEL)
        .macro  setmode, mode, reg
        mov     \reg, #\mode
        msr     cpsr_c, \reg
        .endm
#else
        .macro  setmode, mode, reg
        msr     cpsr_c, #\mode
        .endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time;
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
        mrs     \reg , cpsr
        eor     \reg, \reg, #HYP_MODE
        tst     \reg, #MODE_MASK
        bic     \reg , \reg , #MODE_MASK
        orr     \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(  orr     \reg , \reg , #PSR_T_BIT        )
        bne     1f
        orr     \reg, \reg, #PSR_A_BIT
        adr     lr, BSYM(2f)
        msr     spsr_cxsf, \reg
        __MSR_ELR_HYP(14)
        __ERET
1:      msr     cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

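/*
 * Usage sketch (illustrative): early boot code calls this with any
 * spare register before relying on SVC-mode state:
 *
 *      safe_svcmode_maskall r9
 */
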
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

        .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
        .if     \inc == 1
        \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
        .elseif \inc == 4
        \instr\cond\()\t\().w \reg, [\ptr, #\off]
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endm

        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort
        @ explicit IT instruction needed because of the label
        @ introduced by the USER macro
        .ifnc   \cond,al
        .if     \rept == 1
        itt     \cond
        .elseif \rept == 2
        ittt    \cond
        .else
        .error  "Unsupported rept macro argument"
        .endif
        .endif

        @ Slightly optimised to avoid incrementing the pointer twice
        usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
        .if     \rept == 2
        usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
        .endif

        add\cond \ptr, #\rept * \inc
        .endm

#else   /* !CONFIG_THUMB2_KERNEL */

        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
        .rept   \rept
9999:
        .if     \inc == 1
        \instr\cond\()b\()\t \reg, [\ptr], #\inc
        .elseif \inc == 4
        \instr\cond\()\t \reg, [\ptr], #\inc
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endr
        .endm

#endif  /* CONFIG_THUMB2_KERNEL */

        .macro  strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  str, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm

        .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm

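/*
 * Usage sketch (illustrative): store one word to a user pointer in r0
 * with a post-increment of 4, branching to the default 9001 fixup
 * label on a fault:
 *
 *      strusr  r3, r0, 4
 */
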
/* Utility macro for declaring string literals */
        .macro  string name:req, string
        .type \name , #object
\name:
        .asciz "\string"
        .size \name , . - \name
        .endm

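/*
 * Usage sketch (illustrative, hypothetical symbol name):
 *
 *      string  cpu_elf_name, "v7"
 *
 * expands to a NUL-terminated object with correct type and size.
 */
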
        .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
        adds    \tmp, \addr, #\size - 1
        sbcccs  \tmp, \tmp, \limit
        bcs     \bad
#endif
        .endm

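/*
 * Usage sketch (illustrative, hypothetical fault label): verify that a
 * 4-byte access at the address in r0 stays within the limit held in
 * r1, using r2 as a scratch register:
 *
 *      check_uaccess r0, 4, r1, r2, .Lfault
 */
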
#endif /* __ASM_ASSEMBLER_H__ */