/* linux/arch/arm/include/asm/assembler.h */
   1/*
   2 *  arch/arm/include/asm/assembler.h
   3 *
   4 *  Copyright (C) 1996-2000 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 *  This file contains arm architecture specific defines
  11 *  for the different processors.
  12 *
  13 *  Do not include any C declarations in this file - it is included by
  14 *  assembler source.
  15 */
  16#ifndef __ASM_ASSEMBLER_H__
  17#define __ASM_ASSEMBLER_H__
  18
  19#ifndef __ASSEMBLY__
  20#error "Only include this from assembly code"
  21#endif
  22
  23#include <asm/ptrace.h>
  24#include <asm/domain.h>
  25#include <asm/opcodes-virt.h>
  26
  27#define IOMEM(x)        (x)
  28
   29/*
   30 * Endian independent macros for shifting bytes within registers.
   31 */
/*
 * "pull"/"push" name the shift that moves data toward / away from the
 * least-significant end in *memory* order, so copy loops can be written
 * once for both byte orders.  get_byte_N / put_byte_N position byte N of
 * a word, where N = 0 is the byte at the lowest address (callers are
 * presumably expected to mask after a get_byte_N -- the macros only
 * shift; TODO confirm against users of these macros).
 */
   32#ifndef __ARMEB__
/* Little-endian: the lowest-addressed byte sits in bits [7:0]. */
   33#define pull            lsr
   34#define push            lsl
   35#define get_byte_0      lsl #0
   36#define get_byte_1      lsr #8
   37#define get_byte_2      lsr #16
   38#define get_byte_3      lsr #24
   39#define put_byte_0      lsl #0
   40#define put_byte_1      lsl #8
   41#define put_byte_2      lsl #16
   42#define put_byte_3      lsl #24
   43#else
/* Big-endian: the lowest-addressed byte sits in bits [31:24]. */
   44#define pull            lsl
   45#define push            lsr
   46#define get_byte_0      lsr #24
   47#define get_byte_1      lsr #16
   48#define get_byte_2      lsr #8
   49#define get_byte_3      lsl #0
   50#define put_byte_0      lsl #24
   51#define put_byte_1      lsl #16
   52#define put_byte_2      lsl #8
   53#define put_byte_3      lsl #0
   54#endif
  55
   56/*
   57 * Data preload for architectures that support it
   58 */
/*
 * PLD(code) emits its argument only on ARMv5 and later, where the pld
 * (preload) instruction exists; on older architectures it expands to
 * nothing, so callers can sprinkle hints unconditionally.
 */
   59#if __LINUX_ARM_ARCH__ >= 5
   60#define PLD(code...)    code
   61#else
   62#define PLD(code...)
   63#endif
  64
   65/*
   66 * This can be used to enable code to cacheline align the destination
   67 * pointer when bulk writing to memory.  Experiments on StrongARM and
   68 * XScale didn't show this a worthwhile thing to do when the cache is not
   69 * set to write-allocate (this would need further testing on XScale when WA
   70 * is used).
   71 *
   72 * On Feroceon there is much to gain however, regardless of cache mode.
   73 */
/* CALGN(code) expands to its argument only on Feroceon builds. */
   74#ifdef CONFIG_CPU_FEROCEON
   75#define CALGN(code...) code
   76#else
   77#define CALGN(code...)
   78#endif
  79
   80/*
   81 * Enable and disable interrupts
   82 */
/*
 * _notrace variants: mask/unmask IRQs without notifying the irq-flags
 * tracer.  Use disable_irq/enable_irq below for the traced versions.
 */
   83#if __LINUX_ARM_ARCH__ >= 6
/* ARMv6+: CPS changes only the I bit -- no CPSR read-modify-write, and
 * the FIQ mask and mode bits are left untouched. */
   84        .macro  disable_irq_notrace
   85        cpsid   i                               @ set CPSR.I (mask IRQs)
   86        .endm
   87
   88        .macro  enable_irq_notrace
   89        cpsie   i                               @ clear CPSR.I (unmask IRQs)
   90        .endm
   91#else
/* Pre-v6: rewrite the CPSR control field wholesale.  This assumes the
 * CPU is in SVC mode with FIQs enabled, since the write sets the entire
 * field (mode, I and F bits) to the immediate value. */
   92        .macro  disable_irq_notrace
   93        msr     cpsr_c, #PSR_I_BIT | SVC_MODE
   94        .endm
   95
   96        .macro  enable_irq_notrace
   97        msr     cpsr_c, #SVC_MODE
   98        .endm
   99#endif
 100
/*
 * Tell the irq-flags tracer that hardirqs were just disabled.
 * Expands to nothing unless CONFIG_TRACE_IRQFLAGS is set.  The AAPCS
 * caller-clobbered registers (r0-r3, ip, lr) are saved around the call
 * into the C tracer so the macro is transparent to surrounding code.
 */
  101        .macro asm_trace_hardirqs_off
  102#if defined(CONFIG_TRACE_IRQFLAGS)
  103        stmdb   sp!, {r0-r3, ip, lr}
  104        bl      trace_hardirqs_off
  105        ldmia   sp!, {r0-r3, ip, lr}
  106#endif
  107        .endm
 108
/*
 * Conditionally tell the irq-flags tracer that hardirqs are being
 * enabled.  \cond is an ARM condition code applied to the bl; the
 * register save/restore is unconditional (see note below).  Expands to
 * nothing unless CONFIG_TRACE_IRQFLAGS is set.  NOTE: the condition
 * flags are clobbered by the bl, so callers must not rely on them
 * surviving this macro.
 */
  109        .macro asm_trace_hardirqs_on_cond, cond
  110#if defined(CONFIG_TRACE_IRQFLAGS)
  111        /*
  112         * actually the registers should be pushed and pop'd conditionally, but
  113         * after bl the flags are certainly clobbered
  114         */
  115        stmdb   sp!, {r0-r3, ip, lr}
  116        bl\cond trace_hardirqs_on
  117        ldmia   sp!, {r0-r3, ip, lr}
  118#endif
  119        .endm
 120
/* Unconditional irqs-on trace: the "al" (always) condition. */
  121        .macro asm_trace_hardirqs_on
  122        asm_trace_hardirqs_on_cond al
  123        .endm
  124
/* Traced IRQ disable: mask first, then record the off transition. */
  125        .macro disable_irq
  126        disable_irq_notrace
  127        asm_trace_hardirqs_off
  128        .endm
  129
/* Traced IRQ enable: record the on transition before unmasking. */
  130        .macro enable_irq
  131        asm_trace_hardirqs_on
  132        enable_irq_notrace
  133        .endm
  134/*
  135 * Save the current IRQ state and disable IRQs.  Note that this macro
  136 * assumes FIQs are enabled, and that the processor is in SVC mode.
  137 */
/* \oldcpsr receives the full CPSR for a later restore_irqs. */
  138        .macro  save_and_disable_irqs, oldcpsr
  139        mrs     \oldcpsr, cpsr
  140        disable_irq
  141        .endm
  142
/* As above, but without notifying the irq-flags tracer. */
  143        .macro  save_and_disable_irqs_notrace, oldcpsr
  144        mrs     \oldcpsr, cpsr
  145        disable_irq_notrace
  146        .endm
 147
  148/*
  149 * Restore interrupt state previously stored in a register.  We don't
  150 * guarantee that this will preserve the flags.
  151 */
/* Untraced restore: writes only the CPSR control field from \oldcpsr. */
  152        .macro  restore_irqs_notrace, oldcpsr
  153        msr     cpsr_c, \oldcpsr
  154        .endm
  155
/* Traced restore: if the saved state had the I bit clear (Z set after
 * the tst, i.e. IRQs are about to be re-enabled), tell the tracer
 * before actually restoring the control field. */
  156        .macro restore_irqs, oldcpsr
  157        tst     \oldcpsr, #PSR_I_BIT
  158        asm_trace_hardirqs_on_cond eq
  159        restore_irqs_notrace \oldcpsr
  160        .endm
 161
/*
 * USER(instr) -- wrap a single (potentially faulting) user-space access
 * instruction.  The instruction is emitted at local label 9999 and a
 * (faulting address, fixup address) pair is recorded in __ex_table so
 * the fault handler can resume at the fixup.  The fixup label 9001 must
 * be defined by the code that uses this macro.
 */
  162#define USER(x...)                              \
  1639999:   x;                                      \
  164        .pushsection __ex_table,"a";            \
  165        .align  3;                              \
  166        .long   9999b,9001f;                    \
  167        .popsection
 168
/*
 * SMP/UP alternatives.  On CONFIG_SMP kernels, ALT_SMP(instr) emits the
 * SMP form of an instruction at local label 9998, and ALT_UP(instr)
 * records in the ".alt.smp.init" section both that address and a
 * 4-byte replacement instruction -- presumably applied at boot when the
 * kernel finds itself on a uniprocessor system (TODO: confirm against
 * the SMP_ON_UP patching code).  ALT_UP_B(label) records a branch to
 * `label` as the replacement instead.  On !SMP kernels only the UP
 * form is assembled.
 */
  169#ifdef CONFIG_SMP
  170#define ALT_SMP(instr...)                                       \
  1719998:   instr
  172/*
  173 * Note: if you get assembler errors from ALT_UP() when building with
  174 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
  175 * ALT_SMP( W(instr) ... )
  176 */
  177#define ALT_UP(instr...)                                        \
  178        .pushsection ".alt.smp.init", "a"                       ;\
  179        .long   9998b                                           ;\
  1809997:   instr                                                   ;\
  181        .if . - 9997b != 4                                      ;\
  182                .error "ALT_UP() content must assemble to exactly 4 bytes";\
  183        .endif                                                  ;\
  184        .popsection
  185#define ALT_UP_B(label)                                 \
  186        .equ    up_b_offset, label - 9998b                      ;\
  187        .pushsection ".alt.smp.init", "a"                       ;\
  188        .long   9998b                                           ;\
  189        W(b)    . + up_b_offset                                 ;\
  190        .popsection
  191#else
  192#define ALT_SMP(instr...)
  193#define ALT_UP(instr...) instr
  194#define ALT_UP_B(label) b label
  195#endif
 196
  197/*
  198 * Instruction barrier
  199 */
/* ARMv7 has a dedicated isb; ARMv6 uses the equivalent CP15 operation
 * (c7, c5, 4 -- flush prefetch buffer).  Pre-v6 needs nothing here. */
  200        .macro  instr_sync
  201#if __LINUX_ARM_ARCH__ >= 7
  202        isb
  203#elif __LINUX_ARM_ARCH__ == 6
  204        mcr     p15, 0, r0, c7, c5, 4
  205#endif
  206        .endm
 207
  208/*
  209 * SMP data memory barrier
  210 */
/*
 * Emit a dmb on SMP kernels, patched to a nop on UP via the ALT_SMP/
 * ALT_UP mechanism above.  \mode selects the encoding: "arm" uses the
 * narrow forms, anything else wraps the instruction in W() so the wide
 * (4-byte) Thumb-2 encoding is used -- the ALT_UP replacement must be
 * exactly 4 bytes.  ARMv6 has no dmb instruction and uses the CP15
 * barrier operation (c7, c10, 5) instead; pre-v6 SMP is unsupported.
 * Expands to nothing on !CONFIG_SMP builds.
 */
  211        .macro  smp_dmb mode
  212#ifdef CONFIG_SMP
  213#if __LINUX_ARM_ARCH__ >= 7
  214        .ifeqs "\mode","arm"
  215        ALT_SMP(dmb)
  216        .else
  217        ALT_SMP(W(dmb))
  218        .endif
  219#elif __LINUX_ARM_ARCH__ == 6
  220        ALT_SMP(mcr     p15, 0, r0, c7, c10, 5) @ dmb
  221#else
  222#error Incompatible SMP platform
  223#endif
  224        .ifeqs "\mode","arm"
  225        ALT_UP(nop)
  226        .else
  227        ALT_UP(W(nop))
  228        .endif
  229#endif
  230        .endm
 231
/*
 * setmode -- write \mode into the CPSR control field, switching
 * processor mode and interrupt masks in one go.  Thumb-2 msr has no
 * immediate form, so the value is staged through the scratch register
 * \reg there; the ARM encoding takes the immediate directly and leaves
 * \reg untouched.
 */
  232#ifdef CONFIG_THUMB2_KERNEL
  233        .macro  setmode, mode, reg
  234        mov     \reg, #\mode
  235        msr     cpsr_c, \reg
  236        .endm
  237#else
  238        .macro  setmode, mode, reg
  239        msr     cpsr_c, #\mode
  240        .endm
  241#endif
 242
  243/*
  244 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
  245 * a scratch register for the macro to overwrite.
  246 *
  247 * This macro is intended for forcing the CPU into SVC mode at boot time.
  248 * you cannot return to the original mode.
  249 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
        mrs     \reg , cpsr
        eor     \reg, \reg, #HYP_MODE           @ mode field becomes 0 iff in HYP
        tst     \reg, #MODE_MASK                @ Z set iff current mode == HYP
        bic     \reg , \reg , #MODE_MASK
        orr     \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(  orr     \reg , \reg , #PSR_T_BIT        )
        bne     1f                              @ not in HYP: plain CPSR write
        @ In HYP mode: leaving HYP requires an exception return.  Also
        @ mask asynchronous aborts, load the SPSR with the target SVC
        @ state and eret to the local label 2 below.
        orr     \reg, \reg, #PSR_A_BIT
        adr     lr, BSYM(2f)
        msr     spsr_cxsf, \reg
        __MSR_ELR_HYP(14)
        __ERET
1:      msr     cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
 274
  275/*
  276 * STRT/LDRT access macros with ARM and Thumb-2 variants
  277 */
/*
 * usracc -- emit \rept user-mode ("t"-suffixed, TUSER()) load/store
 * instructions with post-increment \inc (1 = byte access, 4 = word
 * access), each registered in __ex_table with fixup \abort.  \instr is
 * ldr or str, \cond an ARM condition code.  Thumb-2 has no
 * post-indexed strt/ldrt, so that variant uses immediate offsets via
 * usraccoff and adds the total increment to \ptr afterwards.
 */
  278#ifdef CONFIG_THUMB2_KERNEL
  279
/* One user access at [\ptr, #\off]; .w forces the 32-bit encoding. */
  280        .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
  2819999:
  282        .if     \inc == 1
  283        \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
  284        .elseif \inc == 4
  285        \instr\cond\()\t\().w \reg, [\ptr, #\off]
  286        .else
  287        .error  "Unsupported inc macro argument"
  288        .endif
  289
/* Record the faulting-address/fixup pair for the instruction above. */
  290        .pushsection __ex_table,"a"
  291        .align  3
  292        .long   9999b, \abort
  293        .popsection
  294        .endm
  295
  296        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort
  297        @ explicit IT instruction needed because of the label
  298        @ introduced by the USER macro
/* itt covers access + add (rept 1); ittt covers two accesses + add. */
  299        .ifnc   \cond,al
  300        .if     \rept == 1
  301        itt     \cond
  302        .elseif \rept == 2
  303        ittt    \cond
  304        .else
  305        .error  "Unsupported rept macro argument"
  306        .endif
  307        .endif
  308
  309        @ Slightly optimised to avoid incrementing the pointer twice
  310        usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
  311        .if     \rept == 2
  312        usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
  313        .endif
  314
  315        add\cond \ptr, #\rept * \inc
  316        .endm
  317
  318#else   /* !CONFIG_THUMB2_KERNEL */
  319
/* ARM: post-indexed addressing advances \ptr in the access itself. */
  320        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
  321        .rept   \rept
  3229999:
  323        .if     \inc == 1
  324        \instr\cond\()b\()\t \reg, [\ptr], #\inc
  325        .elseif \inc == 4
  326        \instr\cond\()\t \reg, [\ptr], #\inc
  327        .else
  328        .error  "Unsupported inc macro argument"
  329        .endif
  330
  331        .pushsection __ex_table,"a"
  332        .align  3
  333        .long   9999b, \abort
  334        .popsection
  335        .endr
  336        .endm
  337
  338#endif  /* CONFIG_THUMB2_KERNEL */
 339
/* Convenience wrappers around usracc: a single (by default) store/load
 * to/from user space, defaulting to the USER()-style fixup label 9001,
 * which the including code must define. */
  340        .macro  strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
  341        usracc  str, \reg, \ptr, \inc, \cond, \rept, \abort
  342        .endm
  343
  344        .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
  345        usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
  346        .endm
 347
  348/* Utility macro for declaring string literals */
/* Emits \name as a NUL-terminated string object with correct ELF
 * .type/.size metadata, in whatever section is current. */
  349        .macro  string name:req, string
  350        .type \name , #object
  351\name:
  352        .asciz "\string"
  353        .size \name , . - \name
  354        .endm
 355
/*
 * Range-check a user access: branch to \bad unless
 * [\addr, \addr + \size) lies below \limit.  The adds computes the
 * last byte of the range, setting C on 32-bit wrap; sbcccs only
 * executes when C is clear and effectively performs a 33-bit compare
 * against \limit, so the final bcs is taken either on wrap-around or
 * when the range end is not below the limit.  \tmp is clobbered.
 * Skipped entirely when CONFIG_CPU_USE_DOMAINS provides protection
 * instead.
 */
  356        .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
  357#ifndef CONFIG_CPU_USE_DOMAINS
  358        adds    \tmp, \addr, #\size - 1
  359        sbcccs  \tmp, \tmp, \limit
  360        bcs     \bad
  361#endif
  362        .endm
 363
 364#endif /* __ASM_ASSEMBLER_H__ */
 365