linux/arch/mips/include/asm/stackframe.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/* Make the addition of cfi info a little easier. */
        .macro cfi_rel_offset reg offset=0 docfi=0
        .if \docfi
        .cfi_rel_offset \reg, \offset
        .endif
        .endm

        .macro cfi_st reg offset=0 docfi=0
        LONG_S  \reg, \offset(sp)
        cfi_rel_offset \reg, \offset, \docfi
        .endm

        .macro cfi_restore reg offset=0 docfi=0
        .if \docfi
        .cfi_restore \reg
        .endif
        .endm

        .macro cfi_ld reg offset=0 docfi=0
        LONG_L  \reg, \offset(sp)
        cfi_restore \reg \offset \docfi
        .endm

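/*
 * Each cfi_st/cfi_ld pairs a stack access with the matching DWARF CFI
 * directive, emitted only when the caller passes docfi=1.  E.g.
 * "cfi_st ra, PT_R31, 1" both stores ra at PT_R31(sp) and tells the
 * unwinder where to find it, while "cfi_st ra, PT_R31" is just the
 * store.
 */
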
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
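
/*
 * STATMASK covers the c0_status bits that the mode-switching macros
 * below force to known values: on the R3000 and TX39 the three-deep
 * KUo/IEo/KUp/IEp/KUc/IEc stack in bits 5:0 (0x3f), on later CPUs
 * the KSU/ERL/EXL/IE field in bits 4:0 (0x1f).
 */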

                .macro  SAVE_AT docfi=0
                .set    push
                .set    noat            /* $1 is the assembler temporary */
                cfi_st  $1, PT_R1, \docfi
                .set    pop
                .endm

                .macro  SAVE_TEMP docfi=0
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                mflhxu  v1
                LONG_S  v1, PT_LO(sp)
                mflhxu  v1
                LONG_S  v1, PT_HI(sp)
                mflhxu  v1
                LONG_S  v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
                mfhi    v1
#endif
#ifdef CONFIG_32BIT
                cfi_st  $8, PT_R8, \docfi
                cfi_st  $9, PT_R9, \docfi
#endif
                cfi_st  $10, PT_R10, \docfi
                cfi_st  $11, PT_R11, \docfi
                cfi_st  $12, PT_R12, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_HI(sp)
                mflo    v1
#endif
                cfi_st  $13, PT_R13, \docfi
                cfi_st  $14, PT_R14, \docfi
                cfi_st  $15, PT_R15, \docfi
                cfi_st  $24, PT_R24, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
                /*
                 * The Octeon multiplier state is affected by general
                 * multiply instructions. It must be saved before
                 * kernel code might corrupt it.
                 */
                jal     octeon_mult_save
#endif
                .endm
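
/*
 * The HI/LO reads above are deliberately interleaved with the GPR
 * stores so that, on cores where moves from HI/LO are slow, the
 * results have time to arrive before they are stored.
 */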

                .macro  SAVE_STATIC docfi=0
                cfi_st  $16, PT_R16, \docfi
                cfi_st  $17, PT_R17, \docfi
                cfi_st  $18, PT_R18, \docfi
                cfi_st  $19, PT_R19, \docfi
                cfi_st  $20, PT_R20, \docfi
                cfi_st  $21, PT_R21, \docfi
                cfi_st  $22, PT_R22, \docfi
                cfi_st  $23, PT_R23, \docfi
                cfi_st  $30, PT_R30, \docfi
                .endm
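
/*
 * $16-$23 and $30 are the callee-saved s0-s7 and fp, which the C
 * calling convention already preserves; SAVE_SOME skips them, so they
 * are only spilled here, on paths that need a complete pt_regs.
 */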

/*
 * get_saved_sp returns the SP for the current CPU by looking in the
 * kernelsp array for it.  If tosp is set, it stores the current sp in
 * k0 and loads the new value in sp.  If not, it clobbers k0 and
 * stores the new value in k1, leaving sp unaffected.
 */
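/*
 * On 64-bit kernels without SYM32 the address of kernelsp cannot be
 * formed from a single lui/%lo pair, so both variants below build it
 * 16 bits at a time:
 *
 *      lui     k1, %highest(kernelsp)  # bits 63..48 (sign-extended)
 *      daddiu  k1, %higher(kernelsp)   # add bits 47..32
 *      dsll    k1, 16
 *      daddiu  k1, %hi(kernelsp)       # add bits 31..16
 *      dsll    k1, 16                  # %lo() is supplied by the load
 */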
#ifdef CONFIG_SMP

                /* SMP variation */
                .macro  get_saved_sp docfi=0 tosp=0
                ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                lui     k1, %hi(kernelsp)
#else
                lui     k1, %highest(kernelsp)
                daddiu  k1, %higher(kernelsp)
                dsll    k1, 16
                daddiu  k1, %hi(kernelsp)
                dsll    k1, 16
#endif
                LONG_SRL        k0, SMP_CPUID_PTRSHIFT
                LONG_ADDU       k1, k0
                .if \tosp
                move    k0, sp
                .if \docfi
                .cfi_register sp, k0
                .endif
                LONG_L  sp, %lo(kernelsp)(k1)
                .else
                LONG_L  k1, %lo(kernelsp)(k1)
                .endif
                .endm

                .macro  set_saved_sp stackp temp temp2
                ASM_CPUID_MFC0  \temp, ASM_SMP_CPUID_REG
                LONG_SRL        \temp, SMP_CPUID_PTRSHIFT
                LONG_S  \stackp, kernelsp(\temp)
                .endm
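
/*
 * In both SMP macros the value read from ASM_SMP_CPUID_REG is shifted
 * right by SMP_CPUID_PTRSHIFT so that the CPU id ends up scaled to a
 * byte offset into the pointer-sized kernelsp[] array.
 */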
#else /* !CONFIG_SMP */
                /* Uniprocessor variation */
                .macro  get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
                /*
                 * Clear the BTB (branch target buffer) and forbid the
                 * RAS (return address stack) to work around an
                 * out-of-order issue in the Loongson2F, via its
                 * diagnostic register.
                 */
                move    k0, ra
                jal     1f
                 nop
1:              jal     1f
                 nop
1:              jal     1f
                 nop
1:              jal     1f
                 nop
1:              move    ra, k0
                li      k0, 3
                mtc0    k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                lui     k1, %hi(kernelsp)
#else
                lui     k1, %highest(kernelsp)
                daddiu  k1, %higher(kernelsp)
                dsll    k1, k1, 16
                daddiu  k1, %hi(kernelsp)
                dsll    k1, k1, 16
#endif
                .if \tosp
                move    k0, sp
                .if \docfi
                .cfi_register sp, k0
                .endif
                LONG_L  sp, %lo(kernelsp)(k1)
                .else
                LONG_L  k1, %lo(kernelsp)(k1)
                .endif
                .endm

                .macro  set_saved_sp stackp temp temp2
                LONG_S  \stackp, kernelsp
                .endm
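
/*
 * The uniprocessor set_saved_sp ignores \temp and \temp2; the
 * arguments are kept so callers can use the same invocation whether
 * or not CONFIG_SMP is enabled.
 */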
#endif

                .macro  SAVE_SOME docfi=0
                .set    push
                .set    noat
                .set    reorder
                mfc0    k0, CP0_STATUS
                sll     k0, 3           /* extract cu0 bit */
                .set    noreorder
                bltz    k0, 8f
                 move   k0, sp
                .if \docfi
                .cfi_register sp, k0
                .endif
#ifdef CONFIG_EVA
                /*
                 * Flush interAptiv's Return Prediction Stack (RPS) by writing
                 * EntryHi. Toggling Config7.RPS is slower and less portable.
                 *
                 * The RPS isn't automatically flushed when exceptions are
                 * taken, which can result in kernel-mode speculative accesses
                 * to user addresses if the RPS mispredicts. That's harmless
                 * when user and kernel share the same address space, but with
                 * EVA the same user segments may be unmapped, or mapped
                 * differently, in kernel mode, possibly even covering
                 * sensitive MMIO regions or invalid memory.
                 *
                 * This can happen when the kernel sets the return address to
                 * ret_from_* and jr's to the exception handler, which looks
                 * more like a tail call than a function call. If nested calls
                 * don't evict the last user address in the RPS, it will
                 * mispredict the return and fetch from a user-controlled
                 * address into the icache.
                 *
                 * More recent EVA-capable cores, which can use MAAR to
                 * restrict speculative accesses, aren't affected.
                 */
                MFC0    k0, CP0_ENTRYHI
                MTC0    k0, CP0_ENTRYHI
#endif
                .set    reorder
                /* Called from user mode, new stack. */
                get_saved_sp docfi=\docfi tosp=1
8:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
                .set    at=k1
#endif
                PTR_SUBU sp, PT_SIZE
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
                .set    noat
#endif
                .if \docfi
                .cfi_def_cfa sp,0
                .endif
                cfi_st  k0, PT_R29, \docfi
                cfi_rel_offset  sp, PT_R29, \docfi
                cfi_st  v1, PT_R3, \docfi
                /*
                 * You might think that you don't need to save $0,
                 * but the FPU emulator and gdb remote debug stub
                 * need it to operate correctly.
                 */
                LONG_S  $0, PT_R0(sp)
                mfc0    v1, CP0_STATUS
                cfi_st  v0, PT_R2, \docfi
                LONG_S  v1, PT_STATUS(sp)
                cfi_st  $4, PT_R4, \docfi
                mfc0    v1, CP0_CAUSE
                cfi_st  $5, PT_R5, \docfi
                LONG_S  v1, PT_CAUSE(sp)
                cfi_st  $6, PT_R6, \docfi
                cfi_st  ra, PT_R31, \docfi
                MFC0    ra, CP0_EPC
                cfi_st  $7, PT_R7, \docfi
#ifdef CONFIG_64BIT
                cfi_st  $8, PT_R8, \docfi
                cfi_st  $9, PT_R9, \docfi
#endif
                LONG_S  ra, PT_EPC(sp)
                .if \docfi
                .cfi_rel_offset ra, PT_EPC
                .endif
                cfi_st  $25, PT_R25, \docfi
                cfi_st  $28, PT_R28, \docfi

                /* Set thread_info if we're coming from user mode */
                mfc0    k0, CP0_STATUS
                sll     k0, 3           /* extract cu0 bit */
                bltz    k0, 9f

                ori     $28, sp, _THREAD_MASK
                xori    $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
                .set    mips64
                pref    0, 0($28)       /* Prefetch the current pointer */
#endif
9:
                .set    pop
                .endm
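
/*
 * SAVE_SOME keys off the CU0 bit of c0_status: the kernel runs with
 * CU0 set (see CLI/STI below), so if it is already set the exception
 * came from kernel mode and sp is already a kernel stack pointer;
 * otherwise the per-CPU kernel stack is fetched with get_saved_sp.
 * The ori/xori pair on $28 rounds sp down to the _THREAD_SIZE
 * boundary, where thread_info lives, since $28 (gp) holds the
 * current thread_info pointer while in the kernel.
 */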

                .macro  SAVE_ALL docfi=0
                SAVE_SOME \docfi
                SAVE_AT \docfi
                SAVE_TEMP \docfi
                SAVE_STATIC \docfi
                .endm
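
/*
 * A typical exception entry therefore does SAVE_ALL (or just SAVE_SOME
 * on lighter-weight paths), drops into kernel mode with CLI or STI
 * below, calls into C, and unwinds with the RESTORE_* counterparts
 * before returning via RESTORE_SP_AND_RET.
 */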

                .macro  RESTORE_AT docfi=0
                .set    push
                .set    noat
                cfi_ld  $1, PT_R1, \docfi
                .set    pop
                .endm

                .macro  RESTORE_TEMP docfi=0
#ifdef CONFIG_CPU_CAVIUM_OCTEON
                /* Restore the Octeon multiplier state */
                jal     octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                LONG_L  $24, PT_ACX(sp)
                mtlhx   $24
                LONG_L  $24, PT_HI(sp)
                mtlhx   $24
                LONG_L  $24, PT_LO(sp)
                mtlhx   $24
#elif !defined(CONFIG_CPU_MIPSR6)
                LONG_L  $24, PT_LO(sp)
                mtlo    $24
                LONG_L  $24, PT_HI(sp)
                mthi    $24
#endif
#ifdef CONFIG_32BIT
                cfi_ld  $8, PT_R8, \docfi
                cfi_ld  $9, PT_R9, \docfi
#endif
                cfi_ld  $10, PT_R10, \docfi
                cfi_ld  $11, PT_R11, \docfi
                cfi_ld  $12, PT_R12, \docfi
                cfi_ld  $13, PT_R13, \docfi
                cfi_ld  $14, PT_R14, \docfi
                cfi_ld  $15, PT_R15, \docfi
                cfi_ld  $24, PT_R24, \docfi
                .endm

                .macro  RESTORE_STATIC docfi=0
                cfi_ld  $16, PT_R16, \docfi
                cfi_ld  $17, PT_R17, \docfi
                cfi_ld  $18, PT_R18, \docfi
                cfi_ld  $19, PT_R19, \docfi
                cfi_ld  $20, PT_R20, \docfi
                cfi_ld  $21, PT_R21, \docfi
                cfi_ld  $22, PT_R22, \docfi
                cfi_ld  $23, PT_R23, \docfi
                cfi_ld  $30, PT_R30, \docfi
                .endm

                .macro  RESTORE_SP docfi=0
                cfi_ld  sp, PT_R29, \docfi
                .endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

                .macro  RESTORE_SOME docfi=0
                .set    push
                .set    reorder
                .set    noat
                mfc0    a0, CP0_STATUS
                li      v1, ST0_CU1 | ST0_IM
                ori     a0, STATMASK
                xori    a0, STATMASK
                mtc0    a0, CP0_STATUS
                and     a0, v1
                LONG_L  v0, PT_STATUS(sp)
                nor     v1, $0, v1
                and     v0, v1
                or      v0, a0
                mtc0    v0, CP0_STATUS
                cfi_ld  $31, PT_R31, \docfi
                cfi_ld  $28, PT_R28, \docfi
                cfi_ld  $25, PT_R25, \docfi
                cfi_ld  $7,  PT_R7, \docfi
                cfi_ld  $6,  PT_R6, \docfi
                cfi_ld  $5,  PT_R5, \docfi
                cfi_ld  $4,  PT_R4, \docfi
                cfi_ld  $3,  PT_R3, \docfi
                cfi_ld  $2,  PT_R2, \docfi
                .set    pop
                .endm

                .macro  RESTORE_SP_AND_RET docfi=0
                .set    push
                .set    noreorder
                LONG_L  k0, PT_EPC(sp)
                RESTORE_SP \docfi
                jr      k0
                 rfe
                .set    pop
                .endm
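
/*
 * The classic R3000 return idiom: rfe sits in the jr delay slot, so
 * the previous KU/IE state pops back into KUc/IEc while the jump to
 * the saved EPC is already in flight.
 */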

#else
                .macro  RESTORE_SOME docfi=0
                .set    push
                .set    reorder
                .set    noat
                mfc0    a0, CP0_STATUS
                ori     a0, STATMASK
                xori    a0, STATMASK
                mtc0    a0, CP0_STATUS
                li      v1, ST0_CU1 | ST0_FR | ST0_IM
                and     a0, v1
                LONG_L  v0, PT_STATUS(sp)
                nor     v1, $0, v1
                and     v0, v1
                or      v0, a0
                mtc0    v0, CP0_STATUS
                LONG_L  v1, PT_EPC(sp)
                MTC0    v1, CP0_EPC
                cfi_ld  $31, PT_R31, \docfi
                cfi_ld  $28, PT_R28, \docfi
                cfi_ld  $25, PT_R25, \docfi
#ifdef CONFIG_64BIT
                cfi_ld  $8, PT_R8, \docfi
                cfi_ld  $9, PT_R9, \docfi
#endif
                cfi_ld  $7,  PT_R7, \docfi
                cfi_ld  $6,  PT_R6, \docfi
                cfi_ld  $5,  PT_R5, \docfi
                cfi_ld  $4,  PT_R4, \docfi
                cfi_ld  $3,  PT_R3, \docfi
                cfi_ld  $2,  PT_R2, \docfi
                .set    pop
                .endm

                .macro  RESTORE_SP_AND_RET docfi=0
                RESTORE_SP \docfi
#if defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
                eretnc
#else
                .set    push
                .set    arch=r4000
                eret
                .set    pop
#endif
                .endm

#endif
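
/*
 * In the non-R3000 return path above, eretnc (introduced in MIPS r5)
 * returns from the exception without clearing the LLbit; on older
 * ISAs plain eret is used, with .set arch=r4000 so the assembler
 * accepts it even when building for a CPU whose default ISA predates
 * the instruction.
 */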

                .macro  RESTORE_ALL docfi=0
                RESTORE_TEMP \docfi
                RESTORE_STATIC \docfi
                RESTORE_AT \docfi
                RESTORE_SOME \docfi
                RESTORE_SP \docfi
                .endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
                .macro  CLI
                mfc0    t0, CP0_STATUS
                li      t1, ST0_KERNEL_CUMASK | STATMASK
                or      t0, t1
                xori    t0, STATMASK
                mtc0    t0, CP0_STATUS
                irq_disable_hazard
                .endm
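
/*
 * The or/xori pair forces the STATMASK bits to a known state no matter
 * what they held before: the or sets every bit in the mask, the xori
 * then clears those that should end up zero.  CLI clears the whole
 * mask (kernel mode, interrupts off); STI below masks with
 * STATMASK & ~1, leaving IE (bit 0) set.
 */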

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
                .macro  STI
                mfc0    t0, CP0_STATUS
                li      t1, ST0_KERNEL_CUMASK | STATMASK
                or      t0, t1
                xori    t0, STATMASK & ~1
                mtc0    t0, CP0_STATUS
                irq_enable_hazard
                .endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
                .macro  KMODE
                mfc0    t0, CP0_STATUS
                li      t1, ST0_KERNEL_CUMASK | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
                andi    t2, t0, ST0_IEP
                srl     t2, 2
                or      t0, t2
#endif
                or      t0, t1
                xori    t0, STATMASK & ~1
                mtc0    t0, CP0_STATUS
                irq_disable_hazard
                .endm

#endif /* _ASM_STACKFRAME_H */