linux/arch/xtensa/kernel/entry.S
/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find the first bit set in WINDOWSTART from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

        .macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
        nsau    \bit, \mask                     # 32-WSBITS ... 31 (32 iff 0)
        addi    \bit, \bit, WSBITS - 32 + 1     # topmost bit set -> return 1
#else
        movi    \bit, WSBITS
#if WSBITS > 16
        _bltui  \mask, 0x10000, 99f
        addi    \bit, \bit, -16
        extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:     _bltui  \mask, 0x100, 99f
        addi    \bit, \bit, -8
        srli    \mask, \mask, 8
#endif
99:     _bltui  \mask, 0x10, 99f
        addi    \bit, \bit, -4
        srli    \mask, \mask, 4
99:     _bltui  \mask, 0x4, 99f
        addi    \bit, \bit, -2
        srli    \mask, \mask, 2
99:     _bltui  \mask, 0x2, 99f
        addi    \bit, \bit, -1
99:

#endif
        .endm
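
/* A minimal C sketch of ffs_ws, assuming a nonzero WSBITS-wide mask
 * (function name and shape are illustrative, not from the kernel):
 *
 *      unsigned ffs_ws(unsigned mask)
 *      {
 *              return WSBITS - (31 - __builtin_clz(mask));
 *      }
 *
 * i.e. bit WSBITS-1 (leftmost) yields 1, bit 0 yields WSBITS.
 */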


        .macro  irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
        rsr     \flags, ps
        extui   \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
        bgei    \tmp, LOCKLEVEL, 99f
        rsil    \tmp, LOCKLEVEL
99:
#else
        movi    \tmp, LOCKLEVEL
        rsr     \flags, ps
        or      \flags, \flags, \tmp
        xsr     \flags, ps
        rsync
#endif
#else
        rsil    \flags, LOCKLEVEL
#endif
        .endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original value in depc
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave1:  dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */
        .literal_position

ENTRY(user_exception)

        /* Save a1, a2, a3, and set SP. */

        rsr     a0, depc
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        mov     a1, a2

        .globl _user_exception
_user_exception:

        /* Save SAR and turn off single stepping */

        movi    a2, 0
        wsr     a2, depc                # terminate user stack trace with 0
        rsr     a3, sar
        xsr     a2, icountlevel
        s32i    a3, a1, PT_SAR
        s32i    a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
        rur     a2, threadptr
        s32i    a2, a1, PT_THREADPTR
#endif

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
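        /* A C sketch of this rotate-right, assuming ws/wb hold the values of
         * WINDOWSTART/WINDOWBASE (names illustrative only):
         *
         *      rotated = ((ws >> wb) | (ws << (WSBITS - wb)))
         *                & ((1U << WSBITS) - 1);
         */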

        rsr     a2, windowbase
        rsr     a3, windowstart
        ssr     a2
        s32i    a2, a1, PT_WINDOWBASE
        s32i    a3, a1, PT_WINDOWSTART
        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2
        srli    a2, a2, 32-WSBITS
        s32i    a2, a1, PT_WMASK        # needed for restoring registers

        /* Save only live registers. */

        _bbsi.l a2, 1, 1f
        s32i    a4, a1, PT_AREG4
        s32i    a5, a1, PT_AREG5
        s32i    a6, a1, PT_AREG6
        s32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i    a8, a1, PT_AREG8
        s32i    a9, a1, PT_AREG9
        s32i    a10, a1, PT_AREG10
        s32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i    a12, a1, PT_AREG12
        s32i    a13, a1, PT_AREG13
        s32i    a14, a1, PT_AREG14
        s32i    a15, a1, PT_AREG15
        _bnei   a2, 1, 1f               # only one valid frame?

        /* Only one valid frame, skip saving regs. */

        j       2f

        /* Save the remaining registers.
         * We have to save all registers up to the first '1' from
         * the right, except the current frame (bit 0).
         * Assume a2 is:  001001000110001
         * All register frames starting from the top field to the marked '1'
         * must be saved.
         */

1:      addi    a3, a2, -1              # eliminate '1' in bit 0: yyyyxxww0
        neg     a3, a3                  # yyyyxxww0 -> YYYYXXWW1+1
        and     a3, a3, a2              # max. only one bit is set

        /* Find number of frames to save */

        ffs_ws  a0, a3                  # number of frames to the '1' from left

        /* Store information into WMASK:
         * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
         * bits 4...: number of valid 4-register frames
         */
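        /* Encoding sketch in C (illustrative; 'frames' is the ffs_ws result,
         * 'rot_ws' the rotated WINDOWSTART from above):
         *
         *      wmask = (frames << 4) | (rot_ws & 0xf);
         */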

        slli    a3, a0, 4               # number of frames to save in bits 8..4
        extui   a2, a2, 0, 4            # mask for the first 16 registers
        or      a2, a3, a2
        s32i    a2, a1, PT_WMASK        # needed when we restore the reg-file

        /* Save 4 registers at a time */

1:      rotw    -1
        s32i    a0, a5, PT_AREG_END - 16
        s32i    a1, a5, PT_AREG_END - 12
        s32i    a2, a5, PT_AREG_END - 8
        s32i    a3, a5, PT_AREG_END - 4
        addi    a0, a4, -1
        addi    a1, a5, -16
        _bnez   a0, 1b

        /* WINDOWBASE still in SAR! */

        rsr     a2, sar                 # original WINDOWBASE
        movi    a3, 1
        ssl     a2
        sll     a3, a3
        wsr     a3, windowstart         # set corresponding WINDOWSTART bit
        wsr     a2, windowbase          # and WINDOWBASE
        rsync

        /* We are back to the original stack pointer (a1) */

2:      /* Now, jump to the common exception handler. */

        j       common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *       MOVSP here, as we do that when we return from the exception.
 *       (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

        /* Save a1, a2, a3, and set SP. */

        rsr     a0, depc                # get a2
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        mov     a1, a2

        .globl _kernel_exception
_kernel_exception:

        /* Save SAR and turn off single stepping */

        movi    a2, 0
        rsr     a3, sar
        xsr     a2, icountlevel
        s32i    a3, a1, PT_SAR
        s32i    a2, a1, PT_ICOUNTLEVEL

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

        rsr     a2, windowbase          # don't need to save these, we only
        rsr     a3, windowstart         # need shifted windowstart: windowmask
        ssr     a2
        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2
        srli    a2, a2, 32-WSBITS
        s32i    a2, a1, PT_WMASK        # needed for kernel_exception_exit

        /* Save only the live window-frame */

        _bbsi.l a2, 1, 1f
        s32i    a4, a1, PT_AREG4
        s32i    a5, a1, PT_AREG5
        s32i    a6, a1, PT_AREG6
        s32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i    a8, a1, PT_AREG8
        s32i    a9, a1, PT_AREG9
        s32i    a10, a1, PT_AREG10
        s32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i    a12, a1, PT_AREG12
        s32i    a13, a1, PT_AREG13
        s32i    a14, a1, PT_AREG14
        s32i    a15, a1, PT_AREG15

        _bnei   a2, 1, 1f

        /* Copy spill slots of a0 and a1 to imitate movsp
         * in order to keep the exception stack contiguous
         */
        l32i    a3, a1, PT_SIZE
        l32i    a0, a1, PT_SIZE + 4
        s32e    a3, a1, -16
        s32e    a0, a1, -12
1:
        l32i    a0, a1, PT_AREG0        # restore saved a0
        wsr     a0, depc

#ifdef KERNEL_STACK_OVERFLOW_CHECK

        /*  Stack overflow check, for debugging  */
        extui   a2, a1, TASK_SIZE_BITS,XX
        movi    a3, SIZE??
        _bge    a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

        /* Save some registers, disable loops and clear the syscall flag. */

        rsr     a2, debugcause
        rsr     a3, epc1
        s32i    a2, a1, PT_DEBUGCAUSE
        s32i    a3, a1, PT_PC

        movi    a2, -1
        rsr     a3, excvaddr
        s32i    a2, a1, PT_SYSCALL
        movi    a2, 0
        s32i    a3, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
        xsr     a2, lcount
        s32i    a2, a1, PT_LCOUNT
#endif

        /* It is now safe to restore the EXC_TABLE_FIXUP variable. */

        rsr     a2, exccause
        movi    a3, 0
        rsr     a0, excsave1
        s32i    a2, a1, PT_EXCCAUSE
        s32i    a3, a0, EXC_TABLE_FIXUP

        /* All unrecoverable states are saved on stack, now, and a1 is valid.
         * Now we can allow exceptions again. In case we've got an interrupt
         * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
         * otherwise it's left unchanged.
         *
         * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
         */
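        /* C sketch of this PS update on the !XTENSA_FAKE_NMI path below
         * (illustrative names only):
         *
         *      level = (ps >> PS_INTLEVEL_SHIFT)
         *              & ((1 << PS_INTLEVEL_WIDTH) - 1);
         *      if (exccause == EXCCAUSE_LEVEL1_INTERRUPT)
         *              level = LOCKLEVEL;
         *      ps = level | (1 << PS_WOE_BIT);  // EXCM/UM/RING/OWB cleared
         */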

        rsr     a3, ps
        s32i    a3, a1, PT_PS           # save ps

#if XTENSA_FAKE_NMI
        /* Correct PS needs to be saved in the PT_PS:
         * - in case of exception or level-1 interrupt it's in the PS,
         *   and is already saved.
         * - in case of medium level interrupt it's in the excsave2.
         */
        movi    a0, EXCCAUSE_MAPPED_NMI
        extui   a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
        beq     a2, a0, .Lmedium_level_irq
        bnei    a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
        beqz    a3, .Llevel1_irq        # level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
        rsr     a0, excsave2
        s32i    a0, a1, PT_PS           # save medium-level interrupt ps
        bgei    a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
        movi    a3, LOCKLEVEL

.Lexception:
        movi    a0, 1 << PS_WOE_BIT
        or      a3, a3, a0
#else
        addi    a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
        movi    a0, LOCKLEVEL
        extui   a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
                                        # a3 = PS.INTLEVEL
        moveqz  a3, a0, a2              # a3 = LOCKLEVEL iff interrupt
        movi    a2, 1 << PS_WOE_BIT
        or      a3, a3, a2
        rsr     a2, exccause
#endif

        /* restore return address (or 0 if return to userspace) */
        rsr     a0, depc
        wsr     a3, ps
        rsync                           # PS.WOE => rsync => overflow

        /* Save lbeg, lend */
#if XCHAL_HAVE_LOOPS
        rsr     a4, lbeg
        rsr     a3, lend
        s32i    a4, a1, PT_LBEG
        s32i    a3, a1, PT_LEND
#endif

        /* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
        rsr     a3, scompare1
        s32i    a3, a1, PT_SCOMPARE1
#endif

        /* Save optional registers. */

        save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT

        /* Go to second-level dispatcher. Set up parameters to pass to the
         * exception handler and call the exception handler.
         */

        rsr     a4, excsave1
        mov     a6, a1                  # pass stack frame
        mov     a7, a2                  # pass EXCCAUSE
        addx4   a4, a2, a4
        l32i    a4, a4, EXC_TABLE_DEFAULT               # load handler

        /* Call the second-level handler */

        callx4  a4

        /* Jump here for exception exit */
        .global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
        l32i    a2, a1, PT_EXCCAUSE
        movi    a3, EXCCAUSE_MAPPED_NMI
        beq     a2, a3, .LNMIexit
#endif
1:
        irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
        call4   trace_hardirqs_off
#endif

        /* Jump if we are returning from kernel exceptions. */

        l32i    a3, a1, PT_PS
        GET_THREAD_INFO(a2, a1)
        l32i    a4, a2, TI_FLAGS
        _bbci.l a3, PS_UM_BIT, 6f

        /* Specific to a user exception exit:
         * We need to check some flags for signal handling and rescheduling,
         * and have to restore WB and WS, extra states, and all registers
         * in the register file that were in use in the user task.
         * Note that we don't disable interrupts here.
         */

        _bbsi.l a4, TIF_NEED_RESCHED, 3f
        _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
        _bbci.l a4, TIF_SIGPENDING, 5f

2:      l32i    a4, a1, PT_DEPC
        bgeui   a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f

        /* Call do_signal() */

#ifdef CONFIG_TRACE_IRQFLAGS
        call4   trace_hardirqs_on
#endif
        rsil    a2, 0
        mov     a6, a1
        call4   do_notify_resume        # int do_notify_resume(struct pt_regs*)
        j       1b

3:      /* Reschedule */

#ifdef CONFIG_TRACE_IRQFLAGS
        call4   trace_hardirqs_on
#endif
        rsil    a2, 0
        call4   schedule        # void schedule (void)
        j       1b

#ifdef CONFIG_PREEMPT
6:
        _bbci.l a4, TIF_NEED_RESCHED, 4f

        /* Check current_thread_info->preempt_count */

        l32i    a4, a2, TI_PRE_COUNT
        bnez    a4, 4f
        call4   preempt_schedule_irq
        j       1b
#endif

#if XTENSA_FAKE_NMI
.LNMIexit:
        l32i    a3, a1, PT_PS
        _bbci.l a3, PS_UM_BIT, 4f
#endif

5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        _bbci.l a4, TIF_DB_DISABLED, 7f
        call4   restore_dbreak
7:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
        l32i    a4, a1, PT_DEPC
        bgeui   a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
        call4   check_tlb_sanity
#endif
6:
4:
#ifdef CONFIG_TRACE_IRQFLAGS
        extui   a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
        bgei    a4, LOCKLEVEL, 1f
        call4   trace_hardirqs_on
1:
#endif
        /* Restore optional registers. */

        load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

        /* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
        l32i    a2, a1, PT_SCOMPARE1
        wsr     a2, scompare1
#endif
        wsr     a3, ps          /* disable interrupts */

        _bbci.l a3, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

        /* Restore the state of the task and return from the exception. */

        /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

        l32i    a2, a1, PT_WINDOWBASE
        l32i    a3, a1, PT_WINDOWSTART
        wsr     a1, depc                # use DEPC as temp storage
        wsr     a3, windowstart         # restore WINDOWSTART
        ssr     a2                      # preserve user's WB in the SAR
        wsr     a2, windowbase          # switch to user's saved WB
        rsync
        rsr     a1, depc                # restore stack pointer
        l32i    a2, a1, PT_WMASK        # register frames saved (in bits 4...9)
        rotw    -1                      # we restore a4..a7
        _bltui  a6, 16, 1f              # only have to restore current window?

        /* The working registers are a2 and a3.  We are restoring to
         * a4..a7.  Be careful not to destroy what we have just restored.
         * Note: wmask has the format YYYYM:
         *       Y: number of registers saved in groups of 4
         *       M: 4 bit mask of first 16 registers
         */

        mov     a2, a6
        mov     a3, a5

2:      rotw    -1                      # a0..a3 become a4..a7
        addi    a3, a7, -4*4            # next iteration
        addi    a2, a6, -16             # decrementing Y in WMASK
        l32i    a4, a3, PT_AREG_END + 0
        l32i    a5, a3, PT_AREG_END + 4
        l32i    a6, a3, PT_AREG_END + 8
        l32i    a7, a3, PT_AREG_END + 12
        _bgeui  a2, 16, 2b

        /* Clear unrestored registers (don't leak anything to user-land) */

1:      rsr     a0, windowbase
        rsr     a3, sar
        sub     a3, a0, a3
        beqz    a3, 2f
        extui   a3, a3, 0, WBBITS

1:      rotw    -1
        addi    a3, a7, -1
        movi    a4, 0
        movi    a5, 0
        movi    a6, 0
        movi    a7, 0
        bgei    a3, 1, 1b

        /* We are back where we were when we started.
         * Note: a2 still contains WMASK (if we've returned to the original
         *       frame where we had loaded a2), or at least the lower 4 bits
         *       (if we have restored WSBITS-1 frames).
         */

2:
#if XCHAL_HAVE_THREADPTR
        l32i    a3, a1, PT_THREADPTR
        wur     a3, threadptr
#endif

        j       common_exception_exit

        /* This is the kernel exception exit.
         * We avoided doing a MOVSP when we entered the exception, but we
         * have to do it here.
         */

kernel_exception_exit:

        /* Check if we have to do a movsp.
         *
         * We only have to do a movsp if the previous window-frame has
         * been spilled to the *temporary* exception stack instead of the
         * task's stack. This is the case if the corresponding bit in
         * WINDOWSTART for the previous window-frame was set before
         * (not spilled) but is zero now (spilled).
         * If this bit is zero, all other bits except the one for the
         * current window frame are also zero. So, we can use a simple test:
         * 'and' WINDOWSTART and WINDOWSTART-1:
         *
         *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
         *
         * The result is zero only if one bit was set.
         *
         * (Note: We might have gone through several task switches before
         *        we come back to the current task, so WINDOWBASE might be
         *        different from the time the exception occurred.)
         */
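        /* The test above, sketched in C (illustrative):
         *
         *      if ((ws & (ws - 1)) == 0)       // at most one bit set: spilled
         *              do_movsp();
         */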

        /* Test WINDOWSTART before and after the exception.
         * We actually have WMASK, so we only have to test if it is 1 or not.
         */

        l32i    a2, a1, PT_WMASK
        _beqi   a2, 1, common_exception_exit    # Spilled before exception, jump

        /* Test WINDOWSTART now. If spilled, do the movsp */

        rsr     a3, windowstart
        addi    a0, a3, -1
        and     a3, a3, a0
        _bnez   a3, common_exception_exit

        /* Do a movsp (we returned from a call4, so we have at least a0..a7) */

        addi    a0, a1, -16
        l32i    a3, a0, 0
        l32i    a4, a0, 4
        s32i    a3, a1, PT_SIZE+0
        s32i    a4, a1, PT_SIZE+4
        l32i    a3, a0, 8
        l32i    a4, a0, 12
        s32i    a3, a1, PT_SIZE+8
        s32i    a4, a1, PT_SIZE+12

        /* Common exception exit.
         * We restore the special registers and the current window frame, and
         * return from the exception.
         *
         * Note: We expect a2 to hold PT_WMASK
         */

common_exception_exit:

        /* Restore address registers. */

        _bbsi.l a2, 1, 1f
        l32i    a4,  a1, PT_AREG4
        l32i    a5,  a1, PT_AREG5
        l32i    a6,  a1, PT_AREG6
        l32i    a7,  a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        l32i    a8,  a1, PT_AREG8
        l32i    a9,  a1, PT_AREG9
        l32i    a10, a1, PT_AREG10
        l32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        l32i    a12, a1, PT_AREG12
        l32i    a13, a1, PT_AREG13
        l32i    a14, a1, PT_AREG14
        l32i    a15, a1, PT_AREG15

        /* Restore PC, SAR */

1:      l32i    a2, a1, PT_PC
        l32i    a3, a1, PT_SAR
        wsr     a2, epc1
        wsr     a3, sar

        /* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
        l32i    a2, a1, PT_LBEG
        l32i    a3, a1, PT_LEND
        wsr     a2, lbeg
        l32i    a2, a1, PT_LCOUNT
        wsr     a3, lend
        wsr     a2, lcount
#endif

        /* We control single stepping through the ICOUNTLEVEL register. */

        l32i    a2, a1, PT_ICOUNTLEVEL
        movi    a3, -2
        wsr     a2, icountlevel
        wsr     a3, icount

        /* Check if it was a double exception. */

        l32i    a0, a1, PT_DEPC
        l32i    a3, a1, PT_AREG3
        l32i    a2, a1, PT_AREG2
        _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        /* Restore a0...a3 and return */

        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
        rfe

1:      wsr     a0, depc
        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
        rfde

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

        .literal_position

ENTRY(debug_exception)

        rsr     a0, SREG_EPS + XCHAL_DEBUGLEVEL
        bbsi.l  a0, PS_EXCM_BIT, 1f     # exception mode

        /* Set EPC1 and EXCCAUSE */

        wsr     a2, depc                # save a2 temporarily
        rsr     a2, SREG_EPC + XCHAL_DEBUGLEVEL
        wsr     a2, epc1

        movi    a2, EXCCAUSE_MAPPED_DEBUG
        wsr     a2, exccause

        /* Restore PS to the value before the debug exc but with PS.EXCM set. */

        movi    a2, 1 << PS_EXCM_BIT
        or      a2, a0, a2
        wsr     a2, ps

        /* Switch to kernel/user stack, restore jump vector, and save a0 */

        bbsi.l  a2, PS_UM_BIT, 2f       # jump if user mode

        addi    a2, a1, -16-PT_SIZE     # assume kernel stack
3:
        l32i    a0, a3, DT_DEBUG_SAVE
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG0
        movi    a0, 0
        s32i    a0, a2, PT_DEPC         # mark it as a regular exception
        xsr     a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
        xsr     a0, depc
        s32i    a3, a2, PT_AREG3
        s32i    a0, a2, PT_AREG2
        mov     a1, a2

        /* Debug exception is handled as an exception, so interrupts will
         * likely be enabled in the common exception handler. Disable
         * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
         * meaning.
         */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
        GET_THREAD_INFO(a2, a1)
        l32i    a3, a2, TI_PRE_COUNT
        addi    a3, a3, 1
        s32i    a3, a2, TI_PRE_COUNT
#endif

        rsr     a2, ps
        bbsi.l  a2, PS_UM_BIT, _user_exception
        j       _kernel_exception

2:      rsr     a2, excsave1
        l32i    a2, a2, EXC_TABLE_KSTK  # load kernel stack pointer
        j       3b

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        /* Debug exception while in exception mode. This may happen when
         * the window overflow/underflow handler or a fast exception handler
         * hits a data breakpoint, in which case we save and disable all data
         * breakpoints, single-step the faulting instruction, and restore
         * the data breakpoints.
         */
1:
        bbci.l  a0, PS_UM_BIT, 1b       # jump if kernel mode

        rsr     a0, debugcause
        bbsi.l  a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

        .set    _index, 0
        .rept   XCHAL_NUM_DBREAK
        l32i    a0, a3, DT_DBREAKC_SAVE + _index * 4
        wsr     a0, SREG_DBREAKC + _index
        .set    _index, _index + 1
        .endr

        l32i    a0, a3, DT_ICOUNT_LEVEL_SAVE
        wsr     a0, icountlevel

        l32i    a0, a3, DT_ICOUNT_SAVE
        xsr     a0, icount

        l32i    a0, a3, DT_DEBUG_SAVE
        xsr     a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
        rfi     XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
        .set    _index, 0
        .rept   XCHAL_NUM_DBREAK
        movi    a0, 0
        xsr     a0, SREG_DBREAKC + _index
        s32i    a0, a3, DT_DBREAKC_SAVE + _index * 4
        .set    _index, _index + 1
        .endr

        movi    a0, XCHAL_EXCM_LEVEL + 1
        xsr     a0, icountlevel
        s32i    a0, a3, DT_ICOUNT_LEVEL_SAVE

        movi    a0, 0xfffffffe
        xsr     a0, icount
        s32i    a0, a3, DT_ICOUNT_SAVE

        l32i    a0, a3, DT_DEBUG_SAVE
        xsr     a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
        rfi     XCHAL_DEBUGLEVEL
#else
        /* Debug exception while in exception mode. Should not happen. */
1:      j       1b      // FIXME!!
#endif

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stack pointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
        .align 4
unrecoverable_text:
        .ascii "Unrecoverable error in exception handler\0"

        .literal_position

ENTRY(unrecoverable_exception)

        movi    a0, 1
        movi    a1, 0

        wsr     a0, windowstart
        wsr     a1, windowbase
        rsync

        movi    a1, (1 << PS_WOE_BIT) | LOCKLEVEL
        wsr     a1, ps
        rsync

        movi    a1, init_task
        movi    a0, 0
        addi    a1, a1, PT_REGS_OFFSET

        movi    a6, unrecoverable_text
        call4   panic

1:      j       1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 * This algorithm was taken from Ross Morley's RTOS Porting Layer:
 *
 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 * It leverages the existing window spill/fill routines and their support for
 * double exceptions. The 'movsp' instruction will only cause an exception if
 * the next window needs to be loaded. In fact this ALLOCA exception may be
 * replaced at some point by changing the hardware to do an underflow
 * exception of the proper size instead.
 *
 * This algorithm simply backs out the register changes started by the user
 * exception handler, makes it appear that we have started a window underflow
 * by rotating the window back and then setting the old window base (OWB) in
 * the 'ps' register with the rolled back window base. The 'movsp' instruction
 * will be re-executed and this time, since the next window frame is in the
 * active AR registers, it won't cause an exception.
 *
 * If the WindowUnderflow code gets a TLB miss, the page will get mapped
 * and the partial WindowUnderflow will be handled in the double exception
 * handler.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
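
/* Call-size selection sketch in C (illustrative; window_underflow4() etc.
 * stand in for the _WindowUnderflow4/8/12 jumps below): the top two bits
 * of the caller's return address encode the call type, which the code
 * tests bit by bit:
 *
 *      if ((ra >> 31) == 0)
 *              window_underflow4();            // call4
 *      else if (((ra >> 30) & 1) == 0)
 *              window_underflow8();            // call8
 *      else
 *              window_underflow12();           // call12
 */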

ENTRY(fast_alloca)
        rsr     a0, windowbase
        rotw    -1
        rsr     a2, ps
        extui   a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
        xor     a3, a3, a4
        l32i    a4, a6, PT_AREG0
        l32i    a1, a6, PT_DEPC
        rsr     a6, depc
        wsr     a1, depc
        slli    a3, a3, PS_OWB_SHIFT
        xor     a2, a2, a3
        wsr     a2, ps
        rsync

        _bbci.l a4, 31, 4f
        rotw    -1
        _bbci.l a8, 30, 8f
        rotw    -1
        j       _WindowUnderflow12
8:      j       _WindowUnderflow8
4:      j       _WindowUnderflow4
ENDPROC(fast_alloca)

/*
 * Fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 */

ENTRY(fast_syscall_kernel)

        /* Skip syscall. */

        rsr     a0, epc1
        addi    a0, a0, 3
        wsr     a0, epc1

        l32i    a0, a2, PT_DEPC
        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr     a0, depc                        # get syscall-nr
        _beqz   a0, fast_syscall_spill_registers
        _beqi   a0, __NR_xtensa, fast_syscall_xtensa

        j       kernel_exception

ENDPROC(fast_syscall_kernel)

ENTRY(fast_syscall_user)

        /* Skip syscall. */

        rsr     a0, epc1
        addi    a0, a0, 3
        wsr     a0, epc1

        l32i    a0, a2, PT_DEPC
        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr     a0, depc                        # get syscall-nr
        _beqz   a0, fast_syscall_spill_registers
        _beqi   a0, __NR_xtensa, fast_syscall_xtensa

        j       user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

        /* Restore all states. */

        l32i    a0, a2, PT_AREG0        # restore a0
        xsr     a2, depc                # restore a2, depc

        wsr     a0, excsave1
        call0   unrecoverable_exception

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:        a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in a0 and DEPC
 *   a3:        a3
 *   a4..a15:   unchanged
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 */
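
/* Semantics sketch of SYS_XTENSA_ATOMIC_CMP_SWP in C (illustrative):
 *
 *      old = *ptr;
 *      if (old == oldval)
 *              *ptr = newval;
 *      return old == oldval;           // 1 on success, 0 otherwise
 */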

        .literal_position

#ifdef CONFIG_FAST_SYSCALL_XTENSA

ENTRY(fast_syscall_xtensa)

        s32i    a7, a2, PT_AREG7        # we need an additional register
        movi    a7, 4                   # sizeof(unsigned int)
        access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp

        _bgeui  a6, SYS_XTENSA_COUNT, .Lill
        _bnei   a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

        /* Fall through for ATOMIC_CMP_SWP. */

.Lswp:  /* Atomic compare and swap */

EX(.Leac) l32i  a0, a3, 0               # read old value
        bne     a0, a4, 1f              # same as old value? jump
EX(.Leac) s32i  a5, a3, 0               # different, modify value
        l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 1                   # and return 1
        rfe

1:      l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 0                   # return 0 (note that we cannot set
        rfe

.Lnswp: /* Atomic set, add, and exg_add. */

EX(.Leac) l32i  a7, a3, 0               # orig
        addi    a6, a6, -SYS_XTENSA_ATOMIC_SET
        add     a0, a4, a7              # + arg
        moveqz  a0, a4, a6              # set
        addi    a6, a6, SYS_XTENSA_ATOMIC_SET
EX(.Leac) s32i  a0, a3, 0               # write new value

        mov     a0, a2
        mov     a2, a7
        l32i    a7, a0, PT_AREG7        # restore a7
        l32i    a0, a0, PT_AREG0        # restore a0
        rfe

.Leac:  l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -EFAULT
        rfe

.Lill:  l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -EINVAL
        rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -ENOSYS
        rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */


/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS

ENTRY(fast_syscall_spill_registers)

        /* Register a FIXUP handler (pass current wb as a parameter) */

        xsr     a3, excsave1
        movi    a0, fast_syscall_spill_registers_fixup
        s32i    a0, a3, EXC_TABLE_FIXUP
        rsr     a0, windowbase
        s32i    a0, a3, EXC_TABLE_PARAM
        xsr     a3, excsave1            # restore a3 and excsave_1

        /* Save a3, a4 and SAR on stack. */

        rsr     a0, sar
        s32i    a3, a2, PT_AREG3
        s32i    a0, a2, PT_SAR

        /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

        s32i    a4, a2, PT_AREG4
        s32i    a7, a2, PT_AREG7
        s32i    a8, a2, PT_AREG8
        s32i    a11, a2, PT_AREG11
        s32i    a12, a2, PT_AREG12
        s32i    a15, a2, PT_AREG15

        /*
         * Rotate ws so that the current windowbase is at bit 0.
         * Assume ws = xxxwww1yy (www1 current window frame).
         * Rotate ws right so that a4 = yyxxxwww1.
         */
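        /* C sketch of this double-width rotate (illustrative; ws/wb are
         * WINDOWSTART/WINDOWBASE):
         *
         *      rot = (ws | (ws << WSBITS)) >> wb;   // low WSBITS bits rotated
         */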

        rsr     a0, windowbase
        rsr     a3, windowstart         # a3 = xxxwww1yy
        ssr     a0                      # holds WB
        slli    a0, a3, WSBITS
        or      a3, a3, a0              # a3 = xxxwww1yyxxxwww1yy
        srl     a3, a3                  # a3 = 00xxxwww1yyxxxwww1

        /* We are done if there is no more than the current register frame. */

        extui   a3, a3, 1, WSBITS-1     # a3 = 0yyxxxwww
        movi    a0, (1 << (WSBITS-1))
        _beqz   a3, .Lnospill           # only one active frame? jump

        /* We want 1 at the top, so that we return to the current windowbase */

        or      a3, a3, a0              # 1yyxxxwww

        /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

        wsr     a3, windowstart         # save shifted windowstart
        neg     a0, a3
        and     a3, a0, a3              # first bit set from right: 000010000

        ffs_ws  a0, a3                  # a0: shifts to skip empty frames
        movi    a3, WSBITS
        sub     a0, a3, a0              # WSBITS-a0:number of 0-bits from right
        ssr     a0                      # save in SAR for later.

        rsr     a3, windowbase
        add     a3, a3, a0
        wsr     a3, windowbase
        rsync

        rsr     a3, windowstart
        srl     a3, a3                  # shift windowstart

        /* WB is now just one frame below the oldest frame in the register
           window. WS is shifted so the oldest frame is in bit 0, thus, WB
           and WS differ by one 4-register frame. */

        /* Save frames. Depending on what call was used (call4, call8, or
         * call12), we have to save 4, 8, or 12 registers.
         */


.Lloop: _bbsi.l a3, 1, .Lc4
        _bbci.l a3, 2, .Lc12

.Lc8:   s32e    a4, a13, -16
        l32e    a4, a5, -12
        s32e    a8, a4, -32
        s32e    a5, a13, -12
        s32e    a6, a13, -8
        s32e    a7, a13, -4
        s32e    a9, a4, -28
        s32e    a10, a4, -24
        s32e    a11, a4, -20
        srli    a11, a3, 2              # shift windowbase by 2
        rotw    2
        _bnei   a3, 1, .Lloop
        j       .Lexit

.Lc4:   s32e    a4, a9, -16
        s32e    a5, a9, -12
        s32e    a6, a9, -8
        s32e    a7, a9, -4

        srli    a7, a3, 1
        rotw    1
        _bnei   a3, 1, .Lloop
        j       .Lexit

.Lc12:  _bbci.l a3, 3, .Linvalid_mask   # bit 3 shouldn't be zero!

        /* 12-register frame (call12) */

        l32e    a0, a5, -12
        s32e    a8, a0, -48
        mov     a8, a0

        s32e    a9, a8, -44
        s32e    a10, a8, -40
        s32e    a11, a8, -36
        s32e    a12, a8, -32
        s32e    a13, a8, -28
        s32e    a14, a8, -24
        s32e    a15, a8, -20
        srli    a15, a3, 3

        /* The stack pointer for a4..a7 is out of reach, so we rotate the
         * window, grab the stack pointer, and rotate back.
         * Alternatively, we could also use the following approach, but that
         * makes the fixup routine much more complicated:
         * rotw 1
         * s32e a0, a13, -16
         * ...
         * rotw 2
         */

        rotw    1
        mov     a4, a13
        rotw    -1

        s32e    a4, a8, -16
        s32e    a5, a8, -12
        s32e    a6, a8, -8
        s32e    a7, a8, -4

        rotw    3

        _beqi   a3, 1, .Lexit
        j       .Lloop

.Lexit:

        /* Done. Do the final rotation and set WS */

        rotw    1
        rsr     a3, windowbase
        ssl     a3
        movi    a3, 1
        sll     a3, a3
        wsr     a3, windowstart
.Lnospill:

        /* Advance PC, restore registers and SAR, and return from exception. */

        l32i    a3, a2, PT_SAR
        l32i    a0, a2, PT_AREG0
        wsr     a3, sar
        l32i    a3, a2, PT_AREG3

        /* Restore clobbered registers. */

        l32i    a4, a2, PT_AREG4
        l32i    a7, a2, PT_AREG7
        l32i    a8, a2, PT_AREG8
        l32i    a11, a2, PT_AREG11
        l32i    a12, a2, PT_AREG12
        l32i    a15, a2, PT_AREG15

        movi    a2, 0
        rfe

.Linvalid_mask:

        /* We get here because of an unrecoverable error in the window
         * registers, so set up a dummy frame and kill the user application.
         * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
         */

        movi    a0, 1
        movi    a1, 0

        wsr     a0, windowstart
        wsr     a1, windowbase
        rsync

        movi    a0, 0

        rsr     a3, excsave1
        l32i    a1, a3, EXC_TABLE_KSTK

        movi    a4, (1 << PS_WOE_BIT) | LOCKLEVEL
        wsr     a4, ps
        rsync

        movi    a6, SIGSEGV
        call4   do_exit

        /* shouldn't return, so panic */

        wsr     a0, excsave1
        call0   unrecoverable_exception         # should not return
1:      j       1b


ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_register routine. Luckily, a3 contains a
 * rotated windowstart with only those bits set for frames that haven't been
 * spilled yet. Because a3 is rotated such that bit 0 represents the register
 * frame for the current windowbase - 1, we need to rotate a3 left by the
 * value of the current windowbase + 1 and move it to windowstart.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */
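
/* Rotation sketch in C (illustrative; m is the saved spill mask, wb the
 * current WINDOWBASE):
 *
 *      m = (m << 1) | 1;                       // keep the current frame live
 *      ws = ((m << wb) | (m >> (WSBITS - wb)))
 *           & ((1U << WSBITS) - 1);            // rotate left by wb
 */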

ENTRY(fast_syscall_spill_registers_fixup)

        rsr     a2, windowbase  # get current windowbase (a2 is saved)
        xsr     a0, depc        # restore depc and a0
        ssl     a2              # set shift (32 - WB)

        /* We need to make sure the current registers (a0-a3) are preserved.
         * To do this, we simply set the bit for the current window frame
         * in WS, so that the exception handlers save them to the task stack.
         *
         * Note: we use a3 to set the windowbase, so we take special care
         * of it, saving it in the original _spill_registers frame across
         * the exception handler call.
         */

        xsr     a3, excsave1    # get spill-mask
        slli    a3, a3, 1       # shift left by one
        addi    a3, a3, 1       # set the bit for the current window frame

        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2      # a2 = xxwww1yyxxxwww1yy......
        wsr     a2, windowstart # set corrected windowstart

        srli    a3, a3, 1
        rsr     a2, excsave1
        l32i    a2, a2, EXC_TABLE_DOUBLE_SAVE   # restore a2
        xsr     a2, excsave1
        s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE   # save a3
        l32i    a3, a2, EXC_TABLE_PARAM # original WB (in user task)
        xsr     a2, excsave1

        /* Return to the original (user task) WINDOWBASE.
         * We leave the following frame behind:
         * a0, a1, a2   same
         * a3:          trashed (saved in EXC_TABLE_DOUBLE_SAVE)
         * depc:        depc (we have to return to that address)
         * excsave_1:   exctable
         */

        wsr     a3, windowbase
        rsync

        /* We are now in the original frame when we entered _spill_registers:
         *  a0: return address
         *  a1: used, stack pointer
         *  a2: kernel stack pointer
         *  a3: available
         *  depc: exception address
         *  excsave: exctable
         * Note: This frame might be the same as above.
         */

        /* Setup stack pointer. */

        addi    a2, a2, -PT_USER_SIZE
        s32i    a0, a2, PT_AREG0

        /* Make sure we return to this fixup handler. */

        movi    a3, fast_syscall_spill_registers_fixup_return
        s32i    a3, a2, PT_DEPC         # setup depc

        /* Jump to the exception handler. */

        rsr     a3, excsave1
        rsr     a0, exccause
        addx4   a0, a0, a3                      # find entry in table
        l32i    a0, a0, EXC_TABLE_FAST_USER     # load handler
        l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE
        jx      a0

ENDPROC(fast_syscall_spill_registers_fixup)

ENTRY(fast_syscall_spill_registers_fixup_return)

        /* When we return here, all registers have been restored (a2: DEPC) */

        wsr     a2, depc                # exception address

        /* Restore fixup handler. */

        rsr     a2, excsave1
        s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE
        movi    a3, fast_syscall_spill_registers_fixup
        s32i    a3, a2, EXC_TABLE_FIXUP
        rsr     a3, windowbase
        s32i    a3, a2, EXC_TABLE_PARAM
        l32i    a2, a2, EXC_TABLE_KSTK

        /* Load WB at the time the exception occurred. */

        rsr     a3, sar                 # WB is still in SAR
        neg     a3, a3
        wsr     a3, windowbase
        rsync

        rsr     a3, excsave1
        l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE

        rfde

ENDPROC(fast_syscall_spill_registers_fixup_return)

#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

ENTRY(fast_syscall_spill_registers)

        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -ENOSYS
        rfe

ENDPROC(fast_syscall_spill_registers)

#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:
        call0   unrecoverable_exception         # should not return
1:      j       1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

        /* Save a1 and a3. Note: we don't expect a double exception. */

        s32i    a1, a2, PT_AREG1
        s32i    a3, a2, PT_AREG3

        /* We need to map the page of PTEs for the user task.  Find
         * the pointer to that page.  Also, it's possible for tsk->mm
         * to be NULL while tsk->active_mm is nonzero if we faulted on
         * a vmalloc address.  In that rare case, we must use
         * active_mm instead to avoid a fault in this handler.  See
         *
         * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
         *   (or search the Internet for "mm vs. active_mm")
         *
         *      if (!mm)
         *              mm = tsk->active_mm;
         *      pgd = pgd_offset (mm, regs->excvaddr);
         *      pmd = pmd_offset (pgd, regs->excvaddr);
         *      pmdval = *pmd;
         */

        GET_CURRENT(a1,a2)
        l32i    a0, a1, TASK_MM         # tsk->mm
        beqz    a0, 9f

8:      rsr     a3, excvaddr            # fault address
        _PGD_OFFSET(a0, a3, a1)
        l32i    a0, a0, 0               # read pmdval
        beqz    a0, 2f

        /* Read ptevaddr and convert to top of page-table page.
         *
         *      vpnval = read_ptevaddr_register() & PAGE_MASK;
         *      vpnval += DTLB_WAY_PGTABLE;
         *      pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
         *      write_dtlb_entry (pteval, vpnval);
         *
         * The messy computation for 'pteval' above really simplifies
         * into the following:
         *
         * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
         *                 | PAGE_DIRECTORY
         */

        movi    a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
        add     a0, a0, a1              # pmdval - PAGE_OFFSET
        extui   a1, a0, 0, PAGE_SHIFT   # ... & PAGE_MASK
        xor     a0, a0, a1

        movi    a1, _PAGE_DIRECTORY
        or      a0, a0, a1              # ... | PAGE_DIRECTORY

        /*
         * We utilize all three wired-ways (7-9) to hold pmd translations.
         * Memory regions are mapped to the DTLBs according to bits 28 and 29.
         * This allows us to map the three most common regions to three
         * different DTLBs:
         *  0,1 -> way 7        program (0040.0000) and virtual (c000.0000)
         *  2   -> way 8        shared libraries (2000.0000)
         *  3   -> way 9        stack (3000.0000)
         */
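        /* Way-selection sketch in C (illustrative; vaddr is the fault
         * address):
         *
         *      r = (vaddr >> 28) & 3;                  // 0,1,2,3
         *      way = DTLB_WAY_PGD + ((r * 3) >> 2);    // +0,+0,+1,+2
         */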
1626
1627        extui   a3, a3, 28, 2           # addr. bit 28 and 29   0,1,2,3
1628        rsr     a1, ptevaddr
1629        addx2   a3, a3, a3              # ->                    0,3,6,9
1630        srli    a1, a1, PAGE_SHIFT
1631        extui   a3, a3, 2, 2            # ->                    0,0,1,2
1632        slli    a1, a1, PAGE_SHIFT      # ptevaddr & PAGE_MASK
1633        addi    a3, a3, DTLB_WAY_PGD
1634        add     a1, a1, a3              # ... + way_number
1635
16363:      wdtlb   a0, a1
1637        dsync
1638
1639        /* Exit critical section. */
1640
16414:      rsr     a3, excsave1
1642        movi    a0, 0
1643        s32i    a0, a3, EXC_TABLE_FIXUP
1644
1645        /* Restore the working registers, and return. */
1646
1647        l32i    a0, a2, PT_AREG0
1648        l32i    a1, a2, PT_AREG1
1649        l32i    a3, a2, PT_AREG3
1650        l32i    a2, a2, PT_DEPC
1651
1652        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1653
1654        /* Restore excsave1 and return. */
1655
1656        rsr     a2, depc
1657        rfe
1658
1659        /* Return from double exception. */
1660
16611:      xsr     a2, depc
1662        esync
1663        rfde
1664
16659:      l32i    a0, a1, TASK_ACTIVE_MM  # unlikely case mm == 0
1666        bnez    a0, 8b
1667
1668        /* Even more unlikely case: active_mm == 0.
1669         * We can get here if an NMI arrives in the middle of a context_switch
1670         * that touches the vmalloc area.
1671         */
1672        movi    a0, init_mm
1673        j       8b
1674
1675#if (DCACHE_WAY_SIZE > PAGE_SIZE)
1676
16772:      /* Special case for cache aliasing.
1678         * We (should) only get here if clear_user_page, copy_user_page
1679         * or one of the aliased cache flush functions was preempted by
1680         * another task. Re-establish the temporary mapping to the
1681         * TLBTEMP_BASE areas.
1682         */
1683
1684        /* We shouldn't be in a double exception */
1685
1686        l32i    a0, a2, PT_DEPC
1687        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
1688
1689        /* Make sure the exception originated in the special functions */
1690
1691        movi    a0, __tlbtemp_mapping_start
1692        rsr     a3, epc1
1693        bltu    a3, a0, 2f
1694        movi    a0, __tlbtemp_mapping_end
1695        bgeu    a3, a0, 2f
1696
1697        /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
1698
1699        movi    a3, TLBTEMP_BASE_1
1700        rsr     a0, excvaddr
1701        bltu    a0, a3, 2f
1702
1703        addi    a1, a0, -TLBTEMP_SIZE
1704        bgeu    a1, a3, 2f
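        /* The two checks above implement, roughly (C sketch):
         *
         *      if (excvaddr < TLBTEMP_BASE_1 ||
         *          excvaddr - TLBTEMP_SIZE >= TLBTEMP_BASE_1)
         *              goto invalid;
         */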
1705
1706        /* Check if we have to restore an ITLB mapping. */
1707
1708        movi    a1, __tlbtemp_mapping_itlb
1709        rsr     a3, epc1
1710        sub     a3, a3, a1
1711
1712        /* Calculate VPN */
1713
1714        movi    a1, PAGE_MASK
1715        and     a1, a1, a0
1716
1717        /* Jump for ITLB entry */
1718
1719        bgez    a3, 1f
1720
1721        /* We can use up to two TLBTEMP areas, one for src and one for dst. */
1722
1723        extui   a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
1724        add     a1, a3, a1
1725
1726        /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
1727
1728        mov     a0, a6
1729        movnez  a0, a7, a3
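        /* i.e. a0 = a3 ? a7 : a6 -- select the PPN for this TLBTEMP area */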
1730        j       3b
1731
1732        /* ITLB entry. We only use dst in a6. */
1733
17341:      witlb   a6, a1
1735        isync
1736        j       4b
1737
1738
1739#endif  /* DCACHE_WAY_SIZE > PAGE_SIZE */
1740
1741
17422:      /* Invalid PGD, default exception handling */
1743
1744        rsr     a1, depc
1745        s32i    a1, a2, PT_AREG2
1746        mov     a1, a2
1747
1748        rsr     a2, ps
1749        bbsi.l  a2, PS_UM_BIT, 1f
1750        j       _kernel_exception
17511:      j       _user_exception
1752
1753ENDPROC(fast_second_level_miss)
1754
1755/*
1756 * StoreProhibitedException
1757 *
1758 * Update the pte and rewrite the dtlb mapping for this pte.
1759 *
1760 * Entry condition:
1761 *
1762 *   a0:        trashed, original value saved on stack (PT_AREG0)
1763 *   a1:        a1
1764 *   a2:        new stack pointer, original value in depc
1765 *   a3:        a3
1766 *   depc:      a2, original value saved on stack (PT_DEPC)
1767 *   excsave1:  dispatch table
1768 *
1769 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1770 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1771 */
1772
1773ENTRY(fast_store_prohibited)
1774
1775        /* Save a1 and a3. */
1776
1777        s32i    a1, a2, PT_AREG1
1778        s32i    a3, a2, PT_AREG3
1779
1780        GET_CURRENT(a1,a2)
1781        l32i    a0, a1, TASK_MM         # tsk->mm
1782        beqz    a0, 9f
1783
17848:      rsr     a1, excvaddr            # fault address
1785        _PGD_OFFSET(a0, a1, a3)
1786        l32i    a0, a0, 0
1787        beqz    a0, 2f
1788
1789        /*
1790         * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
1791         * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
1792         */
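        /* As a hedged C-level sketch of the fast path below (names mirror
         * the asm; slow_path stands for the default handler at 2:):
         *
         *      pte = *ptep;
         *      if ((pte & _PAGE_CA_INVALID) == _PAGE_CA_INVALID ||
         *          !(pte & (1 << _PAGE_WRITABLE_BIT)))
         *              goto slow_path;
         *      *ptep = pte | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
         */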
1793
1794        _PTE_OFFSET(a0, a1, a3)
1795        l32i    a3, a0, 0               # read pteval
1796        movi    a1, _PAGE_CA_INVALID
1797        ball    a3, a1, 2f
1798        bbci.l  a3, _PAGE_WRITABLE_BIT, 2f
1799
1800        movi    a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
1801        or      a3, a3, a1
1802        rsr     a1, excvaddr
1803        s32i    a3, a0, 0
1804
1805        /* We need to flush the cache if we have page coloring. */
1806#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1807        dhwb    a0, 0
1808#endif
1809        pdtlb   a0, a1                  # probe DTLB entry for fault address
1810        wdtlb   a3, a0                  # rewrite it with the updated pteval
1811
1812        /* Exit critical section. */
1813
1814        movi    a0, 0
1815        rsr     a3, excsave1
1816        s32i    a0, a3, EXC_TABLE_FIXUP
1817
1818        /* Restore the working registers, and return. */
1819
1820        l32i    a3, a2, PT_AREG3
1821        l32i    a1, a2, PT_AREG1
1822        l32i    a0, a2, PT_AREG0
1823        l32i    a2, a2, PT_DEPC
1824
1825        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1826
1827        rsr     a2, depc
1828        rfe
1829
1830        /* Double exception. Restore FIXUP handler and return. */
1831
18321:      xsr     a2, depc
1833        esync
1834        rfde
1835
18369:      l32i    a0, a1, TASK_ACTIVE_MM  # unlikely case mm == 0
1837        j       8b
1838
18392:      /* If there was a problem, handle fault in C */
1840
1841        rsr     a3, depc        # still holds a2
1842        s32i    a3, a2, PT_AREG2
1843        mov     a1, a2
1844
1845        rsr     a2, ps
1846        bbsi.l  a2, PS_UM_BIT, 1f
1847        j       _kernel_exception
18481:      j       _user_exception
1849
1850ENDPROC(fast_store_prohibited)
1851
1852#endif /* CONFIG_MMU */
1853
1854/*
1855 * System Calls.
1856 *
1857 * void system_call (struct pt_regs* regs, int exccause)
1858 *                            a2                 a3
1859 */
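/*
 * Rough C sketch of the dispatch below (illustrative only; the syscall
 * number comes back from do_syscall_trace_enter via a6, and pt_regs is
 * additionally passed to the handler as a seventh, on-stack argument):
 *
 *      regs->syscall = regs->areg[2];
 *      nr = do_syscall_trace_enter(regs);
 *      if (nr >= __NR_syscall_count || sys_call_table[nr] == sys_ni_syscall)
 *              regs->areg[2] = -ENOSYS;
 *      else
 *              regs->areg[2] = sys_call_table[nr](regs->areg[6],
 *                      regs->areg[3], regs->areg[4], regs->areg[5],
 *                      regs->areg[8], regs->areg[9]);
 *      do_syscall_trace_leave(regs);
 */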
1860        .literal_position
1861
1862ENTRY(system_call)
1863
1864        entry   a1, 32
1865
1866        /* regs->syscall = regs->areg[2] */
1867
1868        l32i    a3, a2, PT_AREG2
1869        mov     a6, a2
1870        s32i    a3, a2, PT_SYSCALL
1871        call4   do_syscall_trace_enter
1872        mov     a3, a6
1873
1874        /* syscall = sys_call_table[syscall_nr] */
1875
1876        movi    a4, sys_call_table
1877        movi    a5, __NR_syscall_count
1878        movi    a6, -ENOSYS
1879        bgeu    a3, a5, 1f
1880
1881        addx4   a4, a3, a4
1882        l32i    a4, a4, 0
1883        movi    a5, sys_ni_syscall
1884        beq     a4, a5, 1f
1885
1886        /* Load args: arg0 - arg5 are passed via regs. */
1887
1888        l32i    a6, a2, PT_AREG6
1889        l32i    a7, a2, PT_AREG3
1890        l32i    a8, a2, PT_AREG4
1891        l32i    a9, a2, PT_AREG5
1892        l32i    a10, a2, PT_AREG8
1893        l32i    a11, a2, PT_AREG9
1894
1895        /* Pass one additional argument to the syscall: pt_regs (on stack) */
1896        s32i    a2, a1, 0
1897
1898        callx4  a4
1899
19001:      /* regs->areg[2] = return_value */
1901
1902        s32i    a6, a2, PT_AREG2
1903        mov     a6, a2
1904        call4   do_syscall_trace_leave
1905        retw
1906
1907ENDPROC(system_call)
1908
1909/*
 1910 * Macro to spill live registers onto the kernel stack.
 1911 *
 1912 * Entry condition: ps.woe is set, ps.excm is cleared
 1913 * Exit condition: windowstart has a single bit set
 1914 * May clobber: a12, a13
1915 */
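/*
 * (Descriptive note: the call12/_entry chain below deliberately walks
 * through every register window, so each call forces a window-overflow
 * exception that spills the live AR registers of the interrupted frames
 * to their stack save areas before the chain unwinds with retw.)
 */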
1916        .macro  spill_registers_kernel
1917
1918#if XCHAL_NUM_AREGS > 16
1919        call12  1f
1920        _j      2f
1921        retw
1922        .align  4
19231:
1924        _entry  a1, 48
1925        addi    a12, a0, 3
1926#if XCHAL_NUM_AREGS > 32
1927        .rept   (XCHAL_NUM_AREGS - 32) / 12
1928        _entry  a1, 48
1929        mov     a12, a0
1930        .endr
1931#endif
1932        _entry  a1, 16
1933#if XCHAL_NUM_AREGS % 12 == 0
1934        mov     a8, a8
1935#elif XCHAL_NUM_AREGS % 12 == 4
1936        mov     a12, a12
1937#elif XCHAL_NUM_AREGS % 12 == 8
1938        mov     a4, a4
1939#endif
1940        retw
19412:
1942#else
1943        mov     a12, a12        # touch a12 to force a window-overflow spill
1944#endif
1945        .endm
1946
1947/*
1948 * Task switch.
1949 *
1950 * struct task*  _switch_to (struct task* prev, struct task* next)
1951 *         a2                              a2                 a3
1952 */
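/*
 * Rough outline of the body (descriptive, not literal C):
 *
 *      save prev's return address, stack pointer and extra user state;
 *      switch CPENABLE to next's coprocessor mask;
 *      spill all register windows to the stack;
 *      point EXC_TABLE_KSTK at next's kernel stack;
 *      restore next's extra state, return address and stack pointer;
 *      retw returns into next's context.
 */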
1953
1954ENTRY(_switch_to)
1955
1956        entry   a1, 48
1957
1958        mov     a11, a3                 # preserve 'next' (a3)
1959
1960        l32i    a4, a2, TASK_THREAD_INFO
1961        l32i    a5, a3, TASK_THREAD_INFO
1962
1963        save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1964
1965#if THREAD_RA > 1020 || THREAD_SP > 1020
1966        addi    a10, a2, TASK_THREAD
1967        s32i    a0, a10, THREAD_RA - TASK_THREAD        # save return address
1968        s32i    a1, a10, THREAD_SP - TASK_THREAD        # save stack pointer
1969#else
1970        s32i    a0, a2, THREAD_RA       # save return address
1971        s32i    a1, a2, THREAD_SP       # save stack pointer
1972#endif
1973
1974#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
1975        movi    a6, __stack_chk_guard
1976        l32i    a8, a3, TASK_STACK_CANARY
1977        s32i    a8, a6, 0
1978#endif
1979
1980        /* Disable ints while we manipulate the stack pointer. */
1981
1982        irq_save a14, a3
1983        rsync
1984
1985        /* Switch CPENABLE */
1986
1987#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
1988        l32i    a3, a5, THREAD_CPENABLE
1989        xsr     a3, cpenable
1990        s32i    a3, a4, THREAD_CPENABLE
1991#endif
1992
1993        /* Flush register file. */
1994
1995        spill_registers_kernel
1996
1997        /* Set kernel stack (and leave critical section)
1998         * Note: It's safe to set it here. The stack will not be overwritten
1999         *       because the kernel stack will only be loaded again after
2000         *       we return from kernel space.
2001         */
2002
2003        rsr     a3, excsave1            # exc_table
2004        addi    a7, a5, PT_REGS_OFFSET
2005        s32i    a7, a3, EXC_TABLE_KSTK
2006
2007        /* restore context of the task 'next' */
2008
2009        l32i    a0, a11, THREAD_RA      # restore return address
2010        l32i    a1, a11, THREAD_SP      # restore stack pointer
2011
2012        load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
2013
2014        wsr     a14, ps
2015        rsync
2016
2017        retw
2018
2019ENDPROC(_switch_to)
2020
2021ENTRY(ret_from_fork)
2022
2023        /* void schedule_tail (struct task_struct *prev)
2024         * Note: prev is still in a6 (return value from fake call4 frame)
2025         */
2026        call4   schedule_tail
2027
2028        mov     a6, a1
2029        call4   do_syscall_trace_leave
2030
2031        j       common_exception_return
2032
2033ENDPROC(ret_from_fork)
2034
2035/*
2036 * Kernel thread creation helper
2037 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
2038 *           left from _switch_to: a6 = prev
2039 */
2040ENTRY(ret_from_kernel_thread)
2041
2042        call4   schedule_tail
2043        mov     a6, a3                  # thread_fn arg: callee sees a6 as a2
2044        callx4  a2                      # call thread_fn(arg)
2045        j       common_exception_return
2046
2047ENDPROC(ret_from_kernel_thread)
2048