/* linux/arch/avr32/kernel/entry-avr32b.S */
   1/*
   2 * Copyright (C) 2004-2006 Atmel Corporation
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 */
   8
   9/*
  10 * This file contains the low-level entry-points into the kernel, that is,
  11 * exception handlers, debug trap handlers, interrupt handlers and the
  12 * system call handler.
  13 */
  14#include <linux/errno.h>
  15
  16#include <asm/asm.h>
  17#include <asm/hardirq.h>
  18#include <asm/irq.h>
  19#include <asm/ocd.h>
  20#include <asm/page.h>
  21#include <asm/pgtable.h>
  22#include <asm/ptrace.h>
  23#include <asm/sysreg.h>
  24#include <asm/thread_info.h>
  25#include <asm/unistd.h>
  26
  27#ifdef CONFIG_PREEMPT
  28# define preempt_stop           mask_interrupts
  29#else
  30# define preempt_stop
  31# define fault_resume_kernel    fault_restore_all
  32#endif
  33
  34#define __MASK(x)       ((1 << (x)) - 1)
  35#define IRQ_MASK        ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
  36                         (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
  37
   38        .section .ex.text,"ax",@progbits
   39        .align  2
        /*
         * Exception vector table.  Each slot is a single "bral" to the
         * real handler, padded to a fixed slot size by the .align
         * directives.  The slot order is dictated by the hardware: the
         * CPU enters at an offset determined by the exception cause
         * (relative to the exception vector base -- presumably EVBA;
         * confirm against the AVR32 Architecture Manual).  Several
         * causes share a handler, e.g. all illegal-opcode-class causes
         * funnel into do_illegal_opcode_ll, and both critical causes
         * go to handle_critical.
         */
   40exception_vectors:
   41        bral    handle_critical
   42        .align  2
   43        bral    handle_critical
   44        .align  2
   45        bral    do_bus_error_write
   46        .align  2
   47        bral    do_bus_error_read
   48        .align  2
   49        bral    do_nmi_ll
   50        .align  2
   51        bral    handle_address_fault
   52        .align  2
   53        bral    handle_protection_fault
   54        .align  2
   55        bral    handle_debug
   56        .align  2
   57        bral    do_illegal_opcode_ll
   58        .align  2
   59        bral    do_illegal_opcode_ll
   60        .align  2
   61        bral    do_illegal_opcode_ll
   62        .align  2
   63        bral    do_fpe_ll
   64        .align  2
   65        bral    do_illegal_opcode_ll
   66        .align  2
   67        bral    handle_address_fault
   68        .align  2
   69        bral    handle_address_fault
   70        .align  2
   71        bral    handle_protection_fault
   72        .align  2
   73        bral    handle_protection_fault
   74        .align  2
   75        bral    do_dtlb_modified
   76
        /*
         * TLB-miss entry points.  Only r0-r3 are saved/restored, so the
         * fast path below (tlb_miss_common) must not touch any other
         * register.  The .org directives pin these entries at fixed
         * offsets within the section, matching the hardware vector
         * layout.
         */
   77#define tlbmiss_save    pushm   r0-r3
   78#define tlbmiss_restore popm    r0-r3
   79
   80        .org    0x50
   81        .global itlb_miss
   82itlb_miss:
   83        tlbmiss_save
   84        rjmp    tlb_miss_common
   85
   86        .org    0x60
   87dtlb_miss_read:
   88        tlbmiss_save
   89        rjmp    tlb_miss_common
   90
   91        .org    0x70
   92dtlb_miss_write:
        /* NOTE: dtlb_miss_write falls straight through into
         * tlb_miss_common below -- no rjmp needed. */
   93        tlbmiss_save
   94
   95        .global tlb_miss_common
   96        .align  2
        /*
         * Hardware TLB refill fast path.  On entry only r0-r3 have been
         * saved (see tlbmiss_save), so everything here must fit in
         * those four registers.
         *
         *   r0 = faulting virtual address (SYSREG_TLBEAR)
         *   r1 = page table base (SYSREG_PTBR)
         *
         * Walks the two-level page table, marks the PTE accessed,
         * loads the hardware bits into TLBELO and writes the TLB entry.
         */
   97tlb_miss_common:
   98        mfsr    r0, SYSREG_TLBEAR
   99        mfsr    r1, SYSREG_PTBR
  100
  101        /*
  102         * First level lookup: The PGD contains virtual pointers to
  103         * the second-level page tables, but they may be NULL if not
  104         * present.
  105         */
  106pgtbl_lookup:
        /* r2 = PGD index, r3 = second-level table pointer,
         * r1 = index into the second-level table */
  107        lsr     r2, r0, PGDIR_SHIFT
  108        ld.w    r3, r1[r2 << 2]
  109        bfextu  r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
  110        cp.w    r3, 0
  111        breq    page_table_not_present
  112
  113        /* Second level lookup */
  114        ld.w    r2, r3[r1 << 2]
  115        mfsr    r0, SYSREG_TLBARLO
  116        bld     r2, _PAGE_BIT_PRESENT
  117        brcc    page_not_present
  118
  119        /* Mark the page as accessed */
  120        sbr     r2, _PAGE_BIT_ACCESSED
  121        st.w    r3[r1 << 2], r2
  122
  123        /* Drop software flags */
  124        andl    r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
  125        mtsr    SYSREG_TLBELO, r2
  126
  127        /* Figure out which entry we want to replace */
        /* clz on TLBARLO (r0) finds the first entry whose accessed bit
         * is clear; carry set means every entry has been used, so the
         * accessed bits are reset and replacement restarts at entry 0. */
  128        mfsr    r1, SYSREG_MMUCR
  129        clz     r2, r0
  130        brcc    1f
  131        mov     r3, -1                  /* All entries have been accessed, */
  132        mov     r2, 0                   /* so start at 0 */
  133        mtsr    SYSREG_TLBARLO, r3      /* and reset TLBAR */
  134
  1351:      bfins   r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
  136        mtsr    SYSREG_MMUCR, r1
  137        tlbw
  138
  139        tlbmiss_restore
  140        rete
  141
  142        /* The slow path of the TLB miss handler */
  143        .align  2
  144page_table_not_present:
  145        /* Do we need to synchronize with swapper_pg_dir? */
        /* Bit 31 set means a kernel (P3/vmalloc-style) address, whose
         * second-level tables live in swapper_pg_dir -- presumably;
         * confirm against the AVR32 memory map. */
  146        bld     r0, 31
  147        brcs    sync_with_swapper_pg_dir
  148
        /*
         * Genuine fault: undo the fast-path save, build a full pt_regs
         * frame (r12_orig slot + stmts), then call do_page_fault with
         * the exception cause in r12 and the regs pointer in r11.
         */
  149page_not_present:
  150        tlbmiss_restore
  151        sub     sp, 4
  152        stmts   --sp, r0-lr
  153        call    save_full_context_ex
  154        mfsr    r12, SYSREG_ECR
  155        mov     r11, sp
  156        call    do_page_fault
  157        rjmp    ret_from_exception
  158
  159        .align  2
  160sync_with_swapper_pg_dir:
  161        /*
  162         * If swapper_pg_dir contains a non-NULL second-level page
  163         * table pointer, copy it into the current PGD. If not, we
  164         * must handle it as a full-blown page fault.
  165         *
  166         * Jumping back to pgtbl_lookup causes an unnecessary lookup,
  167         * but it is guaranteed to be a cache hit, it won't happen
  168         * very often, and we absolutely do not want to sacrifice any
  169         * performance in the fast path in order to improve this.
  170         */
        /* r2 still holds the PGD index computed in pgtbl_lookup */
  171        mov     r1, lo(swapper_pg_dir)
  172        orh     r1, hi(swapper_pg_dir)
  173        ld.w    r3, r1[r2 << 2]
  174        cp.w    r3, 0
  175        breq    page_not_present
  176        mfsr    r1, SYSREG_PTBR
  177        st.w    r1[r2 << 2], r3
  178        rjmp    pgtbl_lookup
  179
 179
 180        /*
 181         * We currently have two bytes left at this point until we
 182         * crash into the system call handler...
 183         *
 184         * Don't worry, the assembler will let us know.
 185         */
 186
 187
 188        /* ---                    System Call                    --- */
 189
  190        .org    0x100
        /*
         * System call entry (supervisor-mode trap).  Builds a pt_regs
         * frame on the kernel stack (r12_orig + r0-lr + RAR/RSR pair),
         * optionally diverts through the syscall-trace path, then
         * dispatches through sys_call_table indexed by r8.
         */
  191system_call:
  192#ifdef CONFIG_PREEMPT
  193        mask_interrupts
  194#endif
  195        pushm   r12             /* r12_orig */
  196        stmts   --sp, r0-lr
  197
        /* Snapshot return address/status before interrupts can nest
         * and overwrite RAR_SUP/RSR_SUP */
  198        mfsr    r0, SYSREG_RAR_SUP
  199        mfsr    r1, SYSREG_RSR_SUP
  200#ifdef CONFIG_PREEMPT
  201        unmask_interrupts
  202#endif
  203        zero_fp
  204        stm     --sp, r0-r1
  205
  206        /* check for syscall tracing */
  207        get_thread_info r0
  208        ld.w    r1, r0[TI_flags]
  209        bld     r1, TIF_SYSCALL_TRACE
  210        brcs    syscall_trace_enter
  211
  212syscall_trace_cont:
        /* r8 = syscall number; reject out-of-range numbers */
  213        cp.w    r8, NR_syscalls
  214        brhs    syscall_badsys
  215
  216        lddpc   lr, syscall_table_addr
  217        ld.w    lr, lr[r8 << 2]
  218        mov     r8, r5          /* 5th argument (6th is pushed by stub) */
  219        icall   lr
  220
  221        .global syscall_return
        /*
         * Common syscall exit.  With interrupts masked, check TI_flags
         * for pending work (signals, reschedule, tracing, breakpoint);
         * if none, unwind the pt_regs frame and "rets" back to the
         * caller.  The syscall return value arrives in r12.
         */
  222syscall_return:
  223        get_thread_info r0
  224        mask_interrupts         /* make sure we don't miss an interrupt
  225                                   setting need_resched or sigpending
  226                                   between sampling and the rets */
  227
  228        /* Store the return value so that the correct value is loaded below */
  229        stdsp   sp[REG_R12], r12
  230
  231        ld.w    r1, r0[TI_flags]
  232        andl    r1, _TIF_ALLWORK_MASK, COH
  233        brne    syscall_exit_work
  234
  235syscall_exit_cont:
        /* Pop saved PC/SR into the supervisor return registers, then
         * restore the GP register file and discard the r12_orig slot */
  236        popm    r8-r9
  237        mtsr    SYSREG_RAR_SUP, r8
  238        mtsr    SYSREG_RSR_SUP, r9
  239        ldmts   sp++, r0-lr
  240        sub     sp, -4          /* r12_orig */
  241        rets
  242
 242
  243        .align  2
        /* PC-relative literal holding the syscall table address,
         * loaded via lddpc in the dispatch path above */
  244syscall_table_addr:
  245        .long   sys_call_table
  246
        /* Out-of-range syscall number: fail with -ENOSYS */
  247syscall_badsys:
  248        mov     r12, -ENOSYS
  249        rjmp    syscall_return
  250
 250
  251        .global ret_from_fork
        /* First return of a newly forked child: finish the scheduler
         * bookkeeping, then return 0 through the normal syscall exit */
  252ret_from_fork:
  253        call   schedule_tail
  254        mov     r12, 0
  255        rjmp    syscall_return
  256
  257        .global ret_from_kernel_thread
        /* First run of a kernel thread: r0/r1/r2 were set up by
         * copy_thread (argument, thread function, return stub) --
         * presumably; confirm against process.c */
  258ret_from_kernel_thread:
  259        call   schedule_tail
  260        mov     r12, r0
  261        mov     lr, r2  /* syscall_return */
  262        mov     pc, r1
 263
        /* Entry-side syscall tracing: preserve the argument registers
         * around the tracer call, then rejoin normal dispatch */
  264syscall_trace_enter:
  265        pushm   r8-r12
  266        call    syscall_trace
  267        popm    r8-r12
  268        rjmp    syscall_trace_cont
 269
        /*
         * Slow syscall-exit path, entered with interrupts masked and
         * the thread flags in r1 (r0 = thread_info).  Handles, in
         * order: exit-side tracing, rescheduling, signal/notify
         * delivery, and finally single-step breakpoints.  After any
         * work that ran with interrupts enabled, the flags are
         * re-sampled and the checks repeat.
         */
  270syscall_exit_work:
  271        bld     r1, TIF_SYSCALL_TRACE
  272        brcc    1f
  273        unmask_interrupts
  274        call    syscall_trace
  275        mask_interrupts
  276        ld.w    r1, r0[TI_flags]
  277
  2781:      bld     r1, TIF_NEED_RESCHED
  279        brcc    2f
  280        unmask_interrupts
  281        call    schedule
  282        mask_interrupts
  283        ld.w    r1, r0[TI_flags]
  284        rjmp    1b
  285
  2862:      mov     r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
  287        tst     r1, r2
  288        breq    3f
  289        unmask_interrupts
  290        mov     r12, sp
  291        mov     r11, r0
  292        call    do_notify_resume
  293        mask_interrupts
  294        ld.w    r1, r0[TI_flags]
  295        rjmp    1b
  296
  2973:      bld     r1, TIF_BREAKPOINT
  298        brcc    syscall_exit_cont
  299        rjmp    enter_monitor_mode
 300
  301        /* This function expects to find offending PC in SYSREG_RAR_EX */
  302        .type   save_full_context_ex, @function
  303        .align  2
        /*
         * Complete a pt_regs frame for an exception handler: push the
         * trapped PC/SR (from RAR_EX/RSR_EX) on top of the r0-lr frame
         * already built by the caller, fix up the saved SP if we
         * trapped in a non-user mode, and re-enable exceptions.
         * Returns to the caller via "ret r12" (r12 holds RSR_EX here).
         *
         * Special case (3:): if RAR_EX points at debug_trampoline, the
         * debug handler installed a return trampoline; recover the
         * real PC/SR from thread_info instead -- see the comment below.
         */
  304save_full_context_ex:
  305        mfsr    r11, SYSREG_RAR_EX
  306        sub     r9, pc, . - debug_trampoline
  307        mfsr    r8, SYSREG_RSR_EX
  308        cp.w    r9, r11
  309        breq    3f
  310        mov     r12, r8
  311        andh    r8, (MODE_MASK >> 16), COH
  312        brne    2f
  313
  3141:      pushm   r11, r12        /* PC and SR */
  315        unmask_exceptions
  316        ret     r12
  317
        /* Trapped in kernel mode: the saved SP slot must point just
         * above this frame, not at the exception-time SP */
  3182:      sub     r10, sp, -(FRAME_SIZE_FULL - REG_LR)
  319        stdsp   sp[4], r10      /* replace saved SP */
  320        rjmp    1b
  321
  322        /*
  323         * The debug handler set up a trampoline to make us
  324         * automatically enter monitor mode upon return, but since
  325         * we're saving the full context, we must assume that the
  326         * exception handler might want to alter the return address
  327         * and/or status register. So we need to restore the original
  328         * context and enter monitor mode manually after the exception
  329         * has been handled.
  330         */
  3313:      get_thread_info r8
  332        ld.w    r11, r8[TI_rar_saved]
  333        ld.w    r12, r8[TI_rsr_saved]
  334        rjmp    1b
  335        .size   save_full_context_ex, . - save_full_context_ex
 336
  337        /* Low-level exception handlers */
        /*
         * Critical exception (unrecoverable/multiple-hit class).
         * Builds a full register frame by hand -- stmts cannot be used
         * here because SP itself may be suspect -- and calls
         * do_critical_exception, which is expected never to return.
         */
  338handle_critical:
  339        /*
  340         * AT32AP700x errata:
  341         *
  342         * After a Java stack overflow or underflow trap, any CPU
  343         * memory access may cause erratic behavior. This will happen
  344         * when the four least significant bits of the JOSP system
  345         * register contains any value between 9 and 15 (inclusive).
  346         *
  347         * Possible workarounds:
  348         *   - Don't use the Java Extension Module
  349         *   - Ensure that the stack overflow and underflow trap
  350         *     handlers do not do any memory access or trigger any
  351         *     exceptions before the overflow/underflow condition is
  352         *     cleared (by incrementing or decrementing the JOSP)
  353         *   - Make sure that JOSP does not contain any problematic
  354         *     value before doing any exception or interrupt
  355         *     processing.
  356         *   - Set up a critical exception handler which writes a
  357         *     known-to-be-safe value, e.g. 4, to JOSP before doing
  358         *     any further processing.
  359         *
  360         * We'll use the last workaround for now since we cannot
  361         * guarantee that user space processes don't use Java mode.
  362         * Non-well-behaving userland will be terminated with extreme
  363         * prejudice.
  364         */
  365#ifdef CONFIG_CPU_AT32AP700X
  366        /*
  367         * There's a chance we can't touch memory, so temporarily
  368         * borrow PTBR to save the stack pointer while we fix things
  369         * up...
  370         */
  371        mtsr    SYSREG_PTBR, sp
  372        mov     sp, 4
  373        mtsr    SYSREG_JOSP, sp
  374        mfsr    sp, SYSREG_PTBR
        /* "sub pc, -2" skips forward one 16-bit slot; presumably acts
         * as a pipeline flush after the mtsr -- TODO confirm against
         * the AVR32 Architecture Manual */
  375        sub     pc, -2
  376
  377        /* Push most of pt_regs on stack. We'll do the rest later */
  378        sub     sp, 4
  379        pushm   r0-r12
  380
  381        /* PTBR mirrors current_thread_info()->task->active_mm->pgd */
  382        get_thread_info r0
  383        ld.w    r1, r0[TI_task]
  384        ld.w    r2, r1[TSK_active_mm]
  385        ld.w    r3, r2[MM_pgd]
  386        mtsr    SYSREG_PTBR, r3
  387#else
  388        sub     sp, 4
  389        pushm   r0-r12
  390#endif
        /* Finish the frame: saved SP (r0), LR (r1), trapped PC (r2)
         * and SR (r3), matching the pt_regs layout */
  391        sub     r0, sp, -(14 * 4)
  392        mov     r1, lr
  393        mfsr    r2, SYSREG_RAR_EX
  394        mfsr    r3, SYSREG_RSR_EX
  395        pushm   r0-r3
  396
  397        mfsr    r12, SYSREG_ECR
  398        mov     r11, sp
  399        call    do_critical_exception
  400
  401        /* We should never get here... */
  402bad_return:
  403        sub     r12, pc, (. - 1f)
  404        bral    panic
  405        .align  2
  4061:      .asciz  "Return from critical exception!"
 407
  408        .align  1
        /*
         * Bus error handlers.  Both build a pt_regs frame and call
         * do_bus_error(addr=BEAR in r12, write-flag in r11, regs in
         * r10); the write variant merely sets r11=1 and falls through
         * to the shared tail at 1:.
         */
  409do_bus_error_write:
  410        sub     sp, 4
  411        stmts   --sp, r0-lr
  412        call    save_full_context_ex
  413        mov     r11, 1
  414        rjmp    1f
  415
  416do_bus_error_read:
  417        sub     sp, 4
  418        stmts   --sp, r0-lr
  419        call    save_full_context_ex
  420        mov     r11, 0
  4211:      mfsr    r12, SYSREG_BEAR
  422        mov     r10, sp
  423        call    do_bus_error
  424        rjmp    ret_from_exception
 425
  426        .align  1
        /*
         * NMI handler.  Saves a full frame, fixes up the saved SP if
         * the NMI hit a non-user mode (2:), and calls do_nmi.  On
         * return, do_nmi's verdict in r0 selects between the normal
         * unwind and the minimal kernel-mode unwind at 3: -- presumably
         * a nested/kernel-context distinction; confirm against do_nmi.
         */
  427do_nmi_ll:
  428        sub     sp, 4
  429        stmts   --sp, r0-lr
  430        mfsr    r9, SYSREG_RSR_NMI
  431        mfsr    r8, SYSREG_RAR_NMI
  432        bfextu  r0, r9, MODE_SHIFT, 3
  433        brne    2f
  434
  4351:      pushm   r8, r9  /* PC and SR */
  436        mfsr    r12, SYSREG_ECR
  437        mov     r11, sp
  438        call    do_nmi
  439        popm    r8-r9
  440        mtsr    SYSREG_RAR_NMI, r8
        /* r0 still holds the trapped-mode field sampled above */
  441        tst     r0, r0
  442        mtsr    SYSREG_RSR_NMI, r9
  443        brne    3f
  444
  445        ldmts   sp++, r0-lr
  446        sub     sp, -4          /* skip r12_orig */
  447        rete
  448
        /* Non-user mode: point the saved SP just above this frame */
  4492:      sub     r10, sp, -(FRAME_SIZE_FULL - REG_LR)
  450        stdsp   sp[4], r10      /* replace saved SP */
  451        rjmp    1b
  452
  4533:      popm    lr
  454        sub     sp, -4          /* skip sp */
  455        popm    r0-r12
  456        sub     sp, -4          /* skip r12_orig */
  457        rete
 458
        /* Address exception: full frame, then do_address_exception
         * with the cause in r12 and regs in r11 */
  459handle_address_fault:
  460        sub     sp, 4
  461        stmts   --sp, r0-lr
  462        call    save_full_context_ex
  463        mfsr    r12, SYSREG_ECR
  464        mov     r11, sp
  465        call    do_address_exception
  466        rjmp    ret_from_exception
 467
        /* Protection fault: same frame setup, routed to the generic
         * page-fault handler (cause in r12, regs in r11) */
  468handle_protection_fault:
  469        sub     sp, 4
  470        stmts   --sp, r0-lr
  471        call    save_full_context_ex
  472        mfsr    r12, SYSREG_ECR
  473        mov     r11, sp
  474        call    do_page_fault
  475        rjmp    ret_from_exception
 476
  477        .align  1
        /* Shared handler for all illegal-opcode-class exceptions
         * (see the vector table): cause in r12, regs in r11 */
  478do_illegal_opcode_ll:
  479        sub     sp, 4
  480        stmts   --sp, r0-lr
  481        call    save_full_context_ex
  482        mfsr    r12, SYSREG_ECR
  483        mov     r11, sp
  484        call    do_illegal_opcode
  485        rjmp    ret_from_exception
 486
        /*
         * DTLB-modified (first write to a clean page): set the dirty
         * bit in the PTE and refresh the TLB entry, all with only
         * r0-r3.  The PGD entry holds a physical page-table address
         * here; it is converted to a P1 (kernel-segment) virtual
         * address before dereferencing.
         */
  487do_dtlb_modified:
  488        pushm   r0-r3
  489        mfsr    r1, SYSREG_TLBEAR
  490        mfsr    r0, SYSREG_PTBR
  491        lsr     r2, r1, PGDIR_SHIFT
  492        ld.w    r0, r0[r2 << 2]
        /* Isolate the PTE index bits of the faulting address */
  493        lsl     r1, (32 - PGDIR_SHIFT)
  494        lsr     r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
  495
  496        /* Translate to virtual address in P1 */
  497        andl    r0, 0xf000
  498        sbr     r0, 31
  499        add     r2, r0, r1 << 2
  500        ld.w    r3, r2[0]
  501        sbr     r3, _PAGE_BIT_DIRTY
  502        mov     r0, r3
  503        st.w    r2[0], r3
  504
  505        /* The page table is up-to-date. Update the TLB entry as well */
  506        andl    r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
  507        mtsr    SYSREG_TLBELO, r0
  508
  509        /* MMUCR[DRP] is updated automatically, so let's go... */
  510        tlbw
  511
  512        popm    r0-r3
  513        rete
 514
        /*
         * Floating-point exception.  Unlike the other handlers, the
         * cause passed in r12 is the literal 26 rather than SYSREG_ECR
         * -- presumably the fixed ECR value for the FP exception;
         * TODO confirm against the AVR32 exception-cause table.
         */
  515do_fpe_ll:
  516        sub     sp, 4
  517        stmts   --sp, r0-lr
  518        call    save_full_context_ex
  519        unmask_interrupts
  520        mov     r12, 26
  521        mov     r11, sp
  522        call    do_fpe
  523        rjmp    ret_from_exception
 524
        /*
         * Common exception-return path.  With interrupts masked,
         * decide from the saved SR whether we return to kernel
         * (fault_resume_kernel) or user mode; a user return first
         * checks TI_flags for pending work (fault_exit_work).
         */
  525ret_from_exception:
  526        mask_interrupts
  527        lddsp   r4, sp[REG_SR]
  528
  529        andh    r4, (MODE_MASK >> 16), COH
  530        brne    fault_resume_kernel
  531
  532        get_thread_info r0
  533        ld.w    r1, r0[TI_flags]
  534        andl    r1, _TIF_WORK_MASK, COH
  535        brne    fault_exit_work
  536
        /* No work pending: restore PC/SR into the exception return
         * registers, pop the GP frame, discard r12_orig, and rete */
  537fault_resume_user:
  538        popm    r8-r9
  539        mask_exceptions
  540        mtsr    SYSREG_RAR_EX, r8
  541        mtsr    SYSREG_RSR_EX, r9
  542        ldmts   sp++, r0-lr
  543        sub     sp, -4
  544        rete
 545
        /*
         * Return to kernel mode.  Under CONFIG_PREEMPT, first try a
         * kernel preemption: only when preempt_count is zero,
         * TIF_NEED_RESCHED is set, and the interrupted context had
         * interrupts enabled (GM clear in the saved SR).
         * (Without CONFIG_PREEMPT this label is aliased to
         * fault_restore_all by the macro at the top of the file.)
         */
  546fault_resume_kernel:
  547#ifdef CONFIG_PREEMPT
  548        get_thread_info r0
  549        ld.w    r2, r0[TI_preempt_count]
  550        cp.w    r2, 0
  551        brne    1f
  552        ld.w    r1, r0[TI_flags]
  553        bld     r1, TIF_NEED_RESCHED
  554        brcc    1f
  555        lddsp   r4, sp[REG_SR]
  556        bld     r4, SYSREG_GM_OFFSET
  557        brcs    1f
  558        call    preempt_schedule_irq
  5591:
  560#endif
  561
  562        popm    r8-r9
  563        mask_exceptions
        /* NOTE(review): r1 is overwritten by the popm below; this read
         * may only serve to serialize after mask_exceptions -- confirm */
  564        mfsr    r1, SYSREG_SR
  565        mtsr    SYSREG_RAR_EX, r8
  566        mtsr    SYSREG_RSR_EX, r9
  567        popm    lr
  568        sub     sp, -4          /* ignore SP */
  569        popm    r0-r12
  570        sub     sp, -4          /* ignore r12_orig */
  571        rete
 572
        /*
         * irq_exit_work: an interrupt handler found pending work on a
         * user-mode return.  Switch the CPU from interrupt mode to
         * exception mode (rewrite the mode bits in SR) so the shared
         * fault_exit_work/fault_resume_user path can be reused.
         */
  573irq_exit_work:
  574        /* Switch to exception mode so that we can share the same code. */
  575        mfsr    r8, SYSREG_SR
  576        cbr     r8, SYSREG_M0_OFFSET
  577        orh     r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
  578        mtsr    SYSREG_SR, r8
        /* pipeline flush after the SR write -- presumably; see the
         * same idiom in debug_fixup_regs */
  579        sub     pc, -2
  580        get_thread_info r0
  581        ld.w    r1, r0[TI_flags]
  582
        /*
         * Work loop before returning to user space: reschedule,
         * deliver signals/notifications (re-sampling TI_flags after
         * each), then handle a pending single-step breakpoint.
         * r0 = thread_info, r1 = flags; interrupts are masked except
         * around the calls.
         */
  583fault_exit_work:
  584        bld     r1, TIF_NEED_RESCHED
  585        brcc    1f
  586        unmask_interrupts
  587        call    schedule
  588        mask_interrupts
  589        ld.w    r1, r0[TI_flags]
  590        rjmp    fault_exit_work
  591
  5921:      mov     r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
  593        tst     r1, r2
  594        breq    2f
  595        unmask_interrupts
  596        mov     r12, sp
  597        mov     r11, r0
  598        call    do_notify_resume
  599        mask_interrupts
  600        ld.w    r1, r0[TI_flags]
  601        rjmp    fault_exit_work
  602
  6032:      bld     r1, TIF_BREAKPOINT
  604        brcc    fault_resume_user
  605        rjmp    enter_monitor_mode
 606
  607        .section .kprobes.text, "ax", @progbits
  608        .type   handle_debug, @function
        /*
         * Debug exception entry (monitor mode).  Builds a pt_regs
         * frame from RAR_DBG/RSR_DBG, fixes up LR/SP if the trapped
         * context was not in supervisor mode (debug_fixup_regs), and
         * calls do_debug.  The return path arms hardware single-step
         * (OCD_DC[SS]) when TIF_SINGLE_STEP is set, then retd's back.
         */
  609handle_debug:
  610        sub     sp, 4           /* r12_orig */
  611        stmts   --sp, r0-lr
  612        mfsr    r8, SYSREG_RAR_DBG
  613        mfsr    r9, SYSREG_RSR_DBG
  614        unmask_exceptions
  615        pushm   r8-r9
        /* r9 = mode of the trapped context; nonzero = not supervisor */
  616        bfextu  r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
  617        brne    debug_fixup_regs
  618
  619.Ldebug_fixup_cont:
  620#ifdef CONFIG_TRACE_IRQFLAGS
  621        call    trace_hardirqs_off
  622#endif
  623        mov     r12, sp
  624        call    do_debug
        /* do_debug may hand back a different regs pointer in r12 */
  625        mov     sp, r12
  626
  627        lddsp   r2, sp[REG_SR]
  628        bfextu  r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
  629        brne    debug_resume_kernel
  630
  631        get_thread_info r0
  632        ld.w    r1, r0[TI_flags]
  633        mov     r2, _TIF_DBGWORK_MASK
  634        tst     r1, r2
  635        brne    debug_exit_work
  636
  637        bld     r1, TIF_SINGLE_STEP
  638        brcc    1f
  639        mfdr    r4, OCD_DC
  640        sbr     r4, OCD_DC_SS_BIT
  641        mtdr    OCD_DC, r4
  642
  6431:      popm    r10,r11
  644        mask_exceptions
  645        mtsr    SYSREG_RSR_DBG, r11
  646        mtsr    SYSREG_RAR_DBG, r10
  647#ifdef CONFIG_TRACE_IRQFLAGS
  648        call    trace_hardirqs_on
  6491:
  650#endif
  651        ldmts   sp++, r0-lr
  652        sub     sp, -4
  653        retd
  654        .size   handle_debug, . - handle_debug
 655
  656        /* Mode of the trapped context is in r9 */
  657        .type   debug_fixup_regs, @function
        /*
         * The debug exception hit a non-supervisor context, so the LR
         * saved by stmts was the wrong banked copy.  Temporarily
         * switch SR to the trapped mode to read its LR, store it into
         * the frame, switch back, and fix the saved SP as well.
         * Each SR write is followed by "sub pc, -2" -- presumably a
         * pipeline flush; confirm against the AVR32 manual.
         */
  658debug_fixup_regs:
  659        mfsr    r8, SYSREG_SR
  660        mov     r10, r8
  661        bfins   r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
  662        mtsr    SYSREG_SR, r8
  663        sub     pc, -2
  664        stdsp   sp[REG_LR], lr
  665        mtsr    SYSREG_SR, r10
  666        sub     pc, -2
  667        sub     r8, sp, -FRAME_SIZE_FULL
  668        stdsp   sp[REG_SP], r8
  669        rjmp    .Ldebug_fixup_cont
  670        .size   debug_fixup_regs, . - debug_fixup_regs
 671
  672        .type   debug_resume_kernel, @function
        /*
         * Return from the debug exception to a non-user context
         * (trapped mode in r3).  Restore RAR_DBG/RSR_DBG, then briefly
         * switch SR to the trapped mode to restore its banked LR
         * before the final retd.  Mirrors debug_fixup_regs in reverse.
         */
  673debug_resume_kernel:
  674        mask_exceptions
  675        popm    r10, r11
  676        mtsr    SYSREG_RAR_DBG, r10
  677        mtsr    SYSREG_RSR_DBG, r11
  678#ifdef CONFIG_TRACE_IRQFLAGS
  679        bld     r11, SYSREG_GM_OFFSET
  680        brcc    1f
  681        call    trace_hardirqs_on
  6821:
  683#endif
  684        mfsr    r2, SYSREG_SR
  685        mov     r1, r2
  686        bfins   r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
  687        mtsr    SYSREG_SR, r2
  688        sub     pc, -2
  689        popm    lr
  690        mtsr    SYSREG_SR, r1
  691        sub     pc, -2
  692        sub     sp, -4          /* skip SP */
  693        popm    r0-r12
  694        sub     sp, -4
  695        retd
  696        .size   debug_resume_kernel, . - debug_resume_kernel
 697
  698        .type   debug_exit_work, @function
  699debug_exit_work:
  700        /*
  701         * We must return from Monitor Mode using a retd, and we must
  702         * not schedule since that involves the D bit in SR getting
  703         * cleared by something other than the debug hardware. This
  704         * may cause undefined behaviour according to the Architecture
  705         * manual.
  706         *
  707         * So we fix up the return address and status and return to a
  708         * stub below in Exception mode. From there, we can follow the
  709         * normal exception return path.
  710         *
  711         * The real return address and status registers are stored on
  712         * the stack in the way the exception return path understands,
  713         * so no need to fix anything up there.
  714         */
        /* Point RAR_DBG at fault_exit_work and craft an RSR_DBG that
         * lands us there in exception mode with interrupts/exceptions
         * masked, then retd into it */
  715        sub     r8, pc, . - fault_exit_work
  716        mtsr    SYSREG_RAR_DBG, r8
  717        mov     r9, 0
  718        orh     r9, hi(SR_EM | SR_GM | MODE_EXCEPTION)
  719        mtsr    SYSREG_RSR_DBG, r9
  720        sub     pc, -2
  721        retd
  722        .size   debug_exit_work, . - debug_exit_work
 723
        /* Symbolic aliases so the IRQ_LEVEL macro can form the
         * per-level RAR/RSR register names with \level */
  724        .set    rsr_int0,       SYSREG_RSR_INT0
  725        .set    rsr_int1,       SYSREG_RSR_INT1
  726        .set    rsr_int2,       SYSREG_RSR_INT2
  727        .set    rsr_int3,       SYSREG_RSR_INT3
  728        .set    rar_int0,       SYSREG_RAR_INT0
  729        .set    rar_int1,       SYSREG_RAR_INT1
  730        .set    rar_int2,       SYSREG_RAR_INT2
  731        .set    rar_int3,       SYSREG_RAR_INT3
  732
        /*
         * Generates the interrupt handler for one priority level.
         * Saves a pt_regs frame, calls do_IRQ(level, regs), then picks
         * a return path based on the interrupted mode:
         *   - user mode: check TI_flags, possibly via irq_exit_work
         *   - supervisor (2:): cpu-idle wakeup fixup and, with
         *     CONFIG_PREEMPT, a kernel-preemption attempt (3:)
         *   - with CONFIG_PREEMPT, an IRQ that hit the system_call
         *     entry point itself (4:) is backed out by setting bit 16
         *     (presumably GM) in the saved SR so the syscall restarts
         *     with interrupts masked -- confirm against SR layout.
         */
  733        .macro  IRQ_LEVEL level
  734        .type   irq_level\level, @function
  735irq_level\level:
  736        sub     sp, 4           /* r12_orig */
  737        stmts   --sp,r0-lr
  738        mfsr    r8, rar_int\level
  739        mfsr    r9, rsr_int\level
  740
  741#ifdef CONFIG_PREEMPT
  742        sub     r11, pc, (. - system_call)
  743        cp.w    r11, r8
  744        breq    4f
  745#endif
  746
  747        pushm   r8-r9
  748
  749        mov     r11, sp
  750        mov     r12, \level
  751
  752        call    do_IRQ
  753
  754        lddsp   r4, sp[REG_SR]
  755        bfextu  r4, r4, SYSREG_M0_OFFSET, 3
  756        cp.w    r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
  757        breq    2f
  758        cp.w    r4, MODE_USER >> SYSREG_M0_OFFSET
  759#ifdef CONFIG_PREEMPT
  760        brne    3f
  761#else
  762        brne    1f
  763#endif
  764
  765        get_thread_info r0
  766        ld.w    r1, r0[TI_flags]
  767        andl    r1, _TIF_WORK_MASK, COH
  768        brne    irq_exit_work
  769
  7701:
  771#ifdef CONFIG_TRACE_IRQFLAGS
  772        call    trace_hardirqs_on
  773#endif
  774        popm    r8-r9
  775        mtsr    rar_int\level, r8
  776        mtsr    rsr_int\level, r9
  777        ldmts   sp++,r0-lr
  778        sub     sp, -4          /* ignore r12_orig */
  779        rete
  780
  781#ifdef CONFIG_PREEMPT
  7824:      mask_interrupts
  783        mfsr    r8, rsr_int\level
  784        sbr     r8, 16
  785        mtsr    rsr_int\level, r8
  786        ldmts   sp++, r0-lr
  787        sub     sp, -4          /* ignore r12_orig */
  788        rete
  789#endif
  790
        /* Interrupted the idle loop mid-sleep: redirect the saved PC
         * to cpu_idle_skip_sleep so the core doesn't go back to sleep */
  7912:      get_thread_info r0
  792        ld.w    r1, r0[TI_flags]
  793        bld     r1, TIF_CPU_GOING_TO_SLEEP
  794#ifdef CONFIG_PREEMPT
  795        brcc    3f
  796#else
  797        brcc    1b
  798#endif
  799        sub     r1, pc, . - cpu_idle_skip_sleep
  800        stdsp   sp[REG_PC], r1
  801#ifdef CONFIG_PREEMPT
  8023:      get_thread_info r0
  803        ld.w    r2, r0[TI_preempt_count]
  804        cp.w    r2, 0
  805        brne    1b
  806        ld.w    r1, r0[TI_flags]
  807        bld     r1, TIF_NEED_RESCHED
  808        brcc    1b
  809        lddsp   r4, sp[REG_SR]
  810        bld     r4, SYSREG_GM_OFFSET
  811        brcs    1b
  812        call    preempt_schedule_irq
  813#endif
  814        rjmp    1b
  815        .endm
  816
  817        .section .irq.text,"ax",@progbits
  818
  819        .global irq_level0
  820        .global irq_level1
  821        .global irq_level2
  822        .global irq_level3
  823        IRQ_LEVEL 0
  824        IRQ_LEVEL 1
  825        IRQ_LEVEL 2
  826        IRQ_LEVEL 3
 827
  828        .section .kprobes.text, "ax", @progbits
  829        .type   enter_monitor_mode, @function
  830enter_monitor_mode:
  831        /*
  832         * We need to enter monitor mode to do a single step. The
  833         * monitor code will alter the return address so that we
  834         * return directly to the user instead of returning here.
  835         */
        /* If the breakpoint instruction falls through, the debug
         * hardware was not armed -- treat that as fatal */
  836        breakpoint
  837        rjmp    breakpoint_failed
  838
  839        .size   enter_monitor_mode, . - enter_monitor_mode
 840
  841        .type   debug_trampoline, @function
  842        .global debug_trampoline
        /*
         * Target the debug handler points RAR_EX at so an exception
         * taken while debugging re-enters monitor mode (see the 3:
         * case in save_full_context_ex).  Rebuilds a frame with the
         * real PC/SR recovered from thread_info, then breakpoints.
         */
  843debug_trampoline:
  844        /*
  845         * Save the registers on the stack so that the monitor code
  846         * can find them easily.
  847         */
  848        sub     sp, 4           /* r12_orig */
  849        stmts   --sp, r0-lr
  850        get_thread_info r0
  851        ld.w    r8, r0[TI_rar_saved]
  852        ld.w    r9, r0[TI_rsr_saved]
  853        pushm   r8-r9
  854
  855        /*
  856         * The monitor code will alter the return address so we don't
  857         * return here.
  858         */
  859        breakpoint
  860        rjmp    breakpoint_failed
  861        .size   debug_trampoline, . - debug_trampoline
 862
  863        .type breakpoint_failed, @function
  864breakpoint_failed:
  865        /*
  866         * Something went wrong. Perhaps the debug hardware isn't
  867         * enabled?
  868         */
        /* die(msg, regs, SIGKILL), then spin forever -- die is not
         * expected to return */
  869        lda.w   r12, msg_breakpoint_failed
  870        mov     r11, sp
  871        mov     r10, 9          /* SIGKILL */
  872        call    die
  8731:      rjmp    1b
  874
  875msg_breakpoint_failed:
  876        .asciz  "Failed to enter Debug Mode"
 877