/* linux/arch/microblaze/kernel/entry-nommu.S */
   1/*
   2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
   3 * Copyright (C) 2007-2009 PetaLogix
   4 * Copyright (C) 2006 Atmark Techno, Inc.
   5 *
   6 * This file is subject to the terms and conditions of the GNU General Public
   7 * License. See the file "COPYING" in the main directory of this archive
   8 * for more details.
   9 */
  10
  11#include <linux/linkage.h>
  12#include <asm/thread_info.h>
  13#include <linux/errno.h>
  14#include <asm/entry.h>
  15#include <asm/asm-offsets.h>
  16#include <asm/registers.h>
  17#include <asm/unistd.h>
  18#include <asm/percpu.h>
  19#include <asm/signal.h>
  20
/*
 * Interrupt-enable / break-in-progress helpers.
 * When the CPU is built with the optional msrset/msrclr instructions
 * (USE_MSR_INSTR) single MSR bits can be flipped in one instruction;
 * otherwise fall back to a read-modify-write of rmsr, clobbering the
 * r11 scratch register.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
        .macro  disable_irq
        msrclr r0, MSR_IE               /* clear MSR[IE]; old MSR value discarded into r0 */
        .endm

        .macro  enable_irq
        msrset r0, MSR_IE               /* set MSR[IE] */
        .endm

        .macro  clear_bip
        msrclr r0, MSR_BIP              /* clear MSR[BIP] (break in progress) */
        .endm
#else
        /* No msrset/msrclr: emulate with mfs/mts; r11 is clobbered. */
        .macro  disable_irq
        mfs r11, rmsr
        andi r11, r11, ~MSR_IE
        mts rmsr, r11
        .endm

        .macro  enable_irq
        mfs r11, rmsr
        ori r11, r11, MSR_IE
        mts rmsr, r11
        .endm

        .macro  clear_bip
        mfs r11, rmsr
        andi r11, r11, ~MSR_BIP
        mts rmsr, r11
        .endm
#endif
  52
/*
 * _interrupt: hardware interrupt entry (vectored from the IVT).
 * Builds a struct pt_regs frame on the kernel stack — switching away
 * from the user stack if PER_CPU(KM) says we were in user mode — then
 * calls do_IRQ(regs).  On MicroBlaze r14 holds the address of the
 * interrupted instruction; r11 is used as scratch throughout.
 */
ENTRY(_interrupt)
        swi     r1, r0, PER_CPU(ENTRY_SP)       /* save the current sp */
        swi     r11, r0, PER_CPU(R11_SAVE)      /* temporarily save r11 */
        lwi     r11, r0, PER_CPU(KM)            /* load mode indicator */
        beqid   r11, 1f                         /* KM == 0: came from user mode */
        nop
        brid    2f                              /* jump over */
        addik   r1, r1, (-PT_SIZE)      /* room for pt_regs (delay slot) */
1:                                              /* switch to kernel stack */
        lwi     r1, r0, PER_CPU(CURRENT_SAVE)   /* get the saved current */
        lwi     r1, r1, TS_THREAD_INFO          /* get the thread info */
        /* calculate kernel stack pointer (pt_regs at the stack top) */
        addik   r1, r1, THREAD_SIZE - PT_SIZE
2:
        swi     r11, r1, PT_MODE                /* store the mode */
        lwi     r11, r0, PER_CPU(R11_SAVE)      /* reload r11 */
        /* save the general purpose registers into pt_regs */
        swi     r2, r1, PT_R2
        swi     r3, r1, PT_R3
        swi     r4, r1, PT_R4
        swi     r5, r1, PT_R5
        swi     r6, r1, PT_R6
        swi     r7, r1, PT_R7
        swi     r8, r1, PT_R8
        swi     r9, r1, PT_R9
        swi     r10, r1, PT_R10
        swi     r11, r1, PT_R11
        swi     r12, r1, PT_R12
        swi     r13, r1, PT_R13
        swi     r14, r1, PT_R14
        swi     r14, r1, PT_PC          /* r14 = interrupted PC; resume there */
        swi     r15, r1, PT_R15
        swi     r16, r1, PT_R16
        swi     r17, r1, PT_R17
        swi     r18, r1, PT_R18
        swi     r19, r1, PT_R19
        swi     r20, r1, PT_R20
        swi     r21, r1, PT_R21
        swi     r22, r1, PT_R22
        swi     r23, r1, PT_R23
        swi     r24, r1, PT_R24
        swi     r25, r1, PT_R25
        swi     r26, r1, PT_R26
        swi     r27, r1, PT_R27
        swi     r28, r1, PT_R28
        swi     r29, r1, PT_R29
        swi     r30, r1, PT_R30
        swi     r31, r1, PT_R31
        /* special purpose registers */
        mfs     r11, rmsr
        swi     r11, r1, PT_MSR
        mfs     r11, rear
        swi     r11, r1, PT_EAR
        mfs     r11, resr
        swi     r11, r1, PT_ESR
        mfs     r11, rfsr
        swi     r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi     r11, r0, PER_CPU(ENTRY_SP)
        swi     r11, r1, PT_R1
        /* update mode indicator we are in kernel mode */
        addik   r11, r0, 1
        swi     r11, r0, PER_CPU(KM)
        /* restore r31 (always holds the current task in kernel mode) */
        lwi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* prepare the link register, the argument and jump.
         * do_IRQ returns via rtsd r15, 8, i.e. to ret_from_intr. */
        addik   r15, r0, ret_from_intr - 8
        addk    r6, r0, r15             /* copy of return address in r6 */
        braid   do_IRQ
        add     r5, r0, r1              /* arg0 = pt_regs (delay slot) */
 122
/*
 * ret_from_intr: common interrupt exit.  If we interrupted user mode,
 * handle reschedule and signal/notify work first; then restore the
 * full register context saved by _interrupt and return with rtid.
 */
ret_from_intr:
        lwi     r11, r1, PT_MODE        /* mode we interrupted */
        bneid   r11, no_intr_resched    /* kernel mode: skip the work checks */

        /* (the lwi below sits in the bneid delay slot — harmless on the
         * kernel-mode path, it only loads a scratch register) */
        lwi     r6, r31, TS_THREAD_INFO /* get thread info */
        lwi     r19, r6, TI_FLAGS       /* get flags in thread info */
                                /* do an extra work if any bits are set */

        andi    r11, r19, _TIF_NEED_RESCHED
        beqi    r11, 1f
        bralid  r15, schedule
        nop
1:      andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        beqid   r11, no_intr_resched
        addk    r5, r1, r0              /* arg0 = pt_regs (delay slot) */
        bralid  r15, do_notify_resume
        addk    r6, r0, r0              /* arg1 = 0: not in a syscall (delay slot) */

no_intr_resched:
        /* Disable interrupts, we are now committed to the state restore */
        disable_irq

        /* save mode indicator */
        lwi     r11, r1, PT_MODE
        swi     r11, r0, PER_CPU(KM)

        /* save r31 */
        swi     r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
        /* special purpose registers first, while r11 is still free */
        lwi     r11, r1, PT_FSR
        mts     rfsr, r11
        lwi     r11, r1, PT_ESR
        mts     resr, r11
        lwi     r11, r1, PT_EAR
        mts     rear, r11
        lwi     r11, r1, PT_MSR
        mts     rmsr, r11

        /* general purpose registers, r1 last since it is the frame base */
        lwi     r31, r1, PT_R31
        lwi     r30, r1, PT_R30
        lwi     r29, r1, PT_R29
        lwi     r28, r1, PT_R28
        lwi     r27, r1, PT_R27
        lwi     r26, r1, PT_R26
        lwi     r25, r1, PT_R25
        lwi     r24, r1, PT_R24
        lwi     r23, r1, PT_R23
        lwi     r22, r1, PT_R22
        lwi     r21, r1, PT_R21
        lwi     r20, r1, PT_R20
        lwi     r19, r1, PT_R19
        lwi     r18, r1, PT_R18
        lwi     r17, r1, PT_R17
        lwi     r16, r1, PT_R16
        lwi     r15, r1, PT_R15
        lwi     r14, r1, PT_PC          /* resume address for rtid */
        lwi     r13, r1, PT_R13
        lwi     r12, r1, PT_R12
        lwi     r11, r1, PT_R11
        lwi     r10, r1, PT_R10
        lwi     r9, r1, PT_R9
        lwi     r8, r1, PT_R8
        lwi     r7, r1, PT_R7
        lwi     r6, r1, PT_R6
        lwi     r5, r1, PT_R5
        lwi     r4, r1, PT_R4
        lwi     r3, r1, PT_R3
        lwi     r2, r1, PT_R2
        lwi     r1, r1, PT_R1           /* finally switch back the stack */
        rtid    r14, 0                  /* return from interrupt, re-enables IE */
        nop
 195
/* _reset: jump to the absolute reset vector at address 0. */
ENTRY(_reset)
        brai    0;
 198
/*
 * _user_exception: system call entry.
 * Syscall number arrives in r12, arguments in r5-r10, return value
 * goes back in r3.  A pt_regs frame is built on the kernel stack and
 * the handler from sys_call_table is dispatched; it returns through
 * the ret_to_user epilogue.  r11 is scratch.
 */
ENTRY(_user_exception)
        swi     r1, r0, PER_CPU(ENTRY_SP)       /* save the current sp */
        swi     r11, r0, PER_CPU(R11_SAVE)      /* temporarily save r11 */
        lwi     r11, r0, PER_CPU(KM)            /* load mode indicator */
        beqid   r11, 1f                         /* Already in kernel mode? */
        nop
        brid    2f                              /* jump over */
        addik   r1, r1, (-PT_SIZE)      /* Room for pt_regs (delay slot) */
1:                                              /* Switch to kernel stack */
        lwi     r1, r0, PER_CPU(CURRENT_SAVE)   /* get the saved current */
        lwi     r1, r1, TS_THREAD_INFO          /* get the thread info */
        /* calculate kernel stack pointer */
        addik   r1, r1, THREAD_SIZE - PT_SIZE
2:
        swi     r11, r1, PT_MODE                /* store the mode */
        lwi     r11, r0, PER_CPU(R11_SAVE)      /* reload r11 */
        /* save them on stack */
        swi     r2, r1, PT_R2
        swi     r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
        swi     r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
        swi     r5, r1, PT_R5
        swi     r6, r1, PT_R6
        swi     r7, r1, PT_R7
        swi     r8, r1, PT_R8
        swi     r9, r1, PT_R9
        swi     r10, r1, PT_R10
        swi     r11, r1, PT_R11
        /* r12: _always_ in clobber list; see unistd.h */
        swi     r12, r1, PT_R12
        swi     r13, r1, PT_R13
        /* r14: _always_ in clobber list; see unistd.h */
        swi     r14, r1, PT_R14
        /* r14 points at the brki instruction itself,
         * but we want to return to the next inst. */
        addik   r14, r14, 0x4
        swi     r14, r1, PT_PC          /* increment by 4 and store in pc */
        swi     r15, r1, PT_R15
        swi     r16, r1, PT_R16
        swi     r17, r1, PT_R17
        swi     r18, r1, PT_R18
        swi     r19, r1, PT_R19
        swi     r20, r1, PT_R20
        swi     r21, r1, PT_R21
        swi     r22, r1, PT_R22
        swi     r23, r1, PT_R23
        swi     r24, r1, PT_R24
        swi     r25, r1, PT_R25
        swi     r26, r1, PT_R26
        swi     r27, r1, PT_R27
        swi     r28, r1, PT_R28
        swi     r29, r1, PT_R29
        swi     r30, r1, PT_R30
        swi     r31, r1, PT_R31

        disable_irq
        nop             /* make sure IE bit is in effect */
        clear_bip       /* once IE is in effect it is safe to clear BIP */
        nop

        /* special purpose registers */
        mfs     r11, rmsr
        swi     r11, r1, PT_MSR
        mfs     r11, rear
        swi     r11, r1, PT_EAR
        mfs     r11, resr
        swi     r11, r1, PT_ESR
        mfs     r11, rfsr
        swi     r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi     r11, r0, PER_CPU(ENTRY_SP)
        swi     r11, r1, PT_R1
        /* update mode indicator we are in kernel mode */
        addik   r11, r0, 1
        swi     r11, r0, PER_CPU(KM)
        /* restore r31 (current task pointer) */
        lwi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* re-enable interrupts now we are in kernel mode */
        enable_irq

        /* See if the system call number is valid. */
        addi    r11, r12, -__NR_syscalls
        bgei    r11, 1f                 /* return to user if not valid */
        /* Figure out which function to use for this system call. */
        /* Note Microblaze barrel shift is optional, so don't rely on it */
        add     r12, r12, r12                   /* convert num -> ptr */
        add     r12, r12, r12                   /* (num * 4, word-sized entries) */
        lwi     r12, r12, sys_call_table        /* Get function pointer */
        addik   r15, r0, ret_to_user-8          /* set return address */
        bra     r12                             /* Make the system call. */
        bri     0                               /* won't reach here */
1:
        brid    ret_to_user                     /* jump to syscall epilogue */
        addi    r3, r0, -ENOSYS                 /* set errno in delay slot */
 291
/*
 * Debug traps are like a system call, but entered via brki r14, 0x60
 * All we need to do is send the SIGTRAP signal to current, ptrace and
 * do_notify_resume will handle the rest
 */
ENTRY(_debug_exception)
        swi     r1, r0, PER_CPU(ENTRY_SP)       /* save the current sp */
        lwi     r1, r0, PER_CPU(CURRENT_SAVE)   /* get the saved current */
        lwi     r1, r1, TS_THREAD_INFO          /* get the thread info */
        addik   r1, r1, THREAD_SIZE - PT_SIZE   /* get the kernel stack */
        swi     r11, r0, PER_CPU(R11_SAVE)      /* temporarily save r11 */
        lwi     r11, r0, PER_CPU(KM)            /* load mode indicator */
//save_context:
        swi     r11, r1, PT_MODE        /* store the mode */
        lwi     r11, r0, PER_CPU(R11_SAVE)      /* reload r11 */
        /* save them on stack */
        swi     r2, r1, PT_R2
        swi     r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
        swi     r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
        swi     r5, r1, PT_R5
        swi     r6, r1, PT_R6
        swi     r7, r1, PT_R7
        swi     r8, r1, PT_R8
        swi     r9, r1, PT_R9
        swi     r10, r1, PT_R10
        swi     r11, r1, PT_R11
        /* r12: _always_ in clobber list; see unistd.h */
        swi     r12, r1, PT_R12
        swi     r13, r1, PT_R13
        /* r14: _always_ in clobber list; see unistd.h */
        swi     r14, r1, PT_R14
        swi     r14, r1, PT_PC /* Will return to interrupted instruction */
        swi     r15, r1, PT_R15
        swi     r16, r1, PT_R16
        swi     r17, r1, PT_R17
        swi     r18, r1, PT_R18
        swi     r19, r1, PT_R19
        swi     r20, r1, PT_R20
        swi     r21, r1, PT_R21
        swi     r22, r1, PT_R22
        swi     r23, r1, PT_R23
        swi     r24, r1, PT_R24
        swi     r25, r1, PT_R25
        swi     r26, r1, PT_R26
        swi     r27, r1, PT_R27
        swi     r28, r1, PT_R28
        swi     r29, r1, PT_R29
        swi     r30, r1, PT_R30
        swi     r31, r1, PT_R31

        disable_irq
        nop             /* make sure IE bit is in effect */
        clear_bip       /* once IE is in effect it is safe to clear BIP */
        nop

        /* special purpose registers */
        mfs     r11, rmsr
        swi     r11, r1, PT_MSR
        mfs     r11, rear
        swi     r11, r1, PT_EAR
        mfs     r11, resr
        swi     r11, r1, PT_ESR
        mfs     r11, rfsr
        swi     r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi     r11, r0, PER_CPU(ENTRY_SP)
        swi     r11, r1, PT_R1
        /* update mode indicator we are in kernel mode */
        addik   r11, r0, 1
        swi     r11, r0, PER_CPU(KM)
        /* restore r31 (current task pointer) */
        lwi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* re-enable interrupts now we are in kernel mode */
        enable_irq

        /* send_sig(SIGTRAP, current, 0) */
        addi    r5, r0, SIGTRAP                 /* sending the trap signal */
        add     r6, r0, r31                     /* to current */
        bralid  r15, send_sig
        add     r7, r0, r0                      /* 3rd param zero (delay slot) */

        /* Restore r3/r4 to work around how ret_to_user works: it
         * re-saves r3/r4 as the "return values" before restoring. */
        lwi     r3, r1, PT_R3
        lwi     r4, r1, PT_R4
        bri     ret_to_user
 376
/* _break: non-recoverable break vector — hang here (branch-to-self). */
ENTRY(_break)
        bri     0
 379
/* struct task_struct *_switch_to(struct thread_info *prev,
                                        struct thread_info *next);
 * Context switch: save the callee-saved/dedicated registers of "prev"
 * (r5) into its cpu_context, load "next"'s (r6) context, update the
 * per-cpu current pointer, and return into "next".  Returns the prev
 * task_struct (the value of r31 on entry) in r3. */
ENTRY(_switch_to)
        /* prepare return value: r3 = current (prev) task */
        addk    r3, r0, r31

        /* save registers in cpu_context */
        /* use r11 and r12, volatile registers, as temp register */
        addik   r11, r5, TI_CPU_CONTEXT
        swi     r1, r11, CC_R1          /* stack pointer */
        swi     r2, r11, CC_R2
        /* skip volatile registers.
         * they are saved on stack when we jumped to _switch_to() */
        /* dedicated registers */
        swi     r13, r11, CC_R13
        swi     r14, r11, CC_R14
        swi     r15, r11, CC_R15        /* link register: resume point */
        swi     r16, r11, CC_R16
        swi     r17, r11, CC_R17
        swi     r18, r11, CC_R18
        /* save non-volatile registers */
        swi     r19, r11, CC_R19
        swi     r20, r11, CC_R20
        swi     r21, r11, CC_R21
        swi     r22, r11, CC_R22
        swi     r23, r11, CC_R23
        swi     r24, r11, CC_R24
        swi     r25, r11, CC_R25
        swi     r26, r11, CC_R26
        swi     r27, r11, CC_R27
        swi     r28, r11, CC_R28
        swi     r29, r11, CC_R29
        swi     r30, r11, CC_R30
        /* special purpose registers */
        mfs     r12, rmsr
        swi     r12, r11, CC_MSR
        mfs     r12, rear
        swi     r12, r11, CC_EAR
        mfs     r12, resr
        swi     r12, r11, CC_ESR
        mfs     r12, rfsr
        swi     r12, r11, CC_FSR

        /* update r31, the current */
        lwi     r31, r6, TI_TASK
        swi     r31, r0, PER_CPU(CURRENT_SAVE)

        /* get new process' cpu context and restore */
        addik   r11, r6, TI_CPU_CONTEXT

        /* special purpose registers */
        lwi     r12, r11, CC_FSR
        mts     rfsr, r12
        lwi     r12, r11, CC_ESR
        mts     resr, r12
        lwi     r12, r11, CC_EAR
        mts     rear, r12
        lwi     r12, r11, CC_MSR
        mts     rmsr, r12
        /* non-volatile registers */
        lwi     r30, r11, CC_R30
        lwi     r29, r11, CC_R29
        lwi     r28, r11, CC_R28
        lwi     r27, r11, CC_R27
        lwi     r26, r11, CC_R26
        lwi     r25, r11, CC_R25
        lwi     r24, r11, CC_R24
        lwi     r23, r11, CC_R23
        lwi     r22, r11, CC_R22
        lwi     r21, r11, CC_R21
        lwi     r20, r11, CC_R20
        lwi     r19, r11, CC_R19
        /* dedicated registers */
        lwi     r18, r11, CC_R18
        lwi     r17, r11, CC_R17
        lwi     r16, r11, CC_R16
        lwi     r15, r11, CC_R15
        lwi     r14, r11, CC_R14
        lwi     r13, r11, CC_R13
        /* skip volatile registers */
        lwi     r2, r11, CC_R2
        lwi     r1, r11, CC_R1          /* switch stacks last */

        rtsd    r15, 8                  /* resume next task after its call site */
        nop
 465
/*
 * ret_from_fork: first code run by a newly forked task.  Calls
 * schedule_tail() with the previous task (in r3, set by _switch_to)
 * and then drops into the syscall return path with r3 = 0, the
 * child's fork() return value.
 */
ENTRY(ret_from_fork)
        addk    r5, r0, r3              /* arg0 = prev task (from _switch_to) */
        addk    r6, r0, r1              /* r6 = pt_regs pointer */
        brlid   r15, schedule_tail
        nop
        swi     r31, r1, PT_R31         /* save r31 in user context. */
                        /* will soon be restored to r31 in ret_to_user */
        addk    r3, r0, r0              /* child returns 0 */
        brid    ret_to_user
        nop
 476
/*
 * work_pending: handle TIF work noticed by ret_to_user before
 * returning to user space.  r19 holds the thread-info flags; the
 * called C code may sleep, so interrupts are re-enabled first.
 */
work_pending:
        enable_irq

        andi    r11, r19, _TIF_NEED_RESCHED
        beqi    r11, 1f                 /* no resched requested? check signals */
        bralid  r15, schedule
        nop
1:      andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        beqi    r11, no_work_pending
        addk    r5, r1, r0              /* arg0 = pt_regs */
        bralid  r15, do_notify_resume
        addik   r6, r0, 1               /* arg1 = 1: in a syscall (delay slot) */
        bri     no_work_pending
 490
/*
 * ret_to_user: syscall/trap exit path.  Stores the return value
 * (r3/r4) into pt_regs, handles any pending TIF work, then restores
 * the full saved context and returns with rtid.  work_pending jumps
 * back in at no_work_pending, hence the second disable_irq.
 */
ENTRY(ret_to_user)
        disable_irq

        swi     r4, r1, PT_R4           /* return val */
        swi     r3, r1, PT_R3           /* return val */

        lwi     r6, r31, TS_THREAD_INFO /* get thread info */
        lwi     r19, r6, TI_FLAGS /* get flags in thread info */
        bnei    r19, work_pending /* do an extra work if any bits are set */
no_work_pending:
        disable_irq                     /* work_pending re-enabled interrupts */

        /* save r31 */
        swi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* save mode indicator */
        lwi     r18, r1, PT_MODE
        swi     r18, r0, PER_CPU(KM)
//restore_context:
        /* special purpose registers (r18 as scratch; it is reloaded below) */
        lwi     r18, r1, PT_FSR
        mts     rfsr, r18
        lwi     r18, r1, PT_ESR
        mts     resr, r18
        lwi     r18, r1, PT_EAR
        mts     rear, r18
        lwi     r18, r1, PT_MSR
        mts     rmsr, r18

        /* general purpose registers, r1 restored last */
        lwi     r31, r1, PT_R31
        lwi     r30, r1, PT_R30
        lwi     r29, r1, PT_R29
        lwi     r28, r1, PT_R28
        lwi     r27, r1, PT_R27
        lwi     r26, r1, PT_R26
        lwi     r25, r1, PT_R25
        lwi     r24, r1, PT_R24
        lwi     r23, r1, PT_R23
        lwi     r22, r1, PT_R22
        lwi     r21, r1, PT_R21
        lwi     r20, r1, PT_R20
        lwi     r19, r1, PT_R19
        lwi     r18, r1, PT_R18
        lwi     r17, r1, PT_R17
        lwi     r16, r1, PT_R16
        lwi     r15, r1, PT_R15
        lwi     r14, r1, PT_PC          /* resume address for rtid */
        lwi     r13, r1, PT_R13
        lwi     r12, r1, PT_R12
        lwi     r11, r1, PT_R11
        lwi     r10, r1, PT_R10
        lwi     r9, r1, PT_R9
        lwi     r8, r1, PT_R8
        lwi     r7, r1, PT_R7
        lwi     r6, r1, PT_R6
        lwi     r5, r1, PT_R5
        lwi     r4, r1, PT_R4           /* return val */
        lwi     r3, r1, PT_R3           /* return val */
        lwi     r2, r1, PT_R2
        lwi     r1, r1, PT_R1           /* switch back to the saved stack */

        rtid    r14, 0                  /* return and re-enable interrupts */
        nop
 553
/*
 * Syscall wrappers: tail-call the C implementation with a pointer to
 * the saved pt_regs added as an extra argument, set up in the branch
 * delay slot.  The argument register differs per call signature.
 */
sys_vfork:
        brid    microblaze_vfork
        addk    r5, r1, r0              /* pt_regs as 1st arg */

sys_clone:
        brid    microblaze_clone
        addk    r7, r1, r0              /* pt_regs as 3rd arg */

sys_execve:
        brid    microblaze_execve
        addk    r8, r1, r0              /* pt_regs as 4th arg */

sys_rt_sigreturn_wrapper:
        brid    sys_rt_sigreturn
        addk    r5, r1, r0              /* pt_regs as 1st arg */
 569
        /* Interrupt vector table, placed at the CPU's vector base.
         * The debug trap vector lives at fixed offset 0x60
         * (reached via brki r14, 0x60). */
        .section        .init.ivt, "ax"
        .org 0x0
        brai    _reset                  /* reset */
        brai    _user_exception        /* syscall trap */
        brai    _interrupt             /* hardware interrupt */
        brai    _break                 /* break */
        brai    _hw_exception_handler  /* hardware exception */
        .org 0x60
        brai    _debug_exception       /* debug trap */
 580
.section .rodata,"a"
#include "syscall_table.S"

/* number of bytes in the table, for syscall-number bounds info */
syscall_table_size=(.-sys_call_table)

/* NUL-terminated labels displayed by the stack unwinder */
type_SYSCALL:
        .ascii "SYSCALL\0"
type_IRQ:
        .ascii "IRQ\0"
type_IRQ_PREEMPT:
        .ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
        .ascii " SYSCALL (PREEMPTED)\0"

        /*
         * Trap decoding for stack unwinder
         * Tuples are (start addr, end addr, string)
         * If return address lies on [start addr, end addr],
         * unwinder displays 'string'
         */

        .align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
        /* Exact matches come first */
        .word ret_to_user  ; .word ret_to_user    ; .word type_SYSCALL
        .word ret_from_intr; .word ret_from_intr  ; .word type_IRQ
        /* Fuzzy matches go here */
        .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
        .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
        /* End of table (all-zero sentinel) */
        .word 0             ; .word 0               ; .word 0
 613