/* linux/arch/mips/kernel/genex.S */
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
   7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
   8 * Copyright (C) 2002, 2007  Maciej W. Rozycki
   9 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  10 */
  11#include <linux/init.h>
  12
  13#include <asm/asm.h>
  14#include <asm/asmmacro.h>
  15#include <asm/cacheops.h>
  16#include <asm/irqflags.h>
  17#include <asm/regdef.h>
  18#include <asm/fpregdef.h>
  19#include <asm/mipsregs.h>
  20#include <asm/stackframe.h>
  21#include <asm/war.h>
  22#include <asm/thread_info.h>
  23
        __INIT

/*
 * General exception vector for all other CPUs.
 *
 * Reads CP0 Cause, isolates the ExcCode field (already scaled by 4 in
 * bits 6..2, hence the 0x7c mask) and uses it as an index into the
 * C-maintained exception_handlers[] table, then jumps to the selected
 * handler.  Runs with $at reserved (noat) since no context is saved.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
        .set    push
        .set    noat
#if R5432_CP0_INTERRUPT_WAR
        mfc0    k0, CP0_INDEX           /* R5432 erratum workaround read */
#endif
        mfc0    k1, CP0_CAUSE
        andi    k1, k1, 0x7c            /* k1 = ExcCode << 2 */
#ifdef CONFIG_64BIT
        dsll    k1, k1, 1               /* scale by 8 for 64-bit pointers */
#endif
        PTR_L   k0, exception_handlers(k1)
        jr      k0                      /* tail-jump, handler owns k0/k1 */
        .set    pop
        END(except_vec3_generic)
  47
  48/*
  49 * General exception handler for CPUs with virtual coherency exception.
  50 *
  51 * Be careful when changing this, it has to be at most 256 (as a special
  52 * exception) bytes to fit into space reserved for the exception handler.
  53 */
  54NESTED(except_vec3_r4000, 0, sp)
  55        .set    push
  56        .set    arch=r4000
  57        .set    noat
  58        mfc0    k1, CP0_CAUSE
  59        li      k0, 31<<2
  60        andi    k1, k1, 0x7c
  61        .set    push
  62        .set    noreorder
  63        .set    nomacro
  64        beq     k1, k0, handle_vced
  65         li     k0, 14<<2
  66        beq     k1, k0, handle_vcei
  67#ifdef CONFIG_64BIT
  68         dsll   k1, k1, 1
  69#endif
  70        .set    pop
  71        PTR_L   k0, exception_handlers(k1)
  72        jr      k0
  73
  74        /*
  75         * Big shit, we now may have two dirty primary cache lines for the same
  76         * physical address.  We can safely invalidate the line pointed to by
  77         * c0_badvaddr because after return from this exception handler the
  78         * load / store will be re-executed.
  79         */
  80handle_vced:
  81        MFC0    k0, CP0_BADVADDR
  82        li      k1, -4                                  # Is this ...
  83        and     k0, k1                                  # ... really needed?
  84        mtc0    zero, CP0_TAGLO
  85        cache   Index_Store_Tag_D, (k0)
  86        cache   Hit_Writeback_Inv_SD, (k0)
  87#ifdef CONFIG_PROC_FS
  88        PTR_LA  k0, vced_count
  89        lw      k1, (k0)
  90        addiu   k1, 1
  91        sw      k1, (k0)
  92#endif
  93        eret
  94
  95handle_vcei:
  96        MFC0    k0, CP0_BADVADDR
  97        cache   Hit_Writeback_Inv_SD, (k0)              # also cleans pi
  98#ifdef CONFIG_PROC_FS
  99        PTR_LA  k0, vcei_count
 100        lw      k1, (k0)
 101        addiu   k1, 1
 102        sw      k1, (k0)
 103#endif
 104        eret
 105        .set    pop
 106        END(except_vec3_r4000)
 107
 108        __FINIT
 109
        .align  5       /* 32 byte rollback region */
LEAF(__r4k_wait)
        /*
         * Idle routine.  An interrupt taken anywhere inside the 32-byte
         * "rollback region" has its EPC rewound to the start of this
         * function by BUILD_ROLLBACK_PROLOGUE, so the need_resched test
         * is always replayed before `wait` re-executes.
         */
        .set    push
        .set    noreorder
        /* start of rollback region */
        LONG_L  t0, TI_FLAGS($28)       /* $28 = current_thread_info */
        nop
        andi    t0, _TIF_NEED_RESCHED
        bnez    t0, 1f                  /* resched pending: skip the wait */
         nop
        nop
        nop
#ifdef CONFIG_CPU_MICROMIPS
        nop                             /* pad: microMIPS insns are shorter */
        nop
        nop
        nop
#endif
        .set    MIPS_ISA_ARCH_LEVEL_RAW
        wait
        /* end of rollback region (the region size must be power of two) */
1:
        jr      ra
         nop
        .set    pop
        END(__r4k_wait)
 136
        /*
         * Emit rollback_\handler: if the exception hit inside the 32-byte
         * __r4k_wait region, rewind EPC to the start of __r4k_wait so the
         * TIF_NEED_RESCHED test is replayed, then fall into \handler.
         */
        .macro  BUILD_ROLLBACK_PROLOGUE handler
        FEXPORT(rollback_\handler)
        .set    push
        .set    noat
        MFC0    k0, CP0_EPC
        PTR_LA  k1, __r4k_wait
        ori     k0, 0x1f        /* 32 byte rollback region */
        xori    k0, 0x1f        /* k0 = EPC rounded down to 32 bytes */
        bne     k0, k1, \handler
        MTC0    k0, CP0_EPC     /* inside region: restart at __r4k_wait */
        .set pop
        .endm
 149
        .align  5
BUILD_ROLLBACK_PROLOGUE handle_int
/*
 * Common hardware-interrupt entry: save full context, mask IRQs and
 * tail-call the platform dispatcher with ra preset to ret_from_irq.
 */
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * Check to see if the interrupted code has just disabled
         * interrupts and ignore this interrupt for now if so.
         *
         * local_irq_disable() disables interrupts and then calls
         * trace_hardirqs_off() to track the state. If an interrupt is taken
         * after interrupts are disabled but before the state is updated
         * it will appear to restore_all that it is incorrectly returning with
         * interrupts disabled
         */
        .set    push
        .set    noat
        mfc0    k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        and     k0, ST0_IEP             /* R3000-style previous-IE bit */
        bnez    k0, 1f

        mfc0    k0, CP0_EPC             /* IRQs were off: just return */
        .set    noreorder
        j       k0
         rfe                            /* delay slot: pop R3000 status stack */
#else
        and     k0, ST0_IE
        bnez    k0, 1f

        eret                            /* IRQs were off: just return */
#endif
1:
        .set pop
#endif
        SAVE_ALL
        CLI                             /* keep interrupts masked in handler */
        TRACE_IRQS_OFF

        LONG_L  s0, TI_REGS($28)        /* save old, install new pt_regs ptr */
        LONG_S  sp, TI_REGS($28)
        PTR_LA  ra, ret_from_irq        /* dispatcher returns to ret_from_irq */
        PTR_LA  v0, plat_irq_dispatch
        jr      v0
#ifdef CONFIG_CPU_MICROMIPS
        nop
#endif
        END(handle_int)
 197
        __INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:      j       1b                      /* Dummy, will be replaced */
        END(except_vec4)
 212
 213/*
 214 * EJTAG debug exception handler.
 215 * The EJTAG debug exception entry point is 0xbfc00480, which
 216 * normally is in the boot PROM, so the boot PROM must do an
 217 * unconditional jump to this vector.
 218 */
 219NESTED(except_vec_ejtag_debug, 0, sp)
 220        j       ejtag_debug_handler
 221#ifdef CONFIG_CPU_MICROMIPS
 222         nop
 223#endif
 224        END(except_vec_ejtag_debug)
 225
 226        __FINIT
 227
 228/*
 229 * Vectored interrupt handler.
 230 * This prototype is copied to ebase + n*IntCtl.VS and patched
 231 * to invoke the handler
 232 */
 233BUILD_ROLLBACK_PROLOGUE except_vec_vi
 234NESTED(except_vec_vi, 0, sp)
 235        SAVE_SOME
 236        SAVE_AT
 237        .set    push
 238        .set    noreorder
 239        PTR_LA  v1, except_vec_vi_handler
 240FEXPORT(except_vec_vi_lui)
 241        lui     v0, 0           /* Patched */
 242        jr      v1
 243FEXPORT(except_vec_vi_ori)
 244         ori    v0, 0           /* Patched */
 245        .set    pop
 246        END(except_vec_vi)
 247EXPORT(except_vec_vi_end)
 248
 249/*
 250 * Common Vectored Interrupt code
 251 * Complete the register saves and invoke the handler which is passed in $v0
 252 */
 253NESTED(except_vec_vi_handler, 0, sp)
 254        SAVE_TEMP
 255        SAVE_STATIC
 256        CLI
 257#ifdef CONFIG_TRACE_IRQFLAGS
 258        move    s0, v0
 259        TRACE_IRQS_OFF
 260        move    v0, s0
 261#endif
 262
 263        LONG_L  s0, TI_REGS($28)
 264        LONG_S  sp, TI_REGS($28)
 265        PTR_LA  ra, ret_from_irq
 266        jr      v0
 267        END(except_vec_vi_handler)
 268
 269/*
 270 * EJTAG debug exception handler.
 271 */
 272NESTED(ejtag_debug_handler, PT_SIZE, sp)
 273        .set    push
 274        .set    noat
 275        MTC0    k0, CP0_DESAVE
 276        mfc0    k0, CP0_DEBUG
 277
 278        sll     k0, k0, 30      # Check for SDBBP.
 279        bgez    k0, ejtag_return
 280
 281        PTR_LA  k0, ejtag_debug_buffer
 282        LONG_S  k1, 0(k0)
 283        SAVE_ALL
 284        move    a0, sp
 285        jal     ejtag_exception_handler
 286        RESTORE_ALL
 287        PTR_LA  k0, ejtag_debug_buffer
 288        LONG_L  k1, 0(k0)
 289
 290ejtag_return:
 291        MFC0    k0, CP0_DESAVE
 292        .set    mips32
 293        deret
 294        .set    pop
 295        END(ejtag_debug_handler)
 296
 297/*
 298 * This buffer is reserved for the use of the EJTAG debug
 299 * handler.
 300 */
 301        .data
 302EXPORT(ejtag_debug_buffer)
 303        .fill   LONGSIZE
 304        .previous
 305
 306        __INIT
 307
 308/*
 309 * NMI debug exception handler for MIPS reference boards.
 310 * The NMI debug exception entry point is 0xbfc00000, which
 311 * normally is in the boot PROM, so the boot PROM must do a
 312 * unconditional jump to this vector.
 313 */
 314NESTED(except_vec_nmi, 0, sp)
 315        j       nmi_handler
 316#ifdef CONFIG_CPU_MICROMIPS
 317         nop
 318#endif
 319        END(except_vec_nmi)
 320
 321        __FINIT
 322
NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        /*
         * Clear ERL - restore segment mapping
         * Clear BEV - required for page fault exception handler to work
         */
        mfc0    k0, CP0_STATUS
        ori     k0, k0, ST0_EXL         /* set EXL: stay in exception mode */
        li      k1, ~(ST0_BEV | ST0_ERL)
        and     k0, k0, k1
        mtc0    k0, CP0_STATUS
        _ehb                            /* hazard barrier for Status update */
        SAVE_ALL
        move    a0, sp                  /* a0 = pt_regs */
        jal     nmi_exception_handler
        /* nmi_exception_handler never returns */
        .set    pop
        END(nmi_handler)
 342
        /* No-op "clear" action: leave the IRQ state untouched. */
        .macro  __build_clear_none
        .endm
 345
        /* Re-enable interrupts before calling the C handler. */
        .macro  __build_clear_sti
        TRACE_IRQS_ON
        STI
        .endm
 350
        /* Keep interrupts disabled for the C handler. */
        .macro  __build_clear_cli
        CLI
        TRACE_IRQS_OFF
        .endm
 355
        /* FP exception: capture FCSR into a1 for the C handler, IRQs off. */
        .macro  __build_clear_fpe
        .set    push
        /* gas fails to assemble cfc1 for some archs (octeon).*/ \
        .set    mips1
        SET_HARDFLOAT
        cfc1    a1, fcr31
        .set    pop
        CLI
        TRACE_IRQS_OFF
        .endm
 366
        /* MSA FP exception: capture the MSA control/status reg into a1. */
        .macro  __build_clear_msa_fpe
        _cfcmsa a1, MSA_CSR
        CLI
        TRACE_IRQS_OFF
        .endm
 372
        /* Address error: record the faulting address in pt_regs->cp0_badvaddr. */
        .macro  __build_clear_ade
        MFC0    t0, CP0_BADVADDR
        PTR_S   t0, PT_BVADDR(sp)
        KMODE
        .endm
 378
        /* Silent variant: print nothing when the exception is taken. */
        .macro  __BUILD_silent exception
        .endm
 381
        /* Gas tries to parse the PRINT argument as a string containing
           string escapes and emits bogus warnings if it believes to
           recognize an unknown escape code.  So make the arguments
           start with an n and gas will believe \n is ok ...  */
        .macro  __BUILD_verbose nexception
        LONG_L  a1, PT_EPC(sp)          /* a1 = EPC for the printk below */
#ifdef CONFIG_32BIT
        PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
        PRINT("Got \nexception at %016lx\012")
#endif
        .endm
 395
 396        .macro  __BUILD_count exception
 397        LONG_L  t0,exception_count_\exception
 398        LONG_ADDIU      t0, 1
 399        LONG_S  t0,exception_count_\exception
 400        .comm   exception_count\exception, 8, 8
 401        .endm
 402
        /*
         * Emit the first-level handler handle_\exception: save the full
         * register context, run the __build_clear_\clear IRQ action,
         * optionally log via __BUILD_\verbose, then tail-call the C
         * routine do_\handler with a0 = pt_regs and ra preset to
         * ret_from_exception.
         */
        .macro  __BUILD_HANDLER exception handler clear verbose ext
        .align  5
        NESTED(handle_\exception, PT_SIZE, sp)
        .set    noat
        SAVE_ALL
        FEXPORT(handle_\exception\ext)
        __build_clear_\clear
        .set    at
        __BUILD_\verbose \exception
        move    a0, sp
        PTR_LA  ra, ret_from_exception
        j       do_\handler
        END(handle_\exception)
        .endm
 417
        /* Public wrapper: builds the handler with the "_int" entry tag. */
        .macro  BUILD_HANDLER exception handler clear verbose
        __BUILD_HANDLER \exception \handler \clear \verbose _int
        .endm
 421
        /*
         * Instantiate the first-level exception handlers.  Arguments are:
         * exception name, C handler suffix (calls do_<handler>), IRQ/clear
         * action, and verbosity; the trailing comment is the CP0 ExcCode.
         */
        BUILD_HANDLER adel ade ade silent               /* #4  */
        BUILD_HANDLER ades ade ade silent               /* #5  */
        BUILD_HANDLER ibe be cli silent                 /* #6  */
        BUILD_HANDLER dbe be cli silent                 /* #7  */
        BUILD_HANDLER bp bp sti silent                  /* #9  */
        BUILD_HANDLER ri ri sti silent                  /* #10 */
        BUILD_HANDLER cpu cpu sti silent                /* #11 */
        BUILD_HANDLER ov ov sti silent                  /* #12 */
        BUILD_HANDLER tr tr sti silent                  /* #13 */
        BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent    /* #14 */
        BUILD_HANDLER fpe fpe fpe silent                /* #15 */
        BUILD_HANDLER ftlb ftlb none silent             /* #16 */
        BUILD_HANDLER msa msa sti silent                /* #21 */
        BUILD_HANDLER mdmx mdmx sti silent              /* #22 */
#ifdef  CONFIG_HARDWARE_WATCHPOINTS
        /*
         * For watch, interrupts will be enabled after the watch
         * registers are read.
         */
        BUILD_HANDLER watch watch cli silent            /* #23 */
#else
        BUILD_HANDLER watch watch sti verbose           /* #23 */
#endif
        BUILD_HANDLER mcheck mcheck cli verbose         /* #24 */
        BUILD_HANDLER mt mt sti silent                  /* #25 */
        BUILD_HANDLER dsp dsp sti silent                /* #26 */
        BUILD_HANDLER reserved reserved sti verbose     /* others */
 449
        .align  5
        /*
         * VIVT-cache front end for handle_ri_rdhwr: before touching the
         * faulting instruction we must be sure a TLB entry for EPC exists,
         * so probe for {VPN2 of EPC, current ASID}.  On a miss, take the
         * slow path (handle_ri); on a hit, fall through to handle_ri_rdhwr.
         */
        LEAF(handle_ri_rdhwr_vivt)
        .set    push
        .set    noat
        .set    noreorder
        /* check if TLB contains a entry for EPC */
        MFC0    k1, CP0_ENTRYHI
        andi    k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
        MFC0    k0, CP0_EPC
        PTR_SRL k0, _PAGE_SHIFT + 1
        PTR_SLL k0, _PAGE_SHIFT + 1     /* k0 = EPC's VPN2 (page-pair addr) */
        or      k1, k0
        MTC0    k1, CP0_ENTRYHI         /* set up the probe address */
        mtc0_tlbw_hazard
        tlbp
        tlb_probe_hazard
        mfc0    k1, CP0_INDEX           /* negative Index => probe missed */
        .set    pop
        bltz    k1, handle_ri   /* slow path */
        /* fall thru */
        END(handle_ri_rdhwr_vivt)
 471
        /*
         * Fast path for the RI exception: if the faulting instruction is
         * "rdhwr v1,$29" (userspace reading the TLS pointer on CPUs
         * without the RDHWR hardware support), emulate it by loading the
         * thread's TI_TP_VALUE into v1 and skipping the instruction.
         * Anything else falls through to the generic handle_ri.
         */
        LEAF(handle_ri_rdhwr)
        .set    push
        .set    noat
        .set    noreorder
        /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
        /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
        MFC0    k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
        and     k0, k1, 1               /* EPC bit 0 set => microMIPS mode */
        beqz    k0, 1f
         xor    k1, k0                  /* delay slot: strip the ISA bit */
        lhu     k0, (k1)                /* fetch microMIPS insn ... */
        lhu     k1, 2(k1)               /* ... as two halfwords */
        ins     k1, k0, 16, 16
        lui     k0, 0x007d              /* k0 = microMIPS rdhwr v1,$29 */
        b       docheck
         ori    k0, 0x6b3c
1:
        lui     k0, 0x7c03              /* k0 = MIPS32 rdhwr v1,$29 */
        lw      k1, (k1)
        ori     k0, 0xe83b
#else
        andi    k0, k1, 1
        bnez    k0, handle_ri           /* odd EPC: not a word insn, punt */
         lui    k0, 0x7c03
        lw      k1, (k1)                /* k1 = faulting instruction word */
        ori     k0, 0xe83b
#endif
        .set    reorder
docheck:
        bne     k0, k1, handle_ri       /* if not ours */

isrdhwr:
        /* The insn is rdhwr.  No need to check CAUSE.BD here. */
        get_saved_sp    /* k1 := current_thread_info */
        .set    noreorder
        MFC0    k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK        /* round k1 down to thread_info */
        LONG_L  v1, TI_TP_VALUE(k1)     /* v1 = thread's TLS pointer */
        LONG_ADDIU      k0, 4
        jr      k0                      /* resume just past the rdhwr */
         rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        LONG_ADDIU      k0, 4           /* stall on $k0 */
#else
        .set    at=v1
        LONG_ADDIU      k0, 4
        .set    noat
#endif
        MTC0    k0, CP0_EPC             /* skip the emulated instruction */
        /* I hope three instructions between MTC0 and ERET are enough... */
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK        /* round k1 down to thread_info */
        LONG_L  v1, TI_TP_VALUE(k1)     /* v1 = thread's TLS pointer */
        .set    arch=r4000
        eret
        .set    mips0
#endif
        .set    pop
        END(handle_ri_rdhwr)
 535
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

        __INIT

        /* Overflow (ExcCode 12) handler calling do_daddi_ov. */
        BUILD_HANDLER  daddi_ov daddi_ov none silent    /* #12 */
#endif
 543