/* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
 *
 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
 */

#include <linux/errno.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/memreg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgtsun4c.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
#include <asm/mxcc.h>
#include <asm/thread_info.h>
#include <asm/param.h>
#include <asm/unistd.h>

#include <asm/asmmacro.h>

#define curptr      g6
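/* Note: throughout this file %g6 (curptr) is assumed to hold the current
 * thread_info pointer; see LOAD_CURRENT() and the TI_* offsets used below.
 */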

/* These are just handy. */
#define _SV     save    %sp, -STACKFRAME_SZ, %sp
#define _RS     restore

#define FLUSH_ALL_KERNEL_WINDOWS \
        _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
        _RS; _RS; _RS; _RS; _RS; _RS; _RS;
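/* A sketch of why seven save/restore pairs suffice: each _SV rotates the
 * current window pointer and, on reaching the invalid window, takes a
 * window-overflow trap that spills that window to the stack.  On the usual
 * 8-window implementations seven saves therefore visit every other window,
 * so all of them are clean once the matching restores unwind.
 */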

        .text

#ifdef CONFIG_KGDB
        .align  4
        .globl          arch_kgdb_breakpoint
        .type           arch_kgdb_breakpoint,#function
arch_kgdb_breakpoint:
        ta              0x7d
        retl
         nop
        .size           arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
#endif

#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
        .align  4
        .globl  floppy_hardint
floppy_hardint:
        /*
         * This code cannot touch registers %l0, %l1 and %l2
         * because SAVE_ALL depends on their values. It depends
         * on %l3 also, but we regenerate it before a call.
         * Other registers are:
         * %l3 -- base address of fdc registers
         * %l4 -- pdma_vaddr
         * %l5 -- scratch for ld/st address
         * %l6 -- pdma_size
         * %l7 -- scratch [floppy byte, ld/st address, aux. data]
         */
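        /* Hedged note: this is the "pseudo-DMA" fast path -- judging from
         * the doing_pdma/pdma_vaddr/pdma_size names, the CPU itself moves
         * each byte between the FDC FIFO and the transfer buffer from this
         * fast trap handler instead of using a real DMA engine.
         */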

        /* Do we have work to do? */
        sethi   %hi(doing_pdma), %l7
        ld      [%l7 + %lo(doing_pdma)], %l7
        cmp     %l7, 0
        be      floppy_dosoftint
         nop

        /* Load fdc register base */
        sethi   %hi(fdc_status), %l3
        ld      [%l3 + %lo(fdc_status)], %l3

        /* Setup register addresses */
        sethi   %hi(pdma_vaddr), %l5    ! transfer buffer
        ld      [%l5 + %lo(pdma_vaddr)], %l4
        sethi   %hi(pdma_size), %l5     ! bytes to go
        ld      [%l5 + %lo(pdma_size)], %l6
next_byte:
        ldub    [%l3], %l7

        andcc   %l7, 0x80, %g0          ! Does the fifo still have data?
        bz      floppy_fifo_emptied     ! fifo has been emptied...
         andcc  %l7, 0x20, %g0          ! in non-dma mode still?
        bz      floppy_overrun          ! nope, overrun
         andcc  %l7, 0x40, %g0          ! 0=write 1=read
        bz      floppy_write
         sub    %l6, 0x1, %l6

        /* Ok, actually read this byte */
        ldub    [%l3 + 1], %l7
        orcc    %g0, %l6, %g0
        stb     %l7, [%l4]
        bne     next_byte
         add    %l4, 0x1, %l4

        b       floppy_tdone
         nop

floppy_write:
        /* Ok, actually write this byte */
        ldub    [%l4], %l7
        orcc    %g0, %l6, %g0
        stb     %l7, [%l3 + 1]
        bne     next_byte
         add    %l4, 0x1, %l4

        /* fall through... */
floppy_tdone:
        sethi   %hi(pdma_vaddr), %l5
        st      %l4, [%l5 + %lo(pdma_vaddr)]
        sethi   %hi(pdma_size), %l5
        st      %l6, [%l5 + %lo(pdma_size)]
        /* Flip terminal count pin */
        set     auxio_register, %l7
        ld      [%l7], %l7

        set     sparc_cpu_model, %l5
        ld      [%l5], %l5
        subcc   %l5, 1, %g0             /* enum { sun4c = 1 }; */
        be      1f
         ldub   [%l7], %l5

        or      %l5, 0xc2, %l5
        stb     %l5, [%l7]
        andn    %l5, 0x02, %l5
        b       2f
         nop

1:
        or      %l5, 0xf4, %l5
        stb     %l5, [%l7]
        andn    %l5, 0x04, %l5

2:
        /* Kill some time so the bits settle */
        WRITE_PAUSE
        WRITE_PAUSE

        stb     %l5, [%l7]

        /* Prevent recursion */
        sethi   %hi(doing_pdma), %l7
        b       floppy_dosoftint
         st     %g0, [%l7 + %lo(doing_pdma)]

        /* We emptied the FIFO, but we haven't read everything
         * yet.  Store the current transfer address and the
         * bytes left to read so we can continue when the next
         * fast IRQ comes in.
         */
floppy_fifo_emptied:
        sethi   %hi(pdma_vaddr), %l5
        st      %l4, [%l5 + %lo(pdma_vaddr)]
        sethi   %hi(pdma_size), %l7
        st      %l6, [%l7 + %lo(pdma_size)]

        /* Restore condition codes */
        wr      %l0, 0x0, %psr
        WRITE_PAUSE

        jmp     %l1
        rett    %l2

floppy_overrun:
        sethi   %hi(pdma_vaddr), %l5
        st      %l4, [%l5 + %lo(pdma_vaddr)]
        sethi   %hi(pdma_size), %l5
        st      %l6, [%l5 + %lo(pdma_size)]
        /* Prevent recursion */
        sethi   %hi(doing_pdma), %l7
        st      %g0, [%l7 + %lo(doing_pdma)]

        /* fall through... */
floppy_dosoftint:
        rd      %wim, %l3
        SAVE_ALL

        /* Set all IRQs off. */
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE

        mov     11, %o0                 ! floppy irq level (unused anyway)
        mov     %g0, %o1                ! devid is not used in fast interrupts
        call    sparc_floppy_irq
         add    %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs

        RESTORE_ALL

#endif /* (CONFIG_BLK_DEV_FD) */

        /* Bad trap handler */
        .globl  bad_trap_handler
bad_trap_handler:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        call    do_hw_interrupt
         mov    %l7, %o1                ! trap number

        RESTORE_ALL

/* For now all IRQs not registered get sent here. handler_irq() will
 * see if a routine is registered to handle this interrupt and if not
 * it will say so on the console.
 */

        .align  4
        .globl  real_irq_entry, patch_handler_irq
real_irq_entry:
        SAVE_ALL

#ifdef CONFIG_SMP
        .globl  patchme_maybe_smp_msg

        cmp     %l7, 11
patchme_maybe_smp_msg:
        bgu     maybe_smp4m_msg
         nop
#endif

real_irq_continue:
        or      %l0, PSR_PIL, %g2
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
        mov     %l7, %o0                ! irq level
patch_handler_irq:
        call    handler_irq
         add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
        or      %l0, PSR_PIL, %g2       ! restore PIL after handler_irq
        wr      %g2, PSR_ET, %psr       ! keep ET up
        WRITE_PAUSE

        RESTORE_ALL

#ifdef CONFIG_SMP
        /* SMP per-cpu ticker interrupts are handled specially. */
smp4m_ticker:
        bne     real_irq_continue+4
         or     %l0, PSR_PIL, %g2
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4m_percpu_timer_interrupt
         add    %sp, STACKFRAME_SZ, %o0
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE
        RESTORE_ALL

        /* Here is where we check for possible SMP IPI passed to us
         * on some level other than 15 which is the NMI and only used
         * for cross calls.  That has a separate entry point below.
         *
         * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
         */
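        /* A hedged reading of the mask below: 0x70000000 selects the three
         * pending bits corresponding to the soft-IRQ levels 12-14 used for
         * IPIs; after the srl by 28, bit 0 appears to be the
         * call-function-single IPI, bit 1 call-function, bit 2 resched.
         */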
maybe_smp4m_msg:
        GET_PROCESSOR4M_ID(o3)
        sethi   %hi(sun4m_irq_percpu), %l5
        sll     %o3, 2, %o3
        or      %l5, %lo(sun4m_irq_percpu), %o5
        sethi   %hi(0x70000000), %o2    ! Check all soft-IRQs
        ld      [%o5 + %o3], %o1
        ld      [%o1 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
        andcc   %o3, %o2, %g0
        be,a    smp4m_ticker
         cmp    %l7, 14
        /* Soft-IRQ IPI */
        st      %o2, [%o1 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x70000000
        WRITE_PAUSE
        ld      [%o1 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
        WRITE_PAUSE
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        srl     %o3, 28, %o2            ! shift for simpler checks below
maybe_smp4m_msg_check_single:
        andcc   %o2, 0x1, %g0
        beq,a   maybe_smp4m_msg_check_mask
         andcc  %o2, 0x2, %g0
        call    smp_call_function_single_interrupt
         nop
        andcc   %o2, 0x2, %g0
maybe_smp4m_msg_check_mask:
        beq,a   maybe_smp4m_msg_check_resched
         andcc  %o2, 0x4, %g0
        call    smp_call_function_interrupt
         nop
        andcc   %o2, 0x4, %g0
maybe_smp4m_msg_check_resched:
        /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
        beq,a   maybe_smp4m_msg_out
         nop
        call    smp_resched_interrupt
         nop
maybe_smp4m_msg_out:
        RESTORE_ALL

        .align  4
        .globl  linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
        SAVE_ALL
        sethi   %hi(0x80000000), %o2
        GET_PROCESSOR4M_ID(o0)
        sethi   %hi(sun4m_irq_percpu), %l5
        or      %l5, %lo(sun4m_irq_percpu), %o5
        sll     %o0, 2, %o0
        ld      [%o5 + %o0], %o5
        ld      [%o5 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
        andcc   %o3, %o2, %g0
        be      1f                      ! Must be an NMI async memory error
         st     %o2, [%o5 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x80000000
        WRITE_PAUSE
        ld      [%o5 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
        WRITE_PAUSE
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4m_cross_call_irq
         nop
        b       ret_trap_lockless_ipi
         clr    %l6
1:
        /* NMI async memory error handling. */
        sethi   %hi(0x80000000), %l4
        sethi   %hi(sun4m_irq_global), %o5
        ld      [%o5 + %lo(sun4m_irq_global)], %l5
        st      %l4, [%l5 + 0x0c]       ! sun4m_irq_global->mask_set=0x80000000
        WRITE_PAUSE
        ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
        WRITE_PAUSE
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    sun4m_nmi
         nop
        st      %l4, [%l5 + 0x08]       ! sun4m_irq_global->mask_clear=0x80000000
        WRITE_PAUSE
        ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
        WRITE_PAUSE
        RESTORE_ALL

        .globl  smp4d_ticker
        /* SMP per-cpu ticker interrupts are handled specially. */
smp4d_ticker:
        SAVE_ALL
        or      %l0, PSR_PIL, %g2
        sethi   %hi(CC_ICLR), %o0
        sethi   %hi(1 << 14), %o1
        or      %o0, %lo(CC_ICLR), %o0
        stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 14 in MXCC's ICLR */
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4d_percpu_timer_interrupt
         add    %sp, STACKFRAME_SZ, %o0
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE
        RESTORE_ALL

        .align  4
        .globl  linux_trap_ipi15_sun4d
linux_trap_ipi15_sun4d:
        SAVE_ALL
        sethi   %hi(CC_BASE), %o4
        sethi   %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
        or      %o4, (CC_EREG - CC_BASE), %o0
        ldda    [%o0] ASI_M_MXCC, %o0
        andcc   %o0, %o2, %g0
        bne     1f
         sethi  %hi(BB_STAT2), %o2
        lduba   [%o2] ASI_M_CTL, %o2
        andcc   %o2, BB_STAT2_MASK, %g0
        bne     2f
         or     %o4, (CC_ICLR - CC_BASE), %o0
        sethi   %hi(1 << 15), %o1
        stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 15 in MXCC's ICLR */
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4d_cross_call_irq
         nop
        b       ret_trap_lockless_ipi
         clr    %l6

1:      /* MXCC error */
2:      /* BB error */
        /* Disable PIL 15 */
        set     CC_IMSK, %l4
        lduha   [%l4] ASI_M_MXCC, %l5
        sethi   %hi(1 << 15), %l7
        or      %l5, %l7, %l5
        stha    %l5, [%l4] ASI_M_MXCC
        /* FIXME */
1:      b,a     1b

#ifdef CONFIG_SPARC_LEON
        .globl  smpleon_ipi
        .extern leon_ipi_interrupt
        /* SMP per-cpu IPI interrupts are handled specially. */
smpleon_ipi:
        SAVE_ALL
        or      %l0, PSR_PIL, %g2
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
        call    leonsmp_ipi_interrupt
         add    %sp, STACKFRAME_SZ, %o1 ! pt_regs
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE
        RESTORE_ALL

        .align  4
        .globl  linux_trap_ipi15_leon
linux_trap_ipi15_leon:
        SAVE_ALL
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    leon_cross_call_irq
         nop
        b       ret_trap_lockless_ipi
         clr    %l6

#endif /* CONFIG_SPARC_LEON */

#endif /* CONFIG_SMP */

        /* This routine handles illegal instructions and privileged
         * instruction attempts from user code.
         */
        .align  4
        .globl  bad_instruction
bad_instruction:
        sethi   %hi(0xc1f80000), %l4
        ld      [%l1], %l5
        sethi   %hi(0x81d80000), %l7
        and     %l5, %l4, %l5
        cmp     %l5, %l7
        be      1f
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    do_illegal_instruction
         mov    %l0, %o3

        RESTORE_ALL

1:      /* unimplemented flush - just skip */
        jmpl    %l2, %g0
         rett   %l2 + 4

        .align  4
        .globl  priv_instruction
priv_instruction:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    do_priv_instruction
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles unaligned data accesses. */
        .align  4
        .globl  mna_handler
mna_handler:
        andcc   %l0, PSR_PS, %g0
        be      mna_fromuser
         nop

        SAVE_ALL

        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        ld      [%l1], %o1
        call    kernel_unaligned_trap
         add    %sp, STACKFRAME_SZ, %o0

        RESTORE_ALL

mna_fromuser:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        ld      [%l1], %o1
        call    user_unaligned_trap
         add    %sp, STACKFRAME_SZ, %o0

        RESTORE_ALL

        /* This routine handles floating point disabled traps. */
        .align  4
        .globl  fpd_trap_handler
fpd_trap_handler:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    do_fpd_trap
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles Floating Point Exceptions. */
        .align  4
        .globl  fpe_trap_handler
fpe_trap_handler:
        set     fpsave_magic, %l5
        cmp     %l1, %l5
        be      1f
         sethi  %hi(fpsave), %l5
        or      %l5, %lo(fpsave), %l5
        cmp     %l1, %l5
        bne     2f
         sethi  %hi(fpsave_catch2), %l5
        or      %l5, %lo(fpsave_catch2), %l5
        wr      %l0, 0x0, %psr
        WRITE_PAUSE
        jmp     %l5
         rett   %l5 + 4
1:
        sethi   %hi(fpsave_catch), %l5
        or      %l5, %lo(fpsave_catch), %l5
        wr      %l0, 0x0, %psr
        WRITE_PAUSE
        jmp     %l5
         rett   %l5 + 4

2:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    do_fpe_trap
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles Tag Overflow Exceptions. */
        .align  4
        .globl  do_tag_overflow
do_tag_overflow:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    handle_tag_overflow
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles Watchpoint Exceptions. */
        .align  4
        .globl  do_watchpoint
do_watchpoint:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    handle_watchpoint
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles Register Access Exceptions. */
        .align  4
        .globl  do_reg_access
do_reg_access:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    handle_reg_access
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles Co-Processor Disabled Exceptions. */
        .align  4
        .globl  do_cp_disabled
do_cp_disabled:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    handle_cp_disabled
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles Co-Processor Exceptions. */
        .align  4
        .globl  do_cp_exception
do_cp_exception:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    handle_cp_exception
         mov    %l0, %o3

        RESTORE_ALL

        /* This routine handles Hardware Divide By Zero Exceptions. */
        .align  4
        .globl  do_hw_divzero
do_hw_divzero:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0
        mov     %l1, %o1
        mov     %l2, %o2
        call    handle_hw_divzero
         mov    %l0, %o3

        RESTORE_ALL

        .align  4
        .globl  do_flush_windows
do_flush_windows:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        andcc   %l0, PSR_PS, %g0
        bne     dfw_kernel
         nop

        call    flush_user_windows
         nop

        /* Advance over the trap instruction. */
        ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
        add     %l1, 0x4, %l2
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
        st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]

        RESTORE_ALL

        .globl  flush_patch_one

        /* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
flush_patch_one:
        FLUSH_ALL_KERNEL_WINDOWS

        /* Advance over the trap instruction. */
        ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
        add     %l1, 0x4, %l2
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
        st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]

        RESTORE_ALL

        /* The getcc software trap.  The user wants the condition codes from
         * the %psr in register %g1.
         */
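        /* For reference (SPARC V8): the integer condition codes live in
         * PSR bits 23:20, which is why the handler below shifts right by
         * 20 and masks with 0xf.
         */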

        .align  4
        .globl  getcc_trap_handler
getcc_trap_handler:
        srl     %l0, 20, %g1    ! give user
        and     %g1, 0xf, %g1   ! only ICC bits in %psr
        jmp     %l2             ! advance over trap instruction
        rett    %l2 + 0x4       ! like this...

        /* The setcc software trap.  The user has condition codes in %g1
         * that it would like placed in the %psr.  Be careful not to flip
         * any unintentional bits!
         */
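        /* Again for reference: the sll by 0x14 (20) below moves the user's
         * four ICC bits up into PSR bits 23:20, and the PSR_ICC mask keeps
         * every other %psr bit untouched.
         */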

        .align  4
        .globl  setcc_trap_handler
setcc_trap_handler:
        sll     %g1, 0x14, %l4
        set     PSR_ICC, %l5
        andn    %l0, %l5, %l0   ! clear ICC bits in %psr
        and     %l4, %l5, %l4   ! clear non-ICC bits in user value
        or      %l4, %l0, %l4   ! or them in... mix mix mix

        wr      %l4, 0x0, %psr  ! set new %psr
        WRITE_PAUSE             ! TI scumbags...

        jmp     %l2             ! advance over trap instruction
        rett    %l2 + 0x4       ! like this...

        .align  4
        .globl  linux_trap_nmi_sun4c
linux_trap_nmi_sun4c:
        SAVE_ALL

        /* Ugh, we need to clear the IRQ line.  This is now
         * a very sun4c-specific trap handler...
         */
        sethi   %hi(interrupt_enable), %l5
        ld      [%l5 + %lo(interrupt_enable)], %l5
        ldub    [%l5], %l6
        andn    %l6, INTS_ENAB, %l6
        stb     %l6, [%l5]

        /* Now it is safe to re-enable traps without recursion. */
        or      %l0, PSR_PIL, %l0
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        /* Now call the C code with the pt_regs frame ptr and the
         * memory error registers as arguments.  The ordering chosen
         * here is due to unlatching semantics.
         */
        sethi   %hi(AC_SYNC_ERR), %o0
        add     %o0, 0x4, %o0
        lda     [%o0] ASI_CONTROL, %o2  ! sync vaddr
        sub     %o0, 0x4, %o0
        lda     [%o0] ASI_CONTROL, %o1  ! sync error
        add     %o0, 0xc, %o0
        lda     [%o0] ASI_CONTROL, %o4  ! async vaddr
        sub     %o0, 0x4, %o0
        lda     [%o0] ASI_CONTROL, %o3  ! async error
        call    sparc_lvl15_nmi
         add    %sp, STACKFRAME_SZ, %o0

        RESTORE_ALL

        .align  4
        .globl  invalid_segment_patch1_ff
        .globl  invalid_segment_patch2_ff
invalid_segment_patch1_ff:      cmp     %l4, 0xff
invalid_segment_patch2_ff:      mov     0xff, %l3

        .align  4
        .globl  invalid_segment_patch1_1ff
        .globl  invalid_segment_patch2_1ff
invalid_segment_patch1_1ff:     cmp     %l4, 0x1ff
invalid_segment_patch2_1ff:     mov     0x1ff, %l3

        .align  4
        .globl  num_context_patch1_16, num_context_patch2_16
num_context_patch1_16:          mov     0x10, %l7
num_context_patch2_16:          mov     0x10, %l7

        .align  4
        .globl  vac_linesize_patch_32
vac_linesize_patch_32:          subcc   %l7, 32, %l7

        .align  4
        .globl  vac_hwflush_patch1_on, vac_hwflush_patch2_on

/*
 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
 * two instructions (Anton)
 */
vac_hwflush_patch1_on:          addcc   %l7, -PAGE_SIZE, %l7

vac_hwflush_patch2_on:          sta     %g0, [%l3 + %l7] ASI_HWFLUSHSEG

        .globl  invalid_segment_patch1, invalid_segment_patch2
        .globl  num_context_patch1
        .globl  vac_linesize_patch, vac_hwflush_patch1
        .globl  vac_hwflush_patch2

        .align  4
        .globl  sun4c_fault

! %l0 = %psr
! %l1 = %pc
! %l2 = %npc
! %l3 = %wim
! %l7 = 1 for textfault
! We want error in %l5, vaddr in %l6
sun4c_fault:
        sethi   %hi(AC_SYNC_ERR), %l4
        add     %l4, 0x4, %l6                   ! AC_SYNC_VA in %l6
        lda     [%l6] ASI_CONTROL, %l5          ! Address
        lda     [%l4] ASI_CONTROL, %l6          ! Error, retained for a bit

        andn    %l5, 0xfff, %l5                 ! Encode all info into l7
        srl     %l6, 14, %l4

        and     %l4, 2, %l4
        or      %l5, %l4, %l4

        or      %l4, %l7, %l7                   ! l7 = [addr,write,txtfault]

        andcc   %l0, PSR_PS, %g0
        be      sun4c_fault_fromuser
         andcc  %l7, 1, %g0                     ! Text fault?

        be      1f
         sethi  %hi(KERNBASE), %l4

        mov     %l1, %l5                        ! PC

1:
        cmp     %l5, %l4
        blu     sun4c_fault_fromuser
         sethi  %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4

        /* If the kernel references a bum kernel pointer, or a pte which
         * points to a non-existent page in RAM, we will run this code
         * _forever_ and lock up the machine!!!!! So we must check for
         * this condition, the AC_SYNC_ERR bits are what we must examine.
         * Also a parity error would make this happen as well.  So we just
         * check that we are in fact servicing a TLB miss and not some
         * other type of fault for the kernel.
         */
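        /* Hedged note: judging from the comment above, the 0x80 test below
         * picks out the "invalid access" (i.e. MMU miss) bit of AC_SYNC_ERR;
         * any other error type bails out to the C fault handler instead of
         * looping here.
         */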
        andcc   %l6, 0x80, %g0
        be      sun4c_fault_fromuser
         and    %l5, %l4, %l5

        /* Test for NULL pte_t * in vmalloc area. */
        sethi   %hi(VMALLOC_START), %l4
        cmp     %l5, %l4
        blu,a   invalid_segment_patch1
         lduXa  [%l5] ASI_SEGMAP, %l4

        sethi   %hi(swapper_pg_dir), %l4
        srl     %l5, SUN4C_PGDIR_SHIFT, %l6
        or      %l4, %lo(swapper_pg_dir), %l4
        sll     %l6, 2, %l6
        ld      [%l4 + %l6], %l4
        andcc   %l4, PAGE_MASK, %g0
        be      sun4c_fault_fromuser
         lduXa  [%l5] ASI_SEGMAP, %l4

invalid_segment_patch1:
        cmp     %l4, 0x7f
        bne     1f
         sethi  %hi(sun4c_kfree_ring), %l4
        or      %l4, %lo(sun4c_kfree_ring), %l4
        ld      [%l4 + 0x18], %l3
        deccc   %l3                     ! do we have a free entry?
        bcs,a   2f                      ! no, unmap one.
         sethi  %hi(sun4c_kernel_ring), %l4

        st      %l3, [%l4 + 0x18]       ! sun4c_kfree_ring.num_entries--

        ld      [%l4 + 0x00], %l6       ! entry = sun4c_kfree_ring.ringhd.next
        st      %l5, [%l6 + 0x08]       ! entry->vaddr = address

        ld      [%l6 + 0x00], %l3       ! next = entry->next
        ld      [%l6 + 0x04], %l7       ! entry->prev

        st      %l7, [%l3 + 0x04]       ! next->prev = entry->prev
        st      %l3, [%l7 + 0x00]       ! entry->prev->next = next

        sethi   %hi(sun4c_kernel_ring), %l4
        or      %l4, %lo(sun4c_kernel_ring), %l4
                                        ! head = &sun4c_kernel_ring.ringhd

        ld      [%l4 + 0x00], %l7       ! head->next

        st      %l4, [%l6 + 0x04]       ! entry->prev = head
        st      %l7, [%l6 + 0x00]       ! entry->next = head->next
        st      %l6, [%l7 + 0x04]       ! head->next->prev = entry

        st      %l6, [%l4 + 0x00]       ! head->next = entry

        ld      [%l4 + 0x18], %l3
        inc     %l3                     ! sun4c_kernel_ring.num_entries++
        st      %l3, [%l4 + 0x18]
        b       4f
         ld     [%l6 + 0x08], %l5

2:
        or      %l4, %lo(sun4c_kernel_ring), %l4
                                        ! head = &sun4c_kernel_ring.ringhd

        ld      [%l4 + 0x04], %l6       ! entry = head->prev

        ld      [%l6 + 0x08], %l3       ! tmp = entry->vaddr

        ! Flush segment from the cache.
        sethi   %hi((64 * 1024)), %l7
9:
vac_hwflush_patch1:
vac_linesize_patch:
        subcc   %l7, 16, %l7
        bne     9b
vac_hwflush_patch2:
         sta    %g0, [%l3 + %l7] ASI_FLUSHSEG

        st      %l5, [%l6 + 0x08]       ! entry->vaddr = address

        ld      [%l6 + 0x00], %l5       ! next = entry->next
        ld      [%l6 + 0x04], %l7       ! entry->prev

        st      %l7, [%l5 + 0x04]       ! next->prev = entry->prev
        st      %l5, [%l7 + 0x00]       ! entry->prev->next = next
        st      %l4, [%l6 + 0x04]       ! entry->prev = head

        ld      [%l4 + 0x00], %l7       ! head->next

        st      %l7, [%l6 + 0x00]       ! entry->next = head->next
        st      %l6, [%l7 + 0x04]       ! head->next->prev = entry
        st      %l6, [%l4 + 0x00]       ! head->next = entry

        mov     %l3, %l5                ! address = tmp

4:
num_context_patch1:
        mov     0x08, %l7

        ld      [%l6 + 0x08], %l4
        ldub    [%l6 + 0x0c], %l3
        or      %l4, %l3, %l4           ! encode new vaddr/pseg into l4

        sethi   %hi(AC_CONTEXT), %l3
        lduba   [%l3] ASI_CONTROL, %l6

        /* Invalidate old mapping, instantiate new mapping,
         * for each context.  Registers l6/l7 are live across
         * this loop.
         */
3:      deccc   %l7
        sethi   %hi(AC_CONTEXT), %l3
        stba    %l7, [%l3] ASI_CONTROL
invalid_segment_patch2:
        mov     0x7f, %l3
        stXa    %l3, [%l5] ASI_SEGMAP
        andn    %l4, 0x1ff, %l3
        bne     3b
         stXa   %l4, [%l3] ASI_SEGMAP

        sethi   %hi(AC_CONTEXT), %l3
        stba    %l6, [%l3] ASI_CONTROL

        andn    %l4, 0x1ff, %l5

1:
        sethi   %hi(VMALLOC_START), %l4
        cmp     %l5, %l4

        bgeu    1f
         mov    1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7

        sethi   %hi(KERNBASE), %l6

        sub     %l5, %l6, %l4
        srl     %l4, PAGE_SHIFT, %l4
        sethi   %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
        or      %l3, %l4, %l3

        sethi   %hi(PAGE_SIZE), %l4

2:
        sta     %l3, [%l5] ASI_PTE
        deccc   %l7
        inc     %l3
        bne     2b
         add    %l5, %l4, %l5

        b       7f
         sethi  %hi(sun4c_kernel_faults), %l4

1:
        srl     %l5, SUN4C_PGDIR_SHIFT, %l3
        sethi   %hi(swapper_pg_dir), %l4
        or      %l4, %lo(swapper_pg_dir), %l4
        sll     %l3, 2, %l3
        ld      [%l4 + %l3], %l4
        and     %l4, PAGE_MASK, %l4

        srl     %l5, (PAGE_SHIFT - 2), %l6
        and     %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
        add     %l6, %l4, %l6

        sethi   %hi(PAGE_SIZE), %l4

2:
        ld      [%l6], %l3
        deccc   %l7
        sta     %l3, [%l5] ASI_PTE
        add     %l6, 0x4, %l6
        bne     2b
         add    %l5, %l4, %l5

        sethi   %hi(sun4c_kernel_faults), %l4
7:
        ld      [%l4 + %lo(sun4c_kernel_faults)], %l3
        inc     %l3
        st      %l3, [%l4 + %lo(sun4c_kernel_faults)]

        /* Restore condition codes */
        wr      %l0, 0x0, %psr
        WRITE_PAUSE
        jmp     %l1
         rett   %l2

sun4c_fault_fromuser:
        SAVE_ALL
         nop

        mov     %l7, %o1                ! Decode the info from %l7
        mov     %l7, %o2
        and     %o1, 1, %o1             ! arg2 = text_faultp
        mov     %l7, %o3
        and     %o2, 2, %o2             ! arg3 = writep
        andn    %o3, 0xfff, %o3         ! arg4 = faulting address

        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        call    do_sun4c_fault
         add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr

        RESTORE_ALL

        .align  4
        .globl  srmmu_fault
srmmu_fault:
        mov     0x400, %l5
        mov     0x300, %l4

        lda     [%l5] ASI_M_MMUREGS, %l6        ! read sfar first
        lda     [%l4] ASI_M_MMUREGS, %l5        ! read sfsr last

        andn    %l6, 0xfff, %l6
        srl     %l5, 6, %l5                     ! and encode all info into l7

        and     %l5, 2, %l5
        or      %l5, %l6, %l6

        or      %l6, %l7, %l7                   ! l7 = [addr,write,txtfault]

        SAVE_ALL

        mov     %l7, %o1
        mov     %l7, %o2
        and     %o1, 1, %o1             ! arg2 = text_faultp
        mov     %l7, %o3
        and     %o2, 2, %o2             ! arg3 = writep
        andn    %o3, 0xfff, %o3         ! arg4 = faulting address

        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        call    do_sparc_fault
         add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr

        RESTORE_ALL

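        /* The wrappers below all use the same leaf tail-call trick: the
         * original return address is stashed in %l5, and the delay slot of
         * the call restores it into %o7 (which the call instruction has
         * just overwritten), so the C function returns directly to our
         * caller.
         */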
        .align  4
        .globl  sys_nis_syscall
sys_nis_syscall:
        mov     %o7, %l5
        add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
        call    c_sys_nis_syscall
         mov    %l5, %o7

        .align  4
        .globl  sys_execve
sys_execve:
        mov     %o7, %l5
        add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
        call    sparc_execve
         mov    %l5, %o7

        .globl  sunos_execv
sunos_execv:
        st      %g0, [%sp + STACKFRAME_SZ + PT_I2]

        call    sparc_execve
         add    %sp, STACKFRAME_SZ, %o0

        b       ret_sys_call
         ld     [%sp + STACKFRAME_SZ + PT_I0], %o0

        .align  4
        .globl  sys_sparc_pipe
sys_sparc_pipe:
        mov     %o7, %l5
        add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
        call    sparc_pipe
         mov    %l5, %o7

        .align  4
        .globl  sys_sigaltstack
sys_sigaltstack:
        mov     %o7, %l5
        mov     %fp, %o2
        call    do_sigaltstack
         mov    %l5, %o7

        .align  4
        .globl  sys_sigstack
sys_sigstack:
        mov     %o7, %l5
        mov     %fp, %o2
        call    do_sys_sigstack
         mov    %l5, %o7

        .align  4
        .globl  sys_sigreturn
sys_sigreturn:
        call    do_sigreturn
         add    %sp, STACKFRAME_SZ, %o0

        ld      [%curptr + TI_FLAGS], %l5
        andcc   %l5, _TIF_SYSCALL_TRACE, %g0
        be      1f
         nop

        call    syscall_trace
         nop

1:
        /* We don't want to muck with user registers like a
         * normal syscall, just return.
         */
        RESTORE_ALL

        .align  4
        .globl  sys_rt_sigreturn
sys_rt_sigreturn:
        call    do_rt_sigreturn
         add    %sp, STACKFRAME_SZ, %o0

        ld      [%curptr + TI_FLAGS], %l5
        andcc   %l5, _TIF_SYSCALL_TRACE, %g0
        be      1f
         nop

        add     %sp, STACKFRAME_SZ, %o0
        call    syscall_trace
         mov    1, %o1

1:
        /* We are returning to a signal handler. */
        RESTORE_ALL

        /* Now that we have a real sys_clone, sys_fork() is
         * implemented in terms of it.  Our _real_ implementation
         * of SunOS vfork() will use sys_vfork().
         *
         * XXX These three should be consolidated into mostly shared
         * XXX code just like on sparc64... -DaveM
         */
        .align  4
        .globl  sys_fork, flush_patch_two
sys_fork:
        mov     %o7, %l5
flush_patch_two:
        FLUSH_ALL_KERNEL_WINDOWS;
        ld      [%curptr + TI_TASK], %o4
        rd      %psr, %g4
        WRITE_PAUSE
        mov     SIGCHLD, %o0                    ! arg0: clone flags
        rd      %wim, %g5
        WRITE_PAUSE
        mov     %fp, %o1                        ! arg1: usp
        std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
        add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
        mov     0, %o3
        call    sparc_do_fork
         mov    %l5, %o7

        /* Whee, kernel threads! */
        .globl  sys_clone, flush_patch_three
sys_clone:
        mov     %o7, %l5
flush_patch_three:
        FLUSH_ALL_KERNEL_WINDOWS;
        ld      [%curptr + TI_TASK], %o4
        rd      %psr, %g4
        WRITE_PAUSE

        /* arg0,1: flags,usp  -- loaded already */
        cmp     %o1, 0x0                        ! Is new_usp NULL?
        rd      %wim, %g5
        WRITE_PAUSE
        be,a    1f
         mov    %fp, %o1                        ! yes, use caller's usp
        andn    %o1, 7, %o1                     ! no, align to 8 bytes
1:
        std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
        add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
        mov     0, %o3
        call    sparc_do_fork
         mov    %l5, %o7

        /* Whee, real vfork! */
        .globl  sys_vfork, flush_patch_four
sys_vfork:
flush_patch_four:
        FLUSH_ALL_KERNEL_WINDOWS;
        ld      [%curptr + TI_TASK], %o4
        rd      %psr, %g4
        WRITE_PAUSE
        rd      %wim, %g5
        WRITE_PAUSE
        std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
        sethi   %hi(0x4000 | 0x0100 | SIGCHLD), %o0
        mov     %fp, %o1
        or      %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
        sethi   %hi(sparc_do_fork), %l1
        mov     0, %o3
        jmpl    %l1 + %lo(sparc_do_fork), %g0
         add    %sp, STACKFRAME_SZ, %o2

        .align  4
linux_sparc_ni_syscall:
        sethi   %hi(sys_ni_syscall), %l7
        b       syscall_is_too_hard
         or     %l7, %lo(sys_ni_syscall), %l7

linux_fast_syscall:
        andn    %l7, 3, %l7
        mov     %i0, %o0
        mov     %i1, %o1
        mov     %i2, %o2
        jmpl    %l7 + %g0, %g0
         mov    %i3, %o3

linux_syscall_trace:
        add     %sp, STACKFRAME_SZ, %o0
        call    syscall_trace
         mov    0, %o1
        cmp     %o0, 0
        bne     3f
         mov    -ENOSYS, %o0
        mov     %i0, %o0
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
        b       2f
         mov    %i4, %o4

        .globl  ret_from_fork
ret_from_fork:
        call    schedule_tail
         ld     [%g3 + TI_TASK], %o0
        b       ret_sys_call
         ld     [%sp + STACKFRAME_SZ + PT_I0], %o0

        /* Linux native system calls enter here... */
        .align  4
        .globl  linux_sparc_syscall
linux_sparc_syscall:
        sethi   %hi(PSR_SYSCALL), %l4
        or      %l0, %l4, %l0
        /* Direct access to user regs, much faster. */
        cmp     %g1, NR_syscalls
        bgeu    linux_sparc_ni_syscall
         sll    %g1, 2, %l4
        ld      [%l7 + %l4], %l7
        andcc   %l7, 1, %g0
        bne     linux_fast_syscall
         /* Just do the first insn from SAVE_ALL in the delay slot */

syscall_is_too_hard:
        SAVE_ALL_HEAD
         rd     %wim, %l3

        wr      %l0, PSR_ET, %psr
        mov     %i0, %o0
        mov     %i1, %o1
        mov     %i2, %o2

        ld      [%curptr + TI_FLAGS], %l5
        mov     %i3, %o3
        andcc   %l5, _TIF_SYSCALL_TRACE, %g0
        mov     %i4, %o4
        bne     linux_syscall_trace
         mov    %i0, %l5
2:
        call    %l7
         mov    %i5, %o5

3:
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]

ret_sys_call:
        ld      [%curptr + TI_FLAGS], %l6
        cmp     %o0, -ERESTART_RESTARTBLOCK
        ld      [%sp + STACKFRAME_SZ + PT_PSR], %g3
        set     PSR_C, %g2
        bgeu    1f
         andcc  %l6, _TIF_SYSCALL_TRACE, %g0

        /* System call success, clear Carry condition code. */
        andn    %g3, %g2, %g3
        clr     %l6
        st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
        bne     linux_syscall_trace2
         ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
        add     %l1, 0x4, %l2                   /* npc = npc+4 */
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
        b       ret_trap_entry
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1:
        /* System call failure, set Carry condition code.
         * Also, get abs(errno) to return to the process.
         */
        sub     %g0, %o0, %o0
        or      %g3, %g2, %g3
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
        mov     1, %l6
        st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
        bne     linux_syscall_trace2
         ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
        add     %l1, 0x4, %l2                   /* npc = npc+4 */
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
        b       ret_trap_entry
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]

linux_syscall_trace2:
        add     %sp, STACKFRAME_SZ, %o0
        mov     1, %o1
        call    syscall_trace
         add    %l1, 0x4, %l2                   /* npc = npc+4 */
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
        b       ret_trap_entry
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]


/* Saving and restoring the FPU state is best done from lowlevel code.
 *
 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
 *             void *fpqueue, unsigned long *fpqdepth)
 */
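/* For reference (SPARC V8): the 0x2000 tested below is the FSR qne
 * (queue-not-empty) bit 13; the loop at 1: drains the floating-point
 * queue one doubleword at a time until qne clears.
 */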

        .globl  fpsave
fpsave:
        st      %fsr, [%o1]     ! this can trap on us if fpu is in bogon state
        ld      [%o1], %g1
        set     0x2000, %g4
        andcc   %g1, %g4, %g0
        be      2f
         mov    0, %g2

        /* We have an fpqueue to save. */
1:
        std     %fq, [%o2]
fpsave_magic:
        st      %fsr, [%o1]
        ld      [%o1], %g3
        andcc   %g3, %g4, %g0
        add     %g2, 1, %g2
        bne     1b
         add    %o2, 8, %o2

2:
        st      %g2, [%o3]

        std     %f0, [%o0 + 0x00]
        std     %f2, [%o0 + 0x08]
        std     %f4, [%o0 + 0x10]
        std     %f6, [%o0 + 0x18]
        std     %f8, [%o0 + 0x20]
        std     %f10, [%o0 + 0x28]
        std     %f12, [%o0 + 0x30]
        std     %f14, [%o0 + 0x38]
        std     %f16, [%o0 + 0x40]
        std     %f18, [%o0 + 0x48]
        std     %f20, [%o0 + 0x50]
        std     %f22, [%o0 + 0x58]
        std     %f24, [%o0 + 0x60]
        std     %f26, [%o0 + 0x68]
        std     %f28, [%o0 + 0x70]
        retl
         std    %f30, [%o0 + 0x78]

        /* Thanks to Theo de Raadt and the authors of the Sprite/NetBSD/OpenBSD
         * code for pointing out this possible deadlock: while we save state
         * above we could trap on the %fsr store, so our low-level FPU trap
         * code has to know how to deal with this.
         */
fpsave_catch:
        b       fpsave_magic + 4
         st     %fsr, [%o1]

fpsave_catch2:
        b       fpsave + 4
         st     %fsr, [%o1]

        /* void fpload(unsigned long *fpregs, unsigned long *fsr); */

        .globl  fpload
fpload:
        ldd     [%o0 + 0x00], %f0
        ldd     [%o0 + 0x08], %f2
        ldd     [%o0 + 0x10], %f4
        ldd     [%o0 + 0x18], %f6
        ldd     [%o0 + 0x20], %f8
        ldd     [%o0 + 0x28], %f10
        ldd     [%o0 + 0x30], %f12
        ldd     [%o0 + 0x38], %f14
        ldd     [%o0 + 0x40], %f16
        ldd     [%o0 + 0x48], %f18
        ldd     [%o0 + 0x50], %f20
        ldd     [%o0 + 0x58], %f22
        ldd     [%o0 + 0x60], %f24
        ldd     [%o0 + 0x68], %f26
        ldd     [%o0 + 0x70], %f28
        ldd     [%o0 + 0x78], %f30
        ld      [%o1], %fsr
        retl
         nop

        /* __ndelay and __udelay take two arguments:
         * 0 - nsecs or usecs to delay
         * 1 - per_cpu udelay_val (loops per jiffy)
         *
         * Note that ndelay gives HZ times higher resolution but has a 10ms
         * limit.  udelay can handle up to 1s.
         */
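        /* The constants below are fixed-point scale factors; a worked
         * check assuming HZ=100: 0x1ae = 430 ~= 2**32/(10**9/HZ), and
         * 0x10c7 = 4295 ~= 2**32/10**6, both rounded up so large delays
         * do not undershoot.  The loop count is then roughly
         * arg * scale * udelay_val * HZ / 2**32, with the shift taken
         * implicitly by using the high word of the .umul products.
         */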
        .globl  __ndelay
__ndelay:
        save    %sp, -STACKFRAME_SZ, %sp
        mov     %i0, %o0
        call    .umul                   ! round multiplier up so large ns ok
         mov    0x1ae, %o1              ! 2**32 / (1 000 000 000 / HZ)
        call    .umul
         mov    %i1, %o1                ! udelay_val
        ba      delay_continue
         mov    %o1, %o0                ! >>32 later for better resolution

        .globl  __udelay
__udelay:
        save    %sp, -STACKFRAME_SZ, %sp
        mov     %i0, %o0
        sethi   %hi(0x10c7), %o1        ! round multiplier up so large us ok
        call    .umul
         or     %o1, %lo(0x10c7), %o1   ! 2**32 / 1 000 000
        call    .umul
         mov    %i1, %o1                ! udelay_val
        sethi   %hi(0x028f4b62), %l0    ! Add in rounding constant * 2**32,
        or      %g0, %lo(0x028f4b62), %l0
        addcc   %o0, %l0, %o0           ! 2**32 * 0.009 999
        bcs,a   3f
         add    %o1, 0x01, %o1
3:
        call    .umul
         mov    HZ, %o0                 ! >>32 earlier for wider range

delay_continue:
        cmp     %o0, 0x0
1:
        bne     1b
         subcc  %o0, 1, %o0

        ret
        restore

        /* Handle a software breakpoint */
        /* We have to inform parent that child has stopped */
        .align 4
        .globl breakpoint_trap
breakpoint_trap:
        rd      %wim,%l3
        SAVE_ALL
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        st      %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
        call    sparc_breakpoint
         add    %sp, STACKFRAME_SZ, %o0

        RESTORE_ALL

#ifdef CONFIG_KGDB
        .align  4
        .globl  kgdb_trap_low
        .type   kgdb_trap_low,#function
kgdb_trap_low:
        rd      %wim,%l3
        SAVE_ALL
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        call    kgdb_trap
         add    %sp, STACKFRAME_SZ, %o0

        RESTORE_ALL
        .size   kgdb_trap_low,.-kgdb_trap_low
#endif

        .align  4
        .globl  flush_patch_exception
flush_patch_exception:
        FLUSH_ALL_KERNEL_WINDOWS;
        ldd     [%o0], %o6
        jmpl    %o7 + 0xc, %g0                  ! see asm-sparc/processor.h
         mov    1, %g1                          ! signal EFAULT condition

        .align  4
        .globl  kill_user_windows, kuw_patch1_7win
        .globl  kuw_patch1
kuw_patch1_7win:        sll     %o3, 6, %o3

        /* No matter how much overhead this routine has in the worst
         * case scenario, it is several times better than taking the
         * traps with the old method of just doing flush_user_windows().
         */
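        /* How the loop below works: srl %o3, 1 computes the window a save
         * would move into, and the patched sll (7 here, 6 via
         * kuw_patch1_7win on 7-window chips) wraps the bit around, i.e. a
         * rotate of %wim.  Each simulated save knocks one user window out
         * of the uwinmask without ever taking the overflow trap.
         */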
kill_user_windows:
        ld      [%g6 + TI_UWINMASK], %o0        ! get current umask
        orcc    %g0, %o0, %g0                   ! if no bits set, we are done
        be      3f                              ! nothing to do
         rd     %psr, %o5                       ! must clear interrupts
        or      %o5, PSR_PIL, %o4               ! or else that could change
        wr      %o4, 0x0, %psr                  ! the uwinmask state
        WRITE_PAUSE                             ! burn them cycles
1:
        ld      [%g6 + TI_UWINMASK], %o0        ! get consistent state
        orcc    %g0, %o0, %g0                   ! did an interrupt come in?
        be      4f                              ! yep, we are done
         rd     %wim, %o3                       ! get current wim
        srl     %o3, 1, %o4                     ! simulate a save
kuw_patch1:
        sll     %o3, 7, %o3                     ! compute next wim
        or      %o4, %o3, %o3                   ! result
        andncc  %o0, %o3, %o0                   ! clean this bit in umask
        bne     kuw_patch1                      ! not done yet
         srl    %o3, 1, %o4                     ! begin another save simulation
        wr      %o3, 0x0, %wim                  ! set the new wim
        st      %g0, [%g6 + TI_UWINMASK]        ! clear uwinmask
4:
        wr      %o5, 0x0, %psr                  ! re-enable interrupts
        WRITE_PAUSE                             ! burn baby burn
3:
        retl                                    ! return
         st     %g0, [%g6 + TI_W_SAVED]         ! no windows saved

        .align  4
        .globl  restore_current
restore_current:
        LOAD_CURRENT(g6, o0)
        retl
         nop

#ifdef CONFIG_PCIC_PCI
#include <asm/pcic.h>

        .align  4
        .globl  linux_trap_ipi15_pcic
linux_trap_ipi15_pcic:
        rd      %wim, %l3
        SAVE_ALL

        /*
         * First deactivate the NMI, or we cannot drop ET and
         * cannot take window spill traps.
         * The busy loop is necessary because the PIO error
         * sometimes does not go away quickly and we trap again.
         */
        sethi   %hi(pcic_regs), %o1
        ld      [%o1 + %lo(pcic_regs)], %o2

        ! Get pending status for printouts later.
        ld      [%o2 + PCI_SYS_INT_PENDING], %o0

        mov     PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
        stb     %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1:
        ld      [%o2 + PCI_SYS_INT_PENDING], %o1
        andcc   %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
        bne     1b
         nop

        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE

        call    pcic_nmi
         add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
        RESTORE_ALL

        .globl  pcic_nmi_trap_patch
pcic_nmi_trap_patch:
        sethi   %hi(linux_trap_ipi15_pcic), %l3
        jmpl    %l3 + %lo(linux_trap_ipi15_pcic), %g0
         rd     %psr, %l0
        .word   0

#endif /* CONFIG_PCIC_PCI */

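        /* flushw_all: callable window-flush helper.  The nested saves walk
         * every register window (spilling via overflow traps as needed,
         * same idea as FLUSH_ALL_KERNEL_WINDOWS above) using minimal
         * 0x40-byte frames, and the restores unwind back to the caller.
         */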
        .globl  flushw_all
flushw_all:
        save    %sp, -0x40, %sp
        save    %sp, -0x40, %sp
        save    %sp, -0x40, %sp
        save    %sp, -0x40, %sp
        save    %sp, -0x40, %sp
        save    %sp, -0x40, %sp
        save    %sp, -0x40, %sp
        restore
        restore
        restore
        restore
        restore
        restore
        ret
         restore

/* End of entry.S */