// linux/arch/ia64/kernel/mca_asm.S
   1/*
   2 * File:        mca_asm.S
   3 * Purpose:     assembly portion of the IA64 MCA handling
   4 *
   5 * Mods by cfleck to integrate into kernel build
   6 *
   7 * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
   8 *              Added various stop bits to get a clean compile
   9 *
  10 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
  11 *              Added code to save INIT handoff state in pt_regs format,
  12 *              switch to temp kstack, switch modes, jump to C INIT handler
  13 *
  14 * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
  15 *              Before entering virtual mode code:
  16 *               1. Check for TLB CPU error
  17 *               2. Restore current thread pointer to kr6
  18 *               3. Move stack ptr 16 bytes to conform to C calling convention
  19 *
  20 * 2004-11-12 Russ Anderson <rja@sgi.com>
  21 *              Added per cpu MCA/INIT stack save areas.
  22 *
  23 * 2005-12-08 Keith Owens <kaos@sgi.com>
  24 *              Use per cpu MCA/INIT stacks for all data.
  25 */
  26#include <linux/threads.h>
  27
  28#include <asm/asmmacro.h>
  29#include <asm/pgtable.h>
  30#include <asm/processor.h>
  31#include <asm/mca_asm.h>
  32#include <asm/mca.h>
  33
  34#include "entry.h"
  35
   36#define GET_IA64_MCA_DATA(reg)                                          \
   37        GET_THIS_PADDR(reg, ia64_mca_data)                              \
   38        ;;                                                              \
   39        ld8 reg=[reg]
// GET_IA64_MCA_DATA(reg): load the physical address of this cpu's
// MCA/INIT save area into "reg".  ia64_mca_data holds a per cpu pointer,
// hence the extra ld8 to dereference it.  Clobbers only "reg".
  40
  41        .global ia64_do_tlb_purge
  42        .global ia64_os_mca_dispatch
  43        .global ia64_os_init_on_kdump
  44        .global ia64_os_init_dispatch_monarch
  45        .global ia64_os_init_dispatch_slave
  46
  47        .text
  48        .align 16
  49
  50//StartMain////////////////////////////////////////////////////////////////////
  51
  52/*
  53 * Just the TLB purge part is moved to a separate function
  54 * so we can re-use the code for cpu hotplug code as well
  55 * Caller should now setup b1, so we can branch once the
  56 * tlb flush is complete.
  57 */
  58
   59ia64_do_tlb_purge:
        // Flush the TLB while running in physical mode: purge all
        // translation cache entries with ptc.e, iterating over this
        // cpu's PAL ptce_base/count/stride values, then purge the TR
        // mappings for kernel text/data, PAL code and the current
        // kernel stack.
        // On entry: b1 = caller's return address (branched to at the end).
        // Clobbers: r2, r16-r22, r24, p6, p7, ar.lc.
   60#define O(member)       IA64_CPUINFO_##member##_OFFSET
   61
   62        GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
   63        ;;
   64        addl r17=O(PTCE_STRIDE),r2
   65        addl r2=O(PTCE_BASE),r2
   66        ;;
   67        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;     // r18=ptce_base
   68        ld4 r19=[r2],4                                  // r19=ptce_count[0]
   69        ld4 r21=[r17],4                                 // r21=ptce_stride[0]
   70        ;;
   71        ld4 r20=[r2]                                    // r20=ptce_count[1]
   72        ld4 r22=[r17]                                   // r22=ptce_stride[1]
   73        mov r24=0
   74        ;;
   75        adds r20=-1,r20
   76        ;;
   77#undef O
   78
        // Two-level ptc.e loop: r24 counts the outer ptce_count[0]
        // iterations; the inner loop runs ptce_count[1] times via ar.lc.
   792:
   80        cmp.ltu p6,p7=r24,r19
   81(p7)    br.cond.dpnt.few 4f
   82        mov ar.lc=r20
   833:
   84        ptc.e r18
   85        ;;
   86        add r18=r22,r18
   87        br.cloop.sptk.few 3b
   88        ;;
   89        add r18=r21,r18
   90        add r24=1,r24
   91        ;;
   92        br.sptk.few 2b
   934:
   94        srlz.i                  // srlz.i implies srlz.d
   95        ;;
   96
   97        // Now purge addresses formerly mapped by TR registers
   98        // 1. Purge ITR&DTR for kernel.
   99        movl r16=KERNEL_START
  100        mov r18=KERNEL_TR_PAGE_SHIFT<<2
  101        ;;
  102        ptr.i r16, r18
  103        ptr.d r16, r18
  104        ;;
  105        srlz.i
  106        ;;
  107        srlz.d
  108        ;;
        // NOTE(review): the step numbering jumps from 1. to 3.; a former
        // step 2 appears to have been removed.
  109        // 3. Purge ITR for PAL code.
  110        GET_THIS_PADDR(r2, ia64_mca_pal_base)
  111        ;;
  112        ld8 r16=[r2]
  113        mov r18=IA64_GRANULE_SHIFT<<2
  114        ;;
  115        ptr.i r16,r18
  116        ;;
  117        srlz.i
  118        ;;
  119        // 4. Purge DTR for stack.
  120        mov r16=IA64_KR(CURRENT_STACK)
  121        ;;
  122        shl r16=r16,IA64_GRANULE_SHIFT
  123        movl r19=PAGE_OFFSET
  124        ;;
  125        add r16=r19,r16
  126        mov r18=IA64_GRANULE_SHIFT<<2
  127        ;;
  128        ptr.d r16,r18
  129        ;;
  130        srlz.i
  131        ;;
  132        // Now branch away to caller.
  133        br.sptk.many b1
  134        ;;
 135
 136//EndMain//////////////////////////////////////////////////////////////////////
 137
 138//StartMain////////////////////////////////////////////////////////////////////
 139
  140ia64_os_mca_dispatch:
        // SAL to OS entry point for MCA, entered in physical mode.
        // Flow: save the non-minstate state, purge and reload the TLB if
        // the processor state parameter flags a TLB error, switch to the
        // per cpu MCA stack, set the kernel registers, enter virtual mode
        // and call the C handler ia64_mca_handler, then unwind the same
        // steps and return to SAL via the address saved in r12.
  141        mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET    // use the MCA stack
  142        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  143        mov r19=1                               // All MCA events are treated as monarch (for now)
  144        br.sptk ia64_state_save                 // save the state that is not in minstate
  1451:
  146
  147        GET_IA64_MCA_DATA(r2)
  148        // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
  149        ;;
  150        add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
  151        ;;
  152        ld8 r18=[r3]                            // Get processor state parameter on existing PALE_CHECK.
  153        ;;
        // psp bit 60 flags a TLB CPU error (see the 2002-01-04 changelog
        // note above); if it is clear (p7), the TLB is usable and the
        // purge/reload below is skipped.
  154        tbit.nz p6,p7=r18,60
  155(p7)    br.spnt done_tlb_purge_and_reload
  156
  157        // The following code purges TC and TR entries. Then reload all TC entries.
  158        // Purge percpu data TC entries.
  159begin_tlb_purge_and_reload:
        // NOTE(review): the movl of ia64_reload_tr below is immediately
        // overwritten by LOAD_PHYSICAL and looks redundant.
  160        movl r18=ia64_reload_tr;;
  161        LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
  162        mov b1=r18;;
  163        br.sptk.many ia64_do_tlb_purge;;
  164
  165ia64_reload_tr:
  166        // Finally reload the TR registers.
  167        // 1. Reload DTR/ITR registers for kernel.
  168        mov r18=KERNEL_TR_PAGE_SHIFT<<2
  169        movl r17=KERNEL_START
  170        ;;
  171        mov cr.itir=r18
  172        mov cr.ifa=r17
  173        mov r16=IA64_TR_KERNEL
  174        mov r19=ip
  175        movl r18=PAGE_KERNEL
  176        ;;
  177        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
  178        ;;
  179        or r18=r17,r18
  180        ;;
  181        itr.i itr[r16]=r18
  182        ;;
  183        itr.d dtr[r16]=r18
  184        ;;
  185        srlz.i
  186        srlz.d
  187        ;;
  188        // 3. Reload ITR for PAL code.
  189        GET_THIS_PADDR(r2, ia64_mca_pal_pte)
  190        ;;
  191        ld8 r18=[r2]                    // load PAL PTE
  192        ;;
  193        GET_THIS_PADDR(r2, ia64_mca_pal_base)
  194        ;;
  195        ld8 r16=[r2]                    // load PAL vaddr
  196        mov r19=IA64_GRANULE_SHIFT<<2
  197        ;;
  198        mov cr.itir=r19
  199        mov cr.ifa=r16
  200        mov r20=IA64_TR_PALCODE
  201        ;;
  202        itr.i itr[r20]=r18
  203        ;;
  204        srlz.i
  205        ;;
  206        // 4. Reload DTR for stack.
  207        mov r16=IA64_KR(CURRENT_STACK)
  208        ;;
  209        shl r16=r16,IA64_GRANULE_SHIFT
  210        movl r19=PAGE_OFFSET
  211        ;;
  212        add r18=r19,r16
  213        movl r20=PAGE_KERNEL
  214        ;;
  215        add r16=r20,r16
  216        mov r19=IA64_GRANULE_SHIFT<<2
  217        ;;
  218        mov cr.itir=r19
  219        mov cr.ifa=r18
  220        mov r20=IA64_TR_CURRENT_STACK
  221        ;;
  222        itr.d dtr[r20]=r16
        // Flag to the rest of the kernel that the TRs were reloaded.
  223        GET_THIS_PADDR(r2, ia64_mca_tr_reload)
  224        mov r18 = 1
  225        ;;
  226        srlz.d
  227        ;;
  228        st8 [r2] =r18
  229        ;;
  230
  231done_tlb_purge_and_reload:
  232
  233        // switch to per cpu MCA stack
  234        mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET    // use the MCA stack
  235        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  236        br.sptk ia64_new_stack
  2371:
  238
  239        // everything saved, now we can set the kernel registers
  240        mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET    // use the MCA stack
  241        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  242        br.sptk ia64_set_kernel_registers
  2431:
  244
  245        // This must be done in physical mode
  246        GET_IA64_MCA_DATA(r2)
  247        ;;
  248        mov r7=r2
  249
  250        // Enter virtual mode from physical mode
  251        VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
  252
  253        // This code returns to SAL via SOS r2, in general SAL has no unwind
  254        // data.  To get a clean termination when backtracing the C MCA/INIT
  255        // handler, set a dummy return address of 0 in this routine.  That
  256        // requires that ia64_os_mca_virtual_begin be a global function.
  257ENTRY(ia64_os_mca_virtual_begin)
  258        .prologue
  259        .save rp,r0
  260        .body
  261
  262        mov ar.rsc=3                            // set eager mode for C handler
  263        mov r2=r7                               // see GET_IA64_MCA_DATA above
  264        ;;
  265
        // Call ia64_mca_handler(pt_regs, switch_stack, sos), all three
        // pointers into the per cpu MCA save area, converted to virtual.
  266        // Call virtual mode handler
  267        alloc r14=ar.pfs,0,0,3,0
  268        ;;
  269        DATA_PA_TO_VA(r2,r7)
  270        ;;
  271        add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
  272        add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
  273        add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
  274        br.call.sptk.many    b0=ia64_mca_handler
  275
  276        // Revert back to physical mode before going back to SAL
  277        PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
  278ia64_os_mca_virtual_end:
  279
  280END(ia64_os_mca_virtual_begin)
  281
  282        // switch back to previous stack
  283        alloc r14=ar.pfs,0,0,0,0                // remove the MCA handler frame
  284        mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET    // use the MCA stack
  285        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  286        br.sptk ia64_old_stack
  2871:
  288
  289        mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET    // use the MCA stack
  290        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  291        br.sptk ia64_state_restore              // restore the SAL state
  2921:
  293
  294        mov             b0=r12                  // SAL_CHECK return address
  295
  296        br              b0
 297
 298//EndMain//////////////////////////////////////////////////////////////////////
 299
 300//StartMain////////////////////////////////////////////////////////////////////
 301
  302//
  303// NOP init handler for kdump.  In a panic situation we may receive INIT
  304// while transitioning between kernels.  Since registers are initialized
  305// on leaving the current kernel, the current kernel's monarch/slave
  306// handlers can no longer be called safely in virtual mode.
  307// We could unregister these init handlers from SAL, but then an INIT
  308// would result in a warmboot by SAL and we could not retrieve the
  309// crashdump.  Therefore register this NOP function with SAL, to prevent
  310// entering virtual mode and the resulting warmboot by SAL.
  311//
  312ia64_os_init_on_kdump:
        // Minimal INIT handler used during kdump: hand straight back to
        // SAL with r8 = IA64_INIT_RESUME, r9 = SAL gp, r10 = 0 (resume
        // the same context) and r22 = minstate pointer, returning via
        // the SAL_CHECK address in r12, without entering virtual mode.
  313        mov             r8=r0           // IA64_INIT_RESUME
  314        mov             r9=r10          // SAL_GP
  315        mov             r22=r17         // *minstate
  316        ;;
  317        mov             r10=r0          // return to same context
  318        mov             b0=r12          // SAL_CHECK return address
  319        br              b0
 320
 321//
 322// SAL to OS entry point for INIT on all processors.  This has been defined for
 323// registration purposes with SAL as a part of ia64_mca_init.  Monarch and
 324// slave INIT have identical processing, except for the value of the
 325// sos->monarch flag in r19.
 326//
 327
  328ia64_os_init_dispatch_monarch:
        // Monarch/slave INIT entry stubs: both set r19 (sos->monarch,
        // 1 = monarch, 0 = slave) and fall into the common dispatch
        // below, which mirrors the MCA path but uses the per cpu INIT
        // stack and calls ia64_init_handler.
  329        mov r19=1                               // Bow, bow, ye lower middle classes!
  330        br.sptk ia64_os_init_dispatch
  331
  332ia64_os_init_dispatch_slave:
  333        mov r19=0                               // <igor>yeth, mathter</igor>
  334
  335ia64_os_init_dispatch:
  336
  337        mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET   // use the INIT stack
  338        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  339        br.sptk ia64_state_save                 // save the state that is not in minstate
  3401:
  341
  342        // switch to per cpu INIT stack
  343        mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET   // use the INIT stack
  344        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  345        br.sptk ia64_new_stack
  3461:
  347
  348        // everything saved, now we can set the kernel registers
  349        mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET   // use the INIT stack
  350        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  351        br.sptk ia64_set_kernel_registers
  3521:
  353
  354        // This must be done in physical mode
  355        GET_IA64_MCA_DATA(r2)
  356        ;;
  357        mov r7=r2
  358
  359        // Enter virtual mode from physical mode
  360        VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
  361
  362        // This code returns to SAL via SOS r2, in general SAL has no unwind
  363        // data.  To get a clean termination when backtracing the C MCA/INIT
  364        // handler, set a dummy return address of 0 in this routine.  That
  365        // requires that ia64_os_init_virtual_begin be a global function.
  366ENTRY(ia64_os_init_virtual_begin)
  367        .prologue
  368        .save rp,r0
  369        .body
  370
  371        mov ar.rsc=3                            // set eager mode for C handler
  372        mov r2=r7                               // see GET_IA64_MCA_DATA above
  373        ;;
  374
        // Call ia64_init_handler(pt_regs, switch_stack, sos), all three
        // pointers into the per cpu INIT save area, converted to virtual.
  375        // Call virtual mode handler
  376        alloc r14=ar.pfs,0,0,3,0
  377        ;;
  378        DATA_PA_TO_VA(r2,r7)
  379        ;;
  380        add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
  381        add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
  382        add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
  383        br.call.sptk.many    b0=ia64_init_handler
  384
  385        // Revert back to physical mode before going back to SAL
  386        PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
  387ia64_os_init_virtual_end:
  388
  389END(ia64_os_init_virtual_begin)
  390
  391        mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET   // use the INIT stack
  392        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  393        br.sptk ia64_state_restore              // restore the SAL state
  3941:
  395
  396        // switch back to previous stack
  397        alloc r14=ar.pfs,0,0,0,0                // remove the INIT handler frame
  398        mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET   // use the INIT stack
  399        LOAD_PHYSICAL(p0,r2,1f)                 // return address
  400        br.sptk ia64_old_stack
  4011:
  402
  403        mov             b0=r12                  // SAL_CHECK return address
  404        br              b0
 405
 406//EndMain//////////////////////////////////////////////////////////////////////
 407
 408// common defines for the stubs
  409#define ms              r4
  410#define regs            r5
  411#define temp1           r2      /* careful, it overlaps with input registers */
  412#define temp2           r3      /* careful, it overlaps with input registers */
  413#define temp3           r7
  414#define temp4           r14
// Register aliases used by the stubs below.  temp1/temp2 alias r2/r3,
// which also carry each stub's inputs (return address and stack offset),
// so the inputs must be consumed before temp1/temp2 are written.
 415
 416
 417//++
 418// Name:
 419//      ia64_state_save()
 420//
 421// Stub Description:
 422//
 423//      Save the state that is not in minstate.  This is sensitive to the layout of
 424//      struct ia64_sal_os_state in mca.h.
 425//
 426//      r2 contains the return address, r3 contains either
 427//      IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
 428//
 429//      The OS to SAL section of struct ia64_sal_os_state is set to a default
 430//      value of cold boot (MCA) or warm boot (INIT) and return to the same
 431//      context.  ia64_sal_os_state is also used to hold some registers that
 432//      need to be saved and restored across the stack switches.
 433//
 434//      Most input registers to this stub come from PAL/SAL
 435//      r1  os gp, physical
 436//      r8  pal_proc entry point
 437//      r9  sal_proc entry point
 438//      r10 sal gp
  439//      r11 MCA - rendezvous state, INIT - reason code
 440//      r12 sal return address
 441//      r17 pal min_state
 442//      r18 processor state parameter
 443//      r19 monarch flag, set by the caller of this routine
 444//
 445//      In addition to the SAL to OS state, this routine saves all the
 446//      registers that appear in struct pt_regs and struct switch_stack,
 447//      excluding those that are already in the PAL minstate area.  This
 448//      results in a partial pt_regs and switch_stack, the C code copies the
 449//      remaining registers from PAL minstate to pt_regs and switch_stack.  The
 450//      resulting structures contain all the state of the original process when
 451//      MCA/INIT occurred.
 452//
 453//--
 454
  455ia64_state_save:
        // Save everything not covered by PAL minstate: the SAL handoff
        // registers into struct ia64_sal_os_state, then the extra
        // pt_regs and switch_stack fields.  See the stub description
        // above for the full input register list.
        // p1 <=> running on the MCA stack (os_status defaults to cold
        // boot), p2 <=> INIT stack (defaults to warm boot).
  456        add regs=MCA_SOS_OFFSET, r3
  457        add ms=MCA_SOS_OFFSET+8, r3
  458        mov b0=r2               // save return address
  459        cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
  460        ;;
  461        GET_IA64_MCA_DATA(temp2)
  462        ;;
  463        add temp1=temp2, regs   // struct ia64_sal_os_state on MCA or INIT stack
  464        add temp2=temp2, ms     // struct ia64_sal_os_state+8 on MCA or INIT stack
  465        ;;
        // temp1/temp2 walk even/odd 8-byte slots of sos in parallel,
        // each post-incremented by 16.
  466        mov regs=temp1          // save the start of sos
  467        st8 [temp1]=r1,16       // os_gp
  468        st8 [temp2]=r8,16       // pal_proc
  469        ;;
  470        st8 [temp1]=r9,16       // sal_proc
  471        st8 [temp2]=r11,16      // rv_rc
  472        mov r11=cr.iipa
  473        ;;
  474        st8 [temp1]=r18         // proc_state_param
  475        st8 [temp2]=r19         // monarch
  476        mov r6=IA64_KR(CURRENT)
  477        add temp1=SOS(SAL_RA), regs
  478        add temp2=SOS(SAL_GP), regs
  479        ;;
  480        st8 [temp1]=r12,16      // sal_ra
  481        st8 [temp2]=r10,16      // sal_gp
  482        mov r12=cr.isr
  483        ;;
  484        st8 [temp1]=r17,16      // pal_min_state
  485        st8 [temp2]=r6,16       // prev_IA64_KR_CURRENT
  486        mov r6=IA64_KR(CURRENT_STACK)
  487        ;;
  488        st8 [temp1]=r6,16       // prev_IA64_KR_CURRENT_STACK
  489        st8 [temp2]=r0,16       // prev_task, starts off as NULL
  490        mov r6=cr.ifa
  491        ;;
  492        st8 [temp1]=r12,16      // cr.isr
  493        st8 [temp2]=r6,16       // cr.ifa
  494        mov r12=cr.itir
  495        ;;
  496        st8 [temp1]=r12,16      // cr.itir
  497        st8 [temp2]=r11,16      // cr.iipa
  498        mov r12=cr.iim
  499        ;;
  500        st8 [temp1]=r12         // cr.iim
  501(p1)    mov r12=IA64_MCA_COLD_BOOT
  502(p2)    mov r12=IA64_INIT_WARM_BOOT
  503        mov r6=cr.iha
  504        add temp1=SOS(OS_STATUS), regs
  505        ;;
  506        st8 [temp2]=r6          // cr.iha
  507        add temp2=SOS(CONTEXT), regs
  508        st8 [temp1]=r12         // os_status, default is cold boot
  509        mov r6=IA64_MCA_SAME_CONTEXT
  510        ;;
  511        st8 [temp2]=r6          // context, default is same context
  512
  513        // Save the pt_regs data that is not in minstate.  The previous code
  514        // left regs at sos.
  515        add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
  516        ;;
  517        add temp1=PT(B6), regs
  518        mov temp3=b6
  519        mov temp4=b7
  520        add temp2=PT(B7), regs
  521        ;;
  522        st8 [temp1]=temp3,PT(AR_CSD)-PT(B6)             // save b6
  523        st8 [temp2]=temp4,PT(AR_SSD)-PT(B7)             // save b7
  524        mov temp3=ar.csd
  525        mov temp4=ar.ssd
  526        cover                                           // must be last in group
  527        ;;
  528        st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD)        // save ar.csd
  529        st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD)         // save ar.ssd
  530        mov temp3=ar.unat
  531        mov temp4=ar.pfs
  532        ;;
  533        st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT)       // save ar.unat
  534        st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS)    // save ar.pfs
  535        mov temp3=ar.rnat
  536        mov temp4=ar.bspstore
  537        ;;
  538        st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT)        // save ar.rnat
  539        st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE)   // save ar.bspstore
  540        mov temp3=ar.bsp
  541        ;;
  542        sub temp3=temp3, temp4  // ar.bsp - ar.bspstore
  543        mov temp4=ar.fpsr
  544        ;;
  545        shl temp3=temp3,16      // compute ar.rsc to be used for "loadrs"
  546        ;;
  547        st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS)         // save loadrs
  548        st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR)            // save ar.fpsr
  549        mov temp3=ar.ccv
  550        ;;
  551        st8 [temp1]=temp3,PT(F7)-PT(AR_CCV)             // save ar.ccv
  552        stf.spill [temp2]=f6,PT(F8)-PT(F6)
  553        ;;
  554        stf.spill [temp1]=f7,PT(F9)-PT(F7)
  555        stf.spill [temp2]=f8,PT(F10)-PT(F8)
  556        ;;
  557        stf.spill [temp1]=f9,PT(F11)-PT(F9)
  558        stf.spill [temp2]=f10
  559        ;;
  560        stf.spill [temp1]=f11
  561
  562        // Save the switch_stack data that is not in minstate nor pt_regs.  The
  563        // previous code left regs at pt_regs.
  564        add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
  565        ;;
  566        add temp1=SW(F2), regs
  567        add temp2=SW(F3), regs
  568        ;;
  569        stf.spill [temp1]=f2,32
  570        stf.spill [temp2]=f3,32
  571        ;;
  572        stf.spill [temp1]=f4,32
  573        stf.spill [temp2]=f5,32
  574        ;;
  575        stf.spill [temp1]=f12,32
  576        stf.spill [temp2]=f13,32
  577        ;;
  578        stf.spill [temp1]=f14,32
  579        stf.spill [temp2]=f15,32
  580        ;;
  581        stf.spill [temp1]=f16,32
  582        stf.spill [temp2]=f17,32
  583        ;;
  584        stf.spill [temp1]=f18,32
  585        stf.spill [temp2]=f19,32
  586        ;;
  587        stf.spill [temp1]=f20,32
  588        stf.spill [temp2]=f21,32
  589        ;;
  590        stf.spill [temp1]=f22,32
  591        stf.spill [temp2]=f23,32
  592        ;;
  593        stf.spill [temp1]=f24,32
  594        stf.spill [temp2]=f25,32
  595        ;;
  596        stf.spill [temp1]=f26,32
  597        stf.spill [temp2]=f27,32
  598        ;;
  599        stf.spill [temp1]=f28,32
  600        stf.spill [temp2]=f29,32
  601        ;;
  602        stf.spill [temp1]=f30,SW(B2)-SW(F30)
  603        stf.spill [temp2]=f31,SW(B3)-SW(F31)
  604        mov temp3=b2
  605        mov temp4=b3
  606        ;;
  607        st8 [temp1]=temp3,16    // save b2
  608        st8 [temp2]=temp4,16    // save b3
  609        mov temp3=b4
  610        mov temp4=b5
  611        ;;
  612        st8 [temp1]=temp3,SW(AR_LC)-SW(B4)      // save b4
  613        st8 [temp2]=temp4       // save b5
  614        mov temp3=ar.lc
  615        ;;
  616        st8 [temp1]=temp3       // save ar.lc
  617
  618        // FIXME: Some proms are incorrectly accessing the minstate area as
  619        // cached data.  The C code uses region 6, uncached virtual.  Ensure
  620        // that there is no cache data lying around for the first 1K of the
  621        // minstate area.
  622        // Remove this code in September 2006, that gives platforms a year to
  623        // fix their proms and get their customers updated.
  624
        // r17 = pal minstate base.  4 passes of 8 fc's on 32-byte lines
        // flush 4 * 8 * 32 = 1K.
  625        add r1=32*1,r17
  626        add r2=32*2,r17
  627        add r3=32*3,r17
  628        add r4=32*4,r17
  629        add r5=32*5,r17
  630        add r6=32*6,r17
  631        add r7=32*7,r17
  632        ;;
  633        fc r17
  634        fc r1
  635        fc r2
  636        fc r3
  637        fc r4
  638        fc r5
  639        fc r6
  640        fc r7
  641        add r17=32*8,r17
  642        add r1=32*8,r1
  643        add r2=32*8,r2
  644        add r3=32*8,r3
  645        add r4=32*8,r4
  646        add r5=32*8,r5
  647        add r6=32*8,r6
  648        add r7=32*8,r7
  649        ;;
  650        fc r17
  651        fc r1
  652        fc r2
  653        fc r3
  654        fc r4
  655        fc r5
  656        fc r6
  657        fc r7
  658        add r17=32*8,r17
  659        add r1=32*8,r1
  660        add r2=32*8,r2
  661        add r3=32*8,r3
  662        add r4=32*8,r4
  663        add r5=32*8,r5
  664        add r6=32*8,r6
  665        add r7=32*8,r7
  666        ;;
  667        fc r17
  668        fc r1
  669        fc r2
  670        fc r3
  671        fc r4
  672        fc r5
  673        fc r6
  674        fc r7
  675        add r17=32*8,r17
  676        add r1=32*8,r1
  677        add r2=32*8,r2
  678        add r3=32*8,r3
  679        add r4=32*8,r4
  680        add r5=32*8,r5
  681        add r6=32*8,r6
  682        add r7=32*8,r7
  683        ;;
  684        fc r17
  685        fc r1
  686        fc r2
  687        fc r3
  688        fc r4
  689        fc r5
  690        fc r6
  691        fc r7
  692
  693        br.sptk b0
 694
 695//EndStub//////////////////////////////////////////////////////////////////////
 696
 697
 698//++
 699// Name:
 700//      ia64_state_restore()
 701//
 702// Stub Description:
 703//
 704//      Restore the SAL/OS state.  This is sensitive to the layout of struct
 705//      ia64_sal_os_state in mca.h.
 706//
 707//      r2 contains the return address, r3 contains either
 708//      IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
 709//
 710//      In addition to the SAL to OS state, this routine restores all the
 711//      registers that appear in struct pt_regs and struct switch_stack,
 712//      excluding those in the PAL minstate area.
 713//
 714//--
 715
  716ia64_state_restore:
        // Inverse of ia64_state_save: reload switch_stack, then pt_regs,
        // then the SAL to OS state (leaving r8/r9/r10/r12/r22 set up for
        // the return to SAL), and finally rewire IA64_TR_CURRENT_STACK
        // to map the stack of the task being resumed.
  717        // Restore the switch_stack data that is not in minstate nor pt_regs.
  718        add regs=MCA_SWITCH_STACK_OFFSET, r3
  719        mov b0=r2               // save return address
  720        ;;
  721        GET_IA64_MCA_DATA(temp2)
  722        ;;
  723        add regs=temp2, regs
  724        ;;
  725        add temp1=SW(F2), regs
  726        add temp2=SW(F3), regs
  727        ;;
  728        ldf.fill f2=[temp1],32
  729        ldf.fill f3=[temp2],32
  730        ;;
  731        ldf.fill f4=[temp1],32
  732        ldf.fill f5=[temp2],32
  733        ;;
  734        ldf.fill f12=[temp1],32
  735        ldf.fill f13=[temp2],32
  736        ;;
  737        ldf.fill f14=[temp1],32
  738        ldf.fill f15=[temp2],32
  739        ;;
  740        ldf.fill f16=[temp1],32
  741        ldf.fill f17=[temp2],32
  742        ;;
  743        ldf.fill f18=[temp1],32
  744        ldf.fill f19=[temp2],32
  745        ;;
  746        ldf.fill f20=[temp1],32
  747        ldf.fill f21=[temp2],32
  748        ;;
  749        ldf.fill f22=[temp1],32
  750        ldf.fill f23=[temp2],32
  751        ;;
  752        ldf.fill f24=[temp1],32
  753        ldf.fill f25=[temp2],32
  754        ;;
  755        ldf.fill f26=[temp1],32
  756        ldf.fill f27=[temp2],32
  757        ;;
  758        ldf.fill f28=[temp1],32
  759        ldf.fill f29=[temp2],32
  760        ;;
  761        ldf.fill f30=[temp1],SW(B2)-SW(F30)
  762        ldf.fill f31=[temp2],SW(B3)-SW(F31)
  763        ;;
  764        ld8 temp3=[temp1],16    // restore b2
  765        ld8 temp4=[temp2],16    // restore b3
  766        ;;
  767        mov b2=temp3
  768        mov b3=temp4
  769        ld8 temp3=[temp1],SW(AR_LC)-SW(B4)      // restore b4
  770        ld8 temp4=[temp2]       // restore b5
  771        ;;
  772        mov b4=temp3
  773        mov b5=temp4
  774        ld8 temp3=[temp1]       // restore ar.lc
  775        ;;
  776        mov ar.lc=temp3
  777
  778        // Restore the pt_regs data that is not in minstate.  The previous code
  779        // left regs at switch_stack.
  780        add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
  781        ;;
  782        add temp1=PT(B6), regs
  783        add temp2=PT(B7), regs
  784        ;;
  785        ld8 temp3=[temp1],PT(AR_CSD)-PT(B6)             // restore b6
  786        ld8 temp4=[temp2],PT(AR_SSD)-PT(B7)             // restore b7
  787        ;;
  788        mov b6=temp3
  789        mov b7=temp4
  790        ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD)        // restore ar.csd
  791        ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD)         // restore ar.ssd
  792        ;;
  793        mov ar.csd=temp3
  794        mov ar.ssd=temp4
  795        ld8 temp3=[temp1]                               // restore ar.unat
  796        add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
  797        ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS)        // restore ar.pfs
  798        ;;
  799        mov ar.unat=temp3
  800        mov ar.pfs=temp4
  801        // ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack.
  802        ld8 temp3=[temp1],PT(F6)-PT(AR_CCV)             // restore ar.ccv
  803        ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR)            // restore ar.fpsr
  804        ;;
  805        mov ar.ccv=temp3
  806        mov ar.fpsr=temp4
  807        ldf.fill f6=[temp1],PT(F8)-PT(F6)
  808        ldf.fill f7=[temp2],PT(F9)-PT(F7)
  809        ;;
  810        ldf.fill f8=[temp1],PT(F10)-PT(F8)
  811        ldf.fill f9=[temp2],PT(F11)-PT(F9)
  812        ;;
  813        ldf.fill f10=[temp1]
  814        ldf.fill f11=[temp2]
  815
  816        // Restore the SAL to OS state. The previous code left regs at pt_regs.
  817        add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
  818        ;;
  819        add temp1=SOS(SAL_RA), regs
  820        add temp2=SOS(SAL_GP), regs
  821        ;;
  822        ld8 r12=[temp1],16      // sal_ra
  823        ld8 r9=[temp2],16       // sal_gp
  824        ;;
  825        ld8 r22=[temp1],16      // pal_min_state, virtual
  826        ld8 r13=[temp2],16      // prev_IA64_KR_CURRENT
  827        ;;
  828        ld8 r16=[temp1],16      // prev_IA64_KR_CURRENT_STACK
  829        ld8 r20=[temp2],16      // prev_task
  830        ;;
  831        ld8 temp3=[temp1],16    // cr.isr
  832        ld8 temp4=[temp2],16    // cr.ifa
  833        ;;
  834        mov cr.isr=temp3
  835        mov cr.ifa=temp4
  836        ld8 temp3=[temp1],16    // cr.itir
  837        ld8 temp4=[temp2],16    // cr.iipa
  838        ;;
  839        mov cr.itir=temp3
  840        mov cr.iipa=temp4
  841        ld8 temp3=[temp1]       // cr.iim
  842        ld8 temp4=[temp2]               // cr.iha
  843        add temp1=SOS(OS_STATUS), regs
  844        add temp2=SOS(CONTEXT), regs
  845        ;;
  846        mov cr.iim=temp3
  847        mov cr.iha=temp4
  848        dep r22=0,r22,62,1      // pal_min_state, physical, uncached
  849        mov IA64_KR(CURRENT)=r13
  850        ld8 r8=[temp1]          // os_status
  851        ld8 r10=[temp2]         // context
  852
  853        /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to.  To
  854         * avoid any dependencies on the algorithm in ia64_switch_to(), just
  855         * purge any existing CURRENT_STACK mapping and insert the new one.
  856         *
  857         * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
  858         * prev_IA64_KR_CURRENT, these values may have been changed by the C
  859         * code.  Do not use r8, r9, r10, r22, they contain values ready for
  860         * the return to SAL.
  861         */
  862
  863        mov r15=IA64_KR(CURRENT_STACK)          // physical granule mapped by IA64_TR_CURRENT_STACK
  864        ;;
  865        shl r15=r15,IA64_GRANULE_SHIFT
  866        ;;
  867        dep r15=-1,r15,61,3                     // virtual granule
  868        mov r18=IA64_GRANULE_SHIFT<<2           // for cr.itir.ps
  869        ;;
  870        ptr.d r15,r18
  871        ;;
  872        srlz.d
  873
  874        extr.u r19=r13,61,3                     // r13 = prev_IA64_KR_CURRENT
  875        shl r20=r16,IA64_GRANULE_SHIFT          // r16 = prev_IA64_KR_CURRENT_STACK
  876        movl r21=PAGE_KERNEL                    // page properties
  877        ;;
  878        mov IA64_KR(CURRENT_STACK)=r16
  879        cmp.ne p6,p0=RGN_KERNEL,r19             // new stack is in the kernel region?
  880        or r21=r20,r21                          // construct PA | page properties
  881(p6)    br.spnt 1f                              // the dreaded cpu 0 idle task in region 5:(
  882        ;;
  883        mov cr.itir=r18
  884        mov cr.ifa=r13
  885        mov r20=IA64_TR_CURRENT_STACK
  886        ;;
  887        itr.d dtr[r20]=r21
  888        ;;
  889        srlz.d
  8901:
  891
  892        br.sptk b0
 893
 894//EndStub//////////////////////////////////////////////////////////////////////
 895
 896
 897//++
 898// Name:
 899//      ia64_new_stack()
 900//
 901// Stub Description:
 902//
 903//      Switch to the MCA/INIT stack.
 904//
 905//      r2 contains the return address, r3 contains either
 906//      IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
 907//
 908//      On entry RBS is still on the original stack, this routine switches RBS
 909//      to use the MCA/INIT stack.
 910//
 911//      On entry, sos->pal_min_state is physical, on exit it is virtual.
 912//
 913//--
 914
  915ia64_new_stack:
  916        add regs=MCA_PT_REGS_OFFSET, r3 // offset of pt_regs within the per-cpu MCA/INIT area
  917        add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3 // offset of sos->pal_min_state
  918        mov b0=r2                       // save return address
  919        GET_IA64_MCA_DATA(temp1)        // temp1 = physical base of this cpu's MCA/INIT save area
  920        invala                          // invalidate ALAT before switching register stacks
  921        ;;
  922        add temp2=temp2, temp1          // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
  923        add regs=regs, temp1            // struct pt_regs on MCA or INIT stack
  924        ;;
  925        // Address of minstate area provided by PAL is physical, uncacheable.
  926        // Convert to Linux virtual address in region 6 for C code.
  927        ld8 ms=[temp2]                  // pal_min_state, physical
  928        ;;
  929        dep temp1=-1,ms,62,2            // set region 6
  930        mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET // distance from pt_regs to the RBS area
  931        ;;
  932        st8 [temp2]=temp1               // pal_min_state, virtual
  933
  934        add temp4=temp3, regs           // start of bspstore on new stack
  935        ;;
  936        mov ar.bspstore=temp4           // switch RBS to MCA/INIT stack
  937        ;;
  938        flushrs                         // must be first in group
  939        br.sptk b0                      // return via address saved in b0 above
 940
 941//EndStub//////////////////////////////////////////////////////////////////////
 942
 943
 944//++
 945// Name:
 946//      ia64_old_stack()
 947//
 948// Stub Description:
 949//
 950//      Switch to the old stack.
 951//
 952//      r2 contains the return address, r3 contains either
 953//      IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
 954//
 955//      On entry, pal_min_state is virtual, on exit it is physical.
 956//
 957//      On entry RBS is on the MCA/INIT stack, this routine switches RBS
 958//      back to the previous stack.
 959//
 960//      The psr is set to all zeroes.  SAL return requires either all zeroes or
 961//      just psr.mc set.  Leaving psr.mc off allows INIT to be issued if this
 962//      code does not perform correctly.
 963//
 964//      The dirty registers at the time of the event were flushed to the
 965//      MCA/INIT stack in ia64_pt_regs_save().  Restore the dirty registers
 966//      before reverting to the previous bspstore.
 967//--
 968
  969ia64_old_stack:
  970        add regs=MCA_PT_REGS_OFFSET, r3 // offset of pt_regs within the per-cpu MCA/INIT area
  971        mov b0=r2                       // save return address
  972        GET_IA64_MCA_DATA(temp2)        // temp2 = physical base of this cpu's MCA/INIT save area
  973        LOAD_PHYSICAL(p0,temp1,1f)      // temp1 = physical address of label 1 below
  974        ;;
  975        mov cr.ipsr=r0                  // psr becomes all zeroes after rfi (see stub header)
  976        mov cr.ifs=r0                   // no register frame to restore on rfi
  977        mov cr.iip=temp1                // rfi resumes at 1: in physical mode
  978        ;;
  979        invala                          // invalidate ALAT across the mode switch
  980        rfi
  9811:
  982
  983        add regs=regs, temp2            // struct pt_regs on MCA or INIT stack
  984        ;;
  985        add temp1=PT(LOADRS), regs      // temp1 walks the saved RSE state in pt_regs
  986        ;;
  987        ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS)    // restore loadrs
  988        ;;
  989        ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE)   // restore ar.bspstore
  990        mov ar.rsc=temp2                // install saved rsc/loadrs value for loadrs below
  991        ;;
  992        loadrs                          // pull the dirty registers back from the MCA/INIT RBS
  993        ld8 temp4=[temp1]               // restore ar.rnat
  994        ;;
  995        mov ar.bspstore=temp3           // back to old stack
  996        ;;
  997        mov ar.rnat=temp4
  998        ;;
  999
 1000        br.sptk b0                      // return via address saved in b0 above
1001
1002//EndStub//////////////////////////////////////////////////////////////////////
1003
1004
1005//++
1006// Name:
1007//      ia64_set_kernel_registers()
1008//
1009// Stub Description:
1010//
1011//      Set the registers that are required by the C code in order to run on an
1012//      MCA/INIT stack.
1013//
1014//      r2 contains the return address, r3 contains either
1015//      IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
1016//
1017//--
1018
 1019ia64_set_kernel_registers:
 1020        add temp3=MCA_SP_OFFSET, r3     // offset of the C stack pointer in the area
 1021        mov b0=r2               // save return address
 1022        GET_IA64_MCA_DATA(temp1)        // temp1 = physical base of this cpu's MCA/INIT save area
 1023        ;;
 1024        add r12=temp1, temp3    // kernel stack pointer on MCA/INIT stack
 1025        add r13=temp1, r3       // set current to start of MCA/INIT stack
 1026        add r20=temp1, r3       // physical start of MCA/INIT stack
 1027        ;;
 1028        DATA_PA_TO_VA(r12,temp2)        // convert sp to virtual for C code
 1029        DATA_PA_TO_VA(r13,temp3)        // convert current to virtual for C code
 1030        ;;
 1031        mov IA64_KR(CURRENT)=r13        // publish the new current task pointer
 1032
 1033        /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
 1034         * any dependencies on the algorithm in ia64_switch_to(), just purge
 1035         * any existing CURRENT_STACK mapping and insert the new one.
 1036         */
 1037
 1038        mov r16=IA64_KR(CURRENT_STACK)          // physical granule mapped by IA64_TR_CURRENT_STACK
 1039        ;;
 1040        shl r16=r16,IA64_GRANULE_SHIFT          // granule number -> physical address
 1041        ;;
 1042        dep r16=-1,r16,61,3                     // virtual granule
 1043        mov r18=IA64_GRANULE_SHIFT<<2           // for cr.itir.ps
 1044        ;;
 1045        ptr.d r16,r18                           // purge the old CURRENT_STACK mapping
 1046        ;;
 1047        srlz.d                                  // serialize the purge
 1048
 1049        shr.u r16=r20,IA64_GRANULE_SHIFT        // r20 = physical start of MCA/INIT stack
 1050        movl r21=PAGE_KERNEL                    // page properties
 1051        ;;
 1052        mov IA64_KR(CURRENT_STACK)=r16          // record the new granule number
 1053        or r21=r20,r21                          // construct PA | page properties
 1054        ;;
 1055        mov cr.itir=r18                         // page size for the new translation
 1056        mov cr.ifa=r13                          // virtual address to map (current)
 1057        mov r20=IA64_TR_CURRENT_STACK
 1058
 1059        movl r17=FPSR_DEFAULT
 1060        ;;
 1061        mov.m ar.fpsr=r17                       // set ar.fpsr to kernel default value
 1062        ;;
 1063        itr.d dtr[r20]=r21                      // insert the new translation register
 1064        ;;
 1065        srlz.d                                  // serialize before the mapping is used
 1066
 1067        br.sptk b0                              // return via address saved in b0 above
1068
1069//EndStub//////////////////////////////////////////////////////////////////////
1070
1071#undef  ms
1072#undef  regs
1073#undef  temp1
1074#undef  temp2
1075#undef  temp3
1076#undef  temp4
1077
1078
1079// Support function for mca.c, it is here to avoid using inline asm.  Given the
1080// address of an rnat slot, if that address is below the current ar.bspstore
1081// then return the contents of that slot, otherwise return the contents of
1082// ar.rnat.
 1083GLOBAL_ENTRY(ia64_get_rnat)
 1084        alloc r14=ar.pfs,1,0,0,0        // 1 input: in0 = address of an rnat slot
 1085        mov ar.rsc=0                    // rsc mode 0 (enforced lazy): stop RSE activity
 1086        ;;
 1087        mov r14=ar.bspstore             // r14 = current RSE store pointer
 1088        ;;
 1089        cmp.lt p6,p7=in0,r14            // slot below bspstore => already spilled to memory
 1090        ;;
 1091(p6)    ld8 r8=[in0]                    // yes: read the value from the backing store
 1092(p7)    mov r8=ar.rnat                  // no: the NaT bits still live in ar.rnat
 1093        mov ar.rsc=3                    // rsc mode 3 (eager): resume normal RSE operation
 1094        br.ret.sptk.many rp             // return result in r8 per C convention
 1095END(ia64_get_rnat)
1096
1097
1098// void ia64_set_psr_mc(void)
1099//
1100// Set psr.mc bit to mask MCA/INIT.
 1101GLOBAL_ENTRY(ia64_set_psr_mc)
 1102        rsm psr.i | psr.ic              // disable interrupts
 1103        ;;
 1104        srlz.d                          // serialize the rsm before reading psr
 1105        ;;
 1106        mov r14 = psr                   // get psr{36:35,31:0}
 1107        movl r15 = 1f                   // continuation point after rfi
 1108        ;;
 1109        dep r14 = -1, r14, PSR_MC, 1    // set psr.mc
 1110        ;;
 1111        dep r14 = -1, r14, PSR_IC, 1    // set psr.ic
 1112        ;;
 1113        dep r14 = -1, r14, PSR_BN, 1    // keep bank1 in use
 1114        ;;
 1115        mov cr.ipsr = r14               // new psr to install on rfi
 1116        mov cr.ifs = r0                 // no register frame to restore on rfi
 1117        mov cr.iip = r15
 1118        ;;
 1119        rfi                             // apply the new psr, resume at 1: below
 11201:
 1121        br.ret.sptk.many rp
 1122END(ia64_set_psr_mc)
1123