linux/arch/parisc/kernel/entry.S
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>          /* for L1_CACHE_SHIFT */
#include <asm/assembly.h>       /* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
        .level 2.0w
#else
        .level 2.0
#endif

        .import         pa_dbit_lock,data

        /* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
        .macro  space_to_prot spc prot
        depd,z  \spc,62,31,\prot
        .endm
#else
        .macro  space_to_prot spc prot
        extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
        .endm
#endif
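        /* Illustrative note (not from the architecture manual): with
         * SPACEID_SHIFT == 0 the depd,z above is effectively
         * "prot = spc << 1", placing the space id just above the low
         * bit of the protection id; the extrd,u variant instead shifts
         * the space id right when space ids carry low-order bits that
         * are not part of the protection id. */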

        /* Switch to virtual mapping, trashing only %r1 */
        .macro  virt_map
        /* pcxt_ssm_bug */
        rsm     PSW_SM_I, %r0   /* barrier for "Relied upon Translation" */
        mtsp    %r0, %sr4
        mtsp    %r0, %sr5
        mtsp    %r0, %sr6
        tovirt_r1 %r29
        load32  KERNEL_PSW, %r1

        rsm     PSW_SM_QUIET,%r0        /* second "heavy weight" ctl op */
        mtctl   %r0, %cr17      /* Clear IIASQ tail */
        mtctl   %r0, %cr17      /* Clear IIASQ head */
        mtctl   %r1, %ipsw
        load32  4f, %r1
        mtctl   %r1, %cr18      /* Set IIAOQ tail */
        ldo     4(%r1), %r1
        mtctl   %r1, %cr18      /* Set IIAOQ head */
        rfir
        nop
4:
        .endm
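        /* Descriptive note: the rfir above does not resume the
         * interrupted context.  IIASQ was cleared and IIAOQ was loaded
         * with 4f/4f+4, so the "return" lands on the local label 4:
         * with KERNEL_PSW in effect, i.e. with translation enabled. */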

        /*
         * The "get_stack" macros are responsible for determining the
         * kernel stack value.
         *
         *      If sr7 == 0
         *          Already using a kernel stack, so call the
         *          get_stack_use_r30 macro to push a pt_regs structure
         *          on the stack, and store registers there.
         *      else
         *          Need to set up a kernel stack, so call the
         *          get_stack_use_cr30 macro to set up a pointer
         *          to the pt_regs structure contained within the
         *          task pointer pointed to by cr30. Set the stack
         *          pointer to point to the end of the task structure.
         *
         * Note that we use shadowed registers for temps until
         * we can save %r26 and %r29. %r26 is used to preserve
         * %r8 (a shadowed register) which temporarily contained
         * either the fault type ("code") or the eirr. We need
         * to use a non-shadowed register to carry the value over
         * the rfir in virt_map. We use %r26 since this value winds
         * up being passed as the argument to either do_cpu_irq_mask
         * or handle_interruption. %r29 is used to hold a pointer
         * to the register save area, and once again, it needs to
         * be a non-shadowed register so that it survives the rfir.
         *
         * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
         */
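        /* In rough C-like pseudocode (a sketch, not the literal code):
         *
         *      if (sr7 != 0) {                         // came from user space
         *              regs = &task(cr30)->pt_regs;    // get_stack_use_cr30
         *              sp   = cr30 + THREAD_SZ_ALGN;
         *      } else {                                // already in kernel
         *              regs = sp;                      // get_stack_use_r30
         *              sp  += PT_SZ_ALGN;
         *      }
         *      r29 = phys_addr(regs);
         */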

        .macro  get_stack_use_cr30

        /* we save the registers in the task struct */

        copy    %r30, %r17
        mfctl   %cr30, %r1
        ldo     THREAD_SZ_ALGN(%r1), %r30
        mtsp    %r0,%sr7
        mtsp    %r16,%sr3
        tophys  %r1,%r9
        LDREG   TI_TASK(%r9), %r1       /* thread_info -> task_struct */
        tophys  %r1,%r9
        ldo     TASK_REGS(%r9),%r9
        STREG   %r17,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm

        .macro  get_stack_use_r30

        /* we put a struct pt_regs on the stack and save the registers there */

        tophys  %r30,%r9
        copy    %r30,%r1
        ldo     PT_SZ_ALGN(%r30),%r30
        STREG   %r1,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm

        .macro  rest_stack
        LDREG   PT_GR1(%r29), %r1
        LDREG   PT_GR30(%r29),%r30
        LDREG   PT_GR29(%r29),%r29
        .endm

        /* default interruption handler
         * (calls traps.c:handle_interruption) */
        .macro  def code
        b       intr_save
        ldi     \code, %r8
        .align  32
        .endm

        /* Interrupt interruption handler
         * (calls irq.c:do_cpu_irq_mask) */
        .macro  extint code
        b       intr_extint
        mfsp    %sr7,%r16
        .align  32
        .endm

        .import os_hpmc, code

        /* HPMC handler */
        .macro  hpmc code
        nop                     /* must be a NOP, will be patched later */
        load32  PA(os_hpmc), %r3
        bv,n    0(%r3)
        nop
        .word   0               /* checksum (will be patched) */
        .word   PA(os_hpmc)     /* address of handler */
        .word   0               /* length of handler */
        .endm

        /*
         * Performance Note: Instructions will be moved up into
         * this part of the code later on, once we are sure
         * that the tlb miss handlers are close to final form.
         */

        /* Register definitions for tlb miss handler macros */

        va  = r8        /* virtual address for which the trap occurred */
        spc = r24       /* space for which the trap occurred */

#ifndef CONFIG_64BIT

        /*
         * itlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  itlb_11 code

        mfctl   %pcsq, spc
        b       itlb_miss_11
        mfctl   %pcoq, va

        .align          32
        .endm
#endif

        /*
         * itlb miss interruption handler (parisc 2.0)
         */

        .macro  itlb_20 code
        mfctl   %pcsq, spc
#ifdef CONFIG_64BIT
        b       itlb_miss_20w
#else
        b       itlb_miss_20
#endif
        mfctl   %pcoq, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * naitlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  naitlb_11 code

        mfctl   %isr,spc
        b       naitlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * naitlb miss interruption handler (parisc 2.0)
         */

        .macro  naitlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       naitlb_miss_20w
#else
        b       naitlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dtlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dtlb_11 code

        mfctl   %isr, spc
        b       dtlb_miss_11
        mfctl   %ior, va

        .align          32
        .endm
#endif

        /*
         * dtlb miss interruption handler (parisc 2.0)
         */

        .macro  dtlb_20 code

        mfctl   %isr, spc
#ifdef CONFIG_64BIT
        b       dtlb_miss_20w
#else
        b       dtlb_miss_20
#endif
        mfctl   %ior, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

        .macro  nadtlb_11 code

        mfctl   %isr,spc
        b       nadtlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /* nadtlb miss interruption handler (parisc 2.0) */

        .macro  nadtlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       nadtlb_miss_20w
#else
        b       nadtlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dbit_11 code

        mfctl   %isr,spc
        b       dbit_trap_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * dirty bit trap interruption handler (parisc 2.0)
         */

        .macro  dbit_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       dbit_trap_20w
#else
        b       dbit_trap_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

        /* In LP64, the space register contains part of the upper 32 bits
         * of the faulting address.  We have to extract this and place it
         * in the va, zeroing the corresponding bits in the space register */
        .macro          space_adjust    spc,va,tmp
#ifdef CONFIG_64BIT
        extrd,u         \spc,63,SPACEID_SHIFT,\tmp
        depd            %r0,63,SPACEID_SHIFT,\spc
        depd            \tmp,31,SPACEID_SHIFT,\va
#endif
        .endm
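        /* Roughly, in C-like pseudocode (a sketch):
         *      tmp = spc & ((1UL << SPACEID_SHIFT) - 1);
         *      spc &= ~((1UL << SPACEID_SHIFT) - 1);
         *      va  |= tmp << 32;       // space bits become va bits above 31
         */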

        .import         swapper_pg_dir,code

        /* Get the pgd.  For faults on space zero (kernel space), this
         * is simply swapper_pg_dir.  For user space faults, the
         * pgd is stored in %cr25 */
        .macro          get_pgd         spc,reg
        ldil            L%PA(swapper_pg_dir),\reg
        ldo             R%PA(swapper_pg_dir)(\reg),\reg
        or,COND(=)      %r0,\spc,%r0
        mfctl           %cr25,\reg
        .endm
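        /* Equivalent C sketch: reg = (spc == 0) ? &swapper_pg_dir : %cr25;
         * the or,COND(=) nullifies the mfctl when spc is zero, keeping
         * the swapper_pg_dir address loaded above. */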

        /*
                space_check(spc,tmp,fault)

                spc - The space we saw the fault with.
                tmp - The place to store the current space.
                fault - Function to call on failure.

                Only allow faults on different spaces from the
                currently active one if we're the kernel

        */
        .macro          space_check     spc,tmp,fault
        mfsp            %sr7,\tmp
        or,COND(<>)     %r0,\spc,%r0    /* user may execute gateway page
                                         * as kernel, so defeat the space
                                         * check if it is */
        copy            \spc,\tmp
        or,COND(=)      %r0,\tmp,%r0    /* nullify if executing as kernel */
        cmpb,COND(<>),n \tmp,\spc,\fault
        .endm
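        /* C-like sketch of the checks above (illustrative only):
         *      tmp = (spc == 0) ? 0 : %sr7;    // spc==0: gateway page case
         *      if (tmp != 0 && tmp != spc)     // user touching foreign space
         *              goto fault;
         */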

        /* Look up a PTE in a 2-Level scheme (faulting at each
         * level if the entry isn't present)
         *
         * NOTE: we use ldw even for LP64, since the short pointers
         * can address up to 1TB
         */
        .macro          L2_ptep pmd,pte,index,va,fault
#if PT_NLEVELS == 3
        extru           \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# else
#  if PAGE_SIZE > 4096
        extru           \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
#  else
        extru           \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#  endif
# endif
#endif
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        copy            %r0,\pte
        ldw,s           \index(\pmd),\pmd
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
        dep             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        copy            \pmd,%r9
        SHLREG          %r9,PxD_VALUE_SHIFT,\pmd
        extru           \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
        LDREG           %r0(\pmd),\pte          /* pmd is now pte */
        bb,>=,n         \pte,_PAGE_PRESENT_BIT,\fault
        .endm
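        /* The walk above, as a C-like sketch (physical addressing and
         * entry scaling elided):
         *      pmd = pmd_base[pmd_index(va)];
         *      if (!(pmd & PxD_PRESENT)) goto fault;
         *      pte_table = (pmd & ~PxD_FLAGS) << PxD_VALUE_SHIFT;
         *      pte = pte_table[pte_index(va)];
         *      if (!(pte & _PAGE_PRESENT)) goto fault;
         */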

        /* Look up PTE in a 3-Level scheme.
         *
         * Here we implement a Hybrid L2/L3 scheme: we allocate the
         * first pmd adjacent to the pgd.  This means that we can
         * subtract a constant offset to get to it.  The pmd and pgd
         * sizes are arranged so that a single pmd covers 4GB (giving
         * a full LP64 process access to 8TB) so our lookups are
         * effectively L2 for the first 4GB of the kernel (i.e. for
         * all ILP32 processes and all the kernel for machines with
         * under 4GB of memory) */
        .macro          L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
        copy            %r0,\pte
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        ldw,s           \index(\pgd),\pgd
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        bb,>=,n         \pgd,_PxD_PRESENT_BIT,\fault
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        shld            \pgd,PxD_VALUE_SHIFT,\index
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        copy            \index,\pgd
        extrd,u,*<>     \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        ldo             ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
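        /* Note on the conditional-nullify trick above: each extrd,u,*=
         * nullifies the following insn when va's bits above the pgd index
         * are zero, so for va < 4GB the whole pgd-level lookup is skipped
         * and the ldo (guarded by the opposite extrd,u,*<> test) points
         * us at the pmd allocated adjacent to the pgd instead. */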
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm

        /* Acquire pa_dbit_lock lock. */
        .macro          dbit_lock       spc,tmp,tmp1
#ifdef CONFIG_SMP
        cmpib,COND(=),n 0,\spc,2f
        load32          PA(pa_dbit_lock),\tmp
1:      LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
2:
#endif
        .endm
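        /* Reminder (descriptive note): ldcw is PA-RISC's atomic
         * primitive; it loads the word and clears it in memory, so
         * reading 0 means the lock is already held and we spin on 1b
         * until we read a non-zero value.  Kernel faults (spc == 0)
         * skip the lock entirely. */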

        /* Release pa_dbit_lock lock without reloading lock address. */
        .macro          dbit_unlock0    spc,tmp
#ifdef CONFIG_SMP
        or,COND(=)      %r0,\spc,%r0
        stw             \spc,0(\tmp)
#endif
        .endm

        /* Release pa_dbit_lock lock. */
        .macro          dbit_unlock1    spc,tmp
#ifdef CONFIG_SMP
        load32          PA(pa_dbit_lock),\tmp
        dbit_unlock0    \spc,\tmp
#endif
        .endm

        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
        .macro          update_ptep     spc,ptep,pte,tmp,tmp1
#ifdef CONFIG_SMP
        or,COND(=)      %r0,\spc,%r0
        LDREG           0(\ptep),\pte
#endif
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
        STREG           \tmp,0(\ptep)
        .endm
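        /* In C terms, roughly (a sketch):
         *      if (SMP && spc != 0)            // reload pte under the lock
         *              pte = *ptep;
         *      if (!(pte & _PAGE_ACCESSED))    // store only when needed
         *              *ptep = pte | _PAGE_ACCESSED;
         */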

        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
        .macro          update_dirty    spc,ptep,pte,tmp
#ifdef CONFIG_SMP
        or,COND(=)      %r0,\spc,%r0
        LDREG           0(\ptep),\pte
#endif
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
        STREG           \pte,0(\ptep)
        .endm

        /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
         * and a CPU TLB 4k PFN (4k => 12 bits to shift) */
        #define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        .macro          convert_for_tlb_insert20 pte
        extrd,u         \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
                                64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
        .endm

        /* Convert the pte and prot to tlb insertion values.  How
         * this happens is quite subtle, read below */
        .macro          make_insert_tlb spc,pte,prot
        space_to_prot   \spc \prot        /* create prot id from space */
        /* The following is the real subtlety.  This is depositing
         * T <-> _PAGE_REFTRAP
         * D <-> _PAGE_DIRTY
         * B <-> _PAGE_DMB (memory break)
         *
         * Then incredible subtlety: The access rights are
         * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
         * See 3-14 of the parisc 2.0 manual
         *
         * Finally, _PAGE_READ goes in the top bit of PL1 (so we
         * trigger an access rights trap in user space if the user
         * tries to read an unreadable page) */
        depd            \pte,8,7,\prot

        /* PAGE_USER indicates the page can be read with user privileges,
         * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
         * contains _PAGE_READ) */
        extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
        depdi           7,11,3,\prot
        /* If we're a gateway page, drop PL2 back to zero for promotion
         * to kernel privilege (so we can execute the page as kernel).
         * Any privilege promotion page always denies read and write */
        extrd,u,*=      \pte,_PAGE_GATEWAY_BIT+32,1,%r0
        depd            %r0,11,2,\prot  /* If Gateway, Set PL2 to 0 */

        /* Enforce uncacheable pages.
         * This should ONLY be used for MMIO on PA 2.0 machines.
         * Memory/DMA is cache coherent on all PA2.0 machines we support
         * (that means T-class is NOT supported) and the memory controllers
         * on most of those machines only handle cache transactions.
         */
        extrd,u,*=      \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
        depdi           1,12,1,\prot

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        convert_for_tlb_insert20 \pte
        .endm

        /* Identical macro to make_insert_tlb above, except it
         * makes the tlb entry for the differently formatted pa11
         * insertion instructions */
        .macro          make_insert_tlb_11      spc,pte,prot
        zdep            \spc,30,15,\prot
        dep             \pte,8,7,\prot
        extru,=         \pte,_PAGE_NO_CACHE_BIT,1,%r0
        depi            1,12,1,\prot
        extru,=         \pte,_PAGE_USER_BIT,1,%r0
        depi            7,11,3,\prot   /* Set for user space (1 rsvd for read) */
        extru,=         \pte,_PAGE_GATEWAY_BIT,1,%r0
        depi            0,11,2,\prot    /* If Gateway, Set PL2 to 0 */

        /* Get rid of prot bits and convert to page addr for iitlba */

        depi            0,31,ASM_PFN_PTE_SHIFT,\pte
        SHRREG          \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
        .endm

        /* This is for ILP32 PA2.0 only.  The TLB insertion needs
         * to extend into I/O space if the address is 0xfXXXXXXX
         * so we extend the f's into the top word of the pte in
         * this case */
        .macro          f_extend        pte,tmp
        extrd,s         \pte,42,4,\tmp
        addi,<>         1,\tmp,%r0
        extrd,s         \pte,63,25,\pte
        .endm
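        /* How it works (descriptive note): the first extrd,s pulls out
         * the top nibble of the 32-bit page address, sign-extended;
         * addi,<> then nullifies the final sign-extension unless that
         * nibble was 0xf (i.e. unless tmp + 1 == 0), so only I/O-space
         * addresses get the f's propagated into the upper word. */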

        /* The alias region is an 8MB-aligned 16MB region used to clear
         * and copy user pages at addresses congruent with the user
         * virtual address.
         *
         * To use the alias page, you set %r26 up with the "to" TLB
         * entry (identifying the physical page) and %r23 up with
         * the "from" TLB entry (or nothing if only a "to" entry---for
         * clear_user_page_asm) */
        .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
        /* on LP64, ldi will sign extend into the upper 32 bits,
         * which is behaviour we don't want */
        depdi           0,31,32,\tmp
#endif
        copy            \va,\tmp1
        depi            0,31,23,\tmp1
        cmpb,COND(<>),n \tmp,\tmp1,\fault
        mfctl           %cr19,\tmp      /* iir */
        /* get the opcode (first six bits) into \tmp */
        extrw,u         \tmp,5,6,\tmp
        /*
         * Only setting the T bit prevents data cache movein
         * Setting access rights to zero prevents instruction cache movein
         *
         * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
         * to type field and _PAGE_READ goes to top bit of PL1
         */
        ldi             (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
        /*
         * so if the opcode is one (i.e. this is a memory management
         * instruction) nullify the next load so \prot is only T.
         * Otherwise this is a normal data operation
         */
        cmpiclr,=       0x01,\tmp,%r0
        ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
        depd,z          \prot,8,7,\prot
.else
.ifc \patype,11
        depw,z          \prot,8,7,\prot
.else
        .error "undefined PA type to do_alias"
.endif
.endif
        /*
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
         */
#ifdef CONFIG_64BIT
        extrd,u,*=      \va,41,1,%r0
#else
        extrw,u,=       \va,9,1,%r0
#endif
        or,COND(tr)     %r23,%r0,\pte
        or              %r26,%r0,\pte
        .endm


        /*
         * Align fault_vector_20 on 4K boundary so that both
         * fault_vector_11 and fault_vector_20 are on the
         * same page. This is only necessary as long as we
         * write protect the kernel text, which we may stop
         * doing once we use large page translations to cover
         * the static part of the kernel address space.
         */

        .text

        .align 4096

ENTRY(fault_vector_20)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_20          6
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_20         15
        naitlb_20       16
        nadtlb_20       17
        def             18
        def             19
        dbit_20         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_20)

#ifndef CONFIG_64BIT

        .align 2048

ENTRY(fault_vector_11)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_11          6
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_11         15
        naitlb_11       16
        nadtlb_11       17
        def             18
        def             19
        dbit_11         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_11)

#endif
        /* Fault vector is separately protected and *must* be on its own page */
        .align          PAGE_SIZE
ENTRY(end_fault_vector)

        .import         handle_interruption,code
        .import         do_cpu_irq_mask,code

        /*
         * Child Returns here
         *
         * copy_thread moved args into task save area.
         */

ENTRY(ret_from_kernel_thread)

        /* Call schedule_tail first though */
        BL      schedule_tail, %r2
        nop

        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
        LDREG   TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
        LDREG   TASK_PT_GR27(%r1), %r27
#endif
        LDREG   TASK_PT_GR26(%r1), %r1
        ble     0(%sr7, %r1)
        copy    %r31, %r2
        b       finish_child_return
        nop
ENDPROC(ret_from_kernel_thread)


        /*
         * struct task_struct *_switch_to(struct task_struct *prev,
         *      struct task_struct *next)
         *
         * switch kernel stacks and return prev */
ENTRY(_switch_to)
        STREG    %r2, -RP_OFFSET(%r30)

        callee_save_float
        callee_save

        load32  _switch_to_ret, %r2

        STREG   %r2, TASK_PT_KPC(%r26)
        LDREG   TASK_PT_KPC(%r25), %r2

        STREG   %r30, TASK_PT_KSP(%r26)
        LDREG   TASK_PT_KSP(%r25), %r30
        LDREG   TASK_THREAD_INFO(%r25), %r25
        bv      %r0(%r2)
        mtctl   %r25,%cr30

_switch_to_ret:
        mtctl   %r0, %cr0               /* Needed for single stepping */
        callee_rest
        callee_rest_float

        LDREG   -RP_OFFSET(%r30), %r2
        bv      %r0(%r2)
        copy    %r26, %r28
ENDPROC(_switch_to)

        /*
         * Common rfi return path for interruptions, kernel execve, and
         * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
         * return via this path if the signal was received when the process
         * was running; if the process was blocked on a syscall then the
         * normal syscall_exit path is used.  All syscalls for traced
         * processes exit via intr_restore.
         *
         * XXX If any syscalls that change a process's space id ever exit
         * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
         * adjust IASQ[0..1].
         *
         */

        .align  PAGE_SIZE

ENTRY(syscall_exit_rfi)
        mfctl   %cr30,%r16
        LDREG   TI_TASK(%r16), %r16     /* thread_info -> task_struct */
        ldo     TASK_REGS(%r16),%r16
        /* Force iaoq to userspace, as the user has had access to our current
         * context via sigcontext. Also filter the PSW for the same reason.
         */
        LDREG   PT_IAOQ0(%r16),%r19
        depi    3,31,2,%r19
        STREG   %r19,PT_IAOQ0(%r16)
        LDREG   PT_IAOQ1(%r16),%r19
        depi    3,31,2,%r19
        STREG   %r19,PT_IAOQ1(%r16)
        LDREG   PT_PSW(%r16),%r19
        load32  USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
        load32  USER_PSW_HI_MASK,%r20
        depd    %r20,31,32,%r1
#endif
        and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
        load32  USER_PSW,%r1
        or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
        STREG   %r19,PT_PSW(%r16)

        /*
         * If we aren't being traced, we never saved space registers
         * (we don't store them in the sigcontext), so set them
         * to "proper" values now (otherwise we'll wind up restoring
         * whatever was last stored in the task structure, which might
         * be inconsistent if an interrupt occurred while on the gateway
         * page). Note that we may be "trashing" values the user put in
         * them, but we don't support the user changing them.
         */

        STREG   %r0,PT_SR2(%r16)
        mfsp    %sr3,%r19
        STREG   %r19,PT_SR0(%r16)
        STREG   %r19,PT_SR1(%r16)
        STREG   %r19,PT_SR3(%r16)
        STREG   %r19,PT_SR4(%r16)
        STREG   %r19,PT_SR5(%r16)
        STREG   %r19,PT_SR6(%r16)
        STREG   %r19,PT_SR7(%r16)

intr_return:
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
        bb,<,n  %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

        .import do_notify_resume,code
intr_check_sig:
        /* As above */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19
        ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
        and,COND(<>)    %r19, %r20, %r0
        b,n     intr_restore    /* skip past if we've nothing to do */

        /* This check is critical to having LWS
         * working. The IASQ is zero on the gateway
         * page and we cannot deliver any signals until
         * we get off the gateway page.
         *
         * Only do signals if we are returning to user space
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */

        /* NOTE: We need to enable interrupts if we have to deliver
         * signals. We used to do this earlier but it caused kernel
         * stack overflows. */
        ssm     PSW_SM_I, %r0

        copy    %r0, %r25                       /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
#endif

        BL      do_notify_resume,%r2
        copy    %r16, %r26                      /* struct pt_regs *regs */

        b,n     intr_check_sig

intr_restore:
        copy            %r16,%r29
        ldo             PT_FR31(%r29),%r1
        rest_fp         %r1
        rest_general    %r29

        /* inverse of virt_map */
        pcxt_ssm_bug
        rsm             PSW_SM_QUIET,%r0        /* prepare for rfi */
        tophys_r1       %r29

        /* Restore space id's and special cr's from PT_REGS
         * structure pointed to by r29
         */
        rest_specials   %r29

        /* IMPORTANT: rest_stack restores r29 last (we are using it)!
         * It also restores r1 and r30.
         */
        rest_stack

        rfi
        nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt        intr_restore
#endif /* !CONFIG_PREEMPT */

        .import schedule,code
intr_do_resched:
        /* Only call schedule on return to userspace. If we're returning
         * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
         * we jump back to intr_restore.
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop

        /* NOTE: We need to enable interrupts if we schedule.  We used
         * to do this earlier but it caused kernel stack overflows. */
        ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
#endif

        ldil    L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
        b       schedule
#else
        load32  schedule, %r20
        bv      %r0(%r20)
#endif
        ldo     R%intr_check_sig(%r2), %r2

        /* Preempt the current task on returning to kernel
         * mode from an interrupt, iff need_resched is set
         * and preempt_count is 0; otherwise, we continue on
         * our merry way back to the current running task.
         */
#ifdef CONFIG_PREEMPT
        .import preempt_schedule_irq,code
intr_do_preempt:
        rsm     PSW_SM_I, %r0           /* disable interrupts */

        /* current_thread_info()->preempt_count */
        mfctl   %cr30, %r1
        LDREG   TI_PRE_COUNT(%r1), %r19
        cmpib,COND(<>)  0, %r19, intr_restore   /* if preempt_count > 0 */
        nop                             /* prev insn branched backwards */

        /* check if we interrupted a critical path */
        LDREG   PT_PSW(%r16), %r20
        bb,<,n  %r20, 31 - PSW_SM_I, intr_restore
        nop

        BL      preempt_schedule_irq, %r2
        nop

        b,n     intr_restore            /* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

        /*
         * External interrupts.
         */

intr_extint:
        cmpib,COND(=),n 0,%r16,1f

        get_stack_use_cr30
        b,n 2f

1:
        get_stack_use_r30
2:
        save_specials   %r29
        virt_map
        save_general    %r29

        ldo     PT_FR0(%r29), %r24
        save_fp %r24

        loadgp

        copy    %r29, %r26      /* arg0 is pt_regs */
        copy    %r29, %r16      /* save pt_regs */

        ldil    L%intr_return, %r2

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29  /* Reference param save area */
#endif

        b       do_cpu_irq_mask
        ldo     R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)


        /* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY(intr_save)                /* for os_hpmc */
        mfsp    %sr7,%r16
        cmpib,COND(=),n 0,%r16,1f
        get_stack_use_cr30
        b       2f
        copy    %r8,%r26

1:
        get_stack_use_r30
        copy    %r8,%r26

2:
        save_specials   %r29

        /* If this trap is an itlb miss, skip saving/adjusting isr/ior */

        /*
         * FIXME: 1) Use a #define for the hardwired "6" below (and in
         *           traps.c).
         *        2) Once we start executing code above 4 GB, we need
         *           to adjust iasq/iaoq here in the same way we
         *           adjust isr/ior below.
         */

        cmpib,COND(=),n        6,%r26,skip_save_ior


        mfctl           %cr20, %r16 /* isr */
        nop             /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
        mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
        /*
         * If the interrupted code was running with W bit off (32 bit),
         * clear the b bits (bits 0 & 1) in the ior.
         * save_specials left ipsw value in r8 for us to test.
         */
        extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
        depdi           0,1,2,%r17

        /*
         * FIXME: This code has hardwired assumptions about the split
         *        between space bits and offset bits. This will change
         *        when we allow alternate page sizes.
         */

        /* adjust isr/ior. */
        extrd,u         %r16,63,SPACEID_SHIFT,%r1       /* get high bits from isr for ior */
        depd            %r1,31,SPACEID_SHIFT,%r17       /* deposit them into ior */
        depdi           0,63,SPACEID_SHIFT,%r16         /* clear them from isr */
#endif
        STREG           %r16, PT_ISR(%r29)
        STREG           %r17, PT_IOR(%r29)


skip_save_ior:
        virt_map
        save_general    %r29

        ldo             PT_FR0(%r29), %r25
        save_fp         %r25

        loadgp

        copy            %r29, %r25      /* arg1 is pt_regs */
#ifdef CONFIG_64BIT
        ldo             -16(%r30),%r29  /* Reference param save area */
#endif

        ldil            L%intr_check_sig, %r2
        copy            %r25, %r16      /* save pt_regs */

        b               handle_interruption
        ldo             R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)


        /*
         * Note for all tlb miss handlers:
         *
         * cr24 contains a pointer to the kernel address space
         * page directory.
         *
         * cr25 contains a pointer to the current user address
         * space page directory.
         *
         * sr3 will contain the space id of the user address space
         * of the current running thread while that thread is
         * running in the kernel.
         */

        /*
         * register number allocations.  Note that these are all
         * in the shadowed registers
         */

        t0 = r1         /* temporary register 0 */
        va = r8         /* virtual address for which the trap occurred */
        t1 = r9         /* temporary register 1 */
        pte  = r16      /* pte/phys page # */
        prot = r17      /* prot bits */
        spc  = r24      /* space for which the trap occurred */
        ptp = r25       /* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        idtlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

dtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        rfir
        nop

nadtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,nadtlb_fault

        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        idtlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

nadtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        rfir
        nop

#else

dtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t0, %sr1        /* Restore sr1 */
        dbit_unlock1    spc,t0

        rfir
        nop

dtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        rfir
        nop

nadtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot


        mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t0, %sr1        /* Restore sr1 */
        dbit_unlock1    spc,t0

        rfir
        nop

nadtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        rfir
        nop

dtlb_miss_20:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        f_extend        pte,t0

        idtlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

dtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        rfir
        nop

nadtlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        f_extend        pte,t0

        idtlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

nadtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        rfir
        nop

#endif

nadtlb_emulate:

        /*
         * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
         * probei instructions. We don't want to fault for these
         * instructions (not only does it not make sense, it can cause
         * deadlocks, since some flushes are done with the mmap
         * semaphore held). If the translation doesn't exist, we can't
         * insert a translation, so have to emulate the side effects
         * of the instruction. Since we don't insert a translation
         * we can get a lot of faults during a flush loop, so it makes
         * sense to try to do it here with minimum overhead. We only
         * emulate fdc,fic,pdc,probew,prober instructions whose base
         * and index registers are not shadowed. We defer everything
         * else to the "slow" path.
         */

        mfctl           %cr19,%r9 /* Get iir */

        /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
           Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

        /* Checks for fdc,fdce,pdc,"fic,4f" only */
        ldi             0x280,%r16
        and             %r9,%r16,%r17
        cmpb,<>,n       %r16,%r17,nadtlb_probe_check
        bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
        BL              get_register,%r25
        extrw,u         %r9,15,5,%r8           /* Get index register # */
        cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
        copy            %r1,%r24
        BL              get_register,%r25
        extrw,u         %r9,10,5,%r8           /* Get base register # */
        cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
        BL              set_register,%r25
        add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
        mfctl           %ipsw,%r8
        ldil            L%PSW_N,%r9
        or              %r8,%r9,%r8            /* Set PSW_N */
        mtctl           %r8,%ipsw

        rfir
        nop

        /*
                When there is no translation for the probe address then we
                must nullify the insn and return zero in the target register.
                This will indicate to the calling code that it does not have
                write/read privileges to this address.

                This should technically work for prober and probew in PA 1.1,
                and also probe,r and probe,w in PA 2.0

                WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
                THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

        */
nadtlb_probe_check:
        ldi             0x80,%r16
        and             %r9,%r16,%r17
        cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
        BL              get_register,%r25      /* Find the target register */
        extrw,u         %r9,31,5,%r8           /* Get target register */
        cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
        BL              set_register,%r25
        copy            %r0,%r1                /* Write zero to target register */
        b nadtlb_nullify                       /* Nullify return insn */
        nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,itlb_fault

        L3_ptep         ptp,pte,t0,va,itlb_fault

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        iitlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

naitlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,naitlb_fault

        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        iitlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

naitlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        rfir
        nop

#else

itlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t0, %sr1        /* Restore sr1 */
        dbit_unlock1    spc,t0

        rfir
        nop

naitlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t0, %sr1        /* Restore sr1 */
        dbit_unlock1    spc,t0

        rfir
        nop

naitlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,itlb_fault,11

        iitlba          pte,(%sr0, va)
        iitlbp          prot,(%sr0, va)

        rfir
        nop


itlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        f_extend        pte,t0

        iitlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

naitlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20

        dbit_lock       spc,t0,t1
        update_ptep     spc,ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot

        f_extend        pte,t0

        iitlbt          pte,prot
        dbit_unlock1    spc,t0

        rfir
        nop

naitlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        rfir
        nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dbit_fault

        L3_ptep         ptp,pte,t0,va,dbit_fault

        dbit_lock       spc,t0,t1
        update_dirty    spc,ptp,pte,t1

        make_insert_tlb spc,pte,prot

        idtlbt          pte,prot
        dbit_unlock0    spc,t0

        rfir
        nop
#else

dbit_trap_11:

        get_pgd         spc,ptp

        space_check     spc,t0,dbit_fault

        L2_ptep         ptp,pte,t0,va,dbit_fault

        dbit_lock       spc,t0,t1
        update_dirty    spc,ptp,pte,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1     /* Restore sr1 */
        dbit_unlock0    spc,t0

        rfir
        nop

dbit_trap_20:
        get_pgd         spc,ptp

        space_check     spc,t0,dbit_fault

        L2_ptep         ptp,pte,t0,va,dbit_fault

        dbit_lock       spc,t0,t1
        update_dirty    spc,ptp,pte,t1

        make_insert_tlb spc,pte,prot

        f_extend        pte,t1

        idtlbt          pte,prot
        dbit_unlock0    spc,t0

        rfir
        nop
#endif

        .import handle_interruption,code

kernel_bad_space:
        b               intr_save
        ldi             31,%r8  /* Use an unused code */

dbit_fault:
        b               intr_save
        ldi             20,%r8

itlb_fault:
        b               intr_save
        ldi             6,%r8

nadtlb_fault:
        b               intr_save
        ldi             17,%r8

naitlb_fault:
        b               intr_save
        ldi             16,%r8

dtlb_fault:
        b               intr_save
        ldi             15,%r8

        /* Register saving semantics for system calls:

           %r1             clobbered by system call macro in userspace
           %r2             saved in PT_REGS by gateway page
           %r3  - %r18     preserved by C code (saved by signal code)
           %r19 - %r20     saved in PT_REGS by gateway page
           %r21 - %r22     non-standard syscall args
                           stored in kernel stack by gateway page
           %r23 - %r26     arg3-arg0, saved in PT_REGS by gateway page
           %r27 - %r30     saved in PT_REGS by gateway page
           %r31            syscall return pointer
         */
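        /* For orientation, a typical userspace invocation looks roughly
         * like this (illustrative sketch, not part of this file; the
         * stub branches to the gateway page via %sr2, with the syscall
         * number in %r20 and arguments in %r26..%r23):
         *
         *      ble     0x100(%sr2, %r0)        ; enter gateway page
         *      ldi     __NR_write, %r20        ; delay slot: syscall number
         *
         * ble leaves the return address in %r31, which is the "syscall
         * return pointer" noted above; the result comes back in %r28. */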
1667
1668        /* Floating point registers (FIXME: what do we do with these?)
1669
1670           %fr0  - %fr3    status/exception, not preserved
1671           %fr4  - %fr7    arguments
1672           %fr8  - %fr11   not preserved by C code
1673           %fr12 - %fr21   preserved by C code
1674           %fr22 - %fr31   not preserved by C code
1675         */

        .macro  reg_save regs
        STREG   %r3, PT_GR3(\regs)
        STREG   %r4, PT_GR4(\regs)
        STREG   %r5, PT_GR5(\regs)
        STREG   %r6, PT_GR6(\regs)
        STREG   %r7, PT_GR7(\regs)
        STREG   %r8, PT_GR8(\regs)
        STREG   %r9, PT_GR9(\regs)
        STREG   %r10,PT_GR10(\regs)
        STREG   %r11,PT_GR11(\regs)
        STREG   %r12,PT_GR12(\regs)
        STREG   %r13,PT_GR13(\regs)
        STREG   %r14,PT_GR14(\regs)
        STREG   %r15,PT_GR15(\regs)
        STREG   %r16,PT_GR16(\regs)
        STREG   %r17,PT_GR17(\regs)
        STREG   %r18,PT_GR18(\regs)
        .endm

        .macro  reg_restore regs
        LDREG   PT_GR3(\regs), %r3
        LDREG   PT_GR4(\regs), %r4
        LDREG   PT_GR5(\regs), %r5
        LDREG   PT_GR6(\regs), %r6
        LDREG   PT_GR7(\regs), %r7
        LDREG   PT_GR8(\regs), %r8
        LDREG   PT_GR9(\regs), %r9
        LDREG   PT_GR10(\regs),%r10
        LDREG   PT_GR11(\regs),%r11
        LDREG   PT_GR12(\regs),%r12
        LDREG   PT_GR13(\regs),%r13
        LDREG   PT_GR14(\regs),%r14
        LDREG   PT_GR15(\regs),%r15
        LDREG   PT_GR16(\regs),%r16
        LDREG   PT_GR17(\regs),%r17
        LDREG   PT_GR18(\regs),%r18
        .endm

        .macro  fork_like name
ENTRY(sys_\name\()_wrapper)
        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
        ldo     TASK_REGS(%r1),%r1
        reg_save %r1
        mfctl   %cr27, %r28
        ldil    L%sys_\name, %r31
        be      R%sys_\name(%sr4,%r31)
        STREG   %r28, PT_CR27(%r1)
ENDPROC(sys_\name\()_wrapper)
        .endm

fork_like clone
fork_like fork
fork_like vfork
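        /* For example, "fork_like clone" above expands to
         * sys_clone_wrapper: it locates this task's pt_regs on the
         * kernel stack, saves the callee-saved registers and %cr27
         * there (so the child can be built from a complete register
         * set), then branches to sys_clone. */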

        /* Set the return value for the child */
ENTRY(child_return)
        BL      schedule_tail, %r2
        nop
finish_child_return:
        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
        ldo     TASK_REGS(%r1),%r1       /* get pt regs */

        LDREG   PT_CR27(%r1), %r3
        mtctl   %r3, %cr27
        reg_restore %r1
        b       syscall_exit
        copy    %r0,%r28        /* child's return value is 0 */
ENDPROC(child_return)

ENTRY(sys_rt_sigreturn_wrapper)
        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
        ldo     TASK_REGS(%r26),%r26    /* get pt regs */
        /* Don't save regs, we are going to restore them from sigcontext. */
        STREG   %r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
        ldo     FRAME_SIZE(%r30), %r30
        BL      sys_rt_sigreturn,%r2
        ldo     -16(%r30),%r29          /* Reference param save area */
#else
        BL      sys_rt_sigreturn,%r2
        ldo     FRAME_SIZE(%r30), %r30
#endif

        ldo     -FRAME_SIZE(%r30), %r30
        LDREG   -RP_OFFSET(%r30), %r2

        /* FIXME: I think we need to restore a few more things here. */
        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
        ldo     TASK_REGS(%r1),%r1      /* get pt regs */
        reg_restore %r1

        /* If the signal was received while the process was blocked on a
         * syscall, then r2 will take us to syscall_exit; otherwise r2 will
         * take us to syscall_exit_rfi and on to intr_return.
         */
        bv      %r0(%r2)
        LDREG   PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
        /* NOTE: HP-UX syscalls also come through here
         * after hpux_syscall_exit fixes up return
         * values. */

        /* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
         * via syscall_exit_rfi if the signal was received while the process
         * was running.
         */

        /* save return value now */

        mfctl     %cr30, %r1
        LDREG     TI_TASK(%r1),%r1
        STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
        ldw     TASK_PERSONALITY(%r1),%r19

        /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
        ldo       -PER_HPUX(%r19), %r19
        cmpib,COND(<>),n 0,%r19,1f

        /* Save other hpux returns if personality is PER_HPUX */
        STREG     %r22,TASK_PT_GR22(%r1)
        STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

        /* Seems to me that dp could be wrong here, if the syscall involved
         * calling a module, and nothing got round to restoring dp on return.
         */
        loadgp

syscall_check_resched:

        /* check for reschedule */

        LDREG   TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19   /* long */
        bb,<,n  %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

        .import do_signal,code
syscall_check_sig:
        LDREG   TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
        ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
        and,COND(<>)    %r19, %r26, %r0
        b,n     syscall_restore /* skip past if we've nothing to do */

syscall_do_signal:
        /* Save callee-save registers (for sigcontext).
         * FIXME: After this point the process structure should be
         * consistent with all the relevant state of the process
         * before the syscall.  We need to verify this.
         */
        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
        ldo     TASK_REGS(%r1), %r26            /* struct pt_regs *regs */
        reg_save %r26

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
#endif

        BL      do_notify_resume,%r2
        ldi     1, %r25                         /* long in_syscall = 1 */

        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
        ldo     TASK_REGS(%r1), %r20            /* reload pt_regs */
        reg_restore %r20

        b,n     syscall_check_sig

syscall_restore:
        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

        /* Are we being ptraced? */
        ldw     TASK_FLAGS(%r1),%r19
        ldi     _TIF_SYSCALL_TRACE_MASK,%r2
        and,COND(=)     %r19,%r2,%r0
        b,n     syscall_restore_rfi

        ldo     TASK_PT_FR31(%r1),%r19             /* reload fpregs */
        rest_fp %r19

        LDREG   TASK_PT_SAR(%r1),%r19              /* restore SAR */
        mtsar   %r19

        LDREG   TASK_PT_GR2(%r1),%r2               /* restore user rp */
        LDREG   TASK_PT_GR19(%r1),%r19
        LDREG   TASK_PT_GR20(%r1),%r20
        LDREG   TASK_PT_GR21(%r1),%r21
        LDREG   TASK_PT_GR22(%r1),%r22
        LDREG   TASK_PT_GR23(%r1),%r23
        LDREG   TASK_PT_GR24(%r1),%r24
        LDREG   TASK_PT_GR25(%r1),%r25
        LDREG   TASK_PT_GR26(%r1),%r26
        LDREG   TASK_PT_GR27(%r1),%r27     /* restore user dp */
        LDREG   TASK_PT_GR28(%r1),%r28     /* syscall return value */
        LDREG   TASK_PT_GR29(%r1),%r29
        LDREG   TASK_PT_GR31(%r1),%r31     /* restore syscall rp */
        /* NOTE: We use an rsm/ssm pair to make this switch atomic:
         * interrupts stay disabled while %r30 and %sr7 take their user
         * values, so an interruption can never observe a kernel %sr7
         * paired with a user stack pointer. */
        LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
        rsm     PSW_SM_I, %r0
        copy    %r1,%r30                           /* Restore user sp */
        mfsp    %sr3,%r1                           /* Get user space id */
        mtsp    %r1,%sr7                           /* Restore sr7 */
        ssm     PSW_SM_I, %r0

        /* Set sr2 to zero for userspace syscalls to work. */
        mtsp    %r0,%sr2
        mtsp    %r1,%sr4                           /* Restore sr4 */
        mtsp    %r1,%sr5                           /* Restore sr5 */
        mtsp    %r1,%sr6                           /* Restore sr6 */

        depi    3,31,2,%r31                        /* ensure return to user mode. */
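        /* (The depi above forces the two low bits of the return offset,
         * the privilege level field, to 3, the least-privileged level.) */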

#ifdef CONFIG_64BIT
        /* decide whether to reset the wide mode bit
         *
         * For a syscall, the W bit is stored in the lowest bit
         * of sp.  Extract it and reset W if it is zero */
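        /* In other words: extrd,u pulls sp's lowest bit (PA bit 63) into
         * %r1; the *<> completer nullifies the rsm when that bit is 1, so
         * W stays set for wide-mode callers.  The xor then clears the
         * flag bit from sp either way (xor with 0 is a no-op). */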
        extrd,u,*<>     %r30,63,1,%r1
        rsm     PSW_SM_W, %r0
        /* now reset the lowest bit of sp if it was set */
        xor     %r30,%r1,%r30
#endif
        be,n    0(%sr3,%r31)                       /* return to user space */

        /* We have to return via an RFI, so that PSW T and R bits can be set
         * appropriately.
         * This sets up pt_regs so we can return via intr_restore, which is not
         * the most efficient way of doing things, but it works.
         */
syscall_restore_rfi:
        ldo     -1(%r0),%r2                        /* Set recovery cntr to -1 */
        mtctl   %r2,%cr0                           /*   for immediate trap */
        LDREG   TASK_PT_PSW(%r1),%r2               /* Get old PSW */
        ldi     0x0b,%r20                          /* Create new PSW */
        depi    -1,13,1,%r20                       /* C, Q, D, and I bits */

        /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
         * set in thread_info.h and converted to PA bitmap
         * numbers in asm-offsets.c */

        /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
        extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
        depi    -1,27,1,%r20                       /* R bit */

        /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
        extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
        depi    -1,7,1,%r20                        /* T bit */

        STREG   %r20,TASK_PT_PSW(%r1)

        /* Always store space registers, since sr3 can be changed (e.g. fork) */

        mfsp    %sr3,%r25
        STREG   %r25,TASK_PT_SR3(%r1)
        STREG   %r25,TASK_PT_SR4(%r1)
        STREG   %r25,TASK_PT_SR5(%r1)
        STREG   %r25,TASK_PT_SR6(%r1)
        STREG   %r25,TASK_PT_SR7(%r1)
        STREG   %r25,TASK_PT_IASQ0(%r1)
        STREG   %r25,TASK_PT_IASQ1(%r1)

        /* XXX W bit??? */
        /* Now if old D bit is clear, it means we didn't save all registers
         * on syscall entry, so do that now.  This only happens on TRACEME
         * calls, or if someone attached to us while we were on a syscall.
         * We could make this more efficient by not saving r3-r18, but
         * then we wouldn't be able to use the common intr_restore path.
         * It is only for traced processes anyway, so performance is not
         * an issue.
         */
        bb,<    %r2,30,pt_regs_ok                  /* Branch if D set */
        ldo     TASK_REGS(%r1),%r25
        reg_save %r25                              /* Save r3 to r18 */

        /* Save the current sr */
        mfsp    %sr0,%r2
        STREG   %r2,TASK_PT_SR0(%r1)

        /* Save the scratch sr */
        mfsp    %sr1,%r2
        STREG   %r2,TASK_PT_SR1(%r1)

        /* sr2 should be set to zero for userspace syscalls */
        STREG   %r0,TASK_PT_SR2(%r1)

        LDREG   TASK_PT_GR31(%r1),%r2
        depi    3,31,2,%r2                 /* ensure return to user mode. */
        STREG   %r2,TASK_PT_IAOQ0(%r1)
        ldo     4(%r2),%r2
        STREG   %r2,TASK_PT_IAOQ1(%r1)
        b       intr_restore
        copy    %r25,%r16

pt_regs_ok:
        LDREG   TASK_PT_IAOQ0(%r1),%r2
        depi    3,31,2,%r2                 /* ensure return to user mode. */
        STREG   %r2,TASK_PT_IAOQ0(%r1)
        LDREG   TASK_PT_IAOQ1(%r1),%r2
        depi    3,31,2,%r2
        STREG   %r2,TASK_PT_IAOQ1(%r1)
        b       intr_restore
        copy    %r25,%r16

        .import schedule,code
syscall_do_resched:
        BL      schedule,%r2
#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
#else
        nop
#endif
        b       syscall_check_resched   /* if resched, we start over again */
        nop
ENDPROC(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER
        .import ftrace_function_trampoline,code
        /* _mcount is the hook emitted by the compiler's -pg
         * instrumentation at function entry; it simply forwards
         * to ftrace_function_trampoline. */
ENTRY(_mcount)
        copy    %r3, %arg2
        b       ftrace_function_trampoline
        nop
ENDPROC(_mcount)

        /* Function-graph tracing: patched return addresses land here
         * and are resolved via ftrace_return_to_handler. */
ENTRY(return_to_handler)
        load32  return_trampoline, %rp
        copy    %ret0, %arg0
        copy    %ret1, %arg1
        b       ftrace_return_to_handler
        nop
return_trampoline:
        copy    %ret0, %rp
        copy    %r23, %ret0
        copy    %r24, %ret1

.globl ftrace_stub
ftrace_stub:
        bv      %r0(%rp)
        nop
ENDPROC(return_to_handler)
#endif  /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
                      unsigned long new_stack) */
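/* A sketch of the intended use (handler_fn and irq_stack_top are
 * illustrative names, not symbols defined in this file): the irq code
 * moves onto a per-cpu interrupt stack with something like
 *
 *      call_on_stack(irq, &handler_fn, irq_stack_top);
 *
 * func runs on new_stack, and the old %sp and %rp are restored from
 * the frame marker on return. */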
ENTRY(call_on_stack)
        copy    %sp, %r1

        /* Regarding the HPPA calling conventions for function pointers,
           we assume the PIC register is not changed across the call.  For
           CONFIG_64BIT, the argument pointer is left to point at the
           argument region allocated for the call to call_on_stack. */
# ifdef CONFIG_64BIT
        /* Switch to new stack.  We allocate two 128 byte frames.  */
        ldo     256(%arg2), %sp
        /* Save previous stack pointer and return pointer in frame marker */
        STREG   %rp, -144(%sp)
        /* Calls always use function descriptor */
        LDREG   16(%arg1), %arg1
        bve,l   (%arg1), %rp
        STREG   %r1, -136(%sp)
        LDREG   -144(%sp), %rp
        bve     (%rp)
        LDREG   -136(%sp), %sp
# else
        /* Switch to new stack.  We allocate two 64 byte frames.  */
        ldo     128(%arg2), %sp
        /* Save previous stack pointer and return pointer in frame marker */
        STREG   %r1, -68(%sp)
        STREG   %rp, -84(%sp)
        /* Calls use function descriptor if PLABEL bit is set */
        bb,>=,n %arg1, 30, 1f
        depwi   0,31,2, %arg1
        LDREG   0(%arg1), %arg1
1:
        be,l    0(%sr4,%arg1), %sr0, %r31
        copy    %r31, %rp
        LDREG   -84(%sp), %rp
        bv      (%rp)
        LDREG   -68(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

get_register:
        /*
         * get_register is used by the non-access TLB miss handlers to
         * copy the value of the general register specified in r8 into
         * r1. This routine can't be used for shadowed registers, since
         * the rfir will restore the original value. So, for the shadowed
         * registers we put a -1 into r1 to indicate that the register
         * should not be used (the register being copied could also have
         * a -1 in it, but that is OK, it just means that we will have
         * to use the slow path instead).
         */
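        /* Dispatch mechanics: blr lands in the table below at offset
         * 8*%r8, so every register owns one 8-byte slot, a bv plus its
         * delay-slot copy.  The return address is supplied in %r25. */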
        blr     %r8,%r0
        nop
        bv      %r0(%r25)    /* r0 */
        copy    %r0,%r1
        bv      %r0(%r25)    /* r1 - shadowed */
        ldi     -1,%r1
        bv      %r0(%r25)    /* r2 */
        copy    %r2,%r1
        bv      %r0(%r25)    /* r3 */
        copy    %r3,%r1
        bv      %r0(%r25)    /* r4 */
        copy    %r4,%r1
        bv      %r0(%r25)    /* r5 */
        copy    %r5,%r1
        bv      %r0(%r25)    /* r6 */
        copy    %r6,%r1
        bv      %r0(%r25)    /* r7 */
        copy    %r7,%r1
        bv      %r0(%r25)    /* r8 - shadowed */
        ldi     -1,%r1
        bv      %r0(%r25)    /* r9 - shadowed */
        ldi     -1,%r1
        bv      %r0(%r25)    /* r10 */
        copy    %r10,%r1
        bv      %r0(%r25)    /* r11 */
        copy    %r11,%r1
        bv      %r0(%r25)    /* r12 */
        copy    %r12,%r1
        bv      %r0(%r25)    /* r13 */
        copy    %r13,%r1
        bv      %r0(%r25)    /* r14 */
        copy    %r14,%r1
        bv      %r0(%r25)    /* r15 */
        copy    %r15,%r1
        bv      %r0(%r25)    /* r16 - shadowed */
        ldi     -1,%r1
        bv      %r0(%r25)    /* r17 - shadowed */
        ldi     -1,%r1
        bv      %r0(%r25)    /* r18 */
        copy    %r18,%r1
        bv      %r0(%r25)    /* r19 */
        copy    %r19,%r1
        bv      %r0(%r25)    /* r20 */
        copy    %r20,%r1
        bv      %r0(%r25)    /* r21 */
        copy    %r21,%r1
        bv      %r0(%r25)    /* r22 */
        copy    %r22,%r1
        bv      %r0(%r25)    /* r23 */
        copy    %r23,%r1
        bv      %r0(%r25)    /* r24 - shadowed */
        ldi     -1,%r1
        bv      %r0(%r25)    /* r25 - shadowed */
        ldi     -1,%r1
        bv      %r0(%r25)    /* r26 */
        copy    %r26,%r1
        bv      %r0(%r25)    /* r27 */
        copy    %r27,%r1
        bv      %r0(%r25)    /* r28 */
        copy    %r28,%r1
        bv      %r0(%r25)    /* r29 */
        copy    %r29,%r1
        bv      %r0(%r25)    /* r30 */
        copy    %r30,%r1
        bv      %r0(%r25)    /* r31 */
        copy    %r31,%r1


set_register:
        /*
         * set_register is used by the non-access TLB miss handlers to
         * copy the value of r1 into the general register specified in
         * r8.
         */
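        /* Same blr-indexed table layout as get_register above:
         * one 8-byte slot per register, return through %r25. */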
        blr     %r8,%r0
        nop
        bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
        copy    %r1,%r0
        bv      %r0(%r25)    /* r1 */
        copy    %r1,%r1
        bv      %r0(%r25)    /* r2 */
        copy    %r1,%r2
        bv      %r0(%r25)    /* r3 */
        copy    %r1,%r3
        bv      %r0(%r25)    /* r4 */
        copy    %r1,%r4
        bv      %r0(%r25)    /* r5 */
        copy    %r1,%r5
        bv      %r0(%r25)    /* r6 */
        copy    %r1,%r6
        bv      %r0(%r25)    /* r7 */
        copy    %r1,%r7
        bv      %r0(%r25)    /* r8 */
        copy    %r1,%r8
        bv      %r0(%r25)    /* r9 */
        copy    %r1,%r9
        bv      %r0(%r25)    /* r10 */
        copy    %r1,%r10
        bv      %r0(%r25)    /* r11 */
        copy    %r1,%r11
        bv      %r0(%r25)    /* r12 */
        copy    %r1,%r12
        bv      %r0(%r25)    /* r13 */
        copy    %r1,%r13
        bv      %r0(%r25)    /* r14 */
        copy    %r1,%r14
        bv      %r0(%r25)    /* r15 */
        copy    %r1,%r15
        bv      %r0(%r25)    /* r16 */
        copy    %r1,%r16
        bv      %r0(%r25)    /* r17 */
        copy    %r1,%r17
        bv      %r0(%r25)    /* r18 */
        copy    %r1,%r18
        bv      %r0(%r25)    /* r19 */
        copy    %r1,%r19
        bv      %r0(%r25)    /* r20 */
        copy    %r1,%r20
        bv      %r0(%r25)    /* r21 */
        copy    %r1,%r21
        bv      %r0(%r25)    /* r22 */
        copy    %r1,%r22
        bv      %r0(%r25)    /* r23 */
        copy    %r1,%r23
        bv      %r0(%r25)    /* r24 */
        copy    %r1,%r24
        bv      %r0(%r25)    /* r25 */
        copy    %r1,%r25
        bv      %r0(%r25)    /* r26 */
        copy    %r1,%r26
        bv      %r0(%r25)    /* r27 */
        copy    %r1,%r27
        bv      %r0(%r25)    /* r28 */
        copy    %r1,%r28
        bv      %r0(%r25)    /* r29 */
        copy    %r1,%r29
        bv      %r0(%r25)    /* r30 */
        copy    %r1,%r30
        bv      %r0(%r25)    /* r31 */
        copy    %r1,%r31