/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>          /* for L1_CACHE_SHIFT */
#include <asm/assembly.h>       /* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
        .level 2.0w
#else
        .level 2.0
#endif

        .import         pa_tlb_lock,data
        .macro  load_pa_tlb_lock reg
        mfctl           %cr25,\reg
        addil           L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
        .endm

        /* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
        .macro  space_to_prot spc prot
        depd,z  \spc,62,31,\prot
        .endm
#else
        .macro  space_to_prot spc prot
        extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
        .endm
#endif
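
        /* Roughly, in C (illustration only; remember PA numbers bit 0
         * as the MSB, so the depd/extrd positions count from the left):
         *
         *   if (SPACEID_SHIFT == 0)
         *           prot = (spc & 0x7fffffffUL) << 1;
         *   else
         *           prot = (spc >> (SPACEID_SHIFT - 1)) & 0xffffffffUL;
         *
         * i.e. the space id is lined up with the protection id field
         * expected by the TLB insert instructions. */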

        /* Switch to virtual mapping, trashing only %r1 */
        .macro  virt_map
        /* pcxt_ssm_bug */
        rsm     PSW_SM_I, %r0   /* barrier for "Relied upon Translation" */
        mtsp    %r0, %sr4
        mtsp    %r0, %sr5
        mtsp    %r0, %sr6
        tovirt_r1 %r29
        load32  KERNEL_PSW, %r1

        rsm     PSW_SM_QUIET,%r0        /* second "heavy weight" ctl op */
        mtctl   %r0, %cr17      /* Clear IIASQ tail */
        mtctl   %r0, %cr17      /* Clear IIASQ head */
        mtctl   %r1, %ipsw
        load32  4f, %r1
        mtctl   %r1, %cr18      /* Set IIAOQ tail */
        ldo     4(%r1), %r1
        mtctl   %r1, %cr18      /* Set IIAOQ head */
        rfir
        nop
4:
        .endm
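
        /* In short: with interrupts off and %sr4-%sr6 zeroed (kernel
         * space), the macro reloads the interruption state (IIASQ
         * cleared, IIAOQ pointed at the "4:" label, IPSW set to
         * KERNEL_PSW) and then uses rfir to "return" into virtual
         * mode just past its own body. */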

        /*
         * The "get_stack" macros are responsible for determining the
         * kernel stack value.
         *
         *      If sr7 == 0
         *          Already using a kernel stack, so call the
         *          get_stack_use_r30 macro to push a pt_regs structure
         *          on the stack, and store registers there.
         *      else
         *          Need to set up a kernel stack, so call the
         *          get_stack_use_cr30 macro to set up a pointer
         *          to the pt_regs structure contained within the
         *          task pointer pointed to by cr30. Set the stack
         *          pointer to point to the end of the task structure.
         *
         * Note that we use shadowed registers for temps until
         * we can save %r26 and %r29. %r26 is used to preserve
         * %r8 (a shadowed register) which temporarily contained
         * either the fault type ("code") or the eirr. We need
         * to use a non-shadowed register to carry the value over
         * the rfir in virt_map. We use %r26 since this value winds
         * up being passed as the argument to either do_cpu_irq_mask
         * or handle_interruption. %r29 is used to hold a pointer
         * to the register save area, and once again, it needs to
         * be a non-shadowed register so that it survives the rfir.
         *
         * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
         */

        .macro  get_stack_use_cr30

        /* we save the registers in the task struct */

        copy    %r30, %r17
        mfctl   %cr30, %r1
        ldo     THREAD_SZ_ALGN(%r1), %r30
        mtsp    %r0,%sr7
        mtsp    %r16,%sr3
        tophys  %r1,%r9
        LDREG   TI_TASK(%r9), %r1       /* thread_info -> task_struct */
        tophys  %r1,%r9
        ldo     TASK_REGS(%r9),%r9
        STREG   %r17,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm

        .macro  get_stack_use_r30

        /* we put a struct pt_regs on the stack and save the registers there */

        tophys  %r30,%r9
        copy    %r30,%r1
        ldo     PT_SZ_ALGN(%r30),%r30
        STREG   %r1,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm
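
        /* Taken together, the interruption prologues below behave
         * roughly like this C sketch (illustration only; the actual
         * selection is the cmpib on %r16/%sr7 at intr_save and
         * intr_extint):
         *
         *   if (mfsp(%sr7) == 0)          // already on a kernel stack
         *           get_stack_use_r30();  // push pt_regs on that stack
         *   else                          // interrupted user space
         *           get_stack_use_cr30(); // use the task's pt_regs
         */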

        .macro  rest_stack
        LDREG   PT_GR1(%r29), %r1
        LDREG   PT_GR30(%r29),%r30
        LDREG   PT_GR29(%r29),%r29
        .endm

        /* default interruption handler
         * (calls traps.c:handle_interruption) */
        .macro  def code
        b       intr_save
        ldi     \code, %r8
        .align  32
        .endm

        /* Interrupt interruption handler
         * (calls irq.c:do_cpu_irq_mask) */
        .macro  extint code
        b       intr_extint
        mfsp    %sr7,%r16
        .align  32
        .endm

        .import os_hpmc, code

        /* HPMC handler */
        .macro  hpmc code
        nop                     /* must be a NOP, will be patched later */
        load32  PA(os_hpmc), %r3
        bv,n    0(%r3)
        nop
        .word   0               /* checksum (will be patched) */
        .word   0               /* address of handler */
        .word   0               /* length of handler */
        .endm

        /*
         * Performance Note: Instructions will be moved up into
         * this part of the code later on, once we are sure
         * that the tlb miss handlers are close to final form.
         */

        /* Register definitions for tlb miss handler macros */

        va  = r8        /* virtual address for which the trap occurred */
        spc = r24       /* space for which the trap occurred */

#ifndef CONFIG_64BIT

        /*
         * itlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  itlb_11 code

        mfctl   %pcsq, spc
        b       itlb_miss_11
        mfctl   %pcoq, va

        .align          32
        .endm
#endif

        /*
         * itlb miss interruption handler (parisc 2.0)
         */

        .macro  itlb_20 code
        mfctl   %pcsq, spc
#ifdef CONFIG_64BIT
        b       itlb_miss_20w
#else
        b       itlb_miss_20
#endif
        mfctl   %pcoq, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * naitlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  naitlb_11 code

        mfctl   %isr,spc
        b       naitlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * naitlb miss interruption handler (parisc 2.0)
         */

        .macro  naitlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       naitlb_miss_20w
#else
        b       naitlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dtlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dtlb_11 code

        mfctl   %isr, spc
        b       dtlb_miss_11
        mfctl   %ior, va

        .align          32
        .endm
#endif

        /*
         * dtlb miss interruption handler (parisc 2.0)
         */

        .macro  dtlb_20 code

        mfctl   %isr, spc
#ifdef CONFIG_64BIT
        b       dtlb_miss_20w
#else
        b       dtlb_miss_20
#endif
        mfctl   %ior, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

        .macro  nadtlb_11 code

        mfctl   %isr,spc
        b       nadtlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /* nadtlb miss interruption handler (parisc 2.0) */

        .macro  nadtlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       nadtlb_miss_20w
#else
        b       nadtlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dbit_11 code

        mfctl   %isr,spc
        b       dbit_trap_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * dirty bit trap interruption handler (parisc 2.0)
         */

        .macro  dbit_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       dbit_trap_20w
#else
        b       dbit_trap_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

        /* In LP64, the space contains part of the upper 32 bits of the
         * fault.  We have to extract this and place it in the va,
         * zeroing the corresponding bits in the space register */
        .macro          space_adjust    spc,va,tmp
#ifdef CONFIG_64BIT
        extrd,u         \spc,63,SPACEID_SHIFT,\tmp
        depd            %r0,63,SPACEID_SHIFT,\spc
        depd            \tmp,31,SPACEID_SHIFT,\va
#endif
        .endm
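
        /* Roughly (illustration only, 64-bit case; mask stands for
         * the low SPACEID_SHIFT bits):
         *
         *   tmp = spc & mask;        // upper address bits, from space
         *   spc &= ~mask;            // zero them in the space id
         *   va  = (va & ~(mask << 32)) | (tmp << 32); // deposit in va
         */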

        .import         swapper_pg_dir,code

        /* Get the pgd.  For faults on space zero (kernel space), this
         * is simply swapper_pg_dir.  For user space faults, the
         * pgd is stored in %cr25 */
        .macro          get_pgd         spc,reg
        ldil            L%PA(swapper_pg_dir),\reg
        ldo             R%PA(swapper_pg_dir)(\reg),\reg
        or,COND(=)      %r0,\spc,%r0
        mfctl           %cr25,\reg
        .endm
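
        /* Equivalent C (illustration only):
         *
         *   reg = __pa(swapper_pg_dir);   // assume a kernel fault
         *   if (spc != 0)                 // user space fault?
         *           reg = mfctl(%cr25);   // then use the user pgd
         *
         * the or,COND(=) nullifies the mfctl when \spc is zero. */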

        /*
                space_check(spc,tmp,fault)

                spc - The space we saw the fault with.
                tmp - The place to store the current space.
                fault - Function to call on failure.

                Only allow faults on different spaces from the
                currently active one if we're the kernel

        */
        .macro          space_check     spc,tmp,fault
        mfsp            %sr7,\tmp
        /* check against %r0 which is the same value as LINUX_GATEWAY_SPACE */
        or,COND(<>)     %r0,\spc,%r0    /* user may execute gateway page
                                         * as kernel, so defeat the space
                                         * check if it is */
        copy            \spc,\tmp
        or,COND(=)      %r0,\tmp,%r0    /* nullify if executing as kernel */
        cmpb,COND(<>),n \tmp,\spc,\fault
        .endm
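
        /* Roughly (illustration only):
         *
         *   tmp = mfsp(%sr7);            // currently active space
         *   if (spc == 0)                // gateway page may run with a
         *           tmp = spc;           // kernel space: defeat check
         *   if (tmp != 0 && tmp != spc)  // user fault, foreign space?
         *           goto fault;
         */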

        /* Look up a PTE in a 2-Level scheme (faulting at each
         * level if the entry isn't present)
         *
         * NOTE: we use ldw even for LP64, since the short pointers
         * can address up to 1TB
         */
        .macro          L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
        extru           \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# else
#  if PAGE_SIZE > 4096
        extru           \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
#  else
        extru           \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#  endif
# endif
#endif
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        copy            %r0,\pte
        ldw,s           \index(\pmd),\pmd
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
        dep             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        SHLREG          \pmd,PxD_VALUE_SHIFT,\pmd
        extru           \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
        .endm
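
        /* In rough C (illustration only; pmd_index()/pte_index() stand
         * in for the extru bit-field extractions above):
         *
         *   pmd  = *(u32 *)(pmd + pmd_index(va) * 4); // ldw,s scales
         *   if (!(pmd & present_bit))                 // bb,>=,n test
         *           goto fault;
         *   pmd &= ~((1UL << PxD_FLAG_SHIFT) - 1);    // clear flags
         *   pmd <<= PxD_VALUE_SHIFT;                  // to phys addr
         *   pmd += pte_index(va) << BITS_PER_PTE_ENTRY; // -> pte slot
         *
         * The PTE value itself is loaded later, under the TLB lock. */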

        /* Look up PTE in a 3-Level scheme.
         *
         * Here we implement a Hybrid L2/L3 scheme: we allocate the
         * first pmd adjacent to the pgd.  This means that we can
         * subtract a constant offset to get to it.  The pmd and pgd
         * sizes are arranged so that a single pmd covers 4GB (giving
         * a full LP64 process access to 8TB) so our lookups are
         * effectively L2 for the first 4GB of the kernel (i.e. for
         * all ILP32 processes and all the kernel for machines with
         * under 4GB of memory) */
        .macro          L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        ldw,s           \index(\pgd),\pgd
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        bb,>=,n         \pgd,_PxD_PRESENT_BIT,\fault
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        shld            \pgd,PxD_VALUE_SHIFT,\index
        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        copy            \index,\pgd
        extrd,u,*<>     \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        ldo             ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm

        /* Acquire pa_tlb_lock lock and check page is present. */
        .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
98:     cmpib,COND(=),n 0,\spc,2f
        load_pa_tlb_lock \tmp
1:      LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
        LDREG           0(\ptp),\pte
        bb,<,n          \pte,_PAGE_PRESENT_BIT,3f
        LDCW            0(\tmp),\tmp1
        b               \fault
        stw             \spc,0(\tmp)
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:      LDREG           0(\ptp),\pte
        bb,>=,n         \pte,_PAGE_PRESENT_BIT,\fault
3:
        .endm
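
        /* Roughly (illustration only):
         *
         *   if (spc != 0) {                   // user faults take the lock
         *           while (__ldcw(lock) == 0) // atomic load-and-clear
         *                   ;                 // 0 means held, so spin
         *           pte = *ptp;
         *           if (!(pte & _PAGE_PRESENT)) {
         *                   (void) __ldcw(lock); // ordering barrier
         *                   *lock = spc;         // release (spc != 0)
         *                   goto fault;
         *           }
         *           // otherwise the lock stays held and is released
         *           // later by tlb_unlock0/tlb_unlock1
         *   } else {
         *           pte = *ptp;
         *           if (!(pte & _PAGE_PRESENT))
         *                   goto fault;
         *   }
         */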

        /* Release pa_tlb_lock lock without reloading lock address. */
        .macro          tlb_unlock0     spc,tmp,tmp1
#ifdef CONFIG_SMP
98:     or,COND(=)      %r0,\spc,%r0
        LDCW            0(\tmp),\tmp1
        or,COND(=)      %r0,\spc,%r0
        stw             \spc,0(\tmp)
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
        .endm

        /* Release pa_tlb_lock lock. */
        .macro          tlb_unlock1     spc,tmp,tmp1
#ifdef CONFIG_SMP
98:     load_pa_tlb_lock \tmp
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
        tlb_unlock0     \spc,\tmp,\tmp1
#endif
        .endm

        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
        .macro          update_accessed ptp,pte,tmp,tmp1
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
        STREG           \tmp,0(\ptp)
        .endm
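
        /* i.e. (illustration only):
         *
         *   if (!(pte & _PAGE_ACCESSED))
         *           *ptp = pte | _PAGE_ACCESSED;
         *
         * the and,COND(<>) nullifies the store when the bit is set. */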

        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
        .macro          update_dirty    ptp,pte,tmp
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
        STREG           \pte,0(\ptp)
        .endm

        /* We have (depending on the page size):
         * - 38 to 52-bit Physical Page Number
         * - 12 to 26-bit page offset
         */
        /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
         * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
        #define PAGE_ADD_SHIFT          (PAGE_SHIFT-12)
        #define PAGE_ADD_HUGE_SHIFT     (REAL_HPAGE_SHIFT-12)
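
        /* e.g. with 16kB kernel pages, PAGE_SHIFT is 14 and
         * PAGE_ADD_SHIFT is 2: shifting a kernel PFN left by 2 yields
         * the corresponding 4k CPU TLB PFN. */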

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        .macro          convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
        copy            \pte,\tmp
        extrd,u         \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
                                64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
        extrd,u,*=      \tmp,_PAGE_HPAGE_BIT+32,1,%r0
        depdi           _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
        extrd,u         \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
                                64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
#endif
        .endm

        /* Convert the pte and prot to tlb insertion values.  How
         * this happens is quite subtle, read below */
        .macro          make_insert_tlb spc,pte,prot,tmp
        space_to_prot   \spc \prot        /* create prot id from space */
        /* The following is the real subtlety.  This is depositing
         * T <-> _PAGE_REFTRAP
         * D <-> _PAGE_DIRTY
         * B <-> _PAGE_DMB (memory break)
         *
         * Then incredible subtlety: The access rights are
         * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
         * See 3-14 of the parisc 2.0 manual
         *
         * Finally, _PAGE_READ goes in the top bit of PL1 (so we
         * trigger an access rights trap in user space if the user
         * tries to read an unreadable page) */
        depd            \pte,8,7,\prot

        /* PAGE_USER indicates the page can be read with user privileges,
         * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
         * contains _PAGE_READ) */
        extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
        depdi           7,11,3,\prot
        /* If we're a gateway page, drop PL2 back to zero for promotion
         * to kernel privilege (so we can execute the page as kernel).
         * Any privilege promotion page always denies read and write */
        extrd,u,*=      \pte,_PAGE_GATEWAY_BIT+32,1,%r0
        depd            %r0,11,2,\prot  /* If Gateway, Set PL2 to 0 */

        /* Enforce uncacheable pages.
         * This should ONLY be used for MMIO on PA 2.0 machines.
         * Memory/DMA is cache coherent on all PA2.0 machines we support
         * (that means T-class is NOT supported) and the memory controllers
         * on most of those machines only handle cache transactions.
         */
        extrd,u,*=      \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
        depdi           1,12,1,\prot

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        convert_for_tlb_insert20 \pte \tmp
        .endm

        /* Identical macro to make_insert_tlb above, except it
         * makes the tlb entry for the differently formatted pa11
         * insertion instructions */
        .macro          make_insert_tlb_11      spc,pte,prot
        zdep            \spc,30,15,\prot
        dep             \pte,8,7,\prot
        extru,=         \pte,_PAGE_NO_CACHE_BIT,1,%r0
        depi            1,12,1,\prot
        extru,=         \pte,_PAGE_USER_BIT,1,%r0
        depi            7,11,3,\prot   /* Set for user space (1 rsvd for read) */
        extru,=         \pte,_PAGE_GATEWAY_BIT,1,%r0
        depi            0,11,2,\prot    /* If Gateway, Set PL2 to 0 */

        /* Get rid of prot bits and convert to page addr for iitlba */

        depi            0,31,ASM_PFN_PTE_SHIFT,\pte
        SHRREG          \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
        .endm

        /* This is for ILP32 PA2.0 only.  The TLB insertion needs
         * to extend into I/O space if the address is 0xfXXXXXXX
         * so we extend the f's into the top word of the pte in
         * this case */
        .macro          f_extend        pte,tmp
        extrd,s         \pte,42,4,\tmp
        addi,<>         1,\tmp,%r0
        extrd,s         \pte,63,25,\pte
        .endm
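
        /* Worked example (illustration only): for a physical page in
         * the 0xf0000000-0xffffffff range the four bits tested above
         * are all ones, so the second extrd,s sign-extends the page
         * address and the 64-bit TLB insert lands in I/O space (all-f
         * prefix). For any other address the addi,<> nullifies the
         * extension and the pte is left alone. */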

        /* The alias region is an 8MB aligned 16MB to do clear and
         * copy user pages at addresses congruent with the user
         * virtual address.
         *
         * To use the alias page, you set %r26 up with the "to" TLB
         * entry (identifying the physical page) and %r23 up with
         * the "from" TLB entry (or nothing if only a "to" entry---for
         * clear_user_page_asm) */
        .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
        /* on LP64, ldi will sign extend into the upper 32 bits,
         * which is behaviour we don't want */
        depdi           0,31,32,\tmp
#endif
        copy            \va,\tmp1
        depi            0,31,23,\tmp1
        cmpb,COND(<>),n \tmp,\tmp1,\fault
        mfctl           %cr19,\tmp      /* iir */
        /* get the opcode (first six bits) into \tmp */
        extrw,u         \tmp,5,6,\tmp
        /*
         * Only setting the T bit prevents data cache movein
         * Setting access rights to zero prevents instruction cache movein
         *
         * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
         * to type field and _PAGE_READ goes to top bit of PL1
         */
        ldi             (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
        /*
         * so if the opcode is one (i.e. this is a memory management
         * instruction) nullify the next load so \prot is only T.
         * Otherwise this is a normal data operation
         */
        cmpiclr,=       0x01,\tmp,%r0
        ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
        depd,z          \prot,8,7,\prot
.else
.ifc \patype,11
        depw,z          \prot,8,7,\prot
.else
        .error "undefined PA type to do_alias"
.endif
.endif
        /*
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
         */
#ifdef CONFIG_64BIT
        extrd,u,*=      \va,41,1,%r0
#else
        extrw,u,=       \va,9,1,%r0
#endif
        or,COND(tr)     %r23,%r0,\pte
        or              %r26,%r0,\pte
        .endm


        /*
         * Fault_vectors are architecturally required to be aligned on a 2K
         * boundary
         */

        .section .text.hot
        .align 2048

ENTRY(fault_vector_20)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_20          PARISC_ITLB_TRAP
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_20         15
        naitlb_20       16
        nadtlb_20       17
        def             18
        def             19
        dbit_20         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_20)

#ifndef CONFIG_64BIT

        .align 2048

ENTRY(fault_vector_11)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_11          PARISC_ITLB_TRAP
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_11         15
        naitlb_11       16
        nadtlb_11       17
        def             18
        def             19
        dbit_11         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_11)

#endif
        /* Fault vector is separately protected and *must* be on its own page */
        .align          PAGE_SIZE

        .import         handle_interruption,code
        .import         do_cpu_irq_mask,code

        /*
         * Child Returns here
         *
         * copy_thread moved args into task save area.
         */

ENTRY(ret_from_kernel_thread)
        /* Call schedule_tail first though */
        BL      schedule_tail, %r2
        nop

        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
        LDREG   TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
        LDREG   TASK_PT_GR27(%r1), %r27
#endif
        LDREG   TASK_PT_GR26(%r1), %r1
        ble     0(%sr7, %r1)
        copy    %r31, %r2
        b       finish_child_return
        nop
END(ret_from_kernel_thread)


        /*
         * struct task_struct *_switch_to(struct task_struct *prev,
         *      struct task_struct *next)
         *
         * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
        STREG    %r2, -RP_OFFSET(%r30)

        callee_save_float
        callee_save

        load32  _switch_to_ret, %r2

        STREG   %r2, TASK_PT_KPC(%r26)
        LDREG   TASK_PT_KPC(%r25), %r2

        STREG   %r30, TASK_PT_KSP(%r26)
        LDREG   TASK_PT_KSP(%r25), %r30
        LDREG   TASK_THREAD_INFO(%r25), %r25
        bv      %r0(%r2)
        mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
        mtctl   %r0, %cr0               /* Needed for single stepping */
        callee_rest
        callee_rest_float

        LDREG   -RP_OFFSET(%r30), %r2
        bv      %r0(%r2)
        copy    %r26, %r28
ENDPROC_CFI(_switch_to)

        /*
         * Common rfi return path for interruptions, kernel execve, and
         * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
         * return via this path if the signal was received when the process
         * was running; if the process was blocked on a syscall then the
         * normal syscall_exit path is used.  All syscalls for traced
         * processes exit via intr_restore.
         *
         * XXX If any syscalls that change a process's space id ever exit
         * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
         * adjust IASQ[0..1].
         *
         */

        .align  PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
        mfctl   %cr30,%r16
        LDREG   TI_TASK(%r16), %r16     /* thread_info -> task_struct */
        ldo     TASK_REGS(%r16),%r16
        /* Force iaoq to userspace, as the user has had access to our current
         * context via sigcontext. Also filter the PSW for the same reason.
         */
        LDREG   PT_IAOQ0(%r16),%r19
        depi    3,31,2,%r19
        STREG   %r19,PT_IAOQ0(%r16)
        LDREG   PT_IAOQ1(%r16),%r19
        depi    3,31,2,%r19
        STREG   %r19,PT_IAOQ1(%r16)
        LDREG   PT_PSW(%r16),%r19
        load32  USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
        load32  USER_PSW_HI_MASK,%r20
        depd    %r20,31,32,%r1
#endif
        and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
        load32  USER_PSW,%r1
        or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
        STREG   %r19,PT_PSW(%r16)

        /*
         * If we aren't being traced, we never saved space registers
         * (we don't store them in the sigcontext), so set them
         * to "proper" values now (otherwise we'll wind up restoring
         * whatever was last stored in the task structure, which might
         * be inconsistent if an interrupt occurred while on the gateway
         * page). Note that we may be "trashing" values the user put in
         * them, but we don't support the user changing them.
         */

        STREG   %r0,PT_SR2(%r16)
        mfsp    %sr3,%r19
        STREG   %r19,PT_SR0(%r16)
        STREG   %r19,PT_SR1(%r16)
        STREG   %r19,PT_SR3(%r16)
        STREG   %r19,PT_SR4(%r16)
        STREG   %r19,PT_SR5(%r16)
        STREG   %r19,PT_SR6(%r16)
        STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
        bb,<,n  %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

        .import do_notify_resume,code
intr_check_sig:
        /* As above */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19
        ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
        and,COND(<>)    %r19, %r20, %r0
        b,n     intr_restore    /* skip past if we've nothing to do */

        /* This check is critical to having LWS
         * working. The IASQ is zero on the gateway
         * page and we cannot deliver any signals until
         * we get off the gateway page.
         *
         * Only do signals if we are returning to user space
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */

        /* NOTE: We need to enable interrupts if we have to deliver
         * signals. We used to do this earlier but it caused kernel
         * stack overflows. */
        ssm     PSW_SM_I, %r0

        copy    %r0, %r25                       /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
#endif

        BL      do_notify_resume,%r2
        copy    %r16, %r26                      /* struct pt_regs *regs */

        b,n     intr_check_sig

intr_restore:
        copy            %r16,%r29
        ldo             PT_FR31(%r29),%r1
        rest_fp         %r1
        rest_general    %r29

        /* inverse of virt_map */
        pcxt_ssm_bug
        rsm             PSW_SM_QUIET,%r0        /* prepare for rfi */
        tophys_r1       %r29

        /* Restore space id's and special cr's from PT_REGS
         * structure pointed to by r29
         */
        rest_specials   %r29

        /* IMPORTANT: rest_stack restores r29 last (we are using it)!
         * It also restores r1 and r30.
         */
        rest_stack

        rfi
        nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt        intr_restore
#endif /* !CONFIG_PREEMPT */

        .import schedule,code
intr_do_resched:
        /* Only call schedule on return to userspace. If we're returning
         * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
         * we jump back to intr_restore.
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop

        /* NOTE: We need to enable interrupts if we schedule.  We used
         * to do this earlier but it caused kernel stack overflows. */
        ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
#endif

        ldil    L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
        b       schedule
#else
        load32  schedule, %r20
        bv      %r0(%r20)
#endif
        ldo     R%intr_check_sig(%r2), %r2

        /* preempt the current task on returning to kernel
         * mode from an interrupt, iff need_resched is set,
         * and preempt_count is 0. otherwise, we continue on
         * our merry way back to the current running task.
         */
#ifdef CONFIG_PREEMPT
        .import preempt_schedule_irq,code
intr_do_preempt:
        rsm     PSW_SM_I, %r0           /* disable interrupts */

        /* current_thread_info()->preempt_count */
        mfctl   %cr30, %r1
        LDREG   TI_PRE_COUNT(%r1), %r19
        cmpib,COND(<>)  0, %r19, intr_restore   /* if preempt_count > 0 */
        nop                             /* prev insn branched backwards */

        /* check if we interrupted a critical path */
        LDREG   PT_PSW(%r16), %r20
        bb,<,n  %r20, 31 - PSW_SM_I, intr_restore
        nop

        BL      preempt_schedule_irq, %r2
        nop

        b,n     intr_restore            /* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

        /*
         * External interrupts.
         */

intr_extint:
        cmpib,COND(=),n 0,%r16,1f

        get_stack_use_cr30
        b,n 2f

1:
        get_stack_use_r30
2:
        save_specials   %r29
        virt_map
        save_general    %r29

        ldo     PT_FR0(%r29), %r24
        save_fp %r24

        loadgp

        copy    %r29, %r26      /* arg0 is pt_regs */
        copy    %r29, %r16      /* save pt_regs */

        ldil    L%intr_return, %r2

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29  /* Reference param save area */
#endif

        b       do_cpu_irq_mask
        ldo     R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


        /* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)            /* for os_hpmc */
        mfsp    %sr7,%r16
        cmpib,COND(=),n 0,%r16,1f
        get_stack_use_cr30
        b       2f
        copy    %r8,%r26

1:
        get_stack_use_r30
        copy    %r8,%r26

2:
        save_specials   %r29

        /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
        cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


        mfctl           %isr, %r16
        nop             /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
        mfctl           %ior, %r17


#ifdef CONFIG_64BIT
        /*
         * If the interrupted code was running with W bit off (32 bit),
         * clear the b bits (bits 0 & 1) in the ior.
         * save_specials left ipsw value in r8 for us to test.
         */
        extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
        depdi           0,1,2,%r17

        /* adjust isr/ior: get high bits from isr and deposit in ior */
        space_adjust    %r16,%r17,%r1
#endif
        STREG           %r16, PT_ISR(%r29)
        STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
        /* Revisit when we have 64-bit code above 4Gb */
        b,n             intr_save2

skip_save_ior:
        /* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
         * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
         * above.
         */
        extrd,u,*       %r8,PSW_W_BIT,1,%r1
        cmpib,COND(=),n 1,%r1,intr_save2
        LDREG           PT_IASQ0(%r29), %r16
        LDREG           PT_IAOQ0(%r29), %r17
        /* adjust iasq/iaoq */
        space_adjust    %r16,%r17,%r1
        STREG           %r16, PT_IASQ0(%r29)
        STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
        virt_map
        save_general    %r29

        ldo             PT_FR0(%r29), %r25
        save_fp         %r25

        loadgp

        copy            %r29, %r25      /* arg1 is pt_regs */
#ifdef CONFIG_64BIT
        ldo             -16(%r30),%r29  /* Reference param save area */
#endif

        ldil            L%intr_check_sig, %r2
        copy            %r25, %r16      /* save pt_regs */

        b               handle_interruption
        ldo             R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


        /*
         * Note for all tlb miss handlers:
         *
         * cr24 contains a pointer to the kernel address space
         * page directory.
         *
         * cr25 contains a pointer to the current user address
         * space page directory.
         *
         * sr3 will contain the space id of the user address space
         * of the current running thread while that thread is
         * running in the kernel.
         */

        /*
         * register number allocations.  Note that these are all
         * in the shadowed registers
         */

        t0 = r1         /* temporary register 0 */
        va = r8         /* virtual address for which the trap occurred */
        t1 = r9         /* temporary register 1 */
        pte  = r16      /* pte/phys page # */
        prot = r17      /* prot bits */
        spc  = r24      /* space for which the trap occurred */
        ptp = r25       /* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w

        tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

dtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        rfir
        nop

nadtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,nadtlb_fault

        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w

        tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

nadtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        rfir
        nop

#else

dtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11

        tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        tlb_unlock1     spc,t0,t1
        rfir
        nop

dtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        rfir
        nop

nadtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11

        tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        tlb_unlock1     spc,t0,t1
        rfir
        nop

nadtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        rfir
        nop

dtlb_miss_20:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20

        tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

dtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        rfir
        nop

nadtlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20

        tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

nadtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        rfir
        nop

#endif

nadtlb_emulate:

        /*
         * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
         * probei instructions. We don't want to fault for these
         * instructions (not only does it not make sense, it can cause
         * deadlocks, since some flushes are done with the mmap
         * semaphore held). If the translation doesn't exist, we can't
         * insert a translation, so have to emulate the side effects
         * of the instruction. Since we don't insert a translation
         * we can get a lot of faults during a flush loop, so it makes
         * sense to try to do it here with minimum overhead. We only
         * emulate fdc,fic,pdc,probew,prober instructions whose base
         * and index registers are not shadowed. We defer everything
         * else to the "slow" path.
         */

        mfctl           %cr19,%r9 /* Get iir */

        /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
           Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

        /* Checks for fdc,fdce,pdc,"fic,4f" only */
        ldi             0x280,%r16
        and             %r9,%r16,%r17
        cmpb,<>,n       %r16,%r17,nadtlb_probe_check
        bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
        BL              get_register,%r25
        extrw,u         %r9,15,5,%r8           /* Get index register # */
        cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
        copy            %r1,%r24
        BL              get_register,%r25
        extrw,u         %r9,10,5,%r8           /* Get base register # */
        cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
        BL              set_register,%r25
        add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
        mfctl           %ipsw,%r8
        ldil            L%PSW_N,%r9
        or              %r8,%r9,%r8            /* Set PSW_N */
        mtctl           %r8,%ipsw

        rfir
        nop

        /*
                When there is no translation for the probe address then we
                must nullify the insn and return zero in the target register.
                This will indicate to the calling code that it does not have
                write/read privileges to this address.

                This should technically work for prober and probew in PA 1.1,
                and also probe,r and probe,w in PA 2.0

                WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
                THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

        */
nadtlb_probe_check:
        ldi             0x80,%r16
        and             %r9,%r16,%r17
        cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw] */
        BL              get_register,%r25      /* Find the target register */
        extrw,u         %r9,31,5,%r8           /* Get target register */
        cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
        BL              set_register,%r25
        copy            %r0,%r1                /* Write zero to target register */
        b nadtlb_nullify                       /* Nullify return insn */
        nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,itlb_fault

        L3_ptep         ptp,pte,t0,va,itlb_fault

        tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        iitlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

naitlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,naitlb_fault

        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w

        tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        iitlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

naitlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        rfir
        nop

#else

itlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        tlb_unlock1     spc,t0,t1
        rfir
        nop

naitlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11

        tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        tlb_unlock1     spc,t0,t1
        rfir
        nop

naitlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,itlb_fault,11

        iitlba          pte,(%sr0, va)
        iitlbp          prot,(%sr0, va)

        rfir
        nop


itlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        iitlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

naitlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20

        tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        iitlbt          pte,prot

        tlb_unlock1     spc,t0,t1
        rfir
        nop

naitlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        rfir
        nop

#endif
1568
1569#ifdef CONFIG_64BIT
1570
1571dbit_trap_20w:
1572        space_adjust    spc,va,t0
1573        get_pgd         spc,ptp
1574        space_check     spc,t0,dbit_fault
1575
1576        L3_ptep         ptp,pte,t0,va,dbit_fault
1577
1578        tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
1579        update_dirty    ptp,pte,t1
1580
1581        make_insert_tlb spc,pte,prot,t1
1582                
1583        idtlbt          pte,prot
1584
1585        tlb_unlock0     spc,t0,t1
1586        rfir
1587        nop
1588#else
1589
1590dbit_trap_11:
1591
1592        get_pgd         spc,ptp
1593
1594        space_check     spc,t0,dbit_fault
1595
1596        L2_ptep         ptp,pte,t0,va,dbit_fault
1597
1598        tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
1599        update_dirty    ptp,pte,t1
1600
1601        make_insert_tlb_11      spc,pte,prot
1602
1603        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1604        mtsp            spc,%sr1
1605
1606        idtlba          pte,(%sr1,va)
1607        idtlbp          prot,(%sr1,va)
1608
1609        mtsp            t1, %sr1     /* Restore sr1 */
1610
1611        tlb_unlock0     spc,t0,t1
1612        rfir
1613        nop
1614
1615dbit_trap_20:
1616        get_pgd         spc,ptp
1617
1618        space_check     spc,t0,dbit_fault
1619
1620        L2_ptep         ptp,pte,t0,va,dbit_fault
1621
1622        tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
1623        update_dirty    ptp,pte,t1
1624
1625        make_insert_tlb spc,pte,prot,t1
1626
1627        f_extend        pte,t1
1628        
1629        idtlbt          pte,prot
1630
1631        tlb_unlock0     spc,t0,t1
1632        rfir
1633        nop
1634#endif
1635
1636        .import handle_interruption,code
1637
1638kernel_bad_space:
1639        b               intr_save
1640        ldi             31,%r8  /* Use an unused code */
1641
1642dbit_fault:
1643        b               intr_save
1644        ldi             20,%r8
1645
1646itlb_fault:
1647        b               intr_save
1648        ldi             PARISC_ITLB_TRAP,%r8
1649
1650nadtlb_fault:
1651        b               intr_save
1652        ldi             17,%r8
1653
1654naitlb_fault:
1655        b               intr_save
1656        ldi             16,%r8
1657
1658dtlb_fault:
1659        b               intr_save
1660        ldi             15,%r8
1661
1662        /* Register saving semantics for system calls:
1663
1664           %r1             clobbered by system call macro in userspace
1665           %r2             saved in PT_REGS by gateway page
1666           %r3  - %r18     preserved by C code (saved by signal code)
1667           %r19 - %r20     saved in PT_REGS by gateway page
1668           %r21 - %r22     non-standard syscall args
1669                           stored in kernel stack by gateway page
1670           %r23 - %r26     arg3-arg0, saved in PT_REGS by gateway page
1671           %r27 - %r30     saved in PT_REGS by gateway page
1672           %r31            syscall return pointer
1673         */
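        /* Given the layout above, C code can pull the syscall arguments
         * straight out of pt_regs.  A sketch modeled on the kernel's
         * syscall_get_arguments() for parisc (gr[] holds %r0-%r31):
         *
         *      unsigned long sketch_syscall_arg(struct pt_regs *regs, int n)
         *      {
         *              return regs->gr[26 - n];   // arg0..arg5 in %r26..%r21
         *      }
         */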
1674
1675        /* Floating point registers (FIXME: what do we do with these?)
1676
1677           %fr0  - %fr3    status/exception, not preserved
1678           %fr4  - %fr7    arguments
1679           %fr8  - %fr11   not preserved by C code
1680           %fr12 - %fr21   preserved by C code
1681           %fr22 - %fr31   not preserved by C code
1682         */
1683
1684        .macro  reg_save regs
1685        STREG   %r3, PT_GR3(\regs)
1686        STREG   %r4, PT_GR4(\regs)
1687        STREG   %r5, PT_GR5(\regs)
1688        STREG   %r6, PT_GR6(\regs)
1689        STREG   %r7, PT_GR7(\regs)
1690        STREG   %r8, PT_GR8(\regs)
1691        STREG   %r9, PT_GR9(\regs)
1692        STREG   %r10,PT_GR10(\regs)
1693        STREG   %r11,PT_GR11(\regs)
1694        STREG   %r12,PT_GR12(\regs)
1695        STREG   %r13,PT_GR13(\regs)
1696        STREG   %r14,PT_GR14(\regs)
1697        STREG   %r15,PT_GR15(\regs)
1698        STREG   %r16,PT_GR16(\regs)
1699        STREG   %r17,PT_GR17(\regs)
1700        STREG   %r18,PT_GR18(\regs)
1701        .endm
1702
1703        .macro  reg_restore regs
1704        LDREG   PT_GR3(\regs), %r3
1705        LDREG   PT_GR4(\regs), %r4
1706        LDREG   PT_GR5(\regs), %r5
1707        LDREG   PT_GR6(\regs), %r6
1708        LDREG   PT_GR7(\regs), %r7
1709        LDREG   PT_GR8(\regs), %r8
1710        LDREG   PT_GR9(\regs), %r9
1711        LDREG   PT_GR10(\regs),%r10
1712        LDREG   PT_GR11(\regs),%r11
1713        LDREG   PT_GR12(\regs),%r12
1714        LDREG   PT_GR13(\regs),%r13
1715        LDREG   PT_GR14(\regs),%r14
1716        LDREG   PT_GR15(\regs),%r15
1717        LDREG   PT_GR16(\regs),%r16
1718        LDREG   PT_GR17(\regs),%r17
1719        LDREG   PT_GR18(\regs),%r18
1720        .endm
1721
1722        .macro  fork_like name
1723ENTRY_CFI(sys_\name\()_wrapper)
1724        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1725        ldo     TASK_REGS(%r1),%r1
1726        reg_save %r1
1727        mfctl   %cr27, %r28
1728        ldil    L%sys_\name, %r31
1729        be      R%sys_\name(%sr4,%r31)
1730        STREG   %r28, PT_CR27(%r1)
1731ENDPROC_CFI(sys_\name\()_wrapper)
1732        .endm
1733
1734fork_like clone
1735fork_like clone3
1736fork_like fork
1737fork_like vfork
1738
1739        /* Set the return value for the child */
1740ENTRY(child_return)
1741        BL      schedule_tail, %r2
1742        nop
1743finish_child_return:
1744        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1745        ldo     TASK_REGS(%r1),%r1       /* get pt regs */
1746
1747        LDREG   PT_CR27(%r1), %r3
1748        mtctl   %r3, %cr27
1749        reg_restore %r1
1750        b       syscall_exit
1751        copy    %r0,%r28
1752END(child_return)
1753
1754ENTRY_CFI(sys_rt_sigreturn_wrapper)
1755        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1756        ldo     TASK_REGS(%r26),%r26    /* get pt regs */
1757        /* Don't save regs, we are going to restore them from sigcontext. */
1758        STREG   %r2, -RP_OFFSET(%r30)
1759#ifdef CONFIG_64BIT
1760        ldo     FRAME_SIZE(%r30), %r30
1761        BL      sys_rt_sigreturn,%r2
1762        ldo     -16(%r30),%r29          /* Reference param save area */
1763#else
1764        BL      sys_rt_sigreturn,%r2
1765        ldo     FRAME_SIZE(%r30), %r30
1766#endif
1767
1768        ldo     -FRAME_SIZE(%r30), %r30
1769        LDREG   -RP_OFFSET(%r30), %r2
1770
1771        /* FIXME: I think we need to restore a few more things here. */
1772        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1773        ldo     TASK_REGS(%r1),%r1      /* get pt regs */
1774        reg_restore %r1
1775
1776        /* If the signal was received while the process was blocked on a
1777         * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1778         * take us to syscall_exit_rfi and on to intr_return.
1779         */
1780        bv      %r0(%r2)
1781        LDREG   PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
1782ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1783
1784ENTRY(syscall_exit)
1785        /* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
1786         * via syscall_exit_rfi if the signal was received while the process
1787         * was running.
1788         */
1789
1790        /* save return value now */
1791
1792        mfctl     %cr30, %r1
1793        LDREG     TI_TASK(%r1),%r1
1794        STREG     %r28,TASK_PT_GR28(%r1)
1795
1796        /* It seems that dp could be wrong here if the syscall involved
1797         * calling a module and nothing got around to restoring dp on return.
1798         */
1799        loadgp
1800
1801syscall_check_resched:
1802
1803        /* check for reschedule */
1804
1805        LDREG   TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19   /* long */
1806        bb,<,n  %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1807
1808        .import do_signal,code
1809syscall_check_sig:
1810        LDREG   TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
1811        ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
1812        and,COND(<>)    %r19, %r26, %r0
1813        b,n     syscall_restore /* skip past if we've nothing to do */
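        /* and,COND(<>) nullifies the following branch when the AND result
         * is non-zero, so the pair above reads, in C terms:
         *
         *      if (!(ti_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)))
         *              goto syscall_restore;
         *      // otherwise fall through to syscall_do_signal
         */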
1814
1815syscall_do_signal:
1816        /* Save callee-save registers (for sigcontext).
1817         * FIXME: After this point the process structure should be
1818         * consistent with all the relevant state of the process
1819         * before the syscall.  We need to verify this.
1820         */
1821        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1822        ldo     TASK_REGS(%r1), %r26            /* struct pt_regs *regs */
1823        reg_save %r26
1824
1825#ifdef CONFIG_64BIT
1826        ldo     -16(%r30),%r29                  /* Reference param save area */
1827#endif
1828
1829        BL      do_notify_resume,%r2
1830        ldi     1, %r25                         /* long in_syscall = 1 */
1831
1832        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1833        ldo     TASK_REGS(%r1), %r20            /* reload pt_regs */
1834        reg_restore %r20
1835
1836        b,n     syscall_check_sig
1837
1838syscall_restore:
1839        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1840
1841        /* Are we being ptraced? */
1842        ldw     TASK_FLAGS(%r1),%r19
1843        ldi     _TIF_SYSCALL_TRACE_MASK,%r2
1844        and,COND(=)     %r19,%r2,%r0
1845        b,n     syscall_restore_rfi
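        /* The inverse test: and,COND(=) nullifies the branch when the AND
         * result is zero, so the rfi path is only taken when traced:
         *
         *      if (task_flags & _TIF_SYSCALL_TRACE_MASK)
         *              goto syscall_restore_rfi;
         */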
1846
1847        ldo     TASK_PT_FR31(%r1),%r19             /* reload fpregs */
1848        rest_fp %r19
1849
1850        LDREG   TASK_PT_SAR(%r1),%r19              /* restore SAR */
1851        mtsar   %r19
1852
1853        LDREG   TASK_PT_GR2(%r1),%r2               /* restore user rp */
1854        LDREG   TASK_PT_GR19(%r1),%r19
1855        LDREG   TASK_PT_GR20(%r1),%r20
1856        LDREG   TASK_PT_GR21(%r1),%r21
1857        LDREG   TASK_PT_GR22(%r1),%r22
1858        LDREG   TASK_PT_GR23(%r1),%r23
1859        LDREG   TASK_PT_GR24(%r1),%r24
1860        LDREG   TASK_PT_GR25(%r1),%r25
1861        LDREG   TASK_PT_GR26(%r1),%r26
1862        LDREG   TASK_PT_GR27(%r1),%r27     /* restore user dp */
1863        LDREG   TASK_PT_GR28(%r1),%r28     /* syscall return value */
1864        LDREG   TASK_PT_GR29(%r1),%r29
1865        LDREG   TASK_PT_GR31(%r1),%r31     /* restore syscall rp */
1866
1867        /* NOTE: We use an rsm/ssm pair to make this operation atomic */
1868        LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
1869        rsm     PSW_SM_I, %r0
1870        copy    %r1,%r30                           /* Restore user sp */
1871        mfsp    %sr3,%r1                           /* Get user space id */
1872        mtsp    %r1,%sr7                           /* Restore sr7 */
1873        ssm     PSW_SM_I, %r0
1874
1875        /* Set sr2 to zero for userspace syscalls to work. */
1876        mtsp    %r0,%sr2 
1877        mtsp    %r1,%sr4                           /* Restore sr4 */
1878        mtsp    %r1,%sr5                           /* Restore sr5 */
1879        mtsp    %r1,%sr6                           /* Restore sr6 */
1880
1881        depi    3,31,2,%r31             /* ensure return to user mode (IAOQ privilege level 3) */
1882
1883#ifdef CONFIG_64BIT
1884        /* decide whether to reset the wide mode bit
1885         *
1886         * For a syscall, the W bit is stored in the lowest bit
1887         * of sp.  Extract it and reset W if it is zero */
1888        extrd,u,*<>     %r30,63,1,%r1
1889        rsm     PSW_SM_W, %r0
1890        /* now reset the lowest bit of sp if it was set */
1891        xor     %r30,%r1,%r30
1892#endif
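        /* In C terms, the W-bit handling above is roughly (userspace
         * parks the wide-mode flag in bit 0 of %sp, which is otherwise
         * free because the stack is aligned):
         *
         *      unsigned long w = sp & 1;       // extrd,u,*<> %r30,63,1,%r1
         *      if (w == 0)
         *              psw &= ~PSW_SM_W;       // rsm, nullified when w != 0
         *      sp ^= w;                        // clear the flag bit again
         */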
1893        be,n    0(%sr3,%r31)                       /* return to user space */
1894
1895        /* We have to return via an RFI, so that PSW T and R bits can be set
1896         * appropriately.
1897         * This sets up pt_regs so we can return via intr_restore, which is not
1898         * the most efficient way of doing things, but it works.
1899         */
1900syscall_restore_rfi:
1901        ldo     -1(%r0),%r2                        /* Set recovery cntr to -1 */
1902        mtctl   %r2,%cr0                           /*   for immediate trap */
1903        LDREG   TASK_PT_PSW(%r1),%r2               /* Get old PSW */
1904        ldi     0x0b,%r20                          /* Create new PSW */
1905        depi    -1,13,1,%r20                       /* C, Q, D, and I bits */
1906
1907        /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1908         * set in thread_info.h and converted to PA bitmap
1909         * numbers in asm-offsets.c */
1910
1911        /* if (%r19.SINGLESTEP_BIT) { %r20.27 = 1 } */
1912        extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1913        depi    -1,27,1,%r20                       /* R bit */
1914
1915        /* if (%r19.BLOCKSTEP_BIT) { %r20.7 = 1 } */
1916        extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1917        depi    -1,7,1,%r20                        /* T bit */
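        /* Net effect as a C sketch (R enables the recovery-counter trap,
         * T the taken-branch trap; the PSW_R_BIT/PSW_T_BIT mask names
         * are hypothetical):
         *
         *      if (ti_flags & _TIF_SINGLESTEP)
         *              psw |= PSW_R_BIT;
         *      if (ti_flags & _TIF_BLOCKSTEP)
         *              psw |= PSW_T_BIT;
         */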
1918
1919        STREG   %r20,TASK_PT_PSW(%r1)
1920
1921        /* Always store space registers, since sr3 can be changed (e.g. fork) */
1922
1923        mfsp    %sr3,%r25
1924        STREG   %r25,TASK_PT_SR3(%r1)
1925        STREG   %r25,TASK_PT_SR4(%r1)
1926        STREG   %r25,TASK_PT_SR5(%r1)
1927        STREG   %r25,TASK_PT_SR6(%r1)
1928        STREG   %r25,TASK_PT_SR7(%r1)
1929        STREG   %r25,TASK_PT_IASQ0(%r1)
1930        STREG   %r25,TASK_PT_IASQ1(%r1)
1931
1932        /* XXX W bit??? */
1933        /* Now if the old D bit is clear, it means we didn't save all registers
1934         * on syscall entry, so do that now.  This only happens on TRACEME
1935         * calls, or if someone attached to us while we were on a syscall.
1936         * We could make this more efficient by not saving r3-r18, but
1937         * then we wouldn't be able to use the common intr_restore path.
1938         * It is only for traced processes anyway, so performance is not
1939         * an issue.
1940         */
1941        bb,<    %r2,30,pt_regs_ok                  /* Branch if D set */
1942        ldo     TASK_REGS(%r1),%r25
1943        reg_save %r25                              /* Save r3 to r18 */
1944
1945        /* Save the current sr */
1946        mfsp    %sr0,%r2
1947        STREG   %r2,TASK_PT_SR0(%r1)
1948
1949        /* Save the scratch sr */
1950        mfsp    %sr1,%r2
1951        STREG   %r2,TASK_PT_SR1(%r1)
1952
1953        /* sr2 should be set to zero for userspace syscalls */
1954        STREG   %r0,TASK_PT_SR2(%r1)
1955
1956        LDREG   TASK_PT_GR31(%r1),%r2
1957        depi    3,31,2,%r2                 /* ensure return to user mode. */
1958        STREG   %r2,TASK_PT_IAOQ0(%r1)
1959        ldo     4(%r2),%r2
1960        STREG   %r2,TASK_PT_IAOQ1(%r1)
1961        b       intr_restore
1962        copy    %r25,%r16
1963
1964pt_regs_ok:
1965        LDREG   TASK_PT_IAOQ0(%r1),%r2
1966        depi    3,31,2,%r2                 /* ensure return to user mode. */
1967        STREG   %r2,TASK_PT_IAOQ0(%r1)
1968        LDREG   TASK_PT_IAOQ1(%r1),%r2
1969        depi    3,31,2,%r2
1970        STREG   %r2,TASK_PT_IAOQ1(%r1)
1971        b       intr_restore
1972        copy    %r25,%r16
1973
1974syscall_do_resched:
1975        load32  syscall_check_resched,%r2 /* if resched, we start over again */
1976        load32  schedule,%r19
1977        bv      %r0(%r19)               /* jumps to schedule() */
1978#ifdef CONFIG_64BIT
1979        ldo     -16(%r30),%r29          /* Reference param save area */
1980#else
1981        nop
1982#endif
1983END(syscall_exit)
1984
1985
1986#ifdef CONFIG_FUNCTION_TRACER
1987
1988        .import ftrace_function_trampoline,code
1989        .align L1_CACHE_BYTES
1990ENTRY_CFI(mcount, caller)
1991_mcount:
1992        .export _mcount,data
1993        /*
1994         * The 64-bit mcount() function pointer needs 4 dwords, of which the
1995         * first two are free.  We use them here to hold the 2 instructions
1996         * that call mcount() and the 2 instructions of ftrace_stub(), so
1997         * that everything fits in a single L1 cache line.
1998         */
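        /* Descriptor sketch for the trick above (illustrative only, not
         * an ABI definition): callers resolving a 64-bit function
         * descriptor only consume the trailing address/gp pair, which
         * leaves the leading dwords free to hold real instructions:
         *
         *      struct mcount_desc {            // hypothetical name
         *              unsigned int  insn[4];  // the "free" dwords, used as code
         *              unsigned long addr;     // .dword mcount
         *              unsigned long gp;       // patched by head.S
         *      };
         */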
1999        ldi     0, %arg3
2000        b       ftrace_function_trampoline
2001        copy    %r3, %arg2      /* caller original %sp */
2002ftrace_stub:
2003        .globl ftrace_stub
2004        .type  ftrace_stub, @function
2005#ifdef CONFIG_64BIT
2006        bve     (%rp)
2007#else
2008        bv      %r0(%rp)
2009#endif
2010        nop
2011#ifdef CONFIG_64BIT
2012        .dword mcount
2013        .dword 0 /* code in head.S puts value of global gp here */
2014#endif
2015ENDPROC_CFI(mcount)
2016
2017#ifdef CONFIG_DYNAMIC_FTRACE
2018
2019#ifdef CONFIG_64BIT
2020#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
2021#else
2022#define FTRACE_FRAME_SIZE FRAME_SIZE
2023#endif
2024ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2025ftrace_caller:
2026        .global ftrace_caller
2027
2028        STREG   %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
2029        ldo     -FTRACE_FRAME_SIZE(%sp), %r3
2030        STREG   %rp, -RP_OFFSET(%r3)
2031
2032        /* Offset 0 is already allocated for %r1 */
2033        STREG   %r23, 2*REG_SZ(%r3)
2034        STREG   %r24, 3*REG_SZ(%r3)
2035        STREG   %r25, 4*REG_SZ(%r3)
2036        STREG   %r26, 5*REG_SZ(%r3)
2037        STREG   %r28, 6*REG_SZ(%r3)
2038        STREG   %r29, 7*REG_SZ(%r3)
2039#ifdef CONFIG_64BIT
2040        STREG   %r19, 8*REG_SZ(%r3)
2041        STREG   %r20, 9*REG_SZ(%r3)
2042        STREG   %r21, 10*REG_SZ(%r3)
2043        STREG   %r22, 11*REG_SZ(%r3)
2044        STREG   %r27, 12*REG_SZ(%r3)
2045        STREG   %r31, 13*REG_SZ(%r3)
2046        loadgp
2047        ldo     -16(%sp),%r29
2048#endif
2049        LDREG   0(%r3), %r25
2050        copy    %rp, %r26
2051        ldo     -8(%r25), %r25
2052        ldi     0, %r23         /* no pt_regs */
2053        b,l     ftrace_function_trampoline, %rp
2054        copy    %r3, %r24
2055
2056        LDREG   -RP_OFFSET(%r3), %rp
2057        LDREG   2*REG_SZ(%r3), %r23
2058        LDREG   3*REG_SZ(%r3), %r24
2059        LDREG   4*REG_SZ(%r3), %r25
2060        LDREG   5*REG_SZ(%r3), %r26
2061        LDREG   6*REG_SZ(%r3), %r28
2062        LDREG   7*REG_SZ(%r3), %r29
2063#ifdef CONFIG_64BIT
2064        LDREG   8*REG_SZ(%r3), %r19
2065        LDREG   9*REG_SZ(%r3), %r20
2066        LDREG   10*REG_SZ(%r3), %r21
2067        LDREG   11*REG_SZ(%r3), %r22
2068        LDREG   12*REG_SZ(%r3), %r27
2069        LDREG   13*REG_SZ(%r3), %r31
2070#endif
2071        LDREG   1*REG_SZ(%r3), %r3
2072
2073        LDREGM  -FTRACE_FRAME_SIZE(%sp), %r1
2074        /* Adjust the return point to jump back to the beginning of the traced function */
2075        ldo     -4(%r1), %r1
2076        bv,n    (%r1)
2077
2078ENDPROC_CFI(ftrace_caller)
2079
2080#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
2081ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2082        CALLS,SAVE_RP,SAVE_SP)
2083ftrace_regs_caller:
2084        .global ftrace_regs_caller
2085
2086        ldo     -FTRACE_FRAME_SIZE(%sp), %r1
2087        STREG   %rp, -RP_OFFSET(%r1)
2088
2089        copy    %sp, %r1
2090        ldo     PT_SZ_ALGN(%sp), %sp
2091
2092        STREG   %rp, PT_GR2(%r1)
2093        STREG   %r3, PT_GR3(%r1)
2094        STREG   %r4, PT_GR4(%r1)
2095        STREG   %r5, PT_GR5(%r1)
2096        STREG   %r6, PT_GR6(%r1)
2097        STREG   %r7, PT_GR7(%r1)
2098        STREG   %r8, PT_GR8(%r1)
2099        STREG   %r9, PT_GR9(%r1)
2100        STREG   %r10, PT_GR10(%r1)
2101        STREG   %r11, PT_GR11(%r1)
2102        STREG   %r12, PT_GR12(%r1)
2103        STREG   %r13, PT_GR13(%r1)
2104        STREG   %r14, PT_GR14(%r1)
2105        STREG   %r15, PT_GR15(%r1)
2106        STREG   %r16, PT_GR16(%r1)
2107        STREG   %r17, PT_GR17(%r1)
2108        STREG   %r18, PT_GR18(%r1)
2109        STREG   %r19, PT_GR19(%r1)
2110        STREG   %r20, PT_GR20(%r1)
2111        STREG   %r21, PT_GR21(%r1)
2112        STREG   %r22, PT_GR22(%r1)
2113        STREG   %r23, PT_GR23(%r1)
2114        STREG   %r24, PT_GR24(%r1)
2115        STREG   %r25, PT_GR25(%r1)
2116        STREG   %r26, PT_GR26(%r1)
2117        STREG   %r27, PT_GR27(%r1)
2118        STREG   %r28, PT_GR28(%r1)
2119        STREG   %r29, PT_GR29(%r1)
2120        STREG   %r30, PT_GR30(%r1)
2121        STREG   %r31, PT_GR31(%r1)
2122        mfctl   %cr11, %r26
2123        STREG   %r26, PT_SAR(%r1)
2124
2125        copy    %rp, %r26
2126        LDREG   -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2127        ldo     -8(%r25), %r25
2128        ldo     -FTRACE_FRAME_SIZE(%r1), %arg2
2129        b,l     ftrace_function_trampoline, %rp
2130        copy    %r1, %arg3 /* struct pt_regs */
2131
2132        ldo     -PT_SZ_ALGN(%sp), %r1
2133
2134        LDREG   PT_SAR(%r1), %rp
2135        mtctl   %rp, %cr11
2136
2137        LDREG   PT_GR2(%r1), %rp
2138        LDREG   PT_GR3(%r1), %r3
2139        LDREG   PT_GR4(%r1), %r4
2140        LDREG   PT_GR5(%r1), %r5
2141        LDREG   PT_GR6(%r1), %r6
2142        LDREG   PT_GR7(%r1), %r7
2143        LDREG   PT_GR8(%r1), %r8
2144        LDREG   PT_GR9(%r1), %r9
2145        LDREG   PT_GR10(%r1),%r10
2146        LDREG   PT_GR11(%r1),%r11
2147        LDREG   PT_GR12(%r1),%r12
2148        LDREG   PT_GR13(%r1),%r13
2149        LDREG   PT_GR14(%r1),%r14
2150        LDREG   PT_GR15(%r1),%r15
2151        LDREG   PT_GR16(%r1),%r16
2152        LDREG   PT_GR17(%r1),%r17
2153        LDREG   PT_GR18(%r1),%r18
2154        LDREG   PT_GR19(%r1),%r19
2155        LDREG   PT_GR20(%r1),%r20
2156        LDREG   PT_GR21(%r1),%r21
2157        LDREG   PT_GR22(%r1),%r22
2158        LDREG   PT_GR23(%r1),%r23
2159        LDREG   PT_GR24(%r1),%r24
2160        LDREG   PT_GR25(%r1),%r25
2161        LDREG   PT_GR26(%r1),%r26
2162        LDREG   PT_GR27(%r1),%r27
2163        LDREG   PT_GR28(%r1),%r28
2164        LDREG   PT_GR29(%r1),%r29
2165        LDREG   PT_GR30(%r1),%r30
2166        LDREG   PT_GR31(%r1),%r31
2167
2168        ldo     -PT_SZ_ALGN(%sp), %sp
2169        LDREGM  -FTRACE_FRAME_SIZE(%sp), %r1
2170        /* Adjust the return point to jump back to the beginning of the traced function */
2171        ldo     -4(%r1), %r1
2172        bv,n    (%r1)
2173
2174ENDPROC_CFI(ftrace_regs_caller)
2175
2176#endif
2177#endif
2178
2179#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2180        .align 8
2181ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2182        .export parisc_return_to_handler,data
2183parisc_return_to_handler:
2184        copy %r3,%r1
2185        STREG %r0,-RP_OFFSET(%sp)       /* store 0 as %rp */
2186        copy %sp,%r3
2187        STREGM %r1,FRAME_SIZE(%sp)
2188        STREG %ret0,8(%r3)
2189        STREG %ret1,16(%r3)
2190
2191#ifdef CONFIG_64BIT
2192        loadgp
2193#endif
2194
2195        /* call ftrace_return_to_handler(0) */
2196        .import ftrace_return_to_handler,code
2197        load32 ftrace_return_to_handler,%ret0
2198        load32 .Lftrace_ret,%r2
2199#ifdef CONFIG_64BIT
2200        ldo -16(%sp),%ret1              /* Reference param save area */
2201        bve     (%ret0)
2202#else
2203        bv      %r0(%ret0)
2204#endif
2205        ldi 0,%r26
2206.Lftrace_ret:
2207        copy %ret0,%rp
2208
2209        /* restore original return values */
2210        LDREG 8(%r3),%ret0
2211        LDREG 16(%r3),%ret1
2212
2213        /* return from function */
2214#ifdef CONFIG_64BIT
2215        bve     (%rp)
2216#else
2217        bv      %r0(%rp)
2218#endif
2219        LDREGM -FRAME_SIZE(%sp),%r3
2220ENDPROC_CFI(return_to_handler)
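        /* %ret0/%ret1 are preserved around the C call above because
         * ftrace_return_to_handler() hands back the original return
         * address, which becomes the new %rp.  As a sketch:
         *
         *      saved0 = ret0; saved1 = ret1;       // STREG 8(%r3)/16(%r3)
         *      rp = ftrace_return_to_handler(0);   // real kernel function
         *      ret0 = saved0; ret1 = saved1;       // LDREG 8(%r3)/16(%r3)
         *      // then branch through rp
         */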
2221
2222#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2223
2224#endif  /* CONFIG_FUNCTION_TRACER */
2225
2226#ifdef CONFIG_IRQSTACKS
2227/* void call_on_stack(unsigned long param1, void *func,
2228                      unsigned long new_stack) */
2229ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2230ENTRY(_call_on_stack)
2231        copy    %sp, %r1
2232
2233        /* Regarding the HPPA calling conventions for function pointers,
2234           we assume the PIC register is not changed across the call.  For
2235           CONFIG_64BIT, the argument pointer is left to point at the
2236           argument region allocated for the call to call_on_stack. */
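        /* The 32-bit path below must also cope with plabels: bit 30
         * (value 0x2) of a function pointer marks a pointer to a
         * descriptor rather than to code.  As a C sketch:
         *
         *      if (func & 2) {                         // bb,>=,n ... 30
         *              func &= ~3UL;                   // depwi 0,31,2
         *              func = *(unsigned long *)func;  // LDREG 0(%arg1)
         *      }
         */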
2237
2238        /* Switch to new stack.  We allocate two frames.  */
2239        ldo     2*FRAME_SIZE(%arg2), %sp
2240# ifdef CONFIG_64BIT
2241        /* Save previous stack pointer and return pointer in frame marker */
2242        STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2243        /* Calls always use function descriptor */
2244        LDREG   16(%arg1), %arg1
2245        bve,l   (%arg1), %rp
2246        STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
2247        LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
2248        bve     (%rp)
2249        LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
2250# else
2251        /* Save previous stack pointer and return pointer in frame marker */
2252        STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
2253        STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2254        /* Calls use function descriptor if PLABEL bit is set */
2255        bb,>=,n %arg1, 30, 1f
2256        depwi   0,31,2, %arg1
2257        LDREG   0(%arg1), %arg1
22581:
2259        be,l    0(%sr4,%arg1), %sr0, %r31
2260        copy    %r31, %rp
2261        LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
2262        bv      (%rp)
2263        LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
2264# endif /* CONFIG_64BIT */
2265ENDPROC_CFI(call_on_stack)
2266#endif /* CONFIG_IRQSTACKS */
2267
2268ENTRY_CFI(get_register)
2269        /*
2270         * get_register is used by the non-access TLB miss handlers to
2271         * copy the value of the general register specified in r8 into
2272         * r1. This routine can't be used for shadowed registers, since
2273         * the rfir will restore the original value. So, for the shadowed
2274         * registers we put a -1 into r1 to indicate that the register
2275         * should not be used (the register being copied could also
2276         * contain -1, but that is OK; it just means that we have to
2277         * take the slow path instead).
2278         */
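        /* The blr below indexes into the bv/copy pairs that follow
         * (8 bytes per register).  As a C sketch, with gr[] as in
         * pt_regs and is_shadowed() a made-up predicate covering r1,
         * r8, r9, r16, r17, r24 and r25:
         *
         *      long sketch_get_register(int r)
         *      {
         *              return is_shadowed(r) ? -1 : gr[r];
         *      }
         */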
2279        blr     %r8,%r0
2280        nop
2281        bv      %r0(%r25)    /* r0 */
2282        copy    %r0,%r1
2283        bv      %r0(%r25)    /* r1 - shadowed */
2284        ldi     -1,%r1
2285        bv      %r0(%r25)    /* r2 */
2286        copy    %r2,%r1
2287        bv      %r0(%r25)    /* r3 */
2288        copy    %r3,%r1
2289        bv      %r0(%r25)    /* r4 */
2290        copy    %r4,%r1
2291        bv      %r0(%r25)    /* r5 */
2292        copy    %r5,%r1
2293        bv      %r0(%r25)    /* r6 */
2294        copy    %r6,%r1
2295        bv      %r0(%r25)    /* r7 */
2296        copy    %r7,%r1
2297        bv      %r0(%r25)    /* r8 - shadowed */
2298        ldi     -1,%r1
2299        bv      %r0(%r25)    /* r9 - shadowed */
2300        ldi     -1,%r1
2301        bv      %r0(%r25)    /* r10 */
2302        copy    %r10,%r1
2303        bv      %r0(%r25)    /* r11 */
2304        copy    %r11,%r1
2305        bv      %r0(%r25)    /* r12 */
2306        copy    %r12,%r1
2307        bv      %r0(%r25)    /* r13 */
2308        copy    %r13,%r1
2309        bv      %r0(%r25)    /* r14 */
2310        copy    %r14,%r1
2311        bv      %r0(%r25)    /* r15 */
2312        copy    %r15,%r1
2313        bv      %r0(%r25)    /* r16 - shadowed */
2314        ldi     -1,%r1
2315        bv      %r0(%r25)    /* r17 - shadowed */
2316        ldi     -1,%r1
2317        bv      %r0(%r25)    /* r18 */
2318        copy    %r18,%r1
2319        bv      %r0(%r25)    /* r19 */
2320        copy    %r19,%r1
2321        bv      %r0(%r25)    /* r20 */
2322        copy    %r20,%r1
2323        bv      %r0(%r25)    /* r21 */
2324        copy    %r21,%r1
2325        bv      %r0(%r25)    /* r22 */
2326        copy    %r22,%r1
2327        bv      %r0(%r25)    /* r23 */
2328        copy    %r23,%r1
2329        bv      %r0(%r25)    /* r24 - shadowed */
2330        ldi     -1,%r1
2331        bv      %r0(%r25)    /* r25 - shadowed */
2332        ldi     -1,%r1
2333        bv      %r0(%r25)    /* r26 */
2334        copy    %r26,%r1
2335        bv      %r0(%r25)    /* r27 */
2336        copy    %r27,%r1
2337        bv      %r0(%r25)    /* r28 */
2338        copy    %r28,%r1
2339        bv      %r0(%r25)    /* r29 */
2340        copy    %r29,%r1
2341        bv      %r0(%r25)    /* r30 */
2342        copy    %r30,%r1
2343        bv      %r0(%r25)    /* r31 */
2344        copy    %r31,%r1
2345ENDPROC_CFI(get_register)
2346
2347
2348ENTRY_CFI(set_register)
2349        /*
2350         * set_register is used by the non access tlb miss handlers to
2351         * copy the value of r1 into the general register specified in
2352         * r8.
2353         */
2354        blr     %r8,%r0
2355        nop
2356        bv      %r0(%r25)    /* r0 (silly, but it is a placeholder) */
2357        copy    %r1,%r0
2358        bv      %r0(%r25)    /* r1 */
2359        copy    %r1,%r1
2360        bv      %r0(%r25)    /* r2 */
2361        copy    %r1,%r2
2362        bv      %r0(%r25)    /* r3 */
2363        copy    %r1,%r3
2364        bv      %r0(%r25)    /* r4 */
2365        copy    %r1,%r4
2366        bv      %r0(%r25)    /* r5 */
2367        copy    %r1,%r5
2368        bv      %r0(%r25)    /* r6 */
2369        copy    %r1,%r6
2370        bv      %r0(%r25)    /* r7 */
2371        copy    %r1,%r7
2372        bv      %r0(%r25)    /* r8 */
2373        copy    %r1,%r8
2374        bv      %r0(%r25)    /* r9 */
2375        copy    %r1,%r9
2376        bv      %r0(%r25)    /* r10 */
2377        copy    %r1,%r10
2378        bv      %r0(%r25)    /* r11 */
2379        copy    %r1,%r11
2380        bv      %r0(%r25)    /* r12 */
2381        copy    %r1,%r12
2382        bv      %r0(%r25)    /* r13 */
2383        copy    %r1,%r13
2384        bv      %r0(%r25)    /* r14 */
2385        copy    %r1,%r14
2386        bv      %r0(%r25)    /* r15 */
2387        copy    %r1,%r15
2388        bv      %r0(%r25)    /* r16 */
2389        copy    %r1,%r16
2390        bv      %r0(%r25)    /* r17 */
2391        copy    %r1,%r17
2392        bv      %r0(%r25)    /* r18 */
2393        copy    %r1,%r18
2394        bv      %r0(%r25)    /* r19 */
2395        copy    %r1,%r19
2396        bv      %r0(%r25)    /* r20 */
2397        copy    %r1,%r20
2398        bv      %r0(%r25)    /* r21 */
2399        copy    %r1,%r21
2400        bv      %r0(%r25)    /* r22 */
2401        copy    %r1,%r22
2402        bv      %r0(%r25)    /* r23 */
2403        copy    %r1,%r23
2404        bv      %r0(%r25)    /* r24 */
2405        copy    %r1,%r24
2406        bv      %r0(%r25)    /* r25 */
2407        copy    %r1,%r25
2408        bv      %r0(%r25)    /* r26 */
2409        copy    %r1,%r26
2410        bv      %r0(%r25)    /* r27 */
2411        copy    %r1,%r27
2412        bv      %r0(%r25)    /* r28 */
2413        copy    %r1,%r28
2414        bv      %r0(%r25)    /* r29 */
2415        copy    %r1,%r29
2416        bv      %r0(%r25)    /* r30 */
2417        copy    %r1,%r30
2418        bv      %r0(%r25)    /* r31 */
2419        copy    %r1,%r31
2420ENDPROC_CFI(set_register)
2421
2422