linux/arch/parisc/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>          /* for L1_CACHE_SHIFT */
#include <asm/assembly.h>       /* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
        .level 2.0w
#else
        .level 2.0
#endif

        /* Get aligned page_table_lock address for this mm from cr28/tr4 */
        .macro  get_ptl reg
        mfctl   %cr28,\reg
        .endm

        /* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
        .macro  space_to_prot spc prot
        depd,z  \spc,62,31,\prot
        .endm
#else
        .macro  space_to_prot spc prot
        extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
        .endm
#endif
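
        /* Illustrative sketch (not part of the original source): a rough C
         * equivalent of space_to_prot, under PA-RISC bit numbering (bit 63
         * is least significant for the 64-bit deposit/extract forms):
         *
         *      if SPACEID_SHIFT == 0:  prot = spc << 1;   // depd,z ...,62,31
         *      else:                   prot = spc >> (SPACEID_SHIFT - 1);
         *
         * The exact field widths follow the depd,z/extrd,u operands above. */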
        /*
         * The "get_stack" macros are responsible for determining the
         * kernel stack value.
         *
         *      If sr7 == 0
         *          Already using a kernel stack, so call the
         *          get_stack_use_r30 macro to push a pt_regs structure
         *          on the stack, and store registers there.
         *      else
         *          Need to set up a kernel stack, so call the
         *          get_stack_use_cr30 macro to set up a pointer
         *          to the pt_regs structure contained within the
         *          task pointer pointed to by cr30. Load the stack
         *          pointer from the task structure.
         *
         * Note that we use shadowed registers for temps until
         * we can save %r26 and %r29. %r26 is used to preserve
         * %r8 (a shadowed register) which temporarily contained
         * either the fault type ("code") or the eirr. We need
         * to use a non-shadowed register to carry the value over
         * the rfir in virt_map. We use %r26 since this value winds
         * up being passed as the argument to either do_cpu_irq_mask
         * or handle_interruption. %r29 is used to hold a pointer
         * to the register save area, and once again, it needs to
         * be a non-shadowed register so that it survives the rfir.
         */

        .macro  get_stack_use_cr30

        /* we save the registers in the task struct */

        copy    %r30, %r17
        mfctl   %cr30, %r1
        tophys  %r1,%r9         /* task_struct */
        LDREG   TASK_STACK(%r9),%r30
        ldo     PT_SZ_ALGN(%r30),%r30
        mtsp    %r0,%sr7        /* clear sr7 after kernel stack was set! */
        mtsp    %r16,%sr3
        ldo     TASK_REGS(%r9),%r9
        STREG   %r17,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm

        .macro  get_stack_use_r30

        /* we put a struct pt_regs on the stack and save the registers there */

        tophys  %r30,%r9
        copy    %r30,%r1
        ldo     PT_SZ_ALGN(%r30),%r30
        STREG   %r1,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm
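
        /* Illustrative sketch (not part of the original source): the combined
         * effect of the two get_stack macros above, in C-like pseudocode.
         * sr7 == 0 means we are already running on a kernel stack:
         *
         *      if (sr7 == 0) {                         // get_stack_use_r30
         *              regs = (struct pt_regs *)r30;   // push onto current stack
         *              r30 += PT_SZ_ALGN;
         *      } else {                                // get_stack_use_cr30
         *              task = (struct task_struct *)mfctl(cr30);
         *              regs = task_regs(task);         // TASK_REGS save area
         *              r30 = task_stack(task) + PT_SZ_ALGN;
         *      }
         *      r29 = regs;     // pointer to the register save area
         *
         * task_regs()/task_stack() are stand-ins for the TASK_REGS/TASK_STACK
         * offsets used above. */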

        .macro  rest_stack
        LDREG   PT_GR1(%r29), %r1
        LDREG   PT_GR30(%r29),%r30
        LDREG   PT_GR29(%r29),%r29
        .endm

        /* default interruption handler
         * (calls traps.c:handle_interruption) */
        .macro  def code
        b       intr_save
        ldi     \code, %r8
        .align  32
        .endm

        /* Interrupt interruption handler
         * (calls irq.c:do_cpu_irq_mask) */
        .macro  extint code
        b       intr_extint
        mfsp    %sr7,%r16
        .align  32
        .endm

        .import os_hpmc, code

        /* HPMC handler */
        .macro  hpmc code
        nop                     /* must be a NOP, will be patched later */
        load32  PA(os_hpmc), %r3
        bv,n    0(%r3)
        nop
        .word   0               /* checksum (will be patched) */
        .word   0               /* address of handler */
        .word   0               /* length of handler */
        .endm

        /*
         * Performance Note: Instructions will be moved up into
         * this part of the code later on, once we are sure
         * that the tlb miss handlers are close to final form.
         */

        /* Register definitions for tlb miss handler macros */

        va  = r8        /* virtual address for which the trap occurred */
        spc = r24       /* space for which the trap occurred */

#ifndef CONFIG_64BIT

        /*
         * itlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  itlb_11 code

        mfctl   %pcsq, spc
        b       itlb_miss_11
        mfctl   %pcoq, va

        .align          32
        .endm
#endif

        /*
         * itlb miss interruption handler (parisc 2.0)
         */

        .macro  itlb_20 code
        mfctl   %pcsq, spc
#ifdef CONFIG_64BIT
        b       itlb_miss_20w
#else
        b       itlb_miss_20
#endif
        mfctl   %pcoq, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * naitlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  naitlb_11 code

        mfctl   %isr,spc
        b       naitlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * naitlb miss interruption handler (parisc 2.0)
         */

        .macro  naitlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       naitlb_miss_20w
#else
        b       naitlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dtlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dtlb_11 code

        mfctl   %isr, spc
        b       dtlb_miss_11
        mfctl   %ior, va

        .align          32
        .endm
#endif

        /*
         * dtlb miss interruption handler (parisc 2.0)
         */

        .macro  dtlb_20 code

        mfctl   %isr, spc
#ifdef CONFIG_64BIT
        b       dtlb_miss_20w
#else
        b       dtlb_miss_20
#endif
        mfctl   %ior, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

        .macro  nadtlb_11 code

        mfctl   %isr,spc
        b       nadtlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /* nadtlb miss interruption handler (parisc 2.0) */

        .macro  nadtlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       nadtlb_miss_20w
#else
        b       nadtlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dbit_11 code

        mfctl   %isr,spc
        b       dbit_trap_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * dirty bit trap interruption handler (parisc 2.0)
         */

        .macro  dbit_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       dbit_trap_20w
#else
        b       dbit_trap_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

        /* In LP64, the space contains part of the upper 32 bits of the
         * fault address.  We have to extract this and place it in the va,
         * zeroing the corresponding bits in the space register */
        .macro          space_adjust    spc,va,tmp
#ifdef CONFIG_64BIT
        extrd,u         \spc,63,SPACEID_SHIFT,\tmp
        depd            %r0,63,SPACEID_SHIFT,\spc
        depd            \tmp,31,SPACEID_SHIFT,\va
#endif
        .endm
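
        /* Illustrative sketch (not part of the original source): space_adjust
         * in C-like pseudocode, using the same SPACEID_SHIFT:
         *
         *      tmp = spc & ((1UL << SPACEID_SHIFT) - 1);   // low spc bits
         *      spc &= ~((1UL << SPACEID_SHIFT) - 1);       // clear them
         *      // deposit tmp into va just above bit 31:
         *      va = (va & ~(((1UL << SPACEID_SHIFT) - 1) << 32)) | (tmp << 32);
         */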

        .import         swapper_pg_dir,code

        /* Get the pgd.  For faults on space zero (kernel space), this
         * is simply swapper_pg_dir.  For user space faults, the
         * pgd is stored in %cr25 */
        .macro          get_pgd         spc,reg
        ldil            L%PA(swapper_pg_dir),\reg
        ldo             R%PA(swapper_pg_dir)(\reg),\reg
        or,COND(=)      %r0,\spc,%r0
        mfctl           %cr25,\reg
        .endm
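
        /* Illustrative sketch (not part of the original source): get_pgd in
         * C-like pseudocode; the or,COND(=) nullifies the mfctl for space 0:
         *
         *      reg = (spc == 0) ? __pa(swapper_pg_dir)  // kernel space
         *                       : mfctl(cr25);          // user pgd
         */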

        /*
                space_check(spc,tmp,fault)

                spc - The space we saw the fault with.
                tmp - The place to store the current space.
                fault - Function to call on failure.

                Only allow faults on different spaces from the
                currently active one if we're the kernel.

        */
        .macro          space_check     spc,tmp,fault
        mfsp            %sr7,\tmp
        /* check against %r0 which is the same value as LINUX_GATEWAY_SPACE */
        or,COND(<>)     %r0,\spc,%r0    /* user may execute gateway page
                                         * as kernel, so defeat the space
                                         * check if it is */
        copy            \spc,\tmp
        or,COND(=)      %r0,\tmp,%r0    /* nullify if executing as kernel */
        cmpb,COND(<>),n \tmp,\spc,\fault
        .endm
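
        /* Illustrative sketch (not part of the original source): space_check
         * in C-like pseudocode:
         *
         *      tmp = mfsp(sr7);        // current space
         *      if (spc == 0)           // fault in gateway/kernel space:
         *              tmp = 0;        //  treat as kernel, defeat the check
         *      if (tmp != 0 && tmp != spc)
         *              goto fault;     // user faulting on a foreign space
         */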

        /* Look up a PTE in a 2-Level scheme (faulting at each
         * level if the entry isn't present)
         *
         * NOTE: we use ldw even for LP64, since the short pointers
         * can address up to 1TB
         */
        .macro          L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
        extru_safe      \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
        extru_safe      \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
        copy            %r0,\pte
#endif
        ldw,s           \index(\pmd),\pmd
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
        dep             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        SHLREG          \pmd,PxD_VALUE_SHIFT,\pmd
        extru_safe      \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
        .endm

        /* Look up PTE in a 3-Level scheme. */
        .macro          L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
        copy            %r0,\pte
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
        ldw,s           \index(\pgd),\pgd
        bb,>=,n         \pgd,_PxD_PRESENT_BIT,\fault
        shld            \pgd,PxD_VALUE_SHIFT,\pgd
#endif
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
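
        /* Illustrative sketch (not part of the original source): the walk the
         * two macros above perform, in C-like pseudocode (3-level case):
         *
         *      pgd_e = ((u32 *)pgd)[pgd_index(va)];    // ldw,s
         *      if (!(pgd_e & _PxD_PRESENT))  goto fault;
         *      pmd_e = ((u32 *)pxd_page(pgd_e))[pmd_index(va)];
         *      if (!(pmd_e & _PxD_PRESENT))  goto fault;
         *      ptep  = (pte_t *)pxd_page(pmd_e) + pte_index(va);
         *
         * pgd_index()/pmd_index()/pte_index()/pxd_page() are stand-ins for
         * the shift-and-mask arithmetic done with extru_safe/dep/SHLREG. */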

        /* Acquire page_table_lock and check page is present. */
        .macro          ptl_lock        spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:     cmpib,COND(=),n 0,\spc,2f
        get_ptl         \tmp
1:      LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
        LDREG           0(\ptp),\pte
        bb,<,n          \pte,_PAGE_PRESENT_BIT,3f
        b               \fault
        stw             \spc,0(\tmp)
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:      LDREG           0(\ptp),\pte
        bb,>=,n         \pte,_PAGE_PRESENT_BIT,\fault
3:
        .endm
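
        /* Illustrative sketch (not part of the original source): ptl_lock in
         * C-like pseudocode.  User faults (spc != 0) take the lock with ldcw
         * before rechecking the pte; kernel faults just check the pte:
         *
         *      if (spc != 0) {
         *              while (__ldcw(ptl) == 0)        // spin until nonzero
         *                      ;
         *              pte = *ptp;
         *              if (!(pte & _PAGE_PRESENT)) {
         *                      *ptl = spc;             // unlock (nonzero)
         *                      goto fault;
         *              }
         *      } else if (!((pte = *ptp) & _PAGE_PRESENT))
         *              goto fault;
         */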

        /* Release page_table_lock without reloading lock address.
           Note that the values in the register spc are limited to
           NR_SPACE_IDS (262144). Thus, the stw instruction always
           stores a nonzero value even when register spc is 64 bits.
           We use an ordered store to ensure all prior accesses are
           performed prior to releasing the lock. */
        .macro          ptl_unlock0     spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:     or,COND(=)      %r0,\spc,%r0
        stw,ma          \spc,0(\tmp)
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
        .endm
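
        /* Illustrative note (not part of the original source): ptl_unlock0 is
         * roughly a conditional release store,
         *
         *      if (spc != 0)
         *              smp_store_release(ptl, spc);    // stw,ma as ordered store
         *
         * where \spc is nonzero for user faults, so the lock word always ends
         * up nonzero (unlocked). */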

        /* Release page_table_lock. */
        .macro          ptl_unlock1     spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:     get_ptl         \tmp
        ptl_unlock0     \spc,\tmp
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
        .endm

        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
        .macro          update_accessed ptp,pte,tmp,tmp1
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
        STREG           \tmp,0(\ptp)
        .endm
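
        /* Illustrative sketch (not part of the original source):
         * update_accessed in C, showing the "don't dirty the line
         * needlessly" trick:
         *
         *      tmp = pte | _PAGE_ACCESSED;
         *      if (!(pte & _PAGE_ACCESSED))    // and,COND(<>) nullifies the
         *              *ptp = tmp;             //  store when already set
         */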

        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
        .macro          update_dirty    ptp,pte,tmp
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
        STREG           \pte,0(\ptp)
        .endm

        /* We have (depending on the page size):
         * - 38 to 52-bit Physical Page Number
         * - 12 to 26-bit page offset
         */
        /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
         * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
        #define PAGE_ADD_SHIFT          (PAGE_SHIFT-12)
        #define PAGE_ADD_HUGE_SHIFT     (REAL_HPAGE_SHIFT-12)
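
        /* Worked example (not part of the original source): with 16 KB kernel
         * pages (PAGE_SHIFT = 14), PAGE_ADD_SHIFT is 2, i.e. a kernel PFN is
         * shifted left by 2 to become a 4k-granular PFN for the CPU TLB. */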

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        .macro          convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
        copy            \pte,\tmp
        extrd,u         \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
                                64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
        extrd,u,*=      \tmp,_PAGE_HPAGE_BIT+32,1,%r0
        depdi           _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
        extrd,u         \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
                                64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
#endif
        .endm

        /* Convert the pte and prot to tlb insertion values.  How
         * this happens is quite subtle, read below */
        .macro          make_insert_tlb spc,pte,prot,tmp
        space_to_prot   \spc \prot        /* create prot id from space */
        /* The following is the real subtlety.  This is depositing
         * T <-> _PAGE_REFTRAP
         * D <-> _PAGE_DIRTY
         * B <-> _PAGE_DMB (memory break)
         *
         * Then incredible subtlety: The access rights are
         * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
         * See 3-14 of the parisc 2.0 manual
         *
         * Finally, _PAGE_READ goes in the top bit of PL1 (so we
         * trigger an access rights trap in user space if the user
         * tries to read an unreadable page) */
        depd            \pte,8,7,\prot

        /* PAGE_USER indicates the page can be read with user privileges,
         * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
         * contains _PAGE_READ) */
        extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
        depdi           7,11,3,\prot
        /* If we're a gateway page, drop PL2 back to zero for promotion
         * to kernel privilege (so we can execute the page as kernel).
         * Any privilege promotion page always denies read and write */
        extrd,u,*=      \pte,_PAGE_GATEWAY_BIT+32,1,%r0
        depd            %r0,11,2,\prot  /* If Gateway, Set PL2 to 0 */

        /* Enforce uncacheable pages.
         * This should ONLY be used for MMIO on PA 2.0 machines.
         * Memory/DMA is cache coherent on all PA2.0 machines we support
         * (that means T-class is NOT supported) and the memory controllers
         * on most of those machines only handle cache transactions.
         */
        extrd,u,*=      \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
        depdi           1,12,1,\prot

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        convert_for_tlb_insert20 \pte \tmp
        .endm

        /* Identical macro to make_insert_tlb above, except it
         * makes the tlb entry for the differently formatted pa11
         * insertion instructions */
        .macro          make_insert_tlb_11      spc,pte,prot
        zdep            \spc,30,15,\prot
        dep             \pte,8,7,\prot
        extru,=         \pte,_PAGE_NO_CACHE_BIT,1,%r0
        depi            1,12,1,\prot
        extru,=         \pte,_PAGE_USER_BIT,1,%r0
        depi            7,11,3,\prot   /* Set for user space (1 rsvd for read) */
        extru,=         \pte,_PAGE_GATEWAY_BIT,1,%r0
        depi            0,11,2,\prot    /* If Gateway, Set PL2 to 0 */

        /* Get rid of prot bits and convert to page addr for iitlba */

        depi            0,31,ASM_PFN_PTE_SHIFT,\pte
        SHRREG          \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
        .endm

        /* This is for ILP32 PA2.0 only.  The TLB insertion needs
         * to extend into I/O space if the address is 0xfXXXXXXX
         * so we extend the f's into the top word of the pte in
         * this case */
        .macro          f_extend        pte,tmp
        extrd,s         \pte,42,4,\tmp
        addi,<>         1,\tmp,%r0
        extrd,s         \pte,63,25,\pte
        .endm
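
        /* Illustrative note (not part of the original source), in C-like
         * pseudocode:
         *
         *      if (io_nibble_is_all_ones(pte)) // extrd,s + addi,<> test
         *              pte = sign_extend(pte); // spread the f's upward
         *
         * where both helpers are stand-ins for the exact bit positions used
         * by the extrd,s operands above. */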

        /* The alias region consists of a pair of 4 MB regions
         * aligned to 8 MB. It is used to clear/copy/flush user pages
         * using kernel virtual addresses congruent with the user
         * virtual address.
         *
         * To use the alias page, you set %r26 up with the "to" TLB
         * entry (identifying the physical page) and %r23 up with
         * the "from" TLB entry (or nothing if only a "to" entry---for
         * clear_user_page_asm) */
        .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
        copy            \va,\tmp1
        depi_safe       0,31,TMPALIAS_SIZE_BITS+1,\tmp1
        cmpb,COND(<>),n \tmp,\tmp1,\fault
        mfctl           %cr19,\tmp      /* iir */
        /* get the opcode (first six bits) into \tmp */
        extrw,u         \tmp,5,6,\tmp
        /*
         * Only setting the T bit prevents data cache movein
         * Setting access rights to zero prevents instruction cache movein
         *
         * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
         * to type field and _PAGE_READ goes to top bit of PL1
         */
        ldi             (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
        /*
         * so if the opcode is one (i.e. this is a memory management
         * instruction) nullify the next load so \prot is only T.
         * Otherwise this is a normal data operation
         */
        cmpiclr,=       0x01,\tmp,%r0
        ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
        depd,z          \prot,8,7,\prot
.else
.ifc \patype,11
        depw,z          \prot,8,7,\prot
.else
        .error "undefined PA type to do_alias"
.endif
.endif
        /*
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
         */
        extrw,u,=       \va,31-TMPALIAS_SIZE_BITS,1,%r0
        or,COND(tr)     %r23,%r0,\pte
        or              %r26,%r0,\pte

        /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
        SHRREG          \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
        depi_safe       _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
        .endm
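
        /* Illustrative note (not part of the original source): the "from"/"to"
         * selection above, in C-like pseudocode:
         *
         *      pte = (va & half_bit) ? r23    // "from" entry
         *                            : r26;   // "to" entry
         *
         * where half_bit stands for the address bit (31-TMPALIAS_SIZE_BITS in
         * PA numbering) that separates the two 4 MB halves of the region. */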


        /*
         * Fault_vectors are architecturally required to be aligned on a 2K
         * boundary
         */

        .section .text.hot
        .align 2048

ENTRY(fault_vector_20)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_20          PARISC_ITLB_TRAP
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_20         15
        naitlb_20       16
        nadtlb_20       17
        def             18
        def             19
        dbit_20         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_20)

#ifndef CONFIG_64BIT

        .align 2048

ENTRY(fault_vector_11)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_11          PARISC_ITLB_TRAP
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_11         15
        naitlb_11       16
        nadtlb_11       17
        def             18
        def             19
        dbit_11         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_11)

#endif
        /* Fault vector is separately protected and *must* be on its own page */
        .align          PAGE_SIZE

        .import         handle_interruption,code
        .import         do_cpu_irq_mask,code

        /*
         * Child Returns here
         *
         * copy_thread moved args into task save area.
         */

ENTRY(ret_from_kernel_thread)
        /* Call schedule_tail first though */
        BL      schedule_tail, %r2
        nop

        mfctl   %cr30,%r1       /* task_struct */
        LDREG   TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
        LDREG   TASK_PT_GR27(%r1), %r27
#endif
        LDREG   TASK_PT_GR26(%r1), %r1
        ble     0(%sr7, %r1)
        copy    %r31, %r2
        b       finish_child_return
        nop
END(ret_from_kernel_thread)


        /*
         * struct task_struct *_switch_to(struct task_struct *prev,
         *      struct task_struct *next)
         *
         * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
        STREG    %r2, -RP_OFFSET(%r30)

        callee_save_float
        callee_save

        load32  _switch_to_ret, %r2

        STREG   %r2, TASK_PT_KPC(%r26)
        LDREG   TASK_PT_KPC(%r25), %r2

        STREG   %r30, TASK_PT_KSP(%r26)
        LDREG   TASK_PT_KSP(%r25), %r30
        bv      %r0(%r2)
        mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
        mtctl   %r0, %cr0               /* Needed for single stepping */
        callee_rest
        callee_rest_float

        LDREG   -RP_OFFSET(%r30), %r2
        bv      %r0(%r2)
        copy    %r26, %r28
ENDPROC_CFI(_switch_to)

        /*
         * Common rfi return path for interruptions, kernel execve, and
         * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
         * return via this path if the signal was received when the process
         * was running; if the process was blocked on a syscall then the
         * normal syscall_exit path is used.  All syscalls for traced
         * processes exit via intr_restore.
         *
         * XXX If any syscalls that change a process's space id ever exit
         * this way, then we will need to copy %sr3 into PT_SR[3..7], and
         * adjust IASQ[0..1].
         *
         */

        .align  PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
        mfctl   %cr30,%r16              /* task_struct */
        ldo     TASK_REGS(%r16),%r16
        /* Force iaoq to userspace, as the user has had access to our current
         * context via sigcontext. Also filter the PSW for the same reason.
         */
        LDREG   PT_IAOQ0(%r16),%r19
        depi    PRIV_USER,31,2,%r19
        STREG   %r19,PT_IAOQ0(%r16)
        LDREG   PT_IAOQ1(%r16),%r19
        depi    PRIV_USER,31,2,%r19
        STREG   %r19,PT_IAOQ1(%r16)
        LDREG   PT_PSW(%r16),%r19
        load32  USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
        load32  USER_PSW_HI_MASK,%r20
        depd    %r20,31,32,%r1
#endif
        and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
        load32  USER_PSW,%r1
        or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
        STREG   %r19,PT_PSW(%r16)

        /*
         * If we aren't being traced, we never saved space registers
         * (we don't store them in the sigcontext), so set them
         * to "proper" values now (otherwise we'll wind up restoring
         * whatever was last stored in the task structure, which might
         * be inconsistent if an interrupt occurred while on the gateway
         * page). Note that we may be "trashing" values the user put in
         * them, but we don't support the user changing them.
         */

        STREG   %r0,PT_SR2(%r16)
        mfsp    %sr3,%r19
        STREG   %r19,PT_SR0(%r16)
        STREG   %r19,PT_SR1(%r16)
        STREG   %r19,PT_SR3(%r16)
        STREG   %r19,PT_SR4(%r16)
        STREG   %r19,PT_SR5(%r16)
        STREG   %r19,PT_SR6(%r16)
        STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
        bb,<,n  %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

        .import do_notify_resume,code
intr_check_sig:
        /* As above */
        mfctl   %cr30,%r1
        LDREG   TASK_TI_FLAGS(%r1),%r19
        ldi     (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
        and,COND(<>)    %r19, %r20, %r0
        b,n     intr_restore    /* skip past if we've nothing to do */

        /* This check is critical to having LWS
         * working. The IASQ is zero on the gateway
         * page and we cannot deliver any signals until
         * we get off the gateway page.
         *
         * Only do signals if we are returning to user space
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

        copy    %r0, %r25                       /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
#endif

        /* NOTE: We need to enable interrupts if we have to deliver
         * signals. We used to do this earlier but it caused kernel
         * stack overflows. */
        ssm     PSW_SM_I, %r0

        BL      do_notify_resume,%r2
        copy    %r16, %r26                      /* struct pt_regs *regs */

        b,n     intr_check_sig

intr_restore:
        copy            %r16,%r29
        ldo             PT_FR31(%r29),%r1
        rest_fp         %r1
        rest_general    %r29

        /* inverse of virt_map */
        pcxt_ssm_bug
        rsm             PSW_SM_QUIET,%r0        /* prepare for rfi */
        tophys_r1       %r29

        /* Restore space id's and special cr's from PT_REGS
         * structure pointed to by r29
         */
        rest_specials   %r29

        /* IMPORTANT: rest_stack restores r29 last (we are using it)!
         * It also restores r1 and r30.
         */
        rest_stack

        rfi
        nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt        intr_restore
#endif /* !CONFIG_PREEMPTION */

        .import schedule,code
intr_do_resched:
        /* Only call schedule on return to userspace. If we're returning
         * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
         * we jump back to intr_restore.
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop

        /* NOTE: We need to enable interrupts if we schedule.  We used
         * to do this earlier but it caused kernel stack overflows. */
        ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
#endif

        ldil    L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
        b       schedule
#else
        load32  schedule, %r20
        bv      %r0(%r20)
#endif
        ldo     R%intr_check_sig(%r2), %r2

        /* Preempt the current task on returning to kernel
         * mode from an interrupt, iff need_resched is set,
         * and preempt_count is 0. Otherwise, we continue on
         * our merry way back to the current running task.
         */
#ifdef CONFIG_PREEMPTION
        .import preempt_schedule_irq,code
intr_do_preempt:
        rsm     PSW_SM_I, %r0           /* disable interrupts */

        /* current_thread_info()->preempt_count */
        mfctl   %cr30, %r1
        ldw     TI_PRE_COUNT(%r1), %r19
        cmpib,<>        0, %r19, intr_restore   /* if preempt_count > 0 */
        nop                             /* prev insn branched backwards */

        /* check if we interrupted a critical path */
        LDREG   PT_PSW(%r16), %r20
        bb,<,n  %r20, 31 - PSW_SM_I, intr_restore
        nop

        /* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
        ldil    L%intr_restore, %r2
        load32  preempt_schedule_irq, %r1
        bv      %r0(%r1)
        ldo     R%intr_restore(%r2), %r2
#else
        ldil    L%intr_restore, %r1
        BL      preempt_schedule_irq, %r2
        ldo     R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

        /*
         * External interrupts.
         */

intr_extint:
        cmpib,COND(=),n 0,%r16,1f

        get_stack_use_cr30
        b,n 2f

1:
        get_stack_use_r30
2:
        save_specials   %r29
        virt_map
        save_general    %r29

        ldo     PT_FR0(%r29), %r24
        save_fp %r24

        loadgp

        copy    %r29, %r26      /* arg0 is pt_regs */
        copy    %r29, %r16      /* save pt_regs */

        ldil    L%intr_return, %r2

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29  /* Reference param save area */
#endif

        b       do_cpu_irq_mask
        ldo     R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


        /* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)            /* for os_hpmc */
        mfsp    %sr7,%r16
        cmpib,COND(=),n 0,%r16,1f
        get_stack_use_cr30
        b       2f
        copy    %r8,%r26

1:
        get_stack_use_r30
        copy    %r8,%r26

2:
        save_specials   %r29

        /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
        cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


        mfctl           %isr, %r16
        nop             /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
        mfctl           %ior, %r17


#ifdef CONFIG_64BIT
        /*
         * If the interrupted code was running with W bit off (32 bit),
         * clear the b bits (bits 0 & 1) in the ior.
         * save_specials left ipsw value in r8 for us to test.
         */
        extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
        depdi           0,1,2,%r17

        /* adjust isr/ior: get high bits from isr and deposit in ior */
        space_adjust    %r16,%r17,%r1
#endif
        STREG           %r16, PT_ISR(%r29)
        STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
        /* Revisit when we have 64-bit code above 4Gb */
        b,n             intr_save2

skip_save_ior:
        /* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
         * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
         * above.
         */
        extrd,u,*       %r8,PSW_W_BIT,1,%r1
        cmpib,COND(=),n 1,%r1,intr_save2
        LDREG           PT_IASQ0(%r29), %r16
        LDREG           PT_IAOQ0(%r29), %r17
        /* adjust iasq/iaoq */
        space_adjust    %r16,%r17,%r1
        STREG           %r16, PT_IASQ0(%r29)
        STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
        virt_map
        save_general    %r29

        ldo             PT_FR0(%r29), %r25
        save_fp         %r25

        loadgp

        copy            %r29, %r25      /* arg1 is pt_regs */
#ifdef CONFIG_64BIT
        ldo             -16(%r30),%r29  /* Reference param save area */
#endif

        ldil            L%intr_check_sig, %r2
        copy            %r25, %r16      /* save pt_regs */

        b               handle_interruption
        ldo             R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


        /*
         * Note for all tlb miss handlers:
         *
         * cr24 contains a pointer to the kernel address space
         * page directory.
         *
         * cr25 contains a pointer to the current user address
         * space page directory.
         *
         * sr3 will contain the space id of the user address space
         * of the current running thread while that thread is
         * running in the kernel.
         */

        /*
         * register number allocations.  Note that these are all
         * in the shadowed registers
         */

        t0 = r1         /* temporary register 0 */
        va = r8         /* virtual address for which the trap occurred */
        t1 = r9         /* temporary register 1 */
        pte  = r16      /* pte/phys page # */
        prot = r17      /* prot bits */
        spc  = r24      /* space for which the trap occurred */
        ptp = r25       /* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w

        ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

dtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        rfir
        nop

nadtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,nadtlb_fault

        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w

        ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

nadtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        rfir
        nop

#else

dtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11

        ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock1     spc,t0
        rfir
        nop

dtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        rfir
        nop

nadtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11

        ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock1     spc,t0
        rfir
        nop

nadtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        rfir
        nop

dtlb_miss_20:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20

        ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

dtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        rfir
        nop

nadtlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20

        ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

nadtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        rfir
        nop

#endif

nadtlb_emulate:

        /*
         * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
         * probei instructions. The kernel no longer faults doing flushes.
         * Use of lpa and probe instructions is rare. Given the issue
         * with shadow registers, we defer everything to the "slow" path.
         */
        b,n             nadtlb_fault

#ifdef CONFIG_64BIT
itlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,itlb_fault

        L3_ptep         ptp,pte,t0,va,itlb_fault

        ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        iitlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

naitlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,naitlb_fault

        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w

        ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        iitlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

naitlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        rfir
        nop

#else

itlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock1     spc,t0
        rfir
        nop

naitlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11

        ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock1     spc,t0
        rfir
        nop

naitlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,itlb_fault,11

        iitlba          pte,(%sr0, va)
        iitlbp          prot,(%sr0, va)

        rfir
        nop


itlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        iitlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

naitlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20

        ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        iitlbt          pte,prot

        ptl_unlock1     spc,t0
        rfir
        nop

naitlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        rfir
        nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dbit_fault

        L3_ptep         ptp,pte,t0,va,dbit_fault

        ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        ptl_unlock0     spc,t0
        rfir
        nop
#else

dbit_trap_11:

        get_pgd         spc,ptp

        space_check     spc,t0,dbit_fault

        L2_ptep         ptp,pte,t0,va,dbit_fault

        ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1     /* Restore sr1 */

        ptl_unlock0     spc,t0
        rfir
        nop

dbit_trap_20:
        get_pgd         spc,ptp

        space_check     spc,t0,dbit_fault

        L2_ptep         ptp,pte,t0,va,dbit_fault

        ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        ptl_unlock0     spc,t0
        rfir
        nop
#endif

        .import handle_interruption,code

kernel_bad_space:
        b               intr_save
        ldi             31,%r8  /* Use an unused code */

dbit_fault:
        b               intr_save
        ldi             20,%r8

itlb_fault:
        b               intr_save
        ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
        b               intr_save
        ldi             17,%r8

naitlb_fault:
        b               intr_save
        ldi             16,%r8

dtlb_fault:
        b               intr_save
        ldi             15,%r8
1554        /* Register saving semantics for system calls:
1555
1556           %r1             clobbered by system call macro in userspace
1557           %r2             saved in PT_REGS by gateway page
1558           %r3  - %r18     preserved by C code (saved by signal code)
1559           %r19 - %r20     saved in PT_REGS by gateway page
1560           %r21 - %r22     non-standard syscall args
1561                           stored in kernel stack by gateway page
1562           %r23 - %r26     arg3-arg0, saved in PT_REGS by gateway page
1563           %r27 - %r30     saved in PT_REGS by gateway page
1564           %r31            syscall return pointer
1565         */
1566
1567        /* Floating point registers (FIXME: what do we do with these?)
1568
1569           %fr0  - %fr3    status/exception, not preserved
1570           %fr4  - %fr7    arguments
1571           %fr8  - %fr11   not preserved by C code
1572           %fr12 - %fr21   preserved by C code
1573           %fr22 - %fr31   not preserved by C code
1574         */
1575
1576        .macro  reg_save regs
1577        STREG   %r3, PT_GR3(\regs)
1578        STREG   %r4, PT_GR4(\regs)
1579        STREG   %r5, PT_GR5(\regs)
1580        STREG   %r6, PT_GR6(\regs)
1581        STREG   %r7, PT_GR7(\regs)
1582        STREG   %r8, PT_GR8(\regs)
1583        STREG   %r9, PT_GR9(\regs)
1584        STREG   %r10,PT_GR10(\regs)
1585        STREG   %r11,PT_GR11(\regs)
1586        STREG   %r12,PT_GR12(\regs)
1587        STREG   %r13,PT_GR13(\regs)
1588        STREG   %r14,PT_GR14(\regs)
1589        STREG   %r15,PT_GR15(\regs)
1590        STREG   %r16,PT_GR16(\regs)
1591        STREG   %r17,PT_GR17(\regs)
1592        STREG   %r18,PT_GR18(\regs)
1593        .endm
1594
1595        .macro  reg_restore regs
1596        LDREG   PT_GR3(\regs), %r3
1597        LDREG   PT_GR4(\regs), %r4
1598        LDREG   PT_GR5(\regs), %r5
1599        LDREG   PT_GR6(\regs), %r6
1600        LDREG   PT_GR7(\regs), %r7
1601        LDREG   PT_GR8(\regs), %r8
1602        LDREG   PT_GR9(\regs), %r9
1603        LDREG   PT_GR10(\regs),%r10
1604        LDREG   PT_GR11(\regs),%r11
1605        LDREG   PT_GR12(\regs),%r12
1606        LDREG   PT_GR13(\regs),%r13
1607        LDREG   PT_GR14(\regs),%r14
1608        LDREG   PT_GR15(\regs),%r15
1609        LDREG   PT_GR16(\regs),%r16
1610        LDREG   PT_GR17(\regs),%r17
1611        LDREG   PT_GR18(\regs),%r18
1612        .endm
1613
1614        .macro  fork_like name
1615ENTRY_CFI(sys_\name\()_wrapper)
1616        mfctl   %cr30,%r1
1617        ldo     TASK_REGS(%r1),%r1
1618        reg_save %r1
1619        mfctl   %cr27, %r28
1620        ldil    L%sys_\name, %r31
1621        be      R%sys_\name(%sr4,%r31)
1622        STREG   %r28, PT_CR27(%r1)
1623ENDPROC_CFI(sys_\name\()_wrapper)
1624        .endm
1625
1626fork_like clone
1627fork_like clone3
1628fork_like fork
1629fork_like vfork
1630
1631        /* Set the return value for the child */
1632ENTRY(child_return)
1633        BL      schedule_tail, %r2
1634        nop
1635finish_child_return:
1636        mfctl   %cr30,%r1
1637        ldo     TASK_REGS(%r1),%r1       /* get pt regs */
1638
1639        LDREG   PT_CR27(%r1), %r3
1640        mtctl   %r3, %cr27
1641        reg_restore %r1
1642        b       syscall_exit
1643        copy    %r0,%r28
1644END(child_return)
1645
1646ENTRY_CFI(sys_rt_sigreturn_wrapper)
1647        mfctl   %cr30,%r26
1648        ldo     TASK_REGS(%r26),%r26    /* get pt regs */
1649        /* Don't save regs, we are going to restore them from sigcontext. */
1650        STREG   %r2, -RP_OFFSET(%r30)
1651#ifdef CONFIG_64BIT
1652        ldo     FRAME_SIZE(%r30), %r30
1653        BL      sys_rt_sigreturn,%r2
1654        ldo     -16(%r30),%r29          /* Reference param save area */
1655#else
1656        BL      sys_rt_sigreturn,%r2
1657        ldo     FRAME_SIZE(%r30), %r30
1658#endif
1659
1660        ldo     -FRAME_SIZE(%r30), %r30
1661        LDREG   -RP_OFFSET(%r30), %r2
1662
1663        /* FIXME: I think we need to restore a few more things here. */
1664        mfctl   %cr30,%r1
1665        ldo     TASK_REGS(%r1),%r1      /* get pt regs */
1666        reg_restore %r1
1667
1668        /* If the signal was received while the process was blocked on a
1669         * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1670         * take us to syscall_exit_rfi and on to intr_return.
1671         */
1672        bv      %r0(%r2)
1673        LDREG   PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
1674ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1675
1676ENTRY(syscall_exit)
1677        /* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
1678         * via syscall_exit_rfi if the signal was received while the process
1679         * was running.
1680         */
1681
1682        /* save return value now */
1683        mfctl     %cr30, %r1
1684        STREG     %r28,TASK_PT_GR28(%r1)
1685
1686        /* Seems to me that dp could be wrong here, if the syscall involved
1687         * calling a module, and nothing got round to restoring dp on return.
1688         */
1689        loadgp
1690
1691syscall_check_resched:
1692
1693        /* check for reschedule */
1694        mfctl   %cr30,%r19
1695        LDREG   TASK_TI_FLAGS(%r19),%r19        /* long */
1696        bb,<,n  %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1697
1698        .import do_signal,code
1699syscall_check_sig:
1700        mfctl   %cr30,%r19
1701        LDREG   TASK_TI_FLAGS(%r19),%r19
1702        ldi     (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1703        and,COND(<>)    %r19, %r26, %r0
1704        b,n     syscall_restore /* skip past if we've nothing to do */
1705
1706syscall_do_signal:
1707        /* Save callee-save registers (for sigcontext).
1708         * FIXME: After this point the process structure should be
1709         * consistent with all the relevant state of the process
1710         * before the syscall.  We need to verify this.
1711         */
1712        mfctl   %cr30,%r1
1713        ldo     TASK_REGS(%r1), %r26            /* struct pt_regs *regs */
1714        reg_save %r26
1715
1716#ifdef CONFIG_64BIT
1717        ldo     -16(%r30),%r29                  /* Reference param save area */
1718#endif
1719
1720        BL      do_notify_resume,%r2
1721        ldi     1, %r25                         /* long in_syscall = 1 */
1722
1723        mfctl   %cr30,%r1
1724        ldo     TASK_REGS(%r1), %r20            /* reload pt_regs */
1725        reg_restore %r20
1726
1727        b,n     syscall_check_sig
1728
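        /*
         * syscall_check_resched/syscall_check_sig above, together with
         * syscall_do_signal and syscall_do_resched (further down), form a
         * loop that in C would read roughly like this (sketch):
         *
         *      for (;;) {
         *              flags = current_thread_info()->flags;
         *              if (flags & _TIF_NEED_RESCHED) {
         *                      schedule();                 // syscall_do_resched
         *                      continue;                   // syscall_check_resched
         *              }
         *              if (flags & (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED)) {
         *                      do_notify_resume(regs, 1);  // syscall_do_signal
         *                      continue;                   // syscall_check_sig
         *              }
         *              break;              // fall through to syscall_restore
         *      }
         */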
1729syscall_restore:
1730        mfctl   %cr30,%r1
1731
1732        /* Are we being ptraced? */
1733        LDREG   TASK_TI_FLAGS(%r1),%r19
1734        ldi     _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
1735        and,COND(=)     %r19,%r2,%r0
1736        b,n     syscall_restore_rfi
1737
1738        ldo     TASK_PT_FR31(%r1),%r19             /* reload fpregs */
1739        rest_fp %r19
1740
1741        LDREG   TASK_PT_SAR(%r1),%r19              /* restore SAR */
1742        mtsar   %r19
1743
1744        LDREG   TASK_PT_GR2(%r1),%r2               /* restore user rp */
1745        LDREG   TASK_PT_GR19(%r1),%r19
1746        LDREG   TASK_PT_GR20(%r1),%r20
1747        LDREG   TASK_PT_GR21(%r1),%r21
1748        LDREG   TASK_PT_GR22(%r1),%r22
1749        LDREG   TASK_PT_GR23(%r1),%r23
1750        LDREG   TASK_PT_GR24(%r1),%r24
1751        LDREG   TASK_PT_GR25(%r1),%r25
1752        LDREG   TASK_PT_GR26(%r1),%r26
1753        LDREG   TASK_PT_GR27(%r1),%r27     /* restore user dp */
1754        LDREG   TASK_PT_GR28(%r1),%r28     /* syscall return value */
1755        LDREG   TASK_PT_GR29(%r1),%r29
1756        LDREG   TASK_PT_GR31(%r1),%r31     /* restore syscall rp */
1757
1758        /* NOTE: We use rsm/ssm pair to make this operation atomic */
1759        LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
1760        rsm     PSW_SM_I, %r0
1761        copy    %r1,%r30                           /* Restore user sp */
1762        mfsp    %sr3,%r1                           /* Get user space id */
1763        mtsp    %r1,%sr7                           /* Restore sr7 */
1764        ssm     PSW_SM_I, %r0
1765
1766        /* Set sr2 to zero for userspace syscalls to work. */
1767        mtsp    %r0,%sr2 
1768        mtsp    %r1,%sr4                           /* Restore sr4 */
1769        mtsp    %r1,%sr5                           /* Restore sr5 */
1770        mtsp    %r1,%sr6                           /* Restore sr6 */
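        /*
         * C-level sketch of the section above (illustrative): sr7 == 0 means
         * "kernel" to the interruption handlers, so sp and sr7 must change
         * together with interrupts masked; the user space id then also goes
         * into the data access space registers:
         *
         *      local_irq_disable();    // rsm PSW_SM_I
         *      sp  = regs->gr[30];     // user stack pointer
         *      sr7 = sr3;              // user space id
         *      local_irq_enable();     // ssm PSW_SM_I
         *      sr2 = 0;                // gateway space, for syscalls
         *      sr4 = sr5 = sr6 = sr3;
         */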
1771
1772        depi    PRIV_USER,31,2,%r31     /* ensure return to user mode. */
1773
1774#ifdef CONFIG_64BIT
1775        /* decide whether to reset the wide mode bit
1776         *
1777         * For a syscall, the W bit is stored in the lowest bit
1778         * of sp.  Extract it and reset W if it is zero */
1779        extrd,u,*<>     %r30,63,1,%r1
1780        rsm     PSW_SM_W, %r0
1781        /* now reset the lowest bit of sp if it was set */
1782        xor     %r30,%r1,%r30
1783#endif
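        /*
         * C equivalent of the W bit fixup above (sketch):
         *
         *      if (!(sp & 1))          // W bit was stashed in sp's low bit
         *              psw &= ~PSW_W;  // rsm PSW_SM_W: return in narrow mode
         *      sp &= ~1UL;             // the xor clears the stashed bit
         */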
1784        be,n    0(%sr3,%r31)                       /* return to user space */
1785
1786        /* We have to return via an RFI, so that PSW T and R bits can be set
1787         * appropriately.
1788         * This sets up pt_regs so we can return via intr_restore, which is not
1789         * the most efficient way of doing things, but it works.
1790         */
1791syscall_restore_rfi:
1792        ldo     -1(%r0),%r2                        /* Set recovery cntr to -1 */
1793        mtctl   %r2,%cr0                           /*   for immediate trap */
1794        LDREG   TASK_PT_PSW(%r1),%r2               /* Get old PSW */
1795        ldi     0x0b,%r20                          /* Create new PSW */
1796        depi    -1,13,1,%r20                       /* C, Q, D, and I bits */
1797
1798        /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1799         * set in thread_info.h and converted to PA bitmap
1800         * numbers in asm-offsets.c */
1801
1802        /* if (%r19.SINGLESTEP_BIT) { %r20.27 = 1 } */
1803        extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1804        depi    -1,27,1,%r20                       /* R bit */
1805
1806        /* if (%r19.BLOCKSTEP_BIT) { %r20.7 = 1 } */
1807        extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1808        depi    -1,7,1,%r20                        /* T bit */
1809
1810        STREG   %r20,TASK_PT_PSW(%r1)
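        /*
         * Roughly, in C (a sketch; PSW_* bit names as in asm/psw.h):
         *
         *      psw  = PSW_I | PSW_D | PSW_Q;   // ldi 0x0b
         *      psw |= PSW_C;                   // depi -1,13,1
         *      if (flags & _TIF_SINGLESTEP)
         *              psw |= PSW_R;   // recovery counter (set to -1 above)
         *                              // traps after a single instruction
         *      if (flags & _TIF_BLOCKSTEP)
         *              psw |= PSW_T;   // trap on taken branches
         *      regs->gr[0] = psw;      // the PT_PSW slot
         */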
1811
1812        /* Always store space registers, since sr3 can be changed (e.g. fork) */
1813
1814        mfsp    %sr3,%r25
1815        STREG   %r25,TASK_PT_SR3(%r1)
1816        STREG   %r25,TASK_PT_SR4(%r1)
1817        STREG   %r25,TASK_PT_SR5(%r1)
1818        STREG   %r25,TASK_PT_SR6(%r1)
1819        STREG   %r25,TASK_PT_SR7(%r1)
1820        STREG   %r25,TASK_PT_IASQ0(%r1)
1821        STREG   %r25,TASK_PT_IASQ1(%r1)
1822
1823        /* XXX W bit??? */
1824        /* Now if old D bit is clear, it means we didn't save all registers
1825         * on syscall entry, so do that now.  This only happens on TRACEME
1826         * calls, or if someone attached to us while we were on a syscall.
1827         * We could make this more efficient by not saving r3-r18, but
1828         * then we wouldn't be able to use the common intr_restore path.
1829         * It is only for traced processes anyway, so performance is not
1830         * an issue.
1831         */
1832        bb,<    %r2,30,pt_regs_ok                  /* Branch if D set */
1833        ldo     TASK_REGS(%r1),%r25
1834        reg_save %r25                              /* Save r3 to r18 */
1835
1836        /* Save the current sr */
1837        mfsp    %sr0,%r2
1838        STREG   %r2,TASK_PT_SR0(%r1)
1839
1840        /* Save the scratch sr */
1841        mfsp    %sr1,%r2
1842        STREG   %r2,TASK_PT_SR1(%r1)
1843
1844        /* sr2 should be set to zero for userspace syscalls */
1845        STREG   %r0,TASK_PT_SR2(%r1)
1846
1847        LDREG   TASK_PT_GR31(%r1),%r2
1848        depi    PRIV_USER,31,2,%r2      /* ensure return to user mode. */
1849        STREG   %r2,TASK_PT_IAOQ0(%r1)
1850        ldo     4(%r2),%r2
1851        STREG   %r2,TASK_PT_IAOQ1(%r1)
1852        b       intr_restore
1853        copy    %r25,%r16
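        /*
         * The IAOQ setup above, as a C sketch (PRIV_USER is the lowest
         * privilege level, 3):
         *
         *      regs->iaoq[0] = regs->gr[31] | PRIV_USER;  // return address
         *      regs->iaoq[1] = regs->iaoq[0] + 4;         // next instruction
         */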
1854
1855pt_regs_ok:
1856        LDREG   TASK_PT_IAOQ0(%r1),%r2
1857        depi    PRIV_USER,31,2,%r2      /* ensure return to user mode. */
1858        STREG   %r2,TASK_PT_IAOQ0(%r1)
1859        LDREG   TASK_PT_IAOQ1(%r1),%r2
1860        depi    PRIV_USER,31,2,%r2
1861        STREG   %r2,TASK_PT_IAOQ1(%r1)
1862        b       intr_restore
1863        copy    %r25,%r16
1864
1865syscall_do_resched:
1866        load32  syscall_check_resched,%r2 /* if resched, we start over again */
1867        load32  schedule,%r19
1868        bv      %r0(%r19)               /* jumps to schedule() */
1869#ifdef CONFIG_64BIT
1870        ldo     -16(%r30),%r29          /* Reference param save area */
1871#else
1872        nop
1873#endif
1874END(syscall_exit)
1875
1876
1877#ifdef CONFIG_FUNCTION_TRACER
1878
1879        .import ftrace_function_trampoline,code
1880        .align L1_CACHE_BYTES
1881ENTRY_CFI(mcount, caller)
1882_mcount:
1883        .export _mcount,data
1884        /*
1885         * The 64-bit mcount() function pointer needs 4 dwords, of which the
1886         * first two are free.  We exploit that here and place 2 instructions
1887         * for calling mcount() and 2 instructions for ftrace_stub() in them,
1888         * so everything sits on a single L1 cacheline.
1889         */
1890        ldi     0, %arg3
1891        b       ftrace_function_trampoline
1892        copy    %r3, %arg2      /* caller original %sp */
1893ftrace_stub:
1894        .globl ftrace_stub
1895        .type  ftrace_stub, @function
1896#ifdef CONFIG_64BIT
1897        bve     (%rp)
1898#else
1899        bv      %r0(%rp)
1900#endif
1901        nop
1902#ifdef CONFIG_64BIT
1903        .dword mcount
1904        .dword 0 /* code in head.S puts value of global gp here */
1905#endif
1906ENDPROC_CFI(mcount)
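        /*
         * The layout above, expressed as an illustrative C struct (the field
         * names are made up, only the layout is real):
         *
         *      struct mcount_fdesc {
         *              u32 insn[4];    // 2 insns calling mcount(),
         *                              // 2 insns for ftrace_stub()
         *              u64 addr;       // .dword mcount
         *              u64 gp;         // .dword 0, patched by head.S
         *      };
         */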
1907
1908#ifdef CONFIG_DYNAMIC_FTRACE
1909
1910#ifdef CONFIG_64BIT
1911#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1912#else
1913#define FTRACE_FRAME_SIZE FRAME_SIZE
1914#endif
1915ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1916ftrace_caller:
1917        .global ftrace_caller
1918
1919        STREG   %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
1920        ldo     -FTRACE_FRAME_SIZE(%sp), %r3
1921        STREG   %rp, -RP_OFFSET(%r3)
1922
1923        /* Offset 0 is already allocated for %r1 */
1924        STREG   %r23, 2*REG_SZ(%r3)
1925        STREG   %r24, 3*REG_SZ(%r3)
1926        STREG   %r25, 4*REG_SZ(%r3)
1927        STREG   %r26, 5*REG_SZ(%r3)
1928        STREG   %r28, 6*REG_SZ(%r3)
1929        STREG   %r29, 7*REG_SZ(%r3)
1930#ifdef CONFIG_64BIT
1931        STREG   %r19, 8*REG_SZ(%r3)
1932        STREG   %r20, 9*REG_SZ(%r3)
1933        STREG   %r21, 10*REG_SZ(%r3)
1934        STREG   %r22, 11*REG_SZ(%r3)
1935        STREG   %r27, 12*REG_SZ(%r3)
1936        STREG   %r31, 13*REG_SZ(%r3)
1937        loadgp
1938        ldo     -16(%sp),%r29
1939#endif
1940        LDREG   0(%r3), %r25
1941        copy    %rp, %r26
1942        ldo     -8(%r25), %r25
1943        ldi     0, %r23         /* no pt_regs */
1944        b,l     ftrace_function_trampoline, %rp
1945        copy    %r3, %r24
1946
1947        LDREG   -RP_OFFSET(%r3), %rp
1948        LDREG   2*REG_SZ(%r3), %r23
1949        LDREG   3*REG_SZ(%r3), %r24
1950        LDREG   4*REG_SZ(%r3), %r25
1951        LDREG   5*REG_SZ(%r3), %r26
1952        LDREG   6*REG_SZ(%r3), %r28
1953        LDREG   7*REG_SZ(%r3), %r29
1954#ifdef CONFIG_64BIT
1955        LDREG   8*REG_SZ(%r3), %r19
1956        LDREG   9*REG_SZ(%r3), %r20
1957        LDREG   10*REG_SZ(%r3), %r21
1958        LDREG   11*REG_SZ(%r3), %r22
1959        LDREG   12*REG_SZ(%r3), %r27
1960        LDREG   13*REG_SZ(%r3), %r31
1961#endif
1962        LDREG   1*REG_SZ(%r3), %r3
1963
1964        LDREGM  -FTRACE_FRAME_SIZE(%sp), %r1
1965        /* Adjust return point to jump back to beginning of traced function */
1966        ldo     -4(%r1), %r1
1967        bv,n    (%r1)
1968
1969ENDPROC_CFI(ftrace_caller)
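        /*
         * The b,l above corresponds roughly to the following call (sketch;
         * parameter meaning inferred from the register setup, see
         * ftrace_function_trampoline() in arch/parisc/kernel/ftrace.c):
         *
         *      ftrace_function_trampoline(
         *              parent,         // %r26: %rp, caller of traced function
         *              self_addr,      // %r25: stashed %r1 - 8
         *              org_sp,         // %r24: original %sp (here %r3)
         *              NULL);          // %r23: "no pt_regs"
         *
         * ftrace_regs_caller below passes a full register set instead of
         * NULL, which is what CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS users
         * (e.g. kprobes) rely on to inspect and modify register state.
         */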
1970
1971#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
1972ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
1973        CALLS,SAVE_RP,SAVE_SP)
1974ftrace_regs_caller:
1975        .global ftrace_regs_caller
1976
1977        ldo     -FTRACE_FRAME_SIZE(%sp), %r1
1978        STREG   %rp, -RP_OFFSET(%r1)
1979
1980        copy    %sp, %r1
1981        ldo     PT_SZ_ALGN(%sp), %sp
1982
1983        STREG   %rp, PT_GR2(%r1)
1984        STREG   %r3, PT_GR3(%r1)
1985        STREG   %r4, PT_GR4(%r1)
1986        STREG   %r5, PT_GR5(%r1)
1987        STREG   %r6, PT_GR6(%r1)
1988        STREG   %r7, PT_GR7(%r1)
1989        STREG   %r8, PT_GR8(%r1)
1990        STREG   %r9, PT_GR9(%r1)
1991        STREG   %r10, PT_GR10(%r1)
1992        STREG   %r11, PT_GR11(%r1)
1993        STREG   %r12, PT_GR12(%r1)
1994        STREG   %r13, PT_GR13(%r1)
1995        STREG   %r14, PT_GR14(%r1)
1996        STREG   %r15, PT_GR15(%r1)
1997        STREG   %r16, PT_GR16(%r1)
1998        STREG   %r17, PT_GR17(%r1)
1999        STREG   %r18, PT_GR18(%r1)
2000        STREG   %r19, PT_GR19(%r1)
2001        STREG   %r20, PT_GR20(%r1)
2002        STREG   %r21, PT_GR21(%r1)
2003        STREG   %r22, PT_GR22(%r1)
2004        STREG   %r23, PT_GR23(%r1)
2005        STREG   %r24, PT_GR24(%r1)
2006        STREG   %r25, PT_GR25(%r1)
2007        STREG   %r26, PT_GR26(%r1)
2008        STREG   %r27, PT_GR27(%r1)
2009        STREG   %r28, PT_GR28(%r1)
2010        STREG   %r29, PT_GR29(%r1)
2011        STREG   %r30, PT_GR30(%r1)
2012        STREG   %r31, PT_GR31(%r1)
2013        mfctl   %cr11, %r26
2014        STREG   %r26, PT_SAR(%r1)
2015
2016        copy    %rp, %r26
2017        LDREG   -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2018        ldo     -8(%r25), %r25
2019        ldo     -FTRACE_FRAME_SIZE(%r1), %arg2
2020        b,l     ftrace_function_trampoline, %rp
2021        copy    %r1, %arg3 /* struct pt_regs */
2022
2023        ldo     -PT_SZ_ALGN(%sp), %r1
2024
2025        LDREG   PT_SAR(%r1), %rp
2026        mtctl   %rp, %cr11
2027
2028        LDREG   PT_GR2(%r1), %rp
2029        LDREG   PT_GR3(%r1), %r3
2030        LDREG   PT_GR4(%r1), %r4
2031        LDREG   PT_GR5(%r1), %r5
2032        LDREG   PT_GR6(%r1), %r6
2033        LDREG   PT_GR7(%r1), %r7
2034        LDREG   PT_GR8(%r1), %r8
2035        LDREG   PT_GR9(%r1), %r9
2036        LDREG   PT_GR10(%r1),%r10
2037        LDREG   PT_GR11(%r1),%r11
2038        LDREG   PT_GR12(%r1),%r12
2039        LDREG   PT_GR13(%r1),%r13
2040        LDREG   PT_GR14(%r1),%r14
2041        LDREG   PT_GR15(%r1),%r15
2042        LDREG   PT_GR16(%r1),%r16
2043        LDREG   PT_GR17(%r1),%r17
2044        LDREG   PT_GR18(%r1),%r18
2045        LDREG   PT_GR19(%r1),%r19
2046        LDREG   PT_GR20(%r1),%r20
2047        LDREG   PT_GR21(%r1),%r21
2048        LDREG   PT_GR22(%r1),%r22
2049        LDREG   PT_GR23(%r1),%r23
2050        LDREG   PT_GR24(%r1),%r24
2051        LDREG   PT_GR25(%r1),%r25
2052        LDREG   PT_GR26(%r1),%r26
2053        LDREG   PT_GR27(%r1),%r27
2054        LDREG   PT_GR28(%r1),%r28
2055        LDREG   PT_GR29(%r1),%r29
2056        LDREG   PT_GR30(%r1),%r30
2057        LDREG   PT_GR31(%r1),%r31
2058
2059        ldo     -PT_SZ_ALGN(%sp), %sp
2060        LDREGM  -FTRACE_FRAME_SIZE(%sp), %r1
2061        /* Adjust return point to jump back to beginning of traced function */
2062        ldo     -4(%r1), %r1
2063        bv,n    (%r1)
2064
2065ENDPROC_CFI(ftrace_regs_caller)
2066
2067#endif
2068#endif
2069
2070#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2071        .align 8
2072ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2073        .export parisc_return_to_handler,data
2074parisc_return_to_handler:
2075        copy %r3,%r1
2076        STREG %r0,-RP_OFFSET(%sp)       /* store 0 as %rp */
2077        copy %sp,%r3
2078        STREGM %r1,FRAME_SIZE(%sp)
2079        STREG %ret0,8(%r3)
2080        STREG %ret1,16(%r3)
2081
2082#ifdef CONFIG_64BIT
2083        loadgp
2084#endif
2085
2086        /* call ftrace_return_to_handler(0) */
2087        .import ftrace_return_to_handler,code
2088        load32 ftrace_return_to_handler,%ret0
2089        load32 .Lftrace_ret,%r2
2090#ifdef CONFIG_64BIT
2091        ldo -16(%sp),%ret1              /* Reference param save area */
2092        bve     (%ret0)
2093#else
2094        bv      %r0(%ret0)
2095#endif
2096        ldi 0,%r26
2097.Lftrace_ret:
2098        copy %ret0,%rp
2099
2100        /* restore original return values */
2101        LDREG 8(%r3),%ret0
2102        LDREG 16(%r3),%ret1
2103
2104        /* return from function */
2105#ifdef CONFIG_64BIT
2106        bve     (%rp)
2107#else
2108        bv      %r0(%rp)
2109#endif
2110        LDREGM -FRAME_SIZE(%sp),%r3
2111ENDPROC_CFI(return_to_handler)
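        /*
         * Control flow above, as a C sketch (illustrative only):
         *
         *      saved0 = ret0;          // preserve the real return values
         *      saved1 = ret1;
         *      rp = ftrace_return_to_handler(0);  // original return address
         *      ret0 = saved0;
         *      ret1 = saved1;
         *      goto *rp;               // resume in the real caller
         */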
2112
2113#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2114
2115#endif  /* CONFIG_FUNCTION_TRACER */
2116
2117#ifdef CONFIG_IRQSTACKS
2118/* void call_on_stack(unsigned long param1, void *func,
2119                      unsigned long new_stack) */
2120ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2121ENTRY(_call_on_stack)
2122        copy    %sp, %r1
2123
2124        /* Regarding the HPPA calling conventions for function pointers,
2125           we assume the PIC register is not changed across the call.  For
2126           CONFIG_64BIT, the argument pointer is left pointing at the
2127           argument region allocated for the call to call_on_stack. */
2128
2129        /* Switch to new stack.  We allocate two frames.  */
2130        ldo     2*FRAME_SIZE(%arg2), %sp
2131# ifdef CONFIG_64BIT
2132        /* Save previous stack pointer and return pointer in frame marker */
2133        STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2134        /* Calls always use function descriptor */
2135        LDREG   16(%arg1), %arg1
2136        bve,l   (%arg1), %rp
2137        STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
2138        LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
2139        bve     (%rp)
2140        LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
2141# else
2142        /* Save previous stack pointer and return pointer in frame marker */
2143        STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
2144        STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2145        /* Calls use function descriptor if PLABEL bit is set */
2146        bb,>=,n %arg1, 30, 1f
2147        depwi   0,31,2, %arg1
2148        LDREG   0(%arg1), %arg1
21491:
2150        be,l    0(%sr4,%arg1), %sr0, %r31
2151        copy    %r31, %rp
2152        LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
2153        bv      (%rp)
2154        LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
2155# endif /* CONFIG_64BIT */
2156ENDPROC_CFI(call_on_stack)
2157#endif /* CONFIG_IRQSTACKS */
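        /*
         * C sketch of call_on_stack (illustrative; the stack grows upward on
         * parisc, and PA bit 30 of a 32-bit PLABEL pointer is the 0x2 bit):
         *
         *      void call_on_stack(unsigned long param1, void *func,
         *                         unsigned long new_stack)
         *      {
         *              sp = new_stack + 2 * FRAME_SIZE;    // switch stacks
         *      #ifdef CONFIG_64BIT
         *              func = *(void **)((char *)func + 16);  // descriptor
         *      #else
         *              if ((unsigned long)func & 2)           // PLABEL?
         *                      func = *(void **)((unsigned long)func & ~3UL);
         *      #endif
         *              func(param1);       // then restore old sp and return
         *      }
         */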
2158
2159ENTRY_CFI(get_register)
2160        /*
2161         * get_register is used by the non-access TLB miss handlers to
2162         * copy the value of the general register specified in r8 into
2163         * r1. This routine can't be used for shadowed registers, since
2164         * the rfir will restore the original value. So, for the shadowed
2165         * registers we put a -1 into r1 to indicate that the register
2166         * should not be used (the register being copied could also have
2167         * a -1 in it, but that is OK, it just means that we will have
2168         * to use the slow path instead).
2169         */
2170        blr     %r8,%r0
2171        nop
2172        bv      %r0(%r25)    /* r0 */
2173        copy    %r0,%r1
2174        bv      %r0(%r25)    /* r1 - shadowed */
2175        ldi     -1,%r1
2176        bv      %r0(%r25)    /* r2 */
2177        copy    %r2,%r1
2178        bv      %r0(%r25)    /* r3 */
2179        copy    %r3,%r1
2180        bv      %r0(%r25)    /* r4 */
2181        copy    %r4,%r1
2182        bv      %r0(%r25)    /* r5 */
2183        copy    %r5,%r1
2184        bv      %r0(%r25)    /* r6 */
2185        copy    %r6,%r1
2186        bv      %r0(%r25)    /* r7 */
2187        copy    %r7,%r1
2188        bv      %r0(%r25)    /* r8 - shadowed */
2189        ldi     -1,%r1
2190        bv      %r0(%r25)    /* r9 - shadowed */
2191        ldi     -1,%r1
2192        bv      %r0(%r25)    /* r10 */
2193        copy    %r10,%r1
2194        bv      %r0(%r25)    /* r11 */
2195        copy    %r11,%r1
2196        bv      %r0(%r25)    /* r12 */
2197        copy    %r12,%r1
2198        bv      %r0(%r25)    /* r13 */
2199        copy    %r13,%r1
2200        bv      %r0(%r25)    /* r14 */
2201        copy    %r14,%r1
2202        bv      %r0(%r25)    /* r15 */
2203        copy    %r15,%r1
2204        bv      %r0(%r25)    /* r16 - shadowed */
2205        ldi     -1,%r1
2206        bv      %r0(%r25)    /* r17 - shadowed */
2207        ldi     -1,%r1
2208        bv      %r0(%r25)    /* r18 */
2209        copy    %r18,%r1
2210        bv      %r0(%r25)    /* r19 */
2211        copy    %r19,%r1
2212        bv      %r0(%r25)    /* r20 */
2213        copy    %r20,%r1
2214        bv      %r0(%r25)    /* r21 */
2215        copy    %r21,%r1
2216        bv      %r0(%r25)    /* r22 */
2217        copy    %r22,%r1
2218        bv      %r0(%r25)    /* r23 */
2219        copy    %r23,%r1
2220        bv      %r0(%r25)    /* r24 - shadowed */
2221        ldi     -1,%r1
2222        bv      %r0(%r25)    /* r25 - shadowed */
2223        ldi     -1,%r1
2224        bv      %r0(%r25)    /* r26 */
2225        copy    %r26,%r1
2226        bv      %r0(%r25)    /* r27 */
2227        copy    %r27,%r1
2228        bv      %r0(%r25)    /* r28 */
2229        copy    %r28,%r1
2230        bv      %r0(%r25)    /* r29 */
2231        copy    %r29,%r1
2232        bv      %r0(%r25)    /* r30 */
2233        copy    %r30,%r1
2234        bv      %r0(%r25)    /* r31 */
2235        copy    %r31,%r1
2236ENDPROC_CFI(get_register)
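        /*
         * The blr above is a computed jump: target = pc + 8 + %r8 * 8, and
         * each case is exactly two instructions (bv + copy/ldi), so %r8
         * indexes straight into the table.  As a C sketch:
         *
         *      long get_register(int r)
         *      {
         *              // r1, r8, r9, r16, r17, r24 and r25 are shadowed:
         *              // the rfir would overwrite them, so flag them as
         *              // unusable and force callers onto the slow path
         *              if (is_shadowed(r))
         *                      return -1;
         *              return gr[r];
         *      }
         */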
2237
2238
2239ENTRY_CFI(set_register)
2240        /*
2241         * set_register is used by the non-access TLB miss handlers to
2242         * copy the value of r1 into the general register specified in
2243         * r8.
2244         */
2245        blr     %r8,%r0
2246        nop
2247        bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
2248        copy    %r1,%r0
2249        bv      %r0(%r25)    /* r1 */
2250        copy    %r1,%r1
2251        bv      %r0(%r25)    /* r2 */
2252        copy    %r1,%r2
2253        bv      %r0(%r25)    /* r3 */
2254        copy    %r1,%r3
2255        bv      %r0(%r25)    /* r4 */
2256        copy    %r1,%r4
2257        bv      %r0(%r25)    /* r5 */
2258        copy    %r1,%r5
2259        bv      %r0(%r25)    /* r6 */
2260        copy    %r1,%r6
2261        bv      %r0(%r25)    /* r7 */
2262        copy    %r1,%r7
2263        bv      %r0(%r25)    /* r8 */
2264        copy    %r1,%r8
2265        bv      %r0(%r25)    /* r9 */
2266        copy    %r1,%r9
2267        bv      %r0(%r25)    /* r10 */
2268        copy    %r1,%r10
2269        bv      %r0(%r25)    /* r11 */
2270        copy    %r1,%r11
2271        bv      %r0(%r25)    /* r12 */
2272        copy    %r1,%r12
2273        bv      %r0(%r25)    /* r13 */
2274        copy    %r1,%r13
2275        bv      %r0(%r25)    /* r14 */
2276        copy    %r1,%r14
2277        bv      %r0(%r25)    /* r15 */
2278        copy    %r1,%r15
2279        bv      %r0(%r25)    /* r16 */
2280        copy    %r1,%r16
2281        bv      %r0(%r25)    /* r17 */
2282        copy    %r1,%r17
2283        bv      %r0(%r25)    /* r18 */
2284        copy    %r1,%r18
2285        bv      %r0(%r25)    /* r19 */
2286        copy    %r1,%r19
2287        bv      %r0(%r25)    /* r20 */
2288        copy    %r1,%r20
2289        bv      %r0(%r25)    /* r21 */
2290        copy    %r1,%r21
2291        bv      %r0(%r25)    /* r22 */
2292        copy    %r1,%r22
2293        bv      %r0(%r25)    /* r23 */
2294        copy    %r1,%r23
2295        bv      %r0(%r25)    /* r24 */
2296        copy    %r1,%r24
2297        bv      %r0(%r25)    /* r25 */
2298        copy    %r1,%r25
2299        bv      %r0(%r25)    /* r26 */
2300        copy    %r1,%r26
2301        bv      %r0(%r25)    /* r27 */
2302        copy    %r1,%r27
2303        bv      %r0(%r25)    /* r28 */
2304        copy    %r1,%r28
2305        bv      %r0(%r25)    /* r29 */
2306        copy    %r1,%r29
2307        bv      %r0(%r25)    /* r30 */
2308        copy    %r1,%r30
2309        bv      %r0(%r25)    /* r31 */
2310        copy    %r1,%r31
2311ENDPROC_CFI(set_register)
2312
2313