/* linux/arch/sparc/mm/ultra.S */
   1/*
   2 * ultra.S: Don't expand these all over the place...
   3 *
   4 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <asm/asi.h>
   8#include <asm/pgtable.h>
   9#include <asm/page.h>
  10#include <asm/spitfire.h>
  11#include <asm/mmu_context.h>
  12#include <asm/mmu.h>
  13#include <asm/pil.h>
  14#include <asm/head.h>
  15#include <asm/thread_info.h>
  16#include <asm/cacheflush.h>
  17#include <asm/hypervisor.h>
  18#include <asm/cpudata.h>
  19
  20        /* Basically, most of the Spitfire vs. Cheetah madness
  21         * has to do with the fact that Cheetah does not support
  22         * IMMU flushes out of the secondary context.  Someone needs
  23         * to throw a south lake birthday party for the folks
  24         * in Microelectronics who refused to fix this shit.
  25         */
  26
  27        /* This file is meant to be read efficiently by the CPU, not humans.
   28         * Try hard not to screw this up for anybody...
  29         */
   30        .text
   31        .align          32
   32        .globl          __flush_tlb_mm
        /* Spitfire: flush every TLB entry belonging to one mm context.
         * In: %o0 = (ctx & TAG_CONTEXT_BITS), %o1 = SECONDARY_CONTEXT.
         * Fast path assumes %o0 is already the live secondary context and
         * issues a demap-context; otherwise take the slow path which
         * temporarily installs it.  The nop padding reserves room so the
         * Cheetah (19 insns) or hypervisor (10 insns) variant can be
         * patched over this at boot via tlb_patch_one.
         * NOTE(review): the count below previously read "18 insns" but the
         * routine is 19 instructions, matching the 19 copied by
         * cheetah_patch_cachetlbops.
         */
   33__flush_tlb_mm:         /* 19 insns */
   34        /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
   35        ldxa            [%o1] ASI_DMMU, %g2
   36        cmp             %g2, %o0
   37        bne,pn          %icc, __spitfire_flush_tlb_mm_slow
   38         mov            0x50, %g3      ! delay slot: 0x50 = demap-context, secondary ctx
   39        stxa            %g0, [%g3] ASI_DMMU_DEMAP
   40        stxa            %g0, [%g3] ASI_IMMU_DEMAP
   41        sethi           %hi(KERNBASE), %g3
   42        flush           %g3            ! synchronize with the demap stores
   43        retl
   44         nop
   45        nop
   46        nop
   47        nop
   48        nop
   49        nop
   50        nop
   51        nop
   52        nop
   53        nop
  54
   55        .align          32
   56        .globl          __flush_tlb_page
        /* Spitfire: flush one TLB mapping.
         * In: %o0 = context, %o1 = vaddr; bit 0 of %o1 set means the
         * I-TLB entry must be demapped as well as the D-TLB entry.
         * Runs with interrupts disabled, briefly installing %o0 as the
         * secondary context and restoring the old one afterwards.
         * Nop padding leaves room for the boot-patched variants.
         */
   57__flush_tlb_page:       /* 22 insns */
   58        /* %o0 = context, %o1 = vaddr */
   59        rdpr            %pstate, %g7
   60        andn            %g7, PSTATE_IE, %g2
   61        wrpr            %g2, %pstate            ! disable interrupts
   62        mov             SECONDARY_CONTEXT, %o4
   63        ldxa            [%o4] ASI_DMMU, %g2     ! save current secondary ctx
   64        stxa            %o0, [%o4] ASI_DMMU
   65        andcc           %o1, 1, %g0             ! bit 0: also flush I-TLB?
   66        andn            %o1, 1, %o3
   67        be,pn           %icc, 1f
   68         or             %o3, 0x10, %o3         ! demap-page address, secondary ctx
   69        stxa            %g0, [%o3] ASI_IMMU_DEMAP
   701:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
   71        membar          #Sync
   72        stxa            %g2, [%o4] ASI_DMMU     ! restore secondary ctx
   73        sethi           %hi(KERNBASE), %o4
   74        flush           %o4
   75        retl
   76         wrpr           %g7, 0x0, %pstate      ! delay slot: restore PSTATE
   77        nop
   78        nop
   79        nop
   80        nop
  81
   82        .align          32
   83        .globl          __flush_tlb_pending
        /* Spitfire: flush a batch of pending page mappings.
         * In: %o0 = context, %o1 = number of vaddrs, %o2 = vaddr array;
         * each vaddr's bit 0 flags "also demap the I-TLB entry".
         * Walks the array from the end down, with interrupts disabled and
         * %o0 temporarily installed as the secondary context.
         * NOTE(review): the count below previously read "26 insns" but the
         * routine is 27 instructions, matching the 27 copied by
         * cheetah_patch_cachetlbops.
         */
   84__flush_tlb_pending:    /* 27 insns */
   85        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
   86        rdpr            %pstate, %g7
   87        sllx            %o1, 3, %o1             ! nr -> byte offset (8 bytes/entry)
   88        andn            %g7, PSTATE_IE, %g2
   89        wrpr            %g2, %pstate            ! disable interrupts
   90        mov             SECONDARY_CONTEXT, %o4
   91        ldxa            [%o4] ASI_DMMU, %g2     ! save current secondary ctx
   92        stxa            %o0, [%o4] ASI_DMMU
   931:      sub             %o1, (1 << 3), %o1
   94        ldx             [%o2 + %o1], %o3
   95        andcc           %o3, 1, %g0             ! bit 0: also flush I-TLB?
   96        andn            %o3, 1, %o3
   97        be,pn           %icc, 2f
   98         or             %o3, 0x10, %o3         ! demap-page address, secondary ctx
   99        stxa            %g0, [%o3] ASI_IMMU_DEMAP
  1002:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
  101        membar          #Sync
  102        brnz,pt         %o1, 1b
  103         nop
  104        stxa            %g2, [%o4] ASI_DMMU     ! restore secondary ctx
  105        sethi           %hi(KERNBASE), %o4
  106        flush           %o4
  107        retl
  108         wrpr           %g7, 0x0, %pstate
  109        nop
  110        nop
  111        nop
  112        nop
 113
  114        .align          32
  115        .globl          __flush_tlb_kernel_range
        /* Flush kernel (nucleus context) mappings in [start, end).
         * In: %o0 = start, %o1 = end; both presumably page aligned —
         * the loop steps by PAGE_SIZE and demaps D- and I-TLB per page,
         * walking from the last page down to start.
         */
  116__flush_tlb_kernel_range:       /* 16 insns */
  117        /* %o0=start, %o1=end */
  118        cmp             %o0, %o1
  119        be,pn           %xcc, 2f                ! empty range: nothing to do
  120         sethi          %hi(PAGE_SIZE), %o4
  121        sub             %o1, %o0, %o3
  122        sub             %o3, %o4, %o3           ! %o3 = offset of last page
  123        or              %o0, 0x20, %o0          ! Nucleus
  1241:      stxa            %g0, [%o0 + %o3] ASI_DMMU_DEMAP
  125        stxa            %g0, [%o0 + %o3] ASI_IMMU_DEMAP
  126        membar          #Sync
  127        brnz,pt         %o3, 1b
  128         sub            %o3, %o4, %o3
  1292:      sethi           %hi(KERNBASE), %o3
  130        flush           %o3
  131        retl
  132         nop
  133        nop
 134
        /* Slow path of __flush_tlb_mm, taken when %o0 is not the live
         * secondary context.  On entry (set up by the caller):
         *   %o0 = target ctx, %o1 = SECONDARY_CONTEXT,
         *   %g2 = saved secondary ctx, %g3 = 0x50 (demap-context, secondary).
         * Installs the target context, demaps it, then restores %g2.
         */
  135__spitfire_flush_tlb_mm_slow:
  136        rdpr            %pstate, %g1
  137        wrpr            %g1, PSTATE_IE, %pstate ! wrpr XORs: clears IE (assumes IE was set)
  138        stxa            %o0, [%o1] ASI_DMMU
  139        stxa            %g0, [%g3] ASI_DMMU_DEMAP
  140        stxa            %g0, [%g3] ASI_IMMU_DEMAP
  141        flush           %g6                     ! NOTE(review): %g6 assumed to be a mapped address here — confirm
  142        stxa            %g2, [%o1] ASI_DMMU     ! restore previous secondary ctx
  143        sethi           %hi(KERNBASE), %o1
  144        flush           %o1
  145        retl
  146         wrpr           %g1, 0, %pstate        ! restore original PSTATE (re-enables IE)
 147
  148/*
  149 * The following code flushes one page_size worth.
  150 */
  151        .section .kprobes.text, "ax"
  152        .align          32
  153        .globl          __flush_icache_page
        /* Flush the I-cache for one physical page.
         * In: %o0 = physical page address.  The srlx/sllx pair masks off
         * the sub-page bits, then PAGE_OFFSET is added to form the kernel
         * linear-mapping address, which is flushed in 32-byte steps.
         */
  154__flush_icache_page:    /* %o0 = phys_page */
  155        srlx            %o0, PAGE_SHIFT, %o0
  156        sethi           %hi(PAGE_OFFSET), %g1
  157        sllx            %o0, PAGE_SHIFT, %o0    ! page-align the physical address
  158        sethi           %hi(PAGE_SIZE), %g2
  159        ldx             [%g1 + %lo(PAGE_OFFSET)], %g1
  160        add             %o0, %g1, %o0           ! linear kernel address of the page
  1611:      subcc           %g2, 32, %g2
  162        bne,pt          %icc, 1b
  163         flush          %o0 + %g2
  164        retl
  165         nop
 166
  167#ifdef DCACHE_ALIASING_POSSIBLE

  169#if (PAGE_SHIFT != 13)
  170#error only page shift of 13 is supported by dcache flush
  171#endif

  173#define DTAG_MASK 0x3

  175        /* This routine is Spitfire specific so the hardcoded
  176         * D-cache size and line-size are OK.
  177         */
  178        .align          64
  179        .globl          __flush_dcache_page
        /* Walk the 16K direct-mapped D-cache by tag, invalidating every
         * valid line whose tag matches this page's physical address.
         * In: %o0 = kernel linear address, %o1 = nonzero to also flush
         * the I-cache (tail-branches into __flush_icache_page; the delay
         * slot turns the D-cache tag back into the physical address).
         */
  180__flush_dcache_page:    /* %o0=kaddr, %o1=flush_icache */
  181        sethi           %hi(PAGE_OFFSET), %g1
  182        ldx             [%g1 + %lo(PAGE_OFFSET)], %g1
  183        sub             %o0, %g1, %o0                   ! physical address
  184        srlx            %o0, 11, %o0                    ! make D-cache TAG
  185        sethi           %hi(1 << 14), %o2               ! D-cache size
  186        sub             %o2, (1 << 5), %o2              ! D-cache line size
  1871:      ldxa            [%o2] ASI_DCACHE_TAG, %o3       ! load D-cache TAG
  188        andcc           %o3, DTAG_MASK, %g0             ! Valid?
  189        be,pn           %xcc, 2f                        ! Nope, branch
  190         andn           %o3, DTAG_MASK, %o3             ! Clear valid bits
  191        cmp             %o3, %o0                        ! TAG match?
  192        bne,pt          %xcc, 2f                        ! Nope, branch
  193         nop
  194        stxa            %g0, [%o2] ASI_DCACHE_TAG       ! Invalidate TAG
  195        membar          #Sync
  1962:      brnz,pt         %o2, 1b
  197         sub            %o2, (1 << 5), %o2              ! D-cache line size

  199        /* The I-cache does not snoop local stores so we
  200         * better flush that too when necessary.
  201         */
  202        brnz,pt         %o1, __flush_icache_page
  203         sllx           %o0, 11, %o0                    ! TAG -> physical page address
  204        retl
  205         nop

  207#endif /* DCACHE_ALIASING_POSSIBLE */

  209        .previous

  211        /* Cheetah specific versions, patched at boot time. */
        /* Cheetah cannot perform IMMU demaps out of the secondary context
         * (see the comment at the top of this file), so this variant runs
         * at TL1 and works through the primary context register instead.
         * In: %o0 = ctx.  Patched over __flush_tlb_mm at boot.
         */
  212__cheetah_flush_tlb_mm: /* 19 insns */
  213        rdpr            %pstate, %g7
  214        andn            %g7, PSTATE_IE, %g2
  215        wrpr            %g2, 0x0, %pstate       ! disable interrupts
  216        wrpr            %g0, 1, %tl             ! raise to trap level 1
  217        mov             PRIMARY_CONTEXT, %o2
  218        mov             0x40, %g3               ! 0x40 = demap-context, primary ctx
  219        ldxa            [%o2] ASI_DMMU, %g2     ! save current primary ctx
  220        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o1
  221        sllx            %o1, CTX_PGSZ1_NUC_SHIFT, %o1
  222        or              %o0, %o1, %o0   /* Preserve nucleus page size fields */
  223        stxa            %o0, [%o2] ASI_DMMU
  224        stxa            %g0, [%g3] ASI_DMMU_DEMAP
  225        stxa            %g0, [%g3] ASI_IMMU_DEMAP
  226        stxa            %g2, [%o2] ASI_DMMU     ! restore primary ctx
  227        sethi           %hi(KERNBASE), %o2
  228        flush           %o2
  229        wrpr            %g0, 0, %tl             ! back to trap level 0
  230        retl
  231         wrpr           %g7, 0x0, %pstate
 232
        /* Cheetah variant of __flush_tlb_page: same contract
         * (%o1 bit 0 => also demap I-TLB), but runs at TL1 through the
         * primary context register since Cheetah cannot IMMU-demap from
         * the secondary context.  Patched over __flush_tlb_page at boot.
         */
  233__cheetah_flush_tlb_page:       /* 22 insns */
  234        /* %o0 = context, %o1 = vaddr */
  235        rdpr            %pstate, %g7
  236        andn            %g7, PSTATE_IE, %g2
  237        wrpr            %g2, 0x0, %pstate       ! disable interrupts
  238        wrpr            %g0, 1, %tl             ! raise to trap level 1
  239        mov             PRIMARY_CONTEXT, %o4
  240        ldxa            [%o4] ASI_DMMU, %g2     ! save current primary ctx
  241        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o3
  242        sllx            %o3, CTX_PGSZ1_NUC_SHIFT, %o3
  243        or              %o0, %o3, %o0   /* Preserve nucleus page size fields */
  244        stxa            %o0, [%o4] ASI_DMMU
  245        andcc           %o1, 1, %g0             ! bit 0: also flush I-TLB?
  246        be,pn           %icc, 1f
  247         andn           %o1, 1, %o3
  248        stxa            %g0, [%o3] ASI_IMMU_DEMAP
  2491:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
  250        membar          #Sync
  251        stxa            %g2, [%o4] ASI_DMMU     ! restore primary ctx
  252        sethi           %hi(KERNBASE), %o4
  253        flush           %o4
  254        wrpr            %g0, 0, %tl
  255        retl
  256         wrpr           %g7, 0x0, %pstate
 257
        /* Cheetah variant of __flush_tlb_pending: batch-demap the vaddrs
         * array at TL1 via the primary context register.
         * Patched over __flush_tlb_pending at boot.
         */
  258__cheetah_flush_tlb_pending:    /* 27 insns */
  259        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
  260        rdpr            %pstate, %g7
  261        sllx            %o1, 3, %o1             ! nr -> byte offset (8 bytes/entry)
  262        andn            %g7, PSTATE_IE, %g2
  263        wrpr            %g2, 0x0, %pstate       ! disable interrupts
  264        wrpr            %g0, 1, %tl             ! raise to trap level 1
  265        mov             PRIMARY_CONTEXT, %o4
  266        ldxa            [%o4] ASI_DMMU, %g2     ! save current primary ctx
  267        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o3
  268        sllx            %o3, CTX_PGSZ1_NUC_SHIFT, %o3
  269        or              %o0, %o3, %o0   /* Preserve nucleus page size fields */
  270        stxa            %o0, [%o4] ASI_DMMU
  2711:      sub             %o1, (1 << 3), %o1
  272        ldx             [%o2 + %o1], %o3
  273        andcc           %o3, 1, %g0             ! bit 0: also flush I-TLB?
  274        be,pn           %icc, 2f
  275         andn           %o3, 1, %o3
  276        stxa            %g0, [%o3] ASI_IMMU_DEMAP
  2772:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
  278        membar          #Sync
  279        brnz,pt         %o1, 1b
  280         nop
  281        stxa            %g2, [%o4] ASI_DMMU     ! restore primary ctx
  282        sethi           %hi(KERNBASE), %o4
  283        flush           %o4
  284        wrpr            %g0, 0, %tl
  285        retl
  286         wrpr           %g7, 0x0, %pstate
 287
  288#ifdef DCACHE_ALIASING_POSSIBLE
        /* Cheetah variant of __flush_dcache_page: instead of walking tags,
         * use ASI_DCACHE_INVALIDATE directly over the physical page,
         * one 32-byte line at a time.  In: %o0 = kernel linear address.
         * Patched over __flush_dcache_page at boot.
         */
  289__cheetah_flush_dcache_page: /* 11 insns */
  290        sethi           %hi(PAGE_OFFSET), %g1
  291        ldx             [%g1 + %lo(PAGE_OFFSET)], %g1
  292        sub             %o0, %g1, %o0           ! linear -> physical address
  293        sethi           %hi(PAGE_SIZE), %o4
  2941:      subcc           %o4, (1 << 5), %o4      ! step one 32-byte line
  295        stxa            %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
  296        membar          #Sync
  297        bne,pt          %icc, 1b
  298         nop
  299        retl            /* I-cache flush never needed on Cheetah, see callers. */
  300         nop
  301#endif /* DCACHE_ALIASING_POSSIBLE */
 302
  303        /* Hypervisor specific versions, patched at boot time.  */
        /* Error path for the TL0 hypervisor TLB routines below.
         * In: %o0 = hypervisor status code, %o1 = hypervisor call/trap
         * number (set in the caller's branch delay slot).  Opens a
         * register window and reports via hypervisor_tlbop_error().
         */
  304__hypervisor_tlb_tl0_error:
  305        save            %sp, -192, %sp
  306        mov             %i0, %o0                ! arg0: HV error code
  307        call            hypervisor_tlbop_error
  308         mov            %i1, %o1               ! arg1: which TLB op failed
  309        ret
  310         restore
 311
        /* sun4v variant of __flush_tlb_mm: demap an entire mmu context
         * through the HV_FAST_MMU_DEMAP_CTX hypervisor fast trap.
         * In: %o0 = mmu context.  Patched over __flush_tlb_mm at boot.
         */
  312__hypervisor_flush_tlb_mm: /* 10 insns */
  313        mov             %o0, %o2        /* ARG2: mmu context */
  314        mov             0, %o0          /* ARG0: CPU lists unimplemented */
  315        mov             0, %o1          /* ARG1: CPU lists unimplemented */
  316        mov             HV_MMU_ALL, %o3 /* ARG3: flags */
  317        mov             HV_FAST_MMU_DEMAP_CTX, %o5
  318        ta              HV_FAST_TRAP
  319        brnz,pn         %o0, __hypervisor_tlb_tl0_error
  320         mov            HV_FAST_MMU_DEMAP_CTX, %o1     ! delay slot: op id for error report
  321        retl
  322         nop
 323
        /* sun4v variant of __flush_tlb_page: unmap one address via the
         * MMU_UNMAP_ADDR trap.  In: %o0 = context, %o1 = vaddr (low bits,
         * including the IMMU flag bit, are masked off by the srlx/sllx
         * page-align pair before the trap).
         * Patched over __flush_tlb_page at boot.
         */
  324__hypervisor_flush_tlb_page: /* 11 insns */
  325        /* %o0 = context, %o1 = vaddr */
  326        mov             %o0, %g2
  327        mov             %o1, %o0              /* ARG0: vaddr + IMMU-bit */
  328        mov             %g2, %o1              /* ARG1: mmu context */
  329        mov             HV_MMU_ALL, %o2       /* ARG2: flags */
  330        srlx            %o0, PAGE_SHIFT, %o0
  331        sllx            %o0, PAGE_SHIFT, %o0  ! page-align (drops the IMMU bit)
  332        ta              HV_MMU_UNMAP_ADDR_TRAP
  333        brnz,pn         %o0, __hypervisor_tlb_tl0_error
  334         mov            HV_MMU_UNMAP_ADDR_TRAP, %o1    ! delay slot: op id for error report
  335        retl
  336         nop
 337
        /* sun4v variant of __flush_tlb_pending: one MMU_UNMAP_ADDR trap
         * per array entry, walking the vaddr array from the end down.
         * In: %o0 = context, %o1 = nr, %o2 = vaddrs[].  Uses %g1-%g3 to
         * keep loop state across the traps.  Patched over
         * __flush_tlb_pending at boot.
         */
  338__hypervisor_flush_tlb_pending: /* 16 insns */
  339        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
  340        sllx            %o1, 3, %g1           ! nr -> byte offset (8 bytes/entry)
  341        mov             %o2, %g2              ! %g2 = vaddrs[]
  342        mov             %o0, %g3              ! %g3 = context
  3431:      sub             %g1, (1 << 3), %g1
  344        ldx             [%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
  345        mov             %g3, %o1              /* ARG1: mmu context */
  346        mov             HV_MMU_ALL, %o2       /* ARG2: flags */
  347        srlx            %o0, PAGE_SHIFT, %o0
  348        sllx            %o0, PAGE_SHIFT, %o0  ! page-align (drops the IMMU bit)
  349        ta              HV_MMU_UNMAP_ADDR_TRAP
  350        brnz,pn         %o0, __hypervisor_tlb_tl0_error
  351         mov            HV_MMU_UNMAP_ADDR_TRAP, %o1
  352        brnz,pt         %g1, 1b
  353         nop
  354        retl
  355         nop
 356
        /* sun4v variant of __flush_tlb_kernel_range: unmap [start, end)
         * page by page, context 0, via MMU_UNMAP_ADDR traps, walking from
         * the last page down.  Patched over __flush_tlb_kernel_range.
         */
  357__hypervisor_flush_tlb_kernel_range: /* 16 insns */
  358        /* %o0=start, %o1=end */
  359        cmp             %o0, %o1
  360        be,pn           %xcc, 2f                ! empty range: nothing to do
  361         sethi          %hi(PAGE_SIZE), %g3
  362        mov             %o0, %g1                ! %g1 = start
  363        sub             %o1, %g1, %g2
  364        sub             %g2, %g3, %g2           ! %g2 = offset of last page
  3651:      add             %g1, %g2, %o0   /* ARG0: virtual address */
  366        mov             0, %o1          /* ARG1: mmu context */
  367        mov             HV_MMU_ALL, %o2 /* ARG2: flags */
  368        ta              HV_MMU_UNMAP_ADDR_TRAP
  369        brnz,pn         %o0, __hypervisor_tlb_tl0_error
  370         mov            HV_MMU_UNMAP_ADDR_TRAP, %o1
  371        brnz,pt         %g2, 1b
  372         sub            %g2, %g3, %g2
  3732:      retl
  374         nop
 375
  376#ifdef DCACHE_ALIASING_POSSIBLE
  377        /* XXX Niagara and friends have an 8K cache, so no aliasing is
  378         * XXX possible, but nothing explicit in the Hypervisor API
  379         * XXX guarantees this.
  380         */
        /* sun4v: D-cache flush is a no-op (see XXX note above).
         * Patched over __flush_dcache_page at boot.
         */
  381__hypervisor_flush_dcache_page: /* 2 insns */
  382        retl
  383         nop
  384#endif
 385
        /* Boot-time code patcher: copy %o2 instruction words from %o1
         * (the replacement routine) over %o0 (the generic routine),
         * flushing the I-cache for each patched word.
         * In: %o0 = dest, %o1 = src, %o2 = number of 32-bit words.
         */
  386tlb_patch_one:
  3871:      lduw            [%o1], %g1
  388        stw             %g1, [%o0]
  389        flush           %o0                     ! keep I-cache coherent with the store
  390        subcc           %o2, 1, %o2
  391        add             %o1, 4, %o1
  392        bne,pt          %icc, 1b
  393         add            %o0, 4, %o0
  394        retl
  395         nop
 396
  397        .globl          cheetah_patch_cachetlbops
        /* Replace the generic Spitfire cache/TLB flush routines with the
         * Cheetah variants at boot.  Each instruction count passed in %o2
         * must match the size of the corresponding __cheetah_* routine.
         */
  398cheetah_patch_cachetlbops:
  399        save            %sp, -128, %sp

  401        sethi           %hi(__flush_tlb_mm), %o0
  402        or              %o0, %lo(__flush_tlb_mm), %o0
  403        sethi           %hi(__cheetah_flush_tlb_mm), %o1
  404        or              %o1, %lo(__cheetah_flush_tlb_mm), %o1
  405        call            tlb_patch_one
  406         mov            19, %o2

  408        sethi           %hi(__flush_tlb_page), %o0
  409        or              %o0, %lo(__flush_tlb_page), %o0
  410        sethi           %hi(__cheetah_flush_tlb_page), %o1
  411        or              %o1, %lo(__cheetah_flush_tlb_page), %o1
  412        call            tlb_patch_one
  413         mov            22, %o2

  415        sethi           %hi(__flush_tlb_pending), %o0
  416        or              %o0, %lo(__flush_tlb_pending), %o0
  417        sethi           %hi(__cheetah_flush_tlb_pending), %o1
  418        or              %o1, %lo(__cheetah_flush_tlb_pending), %o1
  419        call            tlb_patch_one
  420         mov            27, %o2

  422#ifdef DCACHE_ALIASING_POSSIBLE
  423        sethi           %hi(__flush_dcache_page), %o0
  424        or              %o0, %lo(__flush_dcache_page), %o0
  425        sethi           %hi(__cheetah_flush_dcache_page), %o1
  426        or              %o1, %lo(__cheetah_flush_dcache_page), %o1
  427        call            tlb_patch_one
  428         mov            11, %o2
  429#endif /* DCACHE_ALIASING_POSSIBLE */

  431        ret
  432         restore
 433
  434#ifdef CONFIG_SMP
  435        /* These are all called by the slaves of a cross call, at
  436         * trap level 1, with interrupts fully disabled.
  437         *
  438         * Register usage:
  439         *   %g5        mm->context     (all tlb flushes)
  440         *   %g1        address arg 1   (tlb page and range flushes)
  441         *   %g7        address arg 2   (tlb range flush only)
  442         *
  443         *   %g6        scratch 1
  444         *   %g2        scratch 2
  445         *   %g3        scratch 3
  446         *   %g4        scratch 4
  447         */
  448        .align          32
  449        .globl          xcall_flush_tlb_mm
        /* Cross-call slave: demap the entire context %g5 through the
         * primary context register (0x40 = demap-context, primary).
         * Nop padding reserves room for the 21-insn hypervisor variant
         * patched in by hypervisor_patch_cachetlbops.
         */
  450xcall_flush_tlb_mm:     /* 21 insns */
  451        mov             PRIMARY_CONTEXT, %g2
  452        ldxa            [%g2] ASI_DMMU, %g3     ! save current primary ctx
  453        srlx            %g3, CTX_PGSZ1_NUC_SHIFT, %g4
  454        sllx            %g4, CTX_PGSZ1_NUC_SHIFT, %g4
  455        or              %g5, %g4, %g5   /* Preserve nucleus page size fields */
  456        stxa            %g5, [%g2] ASI_DMMU
  457        mov             0x40, %g4               ! demap-context, primary ctx
  458        stxa            %g0, [%g4] ASI_DMMU_DEMAP
  459        stxa            %g0, [%g4] ASI_IMMU_DEMAP
  460        stxa            %g3, [%g2] ASI_DMMU     ! restore primary ctx
  461        retry
  462        nop
  463        nop
  464        nop
  465        nop
  466        nop
  467        nop
  468        nop
  469        nop
  470        nop
  471        nop
 472
  473        .globl          xcall_flush_tlb_page
        /* Cross-call slave: demap one page.
         * In: %g5 = context, %g1 = vaddr (bit 0 set => also demap I-TLB).
         * Briefly installs %g5 as the primary context; note %g5 is then
         * reused for the page-aligned demap address.
         */
  474xcall_flush_tlb_page:   /* 17 insns */
  475        /* %g5=context, %g1=vaddr */
  476        mov             PRIMARY_CONTEXT, %g4
  477        ldxa            [%g4] ASI_DMMU, %g2     ! save current primary ctx
  478        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %g4
  479        sllx            %g4, CTX_PGSZ1_NUC_SHIFT, %g4
  480        or              %g5, %g4, %g5           ! preserve nucleus page size fields
  481        mov             PRIMARY_CONTEXT, %g4
  482        stxa            %g5, [%g4] ASI_DMMU
  483        andcc           %g1, 0x1, %g0           ! bit 0: also flush I-TLB?
  484        be,pn           %icc, 2f
  485         andn           %g1, 0x1, %g5
  486        stxa            %g0, [%g5] ASI_IMMU_DEMAP
  4872:      stxa            %g0, [%g5] ASI_DMMU_DEMAP
  488        membar          #Sync
  489        stxa            %g2, [%g4] ASI_DMMU     ! restore primary ctx
  490        retry
  491        nop
  492        nop
 493
  494        .globl          xcall_flush_tlb_kernel_range
        /* Cross-call slave: demap kernel (nucleus) mappings in
         * [%g1, %g7), page by page from the top down.  Both boundaries
         * are first truncated to page alignment.  Nop padding leaves
         * room for the 25-insn hypervisor variant patched at boot.
         */
  495xcall_flush_tlb_kernel_range:   /* 25 insns */
  496        sethi           %hi(PAGE_SIZE - 1), %g2
  497        or              %g2, %lo(PAGE_SIZE - 1), %g2
  498        andn            %g1, %g2, %g1           ! page-align start
  499        andn            %g7, %g2, %g7           ! page-align end
  500        sub             %g7, %g1, %g3
  501        add             %g2, 1, %g2             ! %g2 = PAGE_SIZE
  502        sub             %g3, %g2, %g3           ! %g3 = offset of last page
  503        or              %g1, 0x20, %g1          ! Nucleus
  5041:      stxa            %g0, [%g1 + %g3] ASI_DMMU_DEMAP
  505        stxa            %g0, [%g1 + %g3] ASI_IMMU_DEMAP
  506        membar          #Sync
  507        brnz,pt         %g3, 1b
  508         sub            %g3, %g2, %g3
  509        retry
  510        nop
  511        nop
  512        nop
  513        nop
  514        nop
  515        nop
  516        nop
  517        nop
  518        nop
  519        nop
  520        nop
 521
  522        /* This runs in a very controlled environment, so we do
  523         * not need to worry about BH races etc.
  524         */
  525        .globl          xcall_sync_tick
        /* Cross-call slave: enter a full trap frame (etrap_irq) and call
         * smp_synchronize_tick_client(), then return via rtrap_xcall.
         * The two instructions at 661: are replaced with nops on sun4v
         * via the .sun4v_2insn_patch mechanism (the PSTATE_IG/AG write
         * is sun4u-only).
         */
  526xcall_sync_tick:

  528661:    rdpr            %pstate, %g2
  529        wrpr            %g2, PSTATE_IG | PSTATE_AG, %pstate
  530        .section        .sun4v_2insn_patch, "ax"
  531        .word           661b
  532        nop
  533        nop
  534        .previous

  536        rdpr            %pil, %g2
  537        wrpr            %g0, PIL_NORMAL_MAX, %pil
  538        sethi           %hi(109f), %g7          ! %g7 = return point for etrap_irq
  539        b,pt            %xcc, etrap_irq
  540109:     or             %g7, %lo(109b), %g7
  541#ifdef CONFIG_TRACE_IRQFLAGS
  542        call            trace_hardirqs_off
  543         nop
  544#endif
  545        call            smp_synchronize_tick_client
  546         nop
  547        b               rtrap_xcall
  548         ldx            [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 549
  550        .globl          xcall_fetch_glob_regs
        /* Cross-call slave: snapshot this CPU's trap state, %o7/%i7,
         * the caller's return PC (read from the previous register window
         * via a %cwp rewind), and the current thread pointer into this
         * CPU's 64-byte slot of global_cpu_snapshot[].
         */
  551xcall_fetch_glob_regs:
  552        sethi           %hi(global_cpu_snapshot), %g1
  553        or              %g1, %lo(global_cpu_snapshot), %g1
  554        __GET_CPUID(%g2)
  555        sllx            %g2, 6, %g3             ! cpu id * 64 = slot offset
  556        add             %g1, %g3, %g1
  557        rdpr            %tstate, %g7
  558        stx             %g7, [%g1 + GR_SNAP_TSTATE]
  559        rdpr            %tpc, %g7
  560        stx             %g7, [%g1 + GR_SNAP_TPC]
  561        rdpr            %tnpc, %g7
  562        stx             %g7, [%g1 + GR_SNAP_TNPC]
  563        stx             %o7, [%g1 + GR_SNAP_O7]
  564        stx             %i7, [%g1 + GR_SNAP_I7]
  565        /* Don't try this at home kids... */
  566        rdpr            %cwp, %g3
  567        sub             %g3, 1, %g7
  568        wrpr            %g7, %cwp               ! step back one register window
  569        mov             %i7, %g7                ! grab the caller's %i7
  570        wrpr            %g3, %cwp               ! restore the current window
  571        stx             %g7, [%g1 + GR_SNAP_RPC]
  572        sethi           %hi(trap_block), %g7
  573        or              %g7, %lo(trap_block), %g7
  574        sllx            %g2, TRAP_BLOCK_SZ_SHIFT, %g2
  575        add             %g7, %g2, %g7           ! this CPU's trap_block entry
  576        ldx             [%g7 + TRAP_PER_CPU_THREAD], %g3
  577        stx             %g3, [%g1 + GR_SNAP_THREAD]
  578        retry
 579
  580        .globl          xcall_fetch_glob_pmu
        /* Cross-call slave: snapshot this CPU's %pic and %pcr performance
         * registers into its global_cpu_snapshot slot (slots 4 and 0;
         * the layout presumably mirrors the pmu snapshot struct — the
         * n4 variant below fills the remaining slots).
         */
  581xcall_fetch_glob_pmu:
  582        sethi           %hi(global_cpu_snapshot), %g1
  583        or              %g1, %lo(global_cpu_snapshot), %g1
  584        __GET_CPUID(%g2)
  585        sllx            %g2, 6, %g3             ! cpu id * 64 = slot offset
  586        add             %g1, %g3, %g1
  587        rd              %pic, %g7
  588        stx             %g7, [%g1 + (4 * 8)]
  589        rd              %pcr, %g7
  590        stx             %g7, [%g1 + (0 * 8)]
  591        retry
 592
  593        .globl          xcall_fetch_glob_pmu_n4
        /* Niagara-4 flavour of the PMU snapshot: read the four PIC
         * counters through ASI_PIC (offsets 0x00/0x08/0x10/0x18 into
         * slots 4-7), then the four PCRs via HV_FAST_VT_GET_PERFREG
         * hypercalls (regs 3..0 into slots 3..0).  %o0/%o1/%o5 are
         * preserved across the hypercalls in %g2/%g3/%g7.
         */
  594xcall_fetch_glob_pmu_n4:
  595        sethi           %hi(global_cpu_snapshot), %g1
  596        or              %g1, %lo(global_cpu_snapshot), %g1
  597        __GET_CPUID(%g2)
  598        sllx            %g2, 6, %g3             ! cpu id * 64 = slot offset
  599        add             %g1, %g3, %g1

  601        ldxa            [%g0] ASI_PIC, %g7
  602        stx             %g7, [%g1 + (4 * 8)]
  603        mov             0x08, %g3
  604        ldxa            [%g3] ASI_PIC, %g7
  605        stx             %g7, [%g1 + (5 * 8)]
  606        mov             0x10, %g3
  607        ldxa            [%g3] ASI_PIC, %g7
  608        stx             %g7, [%g1 + (6 * 8)]
  609        mov             0x18, %g3
  610        ldxa            [%g3] ASI_PIC, %g7
  611        stx             %g7, [%g1 + (7 * 8)]

        /* Save %o0/%o1/%o5, which the fast traps consume/clobber. */
  613        mov             %o0, %g2
  614        mov             %o1, %g3
  615        mov             %o5, %g7

  617        mov             HV_FAST_VT_GET_PERFREG, %o5
  618        mov             3, %o0                  ! ARG0: perfreg index
  619        ta              HV_FAST_TRAP
  620        stx             %o1, [%g1 + (3 * 8)]    ! RET1: perfreg value
  621        mov             HV_FAST_VT_GET_PERFREG, %o5
  622        mov             2, %o0
  623        ta              HV_FAST_TRAP
  624        stx             %o1, [%g1 + (2 * 8)]
  625        mov             HV_FAST_VT_GET_PERFREG, %o5
  626        mov             1, %o0
  627        ta              HV_FAST_TRAP
  628        stx             %o1, [%g1 + (1 * 8)]
  629        mov             HV_FAST_VT_GET_PERFREG, %o5
  630        mov             0, %o0
  631        ta              HV_FAST_TRAP
  632        stx             %o1, [%g1 + (0 * 8)]

        /* Restore the saved %o registers. */
  634        mov             %g2, %o0
  635        mov             %g3, %o1
  636        mov             %g7, %o5

  638        retry
 639
  640#ifdef DCACHE_ALIASING_POSSIBLE
  641        .align          32
  642        .globl          xcall_flush_dcache_page_cheetah
        /* Cross-call slave, Cheetah: invalidate the D-cache for one
         * physical page via ASI_DCACHE_INVALIDATE, one 32-byte line
         * at a time (same technique as __cheetah_flush_dcache_page).
         */
  643xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
  644        sethi           %hi(PAGE_SIZE), %g3
  6451:      subcc           %g3, (1 << 5), %g3      ! step one 32-byte line
  646        stxa            %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
  647        membar          #Sync
  648        bne,pt          %icc, 1b
  649         nop
  650        retry
  651        nop
  652#endif /* DCACHE_ALIASING_POSSIBLE */
 653
  654        .globl          xcall_flush_dcache_page_spitfire
        /* Cross-call slave, Spitfire: walk the 16K D-cache by tag,
         * invalidating lines that match the page (as __flush_dcache_page
         * does), then — if %g5 says the page is mapped — flush the
         * I-cache over the page's kernel virtual address too.
         * Without DCACHE_ALIASING_POSSIBLE only the I-cache loop runs.
         */
  655xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
  656                                     %g7 == kernel page virtual address
  657                                     %g5 == (page->mapping != NULL)  */
  658#ifdef DCACHE_ALIASING_POSSIBLE
  659        srlx            %g1, (13 - 2), %g1      ! Form tag comparitor
  660        sethi           %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
  661        sub             %g3, (1 << 5), %g3      ! D$ linesize == 32
  6621:      ldxa            [%g3] ASI_DCACHE_TAG, %g2
  663        andcc           %g2, 0x3, %g0           ! valid bits set?
  664        be,pn           %xcc, 2f
  665         andn           %g2, 0x3, %g2           ! clear valid bits for compare
  666        cmp             %g2, %g1                ! tag match?

  668        bne,pt          %xcc, 2f
  669         nop
  670        stxa            %g0, [%g3] ASI_DCACHE_TAG       ! invalidate the line
  671        membar          #Sync
  6722:      cmp             %g3, 0
  673        bne,pt          %xcc, 1b
  674         sub            %g3, (1 << 5), %g3

  675        brz,pn          %g5, 2f                 ! no mapping: skip I-cache flush
  676#endif /* DCACHE_ALIASING_POSSIBLE */
  677         sethi          %hi(PAGE_SIZE), %g3

  6801:      flush           %g7                     ! I-cache flush, 32 bytes per step
  681        subcc           %g3, (1 << 5), %g3
  682        bne,pt          %icc, 1b
  683         add            %g7, (1 << 5), %g7

  6852:      retry
  686        nop
  687        nop
 688
  689        /* %g5: error
  690         * %g6: tlb op
  691         */
        /* Error path for the hypervisor xcall TLB routines below: build a
         * full trap frame via etrap and report through
         * hypervisor_tlbop_error_xcall().
         * NOTE(review): %g5/%g6 are shuffled to %g4/%g5 before etrap and
         * then read back from %l4/%l5 — presumably etrap saves those
         * globals into the locals; confirm against etrap.
         */
  692__hypervisor_tlb_xcall_error:
  693        mov     %g5, %g4
  694        mov     %g6, %g5
  695        ba,pt   %xcc, etrap
  696         rd     %pc, %g7                ! delay slot: etrap return address
  697        mov     %l4, %o0                ! arg0: HV error code
  698        call    hypervisor_tlbop_error_xcall
  699         mov    %l5, %o1                ! arg1: which TLB op failed
  700        ba,a,pt %xcc, rtrap
 701
  702        .globl          __hypervisor_xcall_flush_tlb_mm
        /* sun4v cross-call slave: demap context %g5 via
         * HV_FAST_MMU_DEMAP_CTX.  The interrupted code's %o registers
         * must survive, so they are parked in the scratch globals
         * around the hypercall.  Patched over xcall_flush_tlb_mm.
         */
  703__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
  704        /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
  705        mov             %o0, %g2                ! save interrupted %o0..%o3,%o5
  706        mov             %o1, %g3
  707        mov             %o2, %g4
  708        mov             %o3, %g1
  709        mov             %o5, %g7
  710        clr             %o0             /* ARG0: CPU lists unimplemented */
  711        clr             %o1             /* ARG1: CPU lists unimplemented */
  712        mov             %g5, %o2        /* ARG2: mmu context */
  713        mov             HV_MMU_ALL, %o3 /* ARG3: flags */
  714        mov             HV_FAST_MMU_DEMAP_CTX, %o5
  715        ta              HV_FAST_TRAP
  716        mov             HV_FAST_MMU_DEMAP_CTX, %g6     ! op id for the error path
  717        brnz,pn         %o0, __hypervisor_tlb_xcall_error
  718         mov            %o0, %g5               ! error code for the error path
  719        mov             %g2, %o0                ! restore interrupted %o regs
  720        mov             %g3, %o1
  721        mov             %g4, %o2
  722        mov             %g1, %o3
  723        mov             %g7, %o5
  724        membar          #Sync
  725        retry
 726
  727        .globl          __hypervisor_xcall_flush_tlb_page
        /* sun4v cross-call slave: unmap one page (%g1) in context %g5 via
         * an MMU_UNMAP_ADDR trap, preserving the interrupted %o0-%o2.
         * Patched over xcall_flush_tlb_page at boot.
         */
  728__hypervisor_xcall_flush_tlb_page: /* 17 insns */
  729        /* %g5=ctx, %g1=vaddr */
  730        mov             %o0, %g2                ! save interrupted %o0..%o2
  731        mov             %o1, %g3
  732        mov             %o2, %g4
  733        mov             %g1, %o0                /* ARG0: virtual address */
  734        mov             %g5, %o1                /* ARG1: mmu context */
  735        mov             HV_MMU_ALL, %o2         /* ARG2: flags */
  736        srlx            %o0, PAGE_SHIFT, %o0
  737        sllx            %o0, PAGE_SHIFT, %o0    ! page-align (drops the IMMU bit)
  738        ta              HV_MMU_UNMAP_ADDR_TRAP
  739        mov             HV_MMU_UNMAP_ADDR_TRAP, %g6    ! op id for the error path
  740        brnz,a,pn       %o0, __hypervisor_tlb_xcall_error
  741         mov            %o0, %g5               ! annulled unless branch taken
  742        mov             %g2, %o0                ! restore interrupted %o regs
  743        mov             %g3, %o1
  744        mov             %g4, %o2
  745        membar          #Sync
  746        retry
 747
  748        .globl          __hypervisor_xcall_flush_tlb_kernel_range
        /* sun4v cross-call slave: unmap kernel range [%g1, %g7) page by
         * page (context 0) via MMU_UNMAP_ADDR traps, walking from the
         * top down and preserving the interrupted %o0-%o2.
         * Patched over xcall_flush_tlb_kernel_range at boot.
         */
  749__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
  750        /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
  751        sethi           %hi(PAGE_SIZE - 1), %g2
  752        or              %g2, %lo(PAGE_SIZE - 1), %g2
  753        andn            %g1, %g2, %g1           ! page-align start
  754        andn            %g7, %g2, %g7           ! page-align end
  755        sub             %g7, %g1, %g3
  756        add             %g2, 1, %g2             ! %g2 = PAGE_SIZE (reused below)
  757        sub             %g3, %g2, %g3           ! %g3 = offset of last page
  758        mov             %o0, %g2                ! save interrupted %o0..%o2
  759        mov             %o1, %g4
  760        mov             %o2, %g7
  7611:      add             %g1, %g3, %o0   /* ARG0: virtual address */
  762        mov             0, %o1          /* ARG1: mmu context */
  763        mov             HV_MMU_ALL, %o2 /* ARG2: flags */
  764        ta              HV_MMU_UNMAP_ADDR_TRAP
  765        mov             HV_MMU_UNMAP_ADDR_TRAP, %g6   ! op id for the error path
  766        brnz,pn         %o0, __hypervisor_tlb_xcall_error
  767         mov            %o0, %g5               ! error code for the error path
  768        sethi           %hi(PAGE_SIZE), %o2     ! %o2 is free; reload the page step
  769        brnz,pt         %g3, 1b
  770         sub            %g3, %o2, %g3
  771        mov             %g2, %o0                ! restore interrupted %o regs
  772        mov             %g4, %o1
  773        mov             %g7, %o2
  774        membar          #Sync
  775        retry
 776
  777        /* These just get rescheduled to PIL vectors. */
        /* Each trampoline below raises the corresponding software
         * interrupt level on this CPU and resumes the interrupted
         * instruction; the real work happens in the PIL handler.
         */
  778        .globl          xcall_call_function
  779xcall_call_function:
  780        wr              %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
  781        retry

  783        .globl          xcall_call_function_single
  784xcall_call_function_single:
  785        wr              %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
  786        retry

  788        .globl          xcall_receive_signal
  789xcall_receive_signal:
  790        wr              %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
  791        retry

  793        .globl          xcall_capture
  794xcall_capture:
  795        wr              %g0, (1 << PIL_SMP_CAPTURE), %set_softint
  796        retry

  798        .globl          xcall_new_mmu_context_version
  799xcall_new_mmu_context_version:
  800        wr              %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
  801        retry

  803#ifdef CONFIG_KGDB
  804        .globl          xcall_kgdb_capture
  805xcall_kgdb_capture:
  806        wr              %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
  807        retry
  808#endif
 809
 810#endif /* CONFIG_SMP */
 811
 812
  813        .globl          hypervisor_patch_cachetlbops
        /* Replace the generic cache/TLB flush routines (and, under SMP,
         * the cross-call slaves) with the sun4v hypervisor variants at
         * boot.  Each instruction count in %o2 must match the size of
         * the corresponding __hypervisor_* routine.
         */
  814hypervisor_patch_cachetlbops:
  815        save            %sp, -128, %sp

  817        sethi           %hi(__flush_tlb_mm), %o0
  818        or              %o0, %lo(__flush_tlb_mm), %o0
  819        sethi           %hi(__hypervisor_flush_tlb_mm), %o1
  820        or              %o1, %lo(__hypervisor_flush_tlb_mm), %o1
  821        call            tlb_patch_one
  822         mov            10, %o2

  824        sethi           %hi(__flush_tlb_page), %o0
  825        or              %o0, %lo(__flush_tlb_page), %o0
  826        sethi           %hi(__hypervisor_flush_tlb_page), %o1
  827        or              %o1, %lo(__hypervisor_flush_tlb_page), %o1
  828        call            tlb_patch_one
  829         mov            11, %o2

  831        sethi           %hi(__flush_tlb_pending), %o0
  832        or              %o0, %lo(__flush_tlb_pending), %o0
  833        sethi           %hi(__hypervisor_flush_tlb_pending), %o1
  834        or              %o1, %lo(__hypervisor_flush_tlb_pending), %o1
  835        call            tlb_patch_one
  836         mov            16, %o2

  838        sethi           %hi(__flush_tlb_kernel_range), %o0
  839        or              %o0, %lo(__flush_tlb_kernel_range), %o0
  840        sethi           %hi(__hypervisor_flush_tlb_kernel_range), %o1
  841        or              %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
  842        call            tlb_patch_one
  843         mov            16, %o2

  845#ifdef DCACHE_ALIASING_POSSIBLE
  846        sethi           %hi(__flush_dcache_page), %o0
  847        or              %o0, %lo(__flush_dcache_page), %o0
  848        sethi           %hi(__hypervisor_flush_dcache_page), %o1
  849        or              %o1, %lo(__hypervisor_flush_dcache_page), %o1
  850        call            tlb_patch_one
  851         mov            2, %o2
  852#endif /* DCACHE_ALIASING_POSSIBLE */

  854#ifdef CONFIG_SMP
  855        sethi           %hi(xcall_flush_tlb_mm), %o0
  856        or              %o0, %lo(xcall_flush_tlb_mm), %o0
  857        sethi           %hi(__hypervisor_xcall_flush_tlb_mm), %o1
  858        or              %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
  859        call            tlb_patch_one
  860         mov            21, %o2

  862        sethi           %hi(xcall_flush_tlb_page), %o0
  863        or              %o0, %lo(xcall_flush_tlb_page), %o0
  864        sethi           %hi(__hypervisor_xcall_flush_tlb_page), %o1
  865        or              %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
  866        call            tlb_patch_one
  867         mov            17, %o2

  869        sethi           %hi(xcall_flush_tlb_kernel_range), %o0
  870        or              %o0, %lo(xcall_flush_tlb_kernel_range), %o0
  871        sethi           %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
  872        or              %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
  873        call            tlb_patch_one
  874         mov            25, %o2
  875#endif /* CONFIG_SMP */

  877        ret
  878         restore
 879