linux/arch/sparc/kernel/ktlb.S
/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/tsb.h>

        .text
        .align          32

kvmap_itlb:
        /* g6: TAG TARGET */
        mov             TLB_TAG_ACCESS, %g4
        ldxa            [%g4] ASI_IMMU, %g4
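
        /* The TAG ACCESS register supplies the virtual address that
         * missed the TLB in its upper bits, with the context ID in
         * its low bits.
         */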

        /* The kernel executes in context zero, therefore we do not
         * need to clear the context ID bits out of %g4 here.
         */

        /* sun4v_itlb_miss branches here with the missing virtual
         * address already loaded into %g4
         */
kvmap_itlb_4v:

        /* Catch kernel NULL pointer calls.  */
        sethi           %hi(PAGE_SIZE), %g5
        cmp             %g4, %g5
        blu,pn          %xcc, kvmap_itlb_longpath
         nop

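        /* Probe the kernel TSB (see KERN_TSB_LOOKUP_TL1 in
         * asm/tsb.h): on a hit this branches straight to
         * kvmap_itlb_load with the TTE in %g5; on a miss it falls
         * through with %g1 left pointing at the TSB entry so the
         * fill paths below can install the translation.
         */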
        KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
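        /* Classify the miss by address range: below LOW_OBP_ADDRESS
         * we assume a vmalloc/module text address, between
         * LOW_OBP_ADDRESS and 4GB an OBP mapping, and anything
         * higher falls through to the page table walk.  The first
         * delay slot starts building the 4GB constant (1 << 32) in
         * %g5.
         */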
        sethi           %hi(LOW_OBP_ADDRESS), %g5
        cmp             %g4, %g5
        blu,pn          %xcc, kvmap_itlb_vmalloc_addr
         mov            0x1, %g5
        sllx            %g5, 32, %g5
        cmp             %g4, %g5
        blu,pn          %xcc, kvmap_itlb_obp
         nop

kvmap_itlb_vmalloc_addr:
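        /* Walk the kernel page tables (see KERN_PGTABLE_WALK in
         * asm/tsb.h); a failed walk branches to the longpath,
         * otherwise the PTE is left in %g5.
         */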
        KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

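        /* Install the translation into the kernel TSB: TSB_LOCK_TAG
         * atomically sets the lock bit in the tag of the entry %g1
         * points at, and TSB_WRITE stores the new PTE before the
         * new tag, which also drops the lock (see asm/tsb.h).
         */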
        TSB_LOCK_TAG(%g1, %g2, %g7)
        TSB_WRITE(%g1, %g5, %g6)

        /* fallthrough to TLB load */

kvmap_itlb_load:

661:    stxa            %g5, [%g0] ASI_ITLB_DATA_IN
        retry
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        nop
        nop
        .previous
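
        /* Each .sun4v_2insn_patch entry records the address of the
         * two instructions at 661: together with two replacement
         * instructions, which are patched in during early boot when
         * running on a sun4v hypervisor.
         */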

        /* For sun4v the ASI_ITLB_DATA_IN store and the retry
         * instruction get nop'd out and we get here to branch
         * to the sun4v tlb load code.  The registers are set up
         * as follows:
         *
         * %g4: vaddr
         * %g5: PTE
         * %g6: TAG
         *
         * The sun4v TLB load wants the PTE in %g3 so we fix that
         * up here.
         */
        ba,pt           %xcc, sun4v_itlb_load
         mov            %g5, %g3

kvmap_itlb_longpath:

661:    rdpr    %pstate, %g5
        wrpr    %g5, PSTATE_AG | PSTATE_MG, %pstate
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        SET_GL(1)
        nop
        .previous
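
        /* On sun4u the wrpr above toggles PSTATE_AG/PSTATE_MG,
         * switching from the MMU globals active at trap entry to
         * the alternate globals the fault path expects; on sun4v
         * the patched-in SET_GL(1) selects global register level 1
         * instead.
         *
         * sparc64_realfault_common expects the fault code in %g4
         * and the fault address in %g5; for an ITLB miss the
         * faulting PC read from %tpc below is the fault address.
         */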

        rdpr    %tpc, %g5
        ba,pt   %xcc, sparc64_realfault_common
         mov    FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
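        /* Search the table of OBP (OpenFirmware) translations
         * captured from the firmware at boot (prom_trans[] in
         * mm/init_64.c); on a hit the PTE lands in %g5 and is
         * inserted into the TSB below.
         */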
        OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

        TSB_LOCK_TAG(%g1, %g2, %g7)

        TSB_WRITE(%g1, %g5, %g6)

        ba,pt           %xcc, kvmap_itlb_load
         nop

kvmap_dtlb_obp:
        OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

        TSB_LOCK_TAG(%g1, %g2, %g7)

        TSB_WRITE(%g1, %g5, %g6)

        ba,pt           %xcc, kvmap_dtlb_load
         nop

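        /* Early in boot the linear mapping has no page tables yet,
         * so the TTE is computed arithmetically: XOR-ing the
         * virtual address with kern_linear_pte_xor yields the final
         * PTE for a 4MB linear page.  The xor sits in the branch
         * delay slot.
         */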
kvmap_linear_early:
        sethi           %hi(kern_linear_pte_xor), %g7
        ldx             [%g7 + %lo(kern_linear_pte_xor)], %g2
        ba,pt           %xcc, kvmap_dtlb_tsb4m_load
         xor            %g2, %g4, %g5

        .align          32
kvmap_dtlb_tsb4m_load:
        TSB_LOCK_TAG(%g1, %g2, %g7)
        TSB_WRITE(%g1, %g5, %g6)
        ba,pt           %xcc, kvmap_dtlb_load
         nop

kvmap_dtlb:
        /* %g6: TAG TARGET */
        mov             TLB_TAG_ACCESS, %g4
        ldxa            [%g4] ASI_DMMU, %g4

        /* The kernel executes in context zero, therefore we do not
         * need to clear the context ID bits out of %g4 here.
         */

        /* sun4v_dtlb_miss branches here with the missing virtual
         * address already loaded into %g4
         */
kvmap_dtlb_4v:
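        /* Kernel linear (PAGE_OFFSET) addresses live in the
         * negative half of the 64-bit address space, so a
         * non-negative %g4 (bit 63 clear) cannot be a linear
         * mapping address.
         */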
        brgez,pn        %g4, kvmap_dtlb_nonlinear
         nop

#ifdef CONFIG_DEBUG_PAGEALLOC
        /* Index through the base page size TSB even for linear
         * mappings when using page allocation debugging.
         */
        KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
        /* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
        KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
        /* Linear mapping TSB lookup failed.  Fallthrough to kernel
         * page table based lookup.
         */
        .globl          kvmap_linear_patch
kvmap_linear_patch:
        ba,a,pt         %xcc, kvmap_linear_early
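        /* Once the kernel page tables fully describe the linear
         * mapping, mm/init_64.c overwrites the branch above with a
         * nop, so later misses fall through to the page table walk
         * below.
         */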

kvmap_dtlb_vmalloc_addr:
        KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

        TSB_LOCK_TAG(%g1, %g2, %g7)
        TSB_WRITE(%g1, %g5, %g6)

        /* fallthrough to TLB load */

kvmap_dtlb_load:

661:    stxa            %g5, [%g0] ASI_DTLB_DATA_IN     ! Reload TLB
        retry
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        nop
        nop
        .previous

        /* For sun4v the ASI_DTLB_DATA_IN store and the retry
         * instruction get nop'd out and we get here to branch
         * to the sun4v tlb load code.  The registers are set up
         * as follows:
         *
         * %g4: vaddr
         * %g5: PTE
         * %g6: TAG
         *
         * The sun4v TLB load wants the PTE in %g3 so we fix that
         * up here.
         */
        ba,pt           %xcc, sun4v_dtlb_load
         mov            %g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
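        /* vmemmap addresses are translated with a plain kernel page
         * table walk; the result is loaded into the TLB directly
         * and never inserted into the TSB.
         */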
kvmap_vmemmap:
        KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
        ba,a,pt         %xcc, kvmap_dtlb_load
#endif

kvmap_dtlb_nonlinear:
        /* Catch kernel NULL pointer derefs.  */
        sethi           %hi(PAGE_SIZE), %g5
        cmp             %g4, %g5
        bleu,pn         %xcc, kvmap_dtlb_longpath
         nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* Do not use the TSB for vmemmap.  */
        sethi           %hi(VMEMMAP_BASE), %g5
        ldx             [%g5 + %lo(VMEMMAP_BASE)], %g5
        cmp             %g4, %g5
        bgeu,pn         %xcc, kvmap_vmemmap
         nop
#endif

        KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
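        /* A legitimate non-linear kernel address must lie inside
         * [MODULES_VADDR, VMALLOC_END); anything outside that
         * window is a genuine fault.  The first delay slot already
         * preloads the %hi() part of VMALLOC_END.
         */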
        sethi           %hi(MODULES_VADDR), %g5
        cmp             %g4, %g5
        blu,pn          %xcc, kvmap_dtlb_longpath
         sethi          %hi(VMALLOC_END), %g5
        ldx             [%g5 + %lo(VMALLOC_END)], %g5
        cmp             %g4, %g5
        bgeu,pn         %xcc, kvmap_dtlb_longpath
         nop

kvmap_check_obp:
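        /* Mirror of the ITLB classification above: OBP translations
         * occupy [LOW_OBP_ADDRESS, 4GB); addresses on either side
         * of that range are resolved through the kernel page
         * tables.
         */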
        sethi           %hi(LOW_OBP_ADDRESS), %g5
        cmp             %g4, %g5
        blu,pn          %xcc, kvmap_dtlb_vmalloc_addr
         mov            0x1, %g5
        sllx            %g5, 32, %g5
        cmp             %g4, %g5
        blu,pn          %xcc, kvmap_dtlb_obp
         nop
        ba,pt           %xcc, kvmap_dtlb_vmalloc_addr
         nop

kvmap_dtlb_longpath:

661:    rdpr    %pstate, %g5
        wrpr    %g5, PSTATE_AG | PSTATE_MG, %pstate
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        SET_GL(1)
        ldxa            [%g0] ASI_SCRATCHPAD, %g5
        .previous
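
        /* On sun4v, scratchpad register zero holds the address of
         * the per-cpu MMU fault status area; the faulting address
         * is loaded from it below via HV_FAULT_D_ADDR_OFFSET.
         */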

        rdpr    %tl, %g3
        cmp     %g3, 1

661:    mov     TLB_TAG_ACCESS, %g4
        ldxa    [%g4] ASI_DMMU, %g5
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        ldx     [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
        nop
        .previous

        /* The kernel executes in context zero, therefore we do not
         * need to clear the context ID bits out of %g5 here.
         */

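        /* The condition codes from the %tl comparison above survive
         * the intervening loads.  At TL==1 this is an ordinary
         * fault; at greater trap levels the miss occurred inside a
         * register window trap handler and must detour through the
         * window fixup trampoline.
         */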
        be,pt   %xcc, sparc64_realfault_common
         mov    FAULT_CODE_DTLB, %g4
        ba,pt   %xcc, winfix_trampoline
         nop