uboot/arch/arm/cpu/armv8/cache.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
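/*
 * Note: the loop below builds the DC ISW/CISW operand roughly as
 *
 *   operand = (level << 1) | (way << clz32(#ways - 1)) | (set << L)
 *
 * where L is log2 of the cache line size in bytes as decoded from
 * ccsidr_el1, matching the set/way format described in the ARMv8 ARM.
 */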
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
        lsl     x12, x0, #1
        msr     csselr_el1, x12         /* select cache level */
        isb                             /* sync change of csselr_el1 */
        mrs     x6, ccsidr_el1          /* read the new ccsidr_el1 */
        and     x2, x6, #7              /* x2 <- log2(cache line size)-4 */
        add     x2, x2, #4              /* x2 <- log2(cache line size) */
        mov     x3, #0x3ff
        and     x3, x3, x6, lsr #3      /* x3 <- max number of #ways */
        clz     w5, w3                  /* bit position of #ways */
        mov     x4, #0x7fff
        and     x4, x4, x6, lsr #13     /* x4 <- max number of #sets */
        /* x12 <- cache level << 1 */
        /* x2 <- line length offset */
        /* x3 <- number of cache ways - 1 */
        /* x4 <- number of cache sets - 1 */
        /* x5 <- bit position of #ways */

loop_set:
        mov     x6, x3                  /* x6 <- working copy of #ways */
loop_way:
        lsl     x7, x6, x5
        orr     x9, x12, x7             /* map way and level to cisw value */
        lsl     x7, x4, x2
        orr     x9, x9, x7              /* map set number to cisw value */
        tbz     w1, #0, 1f
        dc      isw, x9
        b       2f
1:      dc      cisw, x9                /* clean & invalidate by set/way */
2:      subs    x6, x6, #1              /* decrement the way */
        b.ge    loop_way
        subs    x4, x4, #1              /* decrement the set */
        b.ge    loop_set

        ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data caches by SET/WAY.
 */
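/*
 * Note: clidr_el1 is decoded below as follows (see the ARMv8 ARM):
 *   LoC (level of coherency)                  = clidr_el1[26:24]
 *   Ctype for the cache at (0-based) level n  = clidr_el1[3n+2:3n]
 * Levels whose Ctype is below 2 (no cache, or instruction cache only)
 * hold no data cache and are skipped.
 */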
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
        mov     x1, x0
        dsb     sy
        mrs     x10, clidr_el1          /* read clidr_el1 */
        lsr     x11, x10, #24
        and     x11, x11, #0x7          /* x11 <- loc */
        cbz     x11, finished           /* if loc is 0, exit */
        mov     x15, lr
        mov     x0, #0                  /* start flush at cache level 0 */
        /* x0  <- cache level */
        /* x10 <- clidr_el1 */
        /* x11 <- loc */
        /* x15 <- return address */

loop_level:
        lsl     x12, x0, #1
        add     x12, x12, x0            /* x12 <- tripled cache level */
        lsr     x12, x10, x12
        and     x12, x12, #7            /* x12 <- cache type */
        cmp     x12, #2
        b.lt    skip                    /* skip if no cache or icache */
        bl      __asm_dcache_level      /* x1 = 0 flush, 1 invalidate */
skip:
        add     x0, x0, #1              /* increment cache level */
        cmp     x11, x0
        b.gt    loop_level

        mov     x0, #0
        msr     csselr_el1, x0          /* restore csselr_el1 */
        dsb     sy
        isb
        mov     lr, x15

finished:
        ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
        mov     x0, #0
        b       __asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
        mov     x0, #0x1
        b       __asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
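/*
 * Note: ctr_el0[19:16] (DminLine) holds log2 of the smallest data cache
 * line size in words, so the line size in bytes is 4 << DminLine;
 * e.g. DminLine = 4 gives 64-byte lines. The start address is aligned
 * down to this size before the clean & invalidate loop.
 */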
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
        mrs     x3, ctr_el0
        lsr     x3, x3, #16
        and     x3, x3, #0xf
        mov     x2, #4
        lsl     x2, x2, x3              /* cache line size */

        /* x2 <- minimal cache line size in cache system */
        sub     x3, x2, #1
        bic     x0, x0, x3
1:      dc      civac, x0       /* clean & invalidate data or unified cache */
        add     x0, x0, x2
        cmp     x0, x1
        b.lo    1b
        dsb     sy
        ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
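/*
 * Note: the single ubfm below extracts the same DminLine field,
 * ctr_el0[19:16], that the lsr/and pair extracts in
 * __asm_flush_dcache_range above.
 */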
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
        mrs     x3, ctr_el0
        ubfm    x3, x3, #16, #19
        mov     x2, #4
        lsl     x2, x2, x3              /* cache line size */

        /* x2 <- minimal cache line size in cache system */
        sub     x3, x2, #1
        bic     x0, x0, x3
1:      dc      ivac, x0        /* invalidate data or unified cache */
        add     x0, x0, x2
        cmp     x0, x1
        b.lo    1b
        dsb     sy
        ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
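/*
 * Note: ic ialluis invalidates all instruction caches to the Point of
 * Unification across the Inner Shareable domain; the following isb
 * flushes the pipeline so subsequent instructions are refetched.
 */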
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
        ic      ialluis
        isb     sy
        ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
        mov     x0, #0                  /* return status as success */
        ret
ENDPROC(__asm_invalidate_l3_dcache)
        .weak   __asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
        mov     x0, #0                  /* return status as success */
        ret
ENDPROC(__asm_flush_l3_dcache)
        .weak   __asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
        mov     x0, #0                  /* return status as success */
        ret
ENDPROC(__asm_invalidate_l3_icache)
        .weak   __asm_invalidate_l3_icache
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
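/*
 * Note: the sequence below first clears SCTLR.{M,C,I} at the current
 * exception level so the MMU and caches are off, invalidates the TLBs,
 * writes the new table base to ttbr0_elx, and finally restores the
 * original SCTLR value to turn the MMU and caches back on.
 */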
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
        /* x2 = SCTLR (alive throughout the function) */
        switch_el x4, 3f, 2f, 1f
3:      mrs     x2, sctlr_el3
        b       0f
2:      mrs     x2, sctlr_el2
        b       0f
1:      mrs     x2, sctlr_el1
0:

        /* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
        movn    x1, #(CR_M | CR_C | CR_I)
        and     x1, x2, x1
        switch_el x4, 3f, 2f, 1f
3:      msr     sctlr_el3, x1
        b       0f
2:      msr     sctlr_el2, x1
        b       0f
1:      msr     sctlr_el1, x1
0:      isb

        /* This call only clobbers x30 (lr) and x9 (unused) */
        mov     x3, x30
        bl      __asm_invalidate_tlb_all

        /* From here on we're running safely with caches disabled */

        /* Set TTBR to our first argument */
        switch_el x4, 3f, 2f, 1f
3:      msr     ttbr0_el3, x0
        b       0f
2:      msr     ttbr0_el2, x0
        b       0f
1:      msr     ttbr0_el1, x0
0:      isb

        /* Restore original SCTLR and thus enable caches again */
        switch_el x4, 3f, 2f, 1f
3:      msr     sctlr_el3, x2
        b       0f
2:      msr     sctlr_el2, x2
        b       0f
1:      msr     sctlr_el1, x2
0:      isb

        ret     x3
ENDPROC(__asm_switch_ttbr)
.popsection