uboot/arch/arm/cpu/armv8/cache.S
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one cache level by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9, x12: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
        lsl     x12, x0, #1
        msr     csselr_el1, x12         /* select cache level */
        isb                             /* sync change of csselr_el1 */
        mrs     x6, ccsidr_el1          /* read the new ccsidr_el1 */
        and     x2, x6, #7              /* x2 <- log2(cache line size)-4 */
        add     x2, x2, #4              /* x2 <- log2(cache line size) */
        mov     x3, #0x3ff
        and     x3, x3, x6, lsr #3      /* x3 <- max number of #ways */
        clz     w5, w3                  /* bit position of #ways */
        mov     x4, #0x7fff
        and     x4, x4, x6, lsr #13     /* x4 <- max number of #sets */
        /* x12 <- cache level << 1 */
        /* x2 <- line length offset */
        /* x3 <- number of cache ways - 1 */
        /* x4 <- number of cache sets - 1 */
        /* x5 <- bit position of #ways */

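/*
 * Each dc (c)isw operand is built as described in the ARMv8 ARM for
 * set/way maintenance: the cache level goes in bits [3:1] (x12), the
 * way number is shifted left by x5 = clz(ways - 1) so it lands in the
 * top bits, and the set number is shifted left by x2 = log2(line size).
 */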
loop_set:
        mov     x6, x3                  /* x6 <- working copy of #ways */
loop_way:
        lsl     x7, x6, x5
        orr     x9, x12, x7             /* map way and level to cisw value */
        lsl     x7, x4, x2
        orr     x9, x9, x7              /* map set number to cisw value */
        tbz     w1, #0, 1f
        dc      isw, x9                 /* invalidate by set/way */
        b       2f
1:      dc      cisw, x9                /* clean & invalidate by set/way */
2:      subs    x6, x6, #1              /* decrement the way */
        b.ge    loop_way
        subs    x4, x4, #1              /* decrement the set */
        b.ge    loop_set

        ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
        mov     x1, x0
        dsb     sy
        mrs     x10, clidr_el1          /* read clidr_el1 */
        lsr     x11, x10, #24
        and     x11, x11, #0x7          /* x11 <- loc */
        cbz     x11, finished           /* if loc is 0, exit */
        mov     x15, lr
        mov     x0, #0                  /* start flush at cache level 0 */
        /* x0  <- cache level */
        /* x10 <- clidr_el1 */
        /* x11 <- loc */
        /* x15 <- return address */

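/*
 * clidr_el1 carries a 3-bit Ctype field per cache level: 0 no cache,
 * 1 instruction only, 2 data only, 3 separate I+D, 4 unified.  Only
 * levels holding data (Ctype >= 2) are cleaned/invalidated below.
 */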
loop_level:
        lsl     x12, x0, #1
        add     x12, x12, x0            /* x12 <- tripled cache level */
        lsr     x12, x10, x12
        and     x12, x12, #7            /* x12 <- cache type */
        cmp     x12, #2
        b.lt    skip                    /* skip if no cache or icache */
        bl      __asm_dcache_level      /* x1 = 0 flush, 1 invalidate */
skip:
        add     x0, x0, #1              /* increment cache level */
        cmp     x11, x0
        b.gt    loop_level

        mov     x0, #0
        msr     csselr_el1, x0          /* restore csselr_el1 */
        dsb     sy
        isb
        mov     lr, x15

finished:
        ret
ENDPROC(__asm_dcache_all)
.popsection

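/*
 * void __asm_flush_dcache_all(void)
 *
 * clean & invalidate all levels of data cache by set/way.
 */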
.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
        mov     x0, #0
        b       __asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

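/*
 * void __asm_invalidate_dcache_all(void)
 *
 * invalidate all levels of data cache by set/way.
 */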
.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
        mov     x0, #0x1
        b       __asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
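/*
 * ctr_el0.DminLine (bits [19:16]) is log2 of the number of words in the
 * smallest data cache line, so the minimum line size in bytes is
 * 4 << DminLine; the start address is rounded down to that boundary.
 */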
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
        mrs     x3, ctr_el0
        lsr     x3, x3, #16
        and     x3, x3, #0xf            /* x3 <- ctr_el0.DminLine */
        mov     x2, #4
        lsl     x2, x2, x3              /* cache line size */

        /* x2 <- minimal cache line size in cache system */
        sub     x3, x2, #1
        bic     x0, x0, x3              /* align start to a line boundary */
1:      dc      civac, x0       /* clean & invalidate data or unified cache */
        add     x0, x0, x2
        cmp     x0, x1
        b.lo    1b
        dsb     sy
        ret
ENDPROC(__asm_flush_dcache_range)
.popsection

/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
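/*
 * Note: start and end should be cache-line aligned.  A partially
 * covered line is still invalidated as a whole, so unrelated dirty data
 * sharing that line can be discarded.
 */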
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
        mrs     x3, ctr_el0
        ubfm    x3, x3, #16, #19        /* x3 <- ctr_el0.DminLine */
        mov     x2, #4
        lsl     x2, x2, x3              /* cache line size */

        /* x2 <- minimal cache line size in cache system */
        sub     x3, x2, #1
        bic     x0, x0, x3              /* align start to a line boundary */
1:      dc      ivac, x0        /* invalidate data or unified cache */
        add     x0, x0, x2
        cmp     x0, x1
        b.lo    1b
        dsb     sy
        ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
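/*
 * ic ialluis: invalidate all instruction caches to the Point of
 * Unification, Inner Shareable.  The isb that follows resynchronizes
 * the instruction stream of this PE.
 */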
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
        ic      ialluis
        isb     sy
        ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

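/*
 * Weak stubs for outer (L3/system) cache maintenance.  Such caches are
 * typically not described in clidr_el1, so the set/way loop above
 * cannot reach them; platforms that have one override these, and the
 * defaults simply report success.
 */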
.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
        mov     x0, #0                  /* return status as success */
        ret
ENDPROC(__asm_invalidate_l3_dcache)
        .weak   __asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
        mov     x0, #0                  /* return status as success */
        ret
ENDPROC(__asm_flush_l3_dcache)
        .weak   __asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
        mov     x0, #0                  /* return status as success */
        ret
ENDPROC(__asm_invalidate_l3_icache)
        .weak   __asm_invalidate_l3_icache
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
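/*
 * The sequence below saves SCTLR, clears M/C/I to run with the MMU and
 * caches off, invalidates the TLBs, installs the new TTBR0 and finally
 * restores the original SCTLR, with an isb after every system register
 * update so each change takes effect before the next step.
 */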
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
        /* x2 = SCTLR (alive throughout the function) */
        switch_el x4, 3f, 2f, 1f
3:      mrs     x2, sctlr_el3
        b       0f
2:      mrs     x2, sctlr_el2
        b       0f
1:      mrs     x2, sctlr_el1
0:

        /* Clear CR_M, CR_C and CR_I in SCTLR to disable the MMU and caches */
        movn    x1, #(CR_M | CR_C | CR_I)
        and     x1, x2, x1
        switch_el x4, 3f, 2f, 1f
3:      msr     sctlr_el3, x1
        b       0f
2:      msr     sctlr_el2, x1
        b       0f
1:      msr     sctlr_el1, x1
0:      isb

        /* This call only clobbers x30 (lr) and x9 (unused) */
        mov     x3, x30
        bl      __asm_invalidate_tlb_all

        /* From here on we're running safely with caches disabled */

        /* Set TTBR to our first argument */
        switch_el x4, 3f, 2f, 1f
3:      msr     ttbr0_el3, x0
        b       0f
2:      msr     ttbr0_el2, x0
        b       0f
1:      msr     ttbr0_el1, x0
0:      isb

        /* Restore original SCTLR and thus enable caches again */
        switch_el x4, 3f, 2f, 1f
3:      msr     sctlr_el3, x2
        b       0f
2:      msr     sctlr_el2, x2
        b       0f
1:      msr     sctlr_el1, x2
0:      isb

        ret     x3
ENDPROC(__asm_switch_ttbr)
.popsection