uboot/arch/arm/cpu/armv8/cache.S
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of data cache by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
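/*
 * Note: the field extraction below follows the original (non-CCIDX)
 * CCSIDR_EL1 layout from the ARMv8 ARM:
 *   LineSize      [2:0]   log2(words per line) - 2
 *   Associativity [12:3]  number of ways - 1
 *   NumSets       [27:13] number of sets - 1
 */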
ENTRY(__asm_dcache_level)
        lsl     x12, x0, #1
        msr     csselr_el1, x12         /* select cache level */
        isb                             /* sync change of csselr_el1 */
        mrs     x6, ccsidr_el1          /* read the new ccsidr_el1 */
        and     x2, x6, #7              /* x2 <- log2(cache line size)-4 */
        add     x2, x2, #4              /* x2 <- log2(cache line size) */
        mov     x3, #0x3ff
        and     x3, x3, x6, lsr #3      /* x3 <- max number of #ways */
        clz     w5, w3                  /* bit position of #ways */
        mov     x4, #0x7fff
        and     x4, x4, x6, lsr #13     /* x4 <- max number of #sets */
        /* x12 <- cache level << 1 */
        /* x2 <- line length offset */
        /* x3 <- number of cache ways - 1 */
        /* x4 <- number of cache sets - 1 */
        /* x5 <- bit position of #ways */

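/*
 * Per the ARMv8 ARM, the DC ISW/CISW operand is built from: Level in
 * bits [3:1], Set starting at bit log2(line size), and Way shifted up
 * to the top bits (left by clz(#ways - 1)). The loops below assemble
 * that value in x9 for every set/way pair of the selected level.
 */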
loop_set:
        mov     x6, x3                  /* x6 <- working copy of #ways */
loop_way:
        lsl     x7, x6, x5
        orr     x9, x12, x7             /* map way and level to cisw value */
        lsl     x7, x4, x2
        orr     x9, x9, x7              /* map set number to cisw value */
        tbz     w1, #0, 1f
        dc      isw, x9
        b       2f
1:      dc      cisw, x9                /* clean & invalidate by set/way */
2:      subs    x6, x6, #1              /* decrement the way */
        b.ge    loop_way
        subs    x4, x4, #1              /* decrement the set */
        b.ge    loop_set

        ret
ENDPROC(__asm_dcache_level)

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
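/*
 * CLIDR_EL1 drives the level walk: LoC lives in bits [26:24] and each
 * Ctype<n> field is 3 bits wide, which is why the level is tripled to
 * index it; a Ctype value of 2 or more means a data or unified cache
 * is present at that level.
 */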
ENTRY(__asm_dcache_all)
        mov     x1, x0
        dsb     sy
        mrs     x10, clidr_el1          /* read clidr_el1 */
        lsr     x11, x10, #24
        and     x11, x11, #0x7          /* x11 <- loc */
        cbz     x11, finished           /* if loc is 0, exit */
        mov     x15, lr
        mov     x0, #0                  /* start flush at cache level 0 */
        /* x0  <- cache level */
        /* x10 <- clidr_el1 */
        /* x11 <- loc */
        /* x15 <- return address */

loop_level:
        lsl     x12, x0, #1
        add     x12, x12, x0            /* x12 <- tripled cache level */
        lsr     x12, x10, x12
        and     x12, x12, #7            /* x12 <- cache type */
        cmp     x12, #2
        b.lt    skip                    /* skip if no cache or icache only */
        bl      __asm_dcache_level      /* x1 = 0 flush, 1 invalidate */
skip:
        add     x0, x0, #1              /* increment cache level */
        cmp     x11, x0
        b.gt    loop_level

        mov     x0, #0
        msr     csselr_el1, x0          /* restore csselr_el1 */
        dsb     sy
        isb
        mov     lr, x15

finished:
        ret
ENDPROC(__asm_dcache_all)

ENTRY(__asm_flush_dcache_all)
        mov     x0, #0
        b       __asm_dcache_all
ENDPROC(__asm_flush_dcache_all)

ENTRY(__asm_invalidate_dcache_all)
        mov     x0, #0x1
        b       __asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)

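/*
 * For reference, C callers see these entry points roughly as the
 * prototypes below (the exact header that declares them may differ
 * between U-Boot versions):
 *
 *   void __asm_flush_dcache_all(void);
 *   void __asm_invalidate_dcache_all(void);
 *   void __asm_flush_dcache_range(u64 start, u64 end);
 *   void __asm_invalidate_icache_all(void);
 *   int  __asm_flush_l3_cache(void);
 *   void __asm_switch_ttbr(u64 new_ttbr);
 */
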
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
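/*
 * CTR_EL0.DminLine (bits [19:16]) gives log2 of the smallest data cache
 * line in words, so 4 << DminLine is the line size in bytes. The start
 * address is rounded down to that line size, so any bytes sharing the
 * first line with the range are cleaned & invalidated as well.
 */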
ENTRY(__asm_flush_dcache_range)
        mrs     x3, ctr_el0
        lsr     x3, x3, #16
        and     x3, x3, #0xf
        mov     x2, #4
        lsl     x2, x2, x3              /* cache line size */

        /* x2 <- minimal cache line size in cache system */
        sub     x3, x2, #1
        bic     x0, x0, x3
1:      dc      civac, x0       /* clean & invalidate data or unified cache */
        add     x0, x0, x2
        cmp     x0, x1
        b.lo    1b
        dsb     sy
        ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
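/*
 * IC IALLUIS invalidates all instruction caches to the Point of
 * Unification for the Inner Shareable domain, i.e. it is broadcast to
 * the other cores in that domain; the ISB resynchronises the fetch
 * stream afterwards.
 */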
ENTRY(__asm_invalidate_icache_all)
        ic      ialluis
        isb     sy
        ret
ENDPROC(__asm_invalidate_icache_all)

ENTRY(__asm_flush_l3_cache)
        mov     x0, #0                  /* return status as success */
        ret
ENDPROC(__asm_flush_l3_cache)
        .weak   __asm_flush_l3_cache
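/*
 * __asm_flush_l3_cache is deliberately weak: platforms with an external
 * or system-level (L3) cache can override it with their own routine;
 * this default just reports success without touching anything.
 */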

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
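/*
 * The sequence below is: save the live SCTLR, clear M/C/I to turn the
 * MMU and caches off, invalidate the TLBs, install the new TTBR0, then
 * restore the saved SCTLR so translation and caches come back with the
 * new tables in effect. switch_el picks the EL3/EL2/EL1 register copy.
 */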
ENTRY(__asm_switch_ttbr)
        /* x2 = SCTLR (alive throughout the function) */
        switch_el x4, 3f, 2f, 1f
3:      mrs     x2, sctlr_el3
        b       0f
2:      mrs     x2, sctlr_el2
        b       0f
1:      mrs     x2, sctlr_el1
0:

        /* Unset CR_M | CR_C | CR_I from SCTLR to disable the MMU and caches */
        movn    x1, #(CR_M | CR_C | CR_I)
        and     x1, x2, x1
        switch_el x4, 3f, 2f, 1f
3:      msr     sctlr_el3, x1
        b       0f
2:      msr     sctlr_el2, x1
        b       0f
1:      msr     sctlr_el1, x1
0:      isb

        /* This call only clobbers x30 (lr) and x9 (unused) */
        mov     x3, x30
        bl      __asm_invalidate_tlb_all

        /* From here on we're running safely with caches disabled */

        /* Set TTBR to our first argument */
        switch_el x4, 3f, 2f, 1f
3:      msr     ttbr0_el3, x0
        b       0f
2:      msr     ttbr0_el2, x0
        b       0f
1:      msr     ttbr0_el1, x0
0:      isb

        /* Restore original SCTLR and thus enable caches again */
        switch_el x4, 3f, 2f, 1f
3:      msr     sctlr_el3, x2
        b       0f
2:      msr     sctlr_el2, x2
        b       0f
1:      msr     sctlr_el1, x2
0:      isb

        ret     x3
ENDPROC(__asm_switch_ttbr)