/*
 *  linux/arch/arm/mm/cache-v7m.S
 *
 *  Based on linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"

/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
        movw    \rt, #:lower16:BASEADDR_V7M_SCB + \reg
        movt    \rt, #:upper16:BASEADDR_V7M_SCB + \reg
        ldr     \rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
        movw\c  \tmp, #:lower16:BASEADDR_V7M_SCB + \op
        movt\c  \tmp, #:upper16:BASEADDR_V7M_SCB + \op
        str\c   \rt, [\tmp]
.endm
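
/*
 * Note: ARMv7-M has no cp15 coprocessor, so cache maintenance is done
 * by writing the address (or set/way word) to memory-mapped registers
 * in the System Control Block.  v7m_cacheop builds the register
 * address with a movw/movt pair and stores \rt to it; for example,
 * "dccmvac r0, r1" expands to roughly:
 *
 *      movw    r1, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
 *      movt    r1, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
 *      str     r0, [r1]
 */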

.macro read_ccsidr, rt
        v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
        v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro write_csselr, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC.
 */
.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr
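
/*
 * Only dccimvac is generated with conditional variants (dccimvaceq,
 * dccimvacne, ...): v7m_dma_inv_range below issues it predicated as
 * dccimvacne, while the remaining operations are only ever used
 * unconditionally.
 */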

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.macro dcimvac, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
.endm

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the whole I-cache.
 * rt data is ignored by ICIALLU, so it can be used for the address.
 */
.macro invalidate_icache, rt
        v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
        mov \rt, #0
.endm
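
/*
 * The trailing "mov \rt, #0" matches the v7 (cp15) sequence, where r0
 * holds the zero value written to ICIALLU; v7m_flush_icache_all below
 * relies on this when it documents r0 as "set to 0".
 */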

/*
 * Invalidate the whole BTB.
 * rt data is ignored by BPIALL, so it can be used for the address.
 */
.macro invalidate_bp, rt
        v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
        mov \rt, #0
.endm

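/*
 *      v7m_invalidate_l1()
 *
 *      Invalidate the L1 data cache by set/way without cleaning it
 *      first (typically used before the D-cache is enabled).  The
 *      geometry comes from CCSIDR after selecting level 0 in CSSELR:
 *
 *        LineSize      = CCSIDR[2:0]   (log2(line bytes) - 4)
 *        Associativity = CCSIDR[12:3]  (number of ways - 1)
 *        NumSets       = CCSIDR[27:13] (number of sets - 1)
 *
 *      Corrupted registers: r0-r6
 */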
ENTRY(v7m_invalidate_l1)
        mov     r0, #0

        write_csselr r0, r1
        read_ccsidr r0

        movw    r1, #0x7fff
        and     r2, r1, r0, lsr #13     @ NumSets - 1

        movw    r1, #0x3ff

        and     r3, r1, r0, lsr #3      @ NumWays - 1
        add     r2, r2, #1              @ NumSets

        and     r0, r0, #0x7
        add     r0, r0, #4              @ SetShift

        clz     r1, r3                  @ WayShift
        add     r4, r3, #1              @ NumWays
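
/*
 * Compose the set/way word for DCISW: the way index lives in the top
 * bits (shifted left by WayShift = clz(NumWays - 1)) and the set index
 * is shifted left by SetShift (log2 of the line size in bytes); the
 * level field in bits [3:1] is zero here since only L1 is touched.
 * As an illustration, a 16KB 4-way cache with 32-byte lines gives
 * NumSets = 128, SetShift = 5 and WayShift = 30, so the loop below
 * writes (way << 30) | (set << 5) for each way 3..0 of each set
 * 127..0.
 */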
1:      sub     r2, r2, #1      @ NumSets--
        mov     r3, r4          @ Temp = NumWays
2:      subs    r3, r3, #1      @ Temp--
        mov     r5, r3, lsl r1
        mov     r6, r2, lsl r0
        orr     r5, r5, r6      @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
        dcisw   r5, r6
        bgt     2b
        cmp     r2, #0
        bgt     1b
        dsb     st
        isb
        ret     lr
ENDPROC(v7m_invalidate_l1)

/*
 *      v7m_flush_icache_all()
 *
 *      Flush the whole I-cache.
 *
 *      Registers:
 *      r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
        invalidate_icache r0
        ret     lr
ENDPROC(v7m_flush_icache_all)

/*
 *      v7m_flush_dcache_all()
 *
 *      Flush the whole D-cache.
 *
 *      Corrupted registers: r0-r7, r9-r11
 */
ENTRY(v7m_flush_dcache_all)
        dmb                                     @ ensure ordering with previous memory accesses
        read_clidr r0
        mov     r3, r0, lsr #23                 @ move LoC into position
        ands    r3, r3, #7 << 1                 @ extract LoC*2 from clidr
        beq     finished                        @ if loc is 0, then no need to clean
start_flush_levels:
        mov     r10, #0                         @ start clean at cache level 0
flush_levels:
        add     r2, r10, r10, lsr #1            @ work out 3x current cache level
        mov     r1, r0, lsr r2                  @ extract cache type bits from clidr
        and     r1, r1, #7                      @ mask off the bits for current cache only
        cmp     r1, #2                          @ see what cache we have at this level
        blt     skip                            @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
        save_and_disable_irqs_notrace r9        @ make cssr&csidr read atomic
#endif
        write_csselr r10, r1                    @ set current cache level
        isb                                     @ isb to sync the new cssr&csidr
        read_ccsidr r1                          @ read the new csidr
#ifdef CONFIG_PREEMPT
        restore_irqs_notrace r9
#endif
        and     r2, r1, #7                      @ extract the length of the cache lines
        add     r2, r2, #4                      @ add 4 (line length offset)
        movw    r4, #0x3ff
        ands    r4, r4, r1, lsr #3              @ find maximum way number (NumWays - 1)
        clz     r5, r4                          @ find bit position of way size increment
        movw    r7, #0x7fff
        ands    r7, r7, r1, lsr #13             @ extract maximum set index (NumSets - 1)
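
/*
 * Walk this level: loop1 iterates over the ways (r4) and loop2 over
 * the set indices (r9); each pass combines
 *      (level << 1) | (way << WayShift) | (set << LineShift)
 * in r11 and posts it to DCCISW to clean+invalidate one line.
 */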
loop1:
        mov     r9, r7                          @ create working copy of max index
loop2:
        lsl     r6, r4, r5
        orr     r11, r10, r6                    @ factor way and cache number into r11
        lsl     r6, r9, r2
        orr     r11, r11, r6                    @ factor index number into r11
        dccisw  r11, r6                         @ clean/invalidate by set/way
        subs    r9, r9, #1                      @ decrement the index
        bge     loop2
        subs    r4, r4, #1                      @ decrement the way
        bge     loop1
skip:
        add     r10, r10, #2                    @ increment cache number
        cmp     r3, r10
        bgt     flush_levels
finished:
        mov     r10, #0                         @ switch back to cache level 0
        write_csselr r10, r3                    @ select current cache level in cssr
        dsb     st
        isb
        ret     lr
ENDPROC(v7m_flush_dcache_all)

/*
 *      v7m_flush_kern_cache_all()
 *
 *      Flush the entire cache system.
 *      The data cache flush is achieved using atomic clean/invalidate
 *      operations by set/way, working outwards from the L1 cache.
 *      The instruction cache can still be invalidated back to the
 *      point of unification in a single operation.
 */
ENTRY(v7m_flush_kern_cache_all)
        stmfd   sp!, {r4-r7, r9-r11, lr}
        bl      v7m_flush_dcache_all
        invalidate_icache r0
        ldmfd   sp!, {r4-r7, r9-r11, lr}
        ret     lr
ENDPROC(v7m_flush_kern_cache_all)

/*
 *      v7m_flush_user_cache_all()
 *
 *      Flush all cache entries in a particular address space
 *
 *      - mm    - mm_struct describing address space
 */
ENTRY(v7m_flush_user_cache_all)
        /*FALLTHROUGH*/

/*
 *      v7m_flush_user_cache_range(start, end, flags)
 *
 *      Flush a range of cache entries in the specified address space.
 *
 *      - start - start address (may not be aligned)
 *      - end   - end address (exclusive, may not be aligned)
 *      - flags - vm_area_struct flags describing address space
 *
 *      It is assumed that:
 *      - we have a VIPT cache.
 */
ENTRY(v7m_flush_user_cache_range)
        ret     lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)

/*
 *      v7m_coherent_kern_range(start,end)
 *
 *      Ensure that the I and D caches are coherent within the
 *      specified region.  This is typically used when code has been
 *      written to a memory region, and will be executed.
 *
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 *
 *      It is assumed that:
 *      - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_kern_range)
        /* FALLTHROUGH */

/*
 *      v7m_coherent_user_range(start,end)
 *
 *      Ensure that the I and D caches are coherent within the
 *      specified region.  This is typically used when code has been
 *      written to a memory region, and will be executed.
 *
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 *
 *      It is assumed that:
 *      - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart                )
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
1:
/*
 * We use an open-coded version of dccmvau; otherwise USER() would
 * point at the movw instruction.
 */
        dccmvau r12, r3
        add     r12, r12, r2
        cmp     r12, r1
        blo     1b
        dsb     ishst
        icache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
2:
        icimvau r12, r3
        add     r12, r12, r2
        cmp     r12, r1
        blo     2b
        invalidate_bp r0
        dsb     ishst
        isb
        ret     lr
 UNWIND(.fnend          )
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)
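
/*
 * On ARM, flush_icache_range() is normally wired up to the
 * coherent_kern_range method of struct cpu_cache_fns, so this is the
 * path taken after code has been written out, such as when loading a
 * module.
 */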

/*
 *      v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 *      Ensure that the data held in the page kaddr is written back
 *      to the page in question.
 *
 *      - addr  - kernel address
 *      - size  - region size
 */
ENTRY(v7m_flush_kern_dcache_area)
        dcache_line_size r2, r3
        add     r1, r0, r1
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccimvac r0, r3         @ clean & invalidate D line / unified line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_flush_kern_dcache_area)

/*
 *      v7m_dma_inv_range(start,end)
 *
 *      Invalidate the data cache within the specified region; we will
 *      be performing a DMA operation in this region and we want to
 *      purge old data in the cache.
 *
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
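/*
 * A partial line at either end of the region is cleaned as well as
 * invalidated (dccimvacne), so dirty data that shares the line with
 * the DMA buffer is written back instead of being silently discarded.
 */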
v7m_dma_inv_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        tst     r0, r3
        bic     r0, r0, r3
        dccimvacne r0, r3
        subne   r3, r2, #1      @ restore r3, corrupted by v7m's dccimvac
        tst     r1, r3
        bic     r1, r1, r3
        dccimvacne r1, r3
1:
        dcimvac r0, r3
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_inv_range)

/*
 *      v7m_dma_clean_range(start,end)
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
v7m_dma_clean_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccmvac r0, r3                  @ clean D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_clean_range)

/*
 *      v7m_dma_flush_range(start,end)
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
ENTRY(v7m_dma_flush_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccimvac r0, r3                 @ clean & invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_flush_range)

/*
 *      dma_map_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
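/*
 * Called before the device accesses the buffer: DMA_FROM_DEVICE only
 * needs the CPU's stale lines invalidated, while DMA_TO_DEVICE and
 * DMA_BIDIRECTIONAL must have dirty lines cleaned out to memory first.
 */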
ENTRY(v7m_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_FROM_DEVICE
        beq     v7m_dma_inv_range
        b       v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)

/*
 *      dma_unmap_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
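/*
 * Called after the device has finished: for DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL, invalidate any lines the CPU may have fetched
 * speculatively while the DMA was in flight; DMA_TO_DEVICE needs no
 * further maintenance.
 */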
ENTRY(v7m_dma_unmap_area)
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        bne     v7m_dma_inv_range
        ret     lr
ENDPROC(v7m_dma_unmap_area)

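/*
 * V7M parts are uniprocessor, so flushing to the Level of Unification
 * Inner Shareable (louis) is simply aliased to the full cache flush.
 */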
        .globl  v7m_flush_kern_cache_louis
        .equ    v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all

        __INITDATA

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions v7m