/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE         32
#define D_CACHE_LINE_SIZE       32
#define BTB_FLUSH_SIZE          8

/*
 *      v6_flush_icache_all()
 *
 *      Flush the whole I-cache.
 *
 *      ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *      This erratum is present in 1136, 1156 and 1176. It does not affect the
 *      MPCore.
 *
 *      Registers:
 *      r0 - set to 0
 *      r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
        mov     r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
        mrs     r1, cpsr
        cpsid   ifa                             @ disable interrupts
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        msr     cpsr_cx, r1                     @ restore interrupts
        .rept   11                              @ ARM Ltd recommends at least
        nop                                     @ 11 NOPs
        .endr
#else
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I-cache
#endif
        ret     lr
ENDPROC(v6_flush_icache_all)
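
/*
 * Note on the erratum 411920 workaround above (explanatory, not from the
 * original source): a single "invalidate entire I-cache" operation can be
 * ignored by the affected cores, so the CP15 c7, c5, 0 write is issued four
 * times back-to-back with IRQs, FIQs and imprecise aborts masked, followed
 * by at least 11 NOPs, per ARM's recommended workaround. A rough C-level
 * sketch of the same sequence (hypothetical helper name, illustration only;
 * the asm also masks FIQs/aborts, which local_irq_save() does not):
 *
 *      unsigned long flags;
 *      int i;
 *
 *      local_irq_save(flags);
 *      for (i = 0; i < 4; i++)
 *              icache_inval_all();     // MCR p15, 0, Rt, c7, c5, 0
 *      local_irq_restore(flags);
 *      // ...followed by >= 11 NOPs
 */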

/*
 *      v6_flush_kern_cache_all()
 *
 *      Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
        mov     r0, #0
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 0          @ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
        mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
#else
        b       v6_flush_icache_all
#endif
#else
        mcr     p15, 0, r0, c7, c15, 0          @ Cache clean+invalidate
#endif
        ret     lr

/*
 *      v6_flush_user_cache_all()
 *
 *      Flush all cache entries in a particular address space.
 *
 *      - mm    - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
        /*FALLTHROUGH*/

/*
 *      v6_flush_user_cache_range(start, end, flags)
 *
 *      Flush a range of cache entries in the specified address space.
 *
 *      - start - start address (may not be aligned)
 *      - end   - end address (exclusive, may not be aligned)
 *      - flags - vm_area_struct flags describing address space
 *
 *      It is assumed that:
 *      - we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
        ret     lr
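
/*
 * Note (explanatory, not from the original source): with the VIPT cache
 * assumed above, no per-address-space or per-range user cache maintenance
 * is required at this point, so both v6_flush_user_cache_all and
 * v6_flush_user_cache_range simply return; coherency for user mappings is
 * handled elsewhere in the ARM cache handling code.
 */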

/*
 *      v6_coherent_kern_range(start,end)
 *
 *      Ensure that the I and D caches are coherent within specified
 *      region.  This is typically used when code has been written to
 *      a memory region, and will be executed.
 *
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 *
 *      It is assumed that:
 *      - the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
        /* FALLTHROUGH */

/*
 *      v6_coherent_user_range(start,end)
 *
 *      Ensure that the I and D caches are coherent within specified
 *      region.  This is typically used when code has been written to
 *      a memory region, and will be executed.
 *
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 *
 *      It is assumed that:
 *      - the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart                )
#ifdef HARVARD_CACHE
        bic     r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(  mcr     p15, 0, r0, c7, c10, 1  )       @ clean D line
        add     r0, r0, #CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
#endif
        mov     r0, #0
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
        mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
#else
        b       v6_flush_icache_all
#endif
#else
        mcr     p15, 0, r0, c7, c5, 6           @ invalidate BTB
#endif
        ret     lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
        mov     r0, #-EFAULT
        ret     lr
 UNWIND(.fnend          )
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
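
/*
 * Usage sketch (explanatory, not from the original source): the coherency
 * routines above are what the generic ARM code ends up using after new
 * instructions have been written to memory, e.g. roughly:
 *
 *      // hypothetical caller, illustration only
 *      memcpy(code_buf, insns, len);           // write new instructions
 *      flush_icache_range((unsigned long)code_buf,
 *                         (unsigned long)code_buf + len);
 *      // ...which on ARM resolves to the coherent kernel range operation,
 *      // i.e. clean the D-cache lines, drain the write buffer, then
 *      // invalidate the I-cache/BTB as implemented above.
 */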

/*
 *      v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *      Ensure that the data held in the region described by addr/size
 *      is written back to memory.
 *
 *      - addr  - kernel address
 *      - size  - region size
 */
ENTRY(v6_flush_kern_dcache_area)
        add     r1, r0, r1
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
#else
        mcr     p15, 0, r0, c7, c15, 1          @ clean & invalidate unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
#ifdef HARVARD_CACHE
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
#endif
        ret     lr


/*
 *      v6_dma_inv_range(start,end)
 *
 *      Invalidate the data cache within the specified region; we will
 *      be performing a DMA operation in this region and we want to
 *      purge old data in the cache.
 *
 *      Partially covered cache lines at either end of the region are
 *      cleaned rather than simply invalidated, so that data outside the
 *      region which happens to share those lines is not lost.
 *
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
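
/*
 * Note on CONFIG_DMA_CACHE_RWFO (explanatory, not from the original source):
 * on ARMv6 SMP (ARM11MPCore) cache maintenance operations are not broadcast
 * to the other CPUs, so the "read/write for ownership" accesses below first
 * pull each affected cache line into the local CPU's cache in an owned
 * state, making the subsequent local clean/invalidate sufficient for DMA
 * coherency.
 */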
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                        @ read for ownership
        strb    r2, [r0]                        @ write for ownership
#endif
        tst     r0, #D_CACHE_LINE_SIZE - 1
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D line
#else
        mcrne   p15, 0, r0, c7, c11, 1          @ clean unified line
#endif
        tst     r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrneb  r2, [r1, #-1]                   @ read for ownership
        strneb  r2, [r1, #-1]                   @ write for ownership
#endif
        bic     r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D line
#else
        mcrne   p15, 0, r1, c7, c15, 1          @ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D line
#else
        mcr     p15, 0, r0, c7, c7, 1           @ invalidate unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrlo   r2, [r0]                        @ read for ownership
        strlo   r2, [r0]                        @ write for ownership
#endif
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        ret     lr

/*
 *      v6_dma_clean_range(start,end)
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
v6_dma_clean_range:
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
        ldr     r2, [r0]                        @ read for ownership
#endif
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c10, 1          @ clean D line
#else
        mcr     p15, 0, r0, c7, c11, 1          @ clean unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        ret     lr

/*
 *      v6_dma_flush_range(start,end)
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                        @ read for ownership
        strb    r2, [r0]                        @ write for ownership
#endif
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
#else
        mcr     p15, 0, r0, c7, c15, 1          @ clean & invalidate line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrlob  r2, [r0]                        @ read for ownership
        strlob  r2, [r0]                        @ write for ownership
#endif
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        ret     lr

/*
 *      dma_map_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
ENTRY(v6_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_FROM_DEVICE
        beq     v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
        b       v6_dma_clean_range
#else
        teq     r2, #DMA_TO_DEVICE
        beq     v6_dma_clean_range
        b       v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)
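
/*
 * Note (explanatory, not from the original source): for the map operation,
 * DMA_FROM_DEVICE invalidates the region, DMA_TO_DEVICE cleans it, and
 * DMA_BIDIRECTIONAL cleans+invalidates when RWFO is enabled; without RWFO
 * the bidirectional case is only cleaned here, and the invalidate is left
 * to v6_dma_unmap_area once the transfer has completed.
 */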

/*
 *      dma_unmap_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        bne     v6_dma_inv_range
#endif
        ret     lr
ENDPROC(v6_dma_unmap_area)
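
/*
 * Note (explanatory, not from the original source): without RWFO, the unmap
 * path invalidates any region the device may have written (everything except
 * DMA_TO_DEVICE), discarding cache lines that could have been speculatively
 * allocated while the DMA transfer was in progress. With RWFO enabled the
 * necessary maintenance was already done at map time, so unmap is a no-op.
 */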

        .globl  v6_flush_kern_cache_louis
        .equ    v6_flush_kern_cache_louis, v6_flush_kern_cache_all

        __INITDATA

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions v6
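
        @ Rough sketch of what define_cache_functions (proc-macros.S) expands
        @ to here (illustration only, see the macro for the exact layout): it
        @ emits a "v6_cache_fns" cpu_cache_fns table whose members point at
        @ the v6_* routines above, e.g. flush_icache_all, flush_kern_all,
        @ flush_kern_louis, flush_user_all/range, coherent_kern/user_range,
        @ flush_kern_dcache_area and the dma_map/unmap/flush helpers, which
        @ the per-CPU proc info then selects at boot.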