linux/arch/xtensa/include/asm/cacheflush.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_dcache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 * __flush_dcache_range(from,size)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);
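
/*
 * Illustrative sketch (not taken from in-tree code; 'buf' and 'len' are
 * placeholders): a driver that has just received data by DMA into a
 * cacheable buffer would discard any stale lines before the CPU reads it:
 *
 *      __invalidate_dcache_range((unsigned long)buf, len);
 *
 * Most code should not call these routines directly; the DMA mapping code
 * and the flush_*()/copy_*_user_page() interfaces below wrap them.
 */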

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)              do { } while(0)
# define __flush_dcache_page(p)                 do { } while(0)
# define __flush_invalidate_dcache_page(p)      __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)   __invalidate_dcache_range(p,s)
#endif

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
                                                        unsigned long phys) { }
static inline void __invalidate_dcache_page_alias(unsigned long virt,
                                                  unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
                                                unsigned long phys) { }
#endif
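
/*
 * For the *_page_alias() helpers, 'vaddr' supplies the cache color (the
 * user-space virtual address the page is, or will be, mapped at) and 'paddr'
 * identifies the physical page to operate on.  The stub versions cover
 * configurations whose cache way size does not exceed PAGE_SIZE, where no
 * virtual aliasing can occur and these operations are unnecessary.
 */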

/*
 * The caches are physically tagged, so normally nothing needs to be done
 * here unless cache aliasing is possible (a cache way larger than a page).
 *
 * Pages can get remapped.  Because this might change the 'color' of a page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

#if defined(CONFIG_MMU) && \
        ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
                             unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page  local_flush_cache_page
#endif

#define local_flush_cache_all()                                         \
        do {                                                            \
                __flush_invalidate_dcache_all();                        \
                __invalidate_icache_all();                              \
        } while (0)

#define flush_cache_mm(mm)              flush_cache_all()
#define flush_cache_dup_mm(mm)          flush_cache_mm(mm)

#define flush_cache_vmap(start,end)     flush_cache_all()
#define flush_cache_vunmap(start,end)   flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);

void local_flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
                unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()                               do { } while (0)
#define flush_cache_mm(mm)                              do { } while (0)
#define flush_cache_dup_mm(mm)                          do { } while (0)

#define flush_cache_vmap(start,end)                     do { } while (0)
#define flush_cache_vunmap(start,end)                   do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)                         do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)                do { } while (0)
#define flush_cache_range(vma, start, end)              do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)                            \
        do {                                                            \
                __flush_dcache_range(start, (end) - (start));           \
                __invalidate_icache_range(start, (end) - (start));      \
        } while (0)
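
/*
 * Typical use (illustrative, assuming code has just been written through a
 * kernel virtual mapping, e.g. when loading a module or installing a
 * trampoline; 'addr', 'insns' and 'len' are placeholders):
 *
 *      memcpy((void *)addr, insns, len);
 *      flush_icache_range(addr, addr + len);
 *
 * This writes the new instructions back from the D-cache and discards any
 * stale lines the I-cache may hold for that range.
 */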

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)                     do { } while (0)

#define flush_dcache_mmap_lock(mapping)                 do { } while (0)
#define flush_dcache_mmap_unlock(mapping)               do { } while (0)

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
                unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
                unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)              \
        do {                                                            \
                memcpy(dst, src, len);                                  \
                __flush_dcache_range((unsigned long) dst, len);         \
                __invalidate_icache_range((unsigned long) dst, len);    \
        } while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)

#endif
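
/*
 * copy_to_user_page()/copy_from_user_page() are used when the kernel reads
 * or modifies another process's pages through a kernel mapping (e.g. ptrace
 * via access_process_vm(), see Documentation/cachetlb.txt).  The non-aliasing
 * variants above simply copy, and copy_to_user_page() then makes the written
 * range coherent with the I-cache in case the page holds code.
 */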

#define XTENSA_CACHEBLK_LOG2    29
#define XTENSA_CACHEBLK_SIZE    (1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK    (7 << XTENSA_CACHEBLK_LOG2)
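
/*
 * The 4 GiB address space is treated as eight 512 MiB "cache blocks"
 * (regions).  As the helpers below assume, on cores with the CACHEATTR
 * register each region's cache attribute is a 4-bit nibble, region 0 in
 * bits 3..0 up to region 7 in bits 31..28.  Worked example: for
 * addr = 0xd0000004, addr & XTENSA_CACHEBLK_MASK = 0xc0000000 (region 6),
 * so the attribute is taken from CACHEATTR bits 27..24.
 */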

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
        u32 r;
        asm volatile("  rsr %0, cacheattr" : "=a"(r));
        return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
        u32 r = addr & XTENSA_CACHEBLK_MASK;
        return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
                        & 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
        u32 r;
        asm volatile("  rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
        asm volatile("  dsync");
        return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
        u32 r = 0;
        u32 a = 0;
        do {
                a -= XTENSA_CACHEBLK_SIZE;
                r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
        } while (a);
        return r;
}
#endif
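
/*
 * On configurations without CACHEATTR, xtensa_get_cacheattr() above
 * synthesizes the same eight-nibble layout by reading the DTLB attribute
 * (via rdtlb1) for an address in each 512 MiB region, starting at
 * 0xe0000000 (region 7) and wrapping down to region 0, so region 7's
 * attribute ends up in the top nibble and region 0's in the bottom one.
 */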

static inline int xtensa_need_flush_dma_source(u32 addr)
{
        return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
        return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
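
/*
 * Both predicates look only at the low XCHAL_CA_BITS cache-attribute bits of
 * the translation: a DMA source buffer needs its dirty lines written back
 * when it is mapped with a write-back attribute (CA value 4 or above in this
 * encoding), and a DMA destination needs invalidating unless it is mapped
 * cache-bypass (CA value 2).
 */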

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
        u32 cnt;
        if (size) {
                cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
                        + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
                while (cnt--) {
                        asm volatile("  dhwb %0, 0" : : "a"(addr));
                        addr += XCHAL_DCACHE_LINESIZE;
                }
                asm volatile("  dsync");
        }
}
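
/*
 * 'cnt' above is the number of cache lines touched by [addr, addr + size),
 * counting the partially covered lines at both ends.  For example (assuming
 * a 32-byte D-cache line): an addr offset of 28 within its line and a size
 * of 40 give cnt = (40 + 28 + 31) / 32 = 3 lines, which indeed span line
 * offsets 28..67.
 */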

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
        int cnt;
        if (size) {
                asm volatile("  dhwbi %0, 0 ;" : : "a"(addr));
                cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
                        - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
                while (cnt-- > 0) {
                        asm volatile("  dhi %0, %1" : : "a"(addr),
                                                "n"(XCHAL_DCACHE_LINESIZE));
                        addr += XCHAL_DCACHE_LINESIZE;
                }
                asm volatile("  dhwbi %0, %1" : : "a"(addr),
                                                "n"(XCHAL_DCACHE_LINESIZE));
                asm volatile("  dsync");
        }
}
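
/*
 * Note the asymmetry with flush_dcache_unaligned(): the possibly partial
 * first and last lines are written back and invalidated (dhwbi) so that any
 * dirty neighbouring data sharing those lines is not lost, while the fully
 * covered interior lines are simply discarded (dhi).
 */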

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
        u32 cnt;
        if (size) {
                cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
                        + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
                while (cnt--) {
                        asm volatile("  dhwbi %0, 0" : : "a"(addr));
                        addr += XCHAL_DCACHE_LINESIZE;
                }
                asm volatile("  dsync");
        }
}
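
/*
 * Illustrative sketch (assumptions: 'buf' and 'len' describe a cacheable,
 * non-coherent buffer handed to a device outside the DMA mapping API, which
 * normally performs this maintenance on the architecture's behalf):
 *
 *      if (xtensa_need_flush_dma_source((u32)buf))
 *              flush_dcache_unaligned((u32)buf, len);        <- before the device reads buf
 *      ...
 *      if (xtensa_need_invalidate_dma_destination((u32)buf))
 *              invalidate_dcache_unaligned((u32)buf, len);   <- after the device has written buf
 */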

#endif /* _XTENSA_CACHEFLUSH_H */