linux/arch/xtensa/include/asm/cacheflush.h
/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * special routines for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);


#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)		do { } while(0)
# define __flush_dcache_page(p)			do { } while(0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif
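
/*
 * Illustrative sketch (not part of this header; function and buffer names
 * are made up): on a write-back D-cache, a buffer handed to a device is
 * typically written back before the device reads it and invalidated
 * before the CPU reads what the device wrote.  In-tree code normally
 * reaches these routines through higher-level interfaces such as the DMA
 * mapping layer rather than calling them directly.
 *
 *	static void example_cpu_to_device(void *buf, unsigned long len)
 *	{
 *		__flush_dcache_range((unsigned long) buf, len);
 *	}
 *
 *	static void example_device_to_cpu(void *buf, unsigned long len)
 *	{
 *		__invalidate_dcache_range((unsigned long) buf, len);
 *	}
 */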

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						unsigned long phys) { }
#endif
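
/*
 * Illustrative sketch of how the _alias variants are used (the
 * temporary-mapping base and alias-mask names below are assumptions for
 * the example, not definitions from this header): the physical page is
 * flushed through a kernel alias chosen to have the same cache color as
 * the user-space mapping, so the maintenance hits the lines that the
 * user address actually indexes.
 *
 *	unsigned long alias = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
 *
 *	__flush_invalidate_dcache_page_alias(alias, page_to_phys(page));
 */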

/*
 * The caches are physically tagged, so there is normally nothing to do
 * here unless cache aliasing is possible.
 *
 * Pages can get remapped.  Because remapping may change the 'color' of a
 * page, the cache has to be flushed before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */
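
/*
 * Worked example (numbers chosen for illustration only): with a 16 KB
 * cache way and 4 KB pages there are four possible colors, and the color
 * of a mapping comes from the virtual index bits above the page offset:
 *
 *	color = (vaddr >> PAGE_SHIFT) & ((DCACHE_WAY_SIZE / PAGE_SIZE) - 1);
 *
 * Two virtual mappings of the same physical page with different colors
 * are indexed into different cache sets, which is why the cache must be
 * flushed before such a mapping is changed.
 */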

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*,
			     unsigned long, unsigned long);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)				do { } while (0)

#define flush_cache_page(vma,addr,pfn)			do { } while (0)
#define flush_cache_range(vma,start,end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)					\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start,(end) - (start));	\
	} while (0)
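
/*
 * Illustrative sketch (buffer and length names are made up): code that
 * writes instructions into memory, for example when installing a
 * trampoline, uses flush_icache_range() afterwards so the new stores
 * reach memory and any stale I-cache lines over the range are discarded.
 *
 *	memcpy(code_buf, insn_template, insn_len);
 *	flush_icache_range((unsigned long) code_buf,
 *			   (unsigned long) code_buf + insn_len);
 */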

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif
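
/*
 * Sketch of the calling convention, simplified from generic mm code such
 * as access_process_vm() (the local variable names are illustrative): the
 * caller maps the target page and passes both the kernel destination and
 * the user-space address the page is mapped at, so an aliasing
 * implementation can pick the matching cache color.
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 */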

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)
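
/*
 * With XTENSA_CACHEBLK_LOG2 = 29 a cache block is 1 << 29 = 512 MB, so
 * the 4 GB address space splits into eight blocks and CACHEATTR holds one
 * 4-bit attribute per block.  For example, addr = 0xd0001234 lies in
 * block 0xd0001234 >> 29 = 6, whose attribute is
 * (CACHEATTR >> (6 * 4)) & 0xF.
 */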

/*
 * Read the per-region cache attribute of an address, either straight
 * from the CACHEATTR special register or, on configurations without it,
 * from the DTLB via the rdtlb1 instruction.  Each 4-bit attribute covers
 * one 512 MB block of the address space.
 */
#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, cacheattr" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif

/*
 * DMA helpers: decide from the cache attribute of an address whether a
 * source buffer needs to be written back before a transfer to a device,
 * and whether a destination buffer needs to be invalidated before the
 * CPU reads data the device wrote.
 */
static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}

/* Write back every D-cache line overlapping [addr, addr + size). */
static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}
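
/*
 * Worked example of the line count above (a 32-byte line size is assumed
 * for illustration; XCHAL_DCACHE_LINESIZE depends on the configuration):
 * an addr with offset 24 into its line and size 48 touches bytes 24..71
 * of the enclosing lines, i.e. three lines, and indeed
 * (48 + 24 + 32 - 1) / 32 = 3.
 */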

/*
 * Invalidate the D-cache lines overlapping [addr, addr + size).  The
 * possibly partial first and last lines are written back and invalidated
 * (dhwbi) so that unrelated data sharing those lines is not lost.
 */
static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		asm volatile("	dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
						"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
						"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}

/*
 * Write back and invalidate every D-cache line overlapping
 * [addr, addr + size).
 */
static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}
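
/*
 * Illustrative sketch of how the DMA helpers above combine for a
 * streaming transfer (names and the direction flag are made up; in-tree
 * code normally reaches this through the DMA mapping layer rather than
 * open-coding it):
 *
 *	if (to_device && xtensa_need_flush_dma_source(addr))
 *		flush_dcache_unaligned(addr, size);
 *	else if (!to_device && xtensa_need_invalidate_dma_destination(addr))
 *		invalidate_dcache_unaligned(addr, size);
 */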

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */