linux/arch/xtensa/include/asm/cacheflush.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * special routines for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */
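
/*
 * Usage sketch (illustrative only): the low-level *_range primitives listed
 * above take a start address and a size, not an end address. For a buffer
 * about to be overwritten by a device, and one about to be read by it
 * ('buf' and 'len' are hypothetical names):
 *
 *	__invalidate_dcache_range((unsigned long) buf, len);	// device -> memory
 *	__flush_dcache_range((unsigned long) buf, len);		// memory -> device
 */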

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
static inline void __flush_dcache_page(unsigned long va)
{
}
static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
{
}
# define __flush_invalidate_dcache_all()	__invalidate_dcache_all()
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif
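
/*
 * Sketch of why the fallback above is safe: a write-through dcache never
 * holds dirty lines, so "flush" degenerates to "invalidate" and callers can
 * use the combined primitive unconditionally, e.g.:
 *
 *	__flush_invalidate_dcache_range((unsigned long) buf, len);
 *
 * ('buf' and 'len' are hypothetical; on write-back configurations this
 * writes dirty lines out first, on write-through ones it only invalidates.)
 */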

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
static inline void __invalidate_dcache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif
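
/*
 * Usage sketch for the alias helpers (assumes DCACHE_WAY_SIZE > PAGE_SIZE):
 * they act on the cache sets a physical page occupies when mapped at a given
 * virtual color. To write back a user page through a same-colored kernel
 * alias, arch code does roughly ('page' and 'user_vaddr' are hypothetical):
 *
 *	unsigned long phys = page_to_phys(page);
 *	unsigned long virt = TLBTEMP_BASE_1 + (user_vaddr & DCACHE_ALIAS_MASK);
 *
 *	__flush_invalidate_dcache_page_alias(virt, phys);
 */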

/*
 * The caches are physically tagged, so there is usually nothing to do here,
 * unless the cache ways are larger than a page and aliasing is possible.
 *
 * Pages can get remapped. Because a remap may change the 'color' of a page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/core-api/cachetlb.rst)
 */
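
/*
 * Illustration (assumption: virtually indexed, physically tagged dcache):
 * the 'color' of a mapping is the slice of the virtual address between the
 * page offset and the way size, i.e. roughly
 *
 *	color = (vaddr & (DCACHE_WAY_SIZE - 1)) >> PAGE_SHIFT;
 *
 * Two mappings of one physical page with different colors index different
 * cache sets, which is why the cache must be flushed before a PTE change.
 */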

#if defined(CONFIG_MMU) && \
	((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
		      unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page  local_flush_cache_page
#endif

#define local_flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
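
/*
 * Example (sketch, per Documentation/core-api/cachetlb.rst): kernel code
 * that writes to a page cache page through a kernel mapping must call
 * flush_dcache_page() afterwards, roughly ('data' and 'len' hypothetical):
 *
 *	void *kaddr = kmap(page);
 *	memcpy(kaddr, data, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 */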

void local_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)				do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)		do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)

#endif

#define flush_icache_user_range flush_icache_range

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
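
/*
 * Usage sketch: after storing instructions through the data cache (module
 * loading, breakpoint or trampoline patching), make the icache see them over
 * [start, end) ('code' and 'size' are hypothetical):
 *
 *	flush_icache_range((unsigned long) code,
 *			   (unsigned long) code + size);
 */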

/* This is not required, see Documentation/core-api/cachetlb.rst */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif
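
/*
 * Context sketch: generic ptrace/uaccess code (e.g. access_process_vm())
 * uses copy_to_user_page() when poking another process' memory through a
 * temporary kernel mapping, roughly ('maddr', 'offset', 'buf' and 'len' are
 * hypothetical):
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *
 * The dcache flush plus icache invalidate above is what makes a debugger's
 * inserted breakpoint visible to the target's instruction fetch.
 */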

#endif /* _XTENSA_CACHEFLUSH_H */