linux/arch/metag/include/asm/cacheflush.h
#ifndef _METAG_CACHEFLUSH_H
#define _METAG_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>

#include <asm/l2cache.h>
#include <asm/metag_isa.h>
#include <asm/metag_mem.h>

void metag_cache_probe(void);

void metag_data_cache_flush_all(const void *start);
void metag_code_cache_flush_all(const void *start);

/*
 * Routines to flush physical cache lines that may be used to cache data or
 * code normally accessed via the linear address range supplied. The region
 * flushed must lie entirely in either local or global address space, as
 * determined by the top bit of the start address. If bytes is >= 4K then the
 * whole of the related cache state will be flushed rather than just the
 * requested range.
 */
void metag_data_cache_flush(const void *start, int bytes);
void metag_code_cache_flush(const void *start, int bytes);

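/*
 * Illustrative usage sketch (not part of the original header): a small
 * request only flushes the lines covering the buffer, while a request of
 * 4K or more falls back to flushing the whole data cache:
 *
 *	char buf[256];
 *
 *	metag_data_cache_flush(buf, sizeof(buf));	(a few lines)
 *	metag_data_cache_flush(buf, 8192);		(whole cache)
 */
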
#ifdef CONFIG_METAG_META12

/* Write through, virtually tagged, split I/D cache. */

static inline void __flush_cache_all(void)
{
        metag_code_cache_flush_all((void *) PAGE_OFFSET);
        metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#define flush_cache_all() __flush_cache_all()

/* flush the entire user address space referenced in this mm structure */
static inline void flush_cache_mm(struct mm_struct *mm)
{
        if (mm == current->mm)
                __flush_cache_all();
}

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/* flush a range of addresses from this mm */
static inline void flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        flush_cache_mm(vma->vm_mm);
}

static inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr, unsigned long pfn)
{
        flush_cache_mm(vma->vm_mm);
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE       1
static inline void flush_dcache_page(struct page *page)
{
        metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)

static inline void flush_icache_page(struct vm_area_struct *vma,
                                     struct page *page)
{
        metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
}

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
        metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#else

/* Write through, physically tagged, split I/D cache. */

#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_dup_mm(mm)                  do { } while (0)
#define flush_cache_range(vma, start, end)      do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)
#define flush_icache_page(vma, pg)              do { } while (0)
#define flush_cache_vmap(start, end)            do { } while (0)
#define flush_cache_vunmap(start, end)          do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE       1
static inline void flush_dcache_page(struct page *page)
{
        /* FIXME: We can do better than this. All we are trying to do is
         * make the i-cache coherent; we should use the PG_arch_1 bit like
         * e.g. powerpc does.
         */
#ifdef CONFIG_SMP
        metag_out32(1, SYSC_ICACHE_FLUSH);
#else
        metag_code_cache_flush_all((void *) PAGE_OFFSET);
#endif
}

#endif

/* Flush the icache over a range of kernel virtual addresses */
static inline void flush_icache_range(unsigned long address,
                                      unsigned long endaddr)
{
#ifdef CONFIG_SMP
        metag_out32(1, SYSC_ICACHE_FLUSH);
#else
        metag_code_cache_flush((void *) address, endaddr - address);
#endif
}

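/*
 * Illustrative usage sketch (not from the original source): after new
 * instructions have been written through the write-through dcache, the
 * icache must be flushed before that code is executed:
 *
 *	memcpy(dst, new_insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * copy_to_user_page() below follows exactly this pattern.
 */
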
static inline void flush_cache_sigtramp(unsigned long addr, int size)
{
        /*
         * Flush the icache in case there was previously some code
         * fetched from this address, perhaps a previous sigtramp.
         *
         * We don't need to flush the dcache; it's write through and
         * we just wrote the sigtramp code through it.
         */
#ifdef CONFIG_SMP
        metag_out32(1, SYSC_ICACHE_FLUSH);
#else
        metag_code_cache_flush((void *) addr, size);
#endif
}

#ifdef CONFIG_METAG_L2C

/*
 * Perform a single specific CACHEWD operation on an address, masking lower bits
 * of address first.
 */
static inline void cachewd_line(void *addr, unsigned int data)
{
        unsigned long masked = (unsigned long)addr & -0x40;
        __builtin_meta2_cachewd((void *)masked, data);
}
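/*
 * Illustrative sketch (addresses are arbitrary): the "& -0x40" above clears
 * the low six bits, so any address inside a 64-byte cache line selects that
 * line:
 *
 *	cachewd_line((void *)0x80001234, CACHEW_FLUSH_L1D_L2);
 *
 * operates on the line starting at 0x80001200.
 */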

/* Perform a certain CACHEW op on each cache line in a range */
static inline void cachew_region_op(void *start, unsigned long size,
                                    unsigned int op)
{
        unsigned long offset = (unsigned long)start & 0x3f;
        int i;
        if (offset) {
                size += offset;
                start -= offset;
        }
        i = (size - 1) >> 6;
        do {
                __builtin_meta2_cachewd(start, op);
                start += 0x40;
        } while (i--);
}

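/*
 * Worked example (values chosen for illustration): with start = 0x...20 and
 * size = 0x50 the range is first widened to whole 64-byte lines (offset is
 * 0x20, so size becomes 0x70 and start is aligned down), then
 * i = (0x70 - 1) >> 6 = 1 and the do/while issues the CACHEWD op twice,
 * once for each line covering the original range.
 */
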
/* prevent write fence and flushbacks being reordered in L2 */
static inline void l2c_fence_flush(void *addr)
{
        /*
         * Synchronise by reading back and re-flushing.
         * It is assumed this access will miss, as the caller should have just
         * flushed the cache line.
         */
        (void)*(volatile u8 *)addr;
        cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
}

/* prevent write fence and writebacks being reordered in L2 */
static inline void l2c_fence(void *addr)
{
        /*
         * A write back has occurred, but not necessarily an invalidate, so the
         * readback in l2c_fence_flush() would hit in the cache and have no
         * effect. Therefore fully flush the line first.
         */
        cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
        l2c_fence_flush(addr);
}

/* Used to keep memory consistent when doing DMA. */
static inline void flush_dcache_region(void *start, unsigned long size)
{
        /* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
        if (meta_l2c_is_enabled()) {
                cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
                if (meta_l2c_is_writeback())
                        l2c_fence_flush(start + size - 1);
        } else {
                metag_data_cache_flush(start, size);
        }
}

/* Write back dirty lines to memory (or do nothing if no writeback caches) */
static inline void writeback_dcache_region(void *start, unsigned long size)
{
        if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
                cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
                l2c_fence(start + size - 1);
        }
}

/* Invalidate (may also write back if necessary) */
static inline void invalidate_dcache_region(void *start, unsigned long size)
{
        if (meta_l2c_is_enabled())
                cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
        else
                metag_data_cache_flush(start, size);
}
#else
#define flush_dcache_region(s, l)       metag_data_cache_flush((s), (l))
#define writeback_dcache_region(s, l)   do {} while (0)
#define invalidate_dcache_region(s, l)  flush_dcache_region((s), (l))
#endif

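/*
 * Illustrative DMA sketch (the helpers and buffers named here are
 * hypothetical, not part of this header): write dirty lines back before a
 * device reads a buffer, and invalidate stale lines before the CPU reads
 * data a device has written:
 *
 *	writeback_dcache_region(tx_buf, tx_len);
 *	start_dma_to_device(tx_buf, tx_len);
 *
 *	start_dma_from_device(rx_buf, rx_len);
 *	wait_for_dma_done();
 *	invalidate_dcache_region(rx_buf, rx_len);
 */
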
static inline void copy_to_user_page(struct vm_area_struct *vma,
                                     struct page *page, unsigned long vaddr,
                                     void *dst, const void *src,
                                     unsigned long len)
{
        memcpy(dst, src, len);
        flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}

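/*
 * Illustrative caller sketch (simplified from the generic
 * access_process_vm() pattern; error handling omitted): this is how another
 * task's page is typically patched, with the icache flush above keeping any
 * modified code coherent:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset_in_page(addr),
 *			  buf, len);
 *	kunmap(page);
 */
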
static inline void copy_from_user_page(struct vm_area_struct *vma,
                                       struct page *page, unsigned long vaddr,
                                       void *dst, const void *src,
                                       unsigned long len)
{
        memcpy(dst, src, len);
}

#endif /* _METAG_CACHEFLUSH_H */