linux/arch/m68k/include/asm/cacheflush_no.h
#ifndef _M68KNOMMU_CACHEFLUSH_H
#define _M68KNOMMU_CACHEFLUSH_H

/*
 * (C) Copyright 2000-2010, Greg Ungerer <gerg@snapgear.com>
 */
#include <linux/mm.h>
#include <asm/mcfsim.h>

#define flush_cache_all()                       __flush_cache_all()
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_dup_mm(mm)                  do { } while (0)
#define flush_cache_range(vma, start, end)      do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_dcache_range(start, len)          __flush_dcache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)                 do { } while (0)
#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)
#define flush_icache_range(start, len)          __flush_icache_all()
#define flush_icache_page(vma, pg)              do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end)            do { } while (0)
#define flush_cache_vunmap(start, end)          do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

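/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * that has filled a buffer the device will read via DMA must push any
 * dirty cached data out to memory first. dma_start_tx() below is a
 * hypothetical device hook standing in for whatever the real driver
 * would use.
 */
#if 0
static void example_dma_out(void *buf, unsigned long len)
{
	/* Write back dirty lines covering buf (a full flush on ColdFire) */
	flush_dcache_range((unsigned long) buf, len);
	dma_start_tx(buf, len);		/* hypothetical: device now reads RAM */
}
#endif
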
/*
 * Push (write back) all dirty data cache lines. Implemented in the
 * ColdFire cache support code.
 */
void mcf_cache_push(void);

static inline void __clear_cache_all(void)
{
#ifdef CACHE_INVALIDATE
	/* Invalidate all cache lines, discarding any dirty data */
	__asm__ __volatile__ (
		"movec  %0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATE) );
#endif
}

static inline void __flush_cache_all(void)
{
#ifdef CACHE_PUSH
	mcf_cache_push();
#endif
	__clear_cache_all();
}

/*
 * Some ColdFire parts implement separate instruction and data caches;
 * on those we need only flush the appropriate cache. If no specific
 * flushing is required then the whole call will be optimized away.
 */
static inline void __flush_icache_all(void)
{
#ifdef CACHE_INVALIDATEI
	__asm__ __volatile__ (
		"movec  %0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATEI) );
#endif
}

static inline void __flush_dcache_all(void)
{
#ifdef CACHE_PUSH
	mcf_cache_push();
#endif
#ifdef CACHE_INVALIDATED
	__asm__ __volatile__ (
		"movec  %0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATED) );
#else
	/* Flush the write buffer */
	__asm__ __volatile__ ( "nop" );
#endif
}

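/*
 * Usage sketch (illustrative only): code written through the data
 * cache, for example a trampoline or freshly loaded module text, must
 * be pushed to memory and the stale instruction cache contents dropped
 * before it is executed. install_code() is a hypothetical helper, not
 * a kernel API.
 */
#if 0
static void install_code(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);				/* new text lands in the dcache */
	flush_dcache_range((unsigned long) dst, len);	/* push it out to RAM */
	flush_icache_range((unsigned long) dst, len);	/* drop stale icache lines */
}
#endif
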
/*
 * Push cache entries at the supplied address. We want to write back any
 * dirty data and then invalidate the cache lines associated with this
 * address.
 */
static inline void cache_push(unsigned long paddr, int len)
{
	/* No per-range primitive is used here; flush the entire cache */
	__flush_cache_all();
}

/*
 * Clear cache entries at the supplied address (that is, don't write
 * back any dirty data).
 */
static inline void cache_clear(unsigned long paddr, int len)
{
	/* As above, the range is ignored; invalidate the entire cache */
	__clear_cache_all();
}

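/*
 * Usage sketch (illustrative only): after a DMA transfer from a device
 * into memory, any cached copies of the buffer are stale and must be
 * discarded without write back, which is what cache_clear() provides.
 * dma_wait_rx() is a hypothetical completion hook.
 */
#if 0
static void example_dma_in(unsigned long paddr, int len)
{
	dma_wait_rx();			/* hypothetical: device has written RAM */
	cache_clear(paddr, len);	/* drop stale lines, no write back */
}
#endif
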
#endif /* _M68KNOMMU_CACHEFLUSH_H */