/* arch/sh/include/asm/cacheflush.h */
   1#ifndef __ASM_SH_CACHEFLUSH_H
   2#define __ASM_SH_CACHEFLUSH_H
   3
   4#ifdef __KERNEL__
   5
   6#include <linux/mm.h>
   7
   8/*
   9 * Cache flushing:
  10 *
  11 *  - flush_cache_all() flushes entire cache
  12 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  14 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  15 *  - flush_cache_range(vma, start, end) flushes a range of pages
  16 *
  17 *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
  18 *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
  19 *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
  20 *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
  21 */
  22extern void (*local_flush_cache_all)(void *args);
  23extern void (*local_flush_cache_mm)(void *args);
  24extern void (*local_flush_cache_dup_mm)(void *args);
  25extern void (*local_flush_cache_page)(void *args);
  26extern void (*local_flush_cache_range)(void *args);
  27extern void (*local_flush_dcache_page)(void *args);
  28extern void (*local_flush_icache_range)(void *args);
  29extern void (*local_flush_icache_page)(void *args);
  30extern void (*local_flush_cache_sigtramp)(void *args);
  31
  32static inline void cache_noop(void *args) { }
  33
  34extern void (*__flush_wback_region)(void *start, int size);
  35extern void (*__flush_purge_region)(void *start, int size);
  36extern void (*__flush_invalidate_region)(void *start, int size);
  37
  38extern void flush_cache_all(void);
  39extern void flush_cache_mm(struct mm_struct *mm);
  40extern void flush_cache_dup_mm(struct mm_struct *mm);
  41extern void flush_cache_page(struct vm_area_struct *vma,
  42                                unsigned long addr, unsigned long pfn);
  43extern void flush_cache_range(struct vm_area_struct *vma,
  44                                 unsigned long start, unsigned long end);
  45extern void flush_dcache_page(struct page *page);
  46extern void flush_icache_range(unsigned long start, unsigned long end);
  47extern void flush_icache_page(struct vm_area_struct *vma,
  48                                 struct page *page);
  49extern void flush_cache_sigtramp(unsigned long address);
  50
/*
 * Bundle of arguments for a cache-flush operation: the target vma plus
 * one or two addresses (e.g. a single address and pfn, or a range's
 * start and end, depending on the operation).
 * NOTE(review): presumably this is what gets packed into the `void *args`
 * of the local_flush_*() function pointers above — confirm against the
 * callers in arch/sh/mm.
 */
struct flusher_data {
        struct vm_area_struct *vma;
        unsigned long addr1, addr2;
};
  55
  56#define ARCH_HAS_FLUSH_ANON_PAGE
  57extern void __flush_anon_page(struct page *page, unsigned long);
  58
  59static inline void flush_anon_page(struct vm_area_struct *vma,
  60                                   struct page *page, unsigned long vmaddr)
  61{
  62        if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
  63                __flush_anon_page(page, vmaddr);
  64}
  65
  66#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
  67static inline void flush_kernel_dcache_page(struct page *page)
  68{
  69        flush_dcache_page(page);
  70}
  71
  72extern void copy_to_user_page(struct vm_area_struct *vma,
  73        struct page *page, unsigned long vaddr, void *dst, const void *src,
  74        unsigned long len);
  75
  76extern void copy_from_user_page(struct vm_area_struct *vma,
  77        struct page *page, unsigned long vaddr, void *dst, const void *src,
  78        unsigned long len);
  79
  80#define flush_cache_vmap(start, end)            flush_cache_all()
  81#define flush_cache_vunmap(start, end)          flush_cache_all()
  82
  83#define flush_dcache_mmap_lock(mapping)         do { } while (0)
  84#define flush_dcache_mmap_unlock(mapping)       do { } while (0)
  85
  86void kmap_coherent_init(void);
  87void *kmap_coherent(struct page *page, unsigned long addr);
  88void kunmap_coherent(void *kvaddr);
  89
  90#define PG_dcache_dirty PG_arch_1
  91
  92void cpu_cache_init(void);
  93
  94#endif /* __KERNEL__ */
  95#endif /* __ASM_SH_CACHEFLUSH_H */
  96