linux/arch/mips/include/asm/cacheflush.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_cache_sigtramp() flushes the signal trampoline
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 */
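
/*
 * A hedged usage sketch, not part of this header: the canonical caller
 * sequence for memory the kernel has just written and is about to
 * execute.  "buf", "insns" and "len" are hypothetical:
 *
 *	memcpy(buf, insns, len);
 *	flush_icache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 * flush_icache_range() writes the new instructions back out of the
 * D-cache and invalidates any stale lines the I-cache may hold.
 */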

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);
extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	/*
	 * An aliasing D-cache must be flushed now; otherwise just mark
	 * the page dirty so flush_icache_page() can clean it lazily,
	 * unless the I-cache fills from the D-cache and needs nothing.
	 */
	if (cpu_has_dc_aliases)
		__flush_dcache_page(page);
	else if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);
}
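
/*
 * A hedged usage sketch, assuming a hypothetical path where the kernel
 * fills a page cache page that user space may have mapped; per the
 * generic cache flushing rules the write is followed by
 * flush_dcache_page():
 *
 *	void *kaddr = kmap_atomic(page);
 *	memcpy(kaddr, src, PAGE_SIZE);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 */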

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	/*
	 * An anonymous page may be dirty in the user's cache colour;
	 * only an aliasing D-cache needs the flush before the kernel
	 * touches the page through its own mapping.
	 */
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
	    Page_dcache_dirty(page)) {
		__flush_icache_page(vma, page);
		ClearPageDcacheDirty(page);
	}
}
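
/*
 * How the two helpers above cooperate: flush_dcache_page() defers work
 * on non-aliasing CPUs by setting PG_dcache_dirty, and
 * flush_icache_page(), historically run by the generic fault path
 * before an executable mapping's PTE is installed, performs the real
 * flush and clears the flag.  Pages that are never executed therefore
 * never pay for an I-cache flush.
 */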

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
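
/*
 * Worth noting: local_flush_icache_range() acts on the calling CPU
 * only, while flush_icache_range() is the one to use when every CPU
 * must see the new instructions (on SMP it typically has to reach the
 * other cores as well).
 */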

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}
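
/*
 * Both hooks above are driven by the generic vmalloc/vmap code rather
 * than by drivers: on a CPU with D-cache aliases a new kernel virtual
 * mapping can alias already cached lines, so the cache is cleaned when
 * such a mapping is created and again before it is torn down.
 */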

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
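
/*
 * A hedged sketch of what an aliasing-aware implementation of
 * copy_to_user_page() has to do (simplified; the real MIPS code lives
 * under arch/mips/mm/): write through a kernel mapping of the same
 * cache colour as the user address, then flush if the mapping is
 * executable.
 *
 *	if (cpu_has_dc_aliases && page_mapped(page)) {
 *		void *vto = kmap_coherent(page, vaddr) +
 *			    (vaddr & ~PAGE_MASK);
 *		memcpy(vto, src, len);
 *		kunmap_coherent();
 *	} else {
 *		memcpy(dst, src, len);
 *	}
 *	if (vma->vm_flags & VM_EXEC)
 *		flush_cache_page(vma, vaddr, page_to_pfn(page));
 */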

extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void *addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
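
/*
 * A hedged usage sketch with a hypothetical probe routine: executing
 * the probe through an uncached mapping keeps its own code and data
 * references from disturbing the very caches it is measuring.
 *
 *	config = run_uncached(probe_cache_config);
 */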

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}
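
/*
 * A hedged sketch of the intended kmap_coherent() pattern, loosely
 * modelled on a copy_user_highpage() style helper: map the source page
 * at a kernel address of the same cache colour as the user address
 * "vaddr" so the CPU sees the data user space last wrote, copy, then
 * drop the mapping.
 *
 *	if (cpu_has_dc_aliases && page_mapped(from)) {
 *		void *vfrom = kmap_coherent(from, vaddr);
 *		copy_page(vto, vfrom);
 *		kunmap_coherent();
 *	}
 */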

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/*
	 * Nothing needs writing back here.  The one unsupported case,
	 * a highmem page on an aliasing D-cache (it has no permanent
	 * kernel address to flush through), is caught loudly instead.
	 */
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}
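
/*
 * A hedged usage sketch for the two helpers above, following the
 * generic contract for I/O on vmap/vmalloc aliases: clean the alias
 * before a device (or any access through the physical pages) reads
 * the buffer, and invalidate it before the CPU reads data that
 * arrived through the physical pages.  "vmap_buf" is hypothetical:
 *
 *	flush_kernel_vmap_range(vmap_buf, size);	(before the I/O)
 *	... device reads and writes the pages ...
 *	invalidate_kernel_vmap_range(vmap_buf, size);	(before CPU reads)
 */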

#endif /* _ASM_CACHEFLUSH_H */