linux/arch/mips/include/asm/cacheflush.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 *  - __flush_icache_user_range(start, end) flushes a range of user instructions
 */
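
/*
 * Illustrative sketch (not part of this header; "dst", "insns" and "len"
 * are hypothetical): code that writes instructions into memory is expected
 * to write the dcache back and invalidate the icache before executing
 * them, e.g.:
 *
 *      memcpy(dst, insns, len);
 *      flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */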

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty                 PG_arch_1

#define Page_dcache_dirty(page)         \
        test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)        \
        set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)      \
        clear_bit(PG_dcache_dirty, &(page)->flags)
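
/*
 * Illustrative sketch of the deferred-flush protocol these bits support
 * ("page" is a hypothetical struct page *): flush_dcache_page() may only
 * record that a flush is pending, and arch code performing the real flush
 * later would do something like:
 *
 *      if (Page_dcache_dirty(page)) {
 *              __flush_dcache_page(page);
 *              ClearPageDcacheDirty(page);
 *      }
 */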

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)  do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
        unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
        if (cpu_has_dc_aliases)
                __flush_dcache_page(page);
        else if (!cpu_has_ic_fills_f_dc)
                SetPageDcacheDirty(page);
}
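
/*
 * Sketch of a typical caller ("data" and "len" are hypothetical): code
 * that modifies page contents through the kernel mapping and then exposes
 * them to user space calls flush_dcache_page(), so aliasing dcaches are
 * either flushed now or have the flush deferred via PG_dcache_dirty:
 *
 *      kaddr = kmap(page);
 *      memcpy(kaddr, data, len);
 *      kunmap(page);
 *      flush_dcache_page(page);
 */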

#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vmaddr)
{
        if (cpu_has_dc_aliases && PageAnon(page))
                __flush_anon_page(page, vmaddr);
}

static inline void flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
{
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
                                         unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
                                               unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
        if (cpu_has_dc_aliases)
                __flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        if (cpu_has_dc_aliases)
                __flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len);
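
/*
 * Sketch ("uaddr", "buf" and "len" are hypothetical): callers such as
 * ptrace that modify another process' page through a kernel mapping use
 * copy_to_user_page() so the cache maintenance for the user-visible alias
 * happens in one place:
 *
 *      kaddr = kmap(page);
 *      copy_to_user_page(vma, page, uaddr,
 *                        kaddr + offset_in_page(uaddr), buf, len);
 *      kunmap(page);
 */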

extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
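
/*
 * Sketch ("my_cache_probe" is hypothetical): the probe function is entered
 * from an uncached address, so running it does not disturb the caches it
 * is trying to measure:
 *
 *      run_uncached(&my_cache_probe);
 */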

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
        kunmap_coherent();
}
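
/*
 * Sketch ("uaddr", "src" and "len" are hypothetical): kmap_coherent()
 * returns a kernel mapping placed in the same cache colour as the user
 * mapping of "page" at "uaddr", so a copy through it cannot create a
 * dcache alias:
 *
 *      void *vto = kmap_coherent(page, uaddr);
 *      memcpy(vto, src, len);
 *      kunmap_coherent();
 */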

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
        if (cpu_has_dc_aliases)
                __flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        if (cpu_has_dc_aliases)
                __flush_kernel_vmap_range((unsigned long) vaddr, size);
}
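
/*
 * Sketch ("vbuf" and "size" are hypothetical): a driver doing I/O on a
 * vmap()ed buffer writes the dcache back before the device reads the
 * buffer and invalidates it before reading what the device wrote:
 *
 *      flush_kernel_vmap_range(vbuf, size);
 *      ... perform the transfer ...
 *      invalidate_kernel_vmap_range(vbuf, size);
 */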

#endif /* _ASM_CACHEFLUSH_H */