/* linux/arch/mips/mm/cache.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
/*
 * Cache operations, dispatched through function pointers.
 * NOTE(review): presumably installed by the per-variant
 * r*_cache_init() routines invoked from cpu_cache_init() below --
 * confirm against the variant cache files.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

/* Hooks run around vmap/vunmap of kernel virtual mappings. */
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
  46
#ifdef CONFIG_DMA_NONCOHERENT

/*
 * DMA cache operations (writeback+invalidate, writeback, invalidate),
 * only needed when DMA is not cache-coherent.
 */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */
  57
/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
/*
 * cacheflush(2) system call: flush the caches for a user address range.
 * Note the 'cache' argument is accepted but ignored here -- the whole
 * job is done by flush_icache_range() regardless (see comment above).
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	/* Zero-length flush is a successful no-op. */
	if (bytes == 0)
		return 0;
	/* Reject ranges that are not valid user memory. */
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
  74
  75void __flush_dcache_page(struct page *page)
  76{
  77        struct address_space *mapping = page_mapping(page);
  78        unsigned long addr;
  79
  80        if (PageHighMem(page))
  81                return;
  82        if (mapping && !mapping_mapped(mapping)) {
  83                SetPageDcacheDirty(page);
  84                return;
  85        }
  86
  87        /*
  88         * We could delay the flush for the !page_mapping case too.  But that
  89         * case is for exec env/arg pages and those are %99 certainly going to
  90         * get faulted into the tlb (and thus flushed) anyways.
  91         */
  92        addr = (unsigned long) page_address(page);
  93        flush_data_cache_page(addr);
  94}
  95
  96EXPORT_SYMBOL(__flush_dcache_page);
  97
  98void __flush_anon_page(struct page *page, unsigned long vmaddr)
  99{
 100        unsigned long addr = (unsigned long) page_address(page);
 101
 102        if (pages_do_alias(addr, vmaddr)) {
 103                if (page_mapped(page) && !Page_dcache_dirty(page)) {
 104                        void *kaddr;
 105
 106                        kaddr = kmap_coherent(page, vmaddr);
 107                        flush_data_cache_page((unsigned long)kaddr);
 108                        kunmap_coherent();
 109                } else
 110                        flush_data_cache_page(addr);
 111        }
 112}
 113
 114EXPORT_SYMBOL(__flush_anon_page);
 115
 116void __update_cache(struct vm_area_struct *vma, unsigned long address,
 117        pte_t pte)
 118{
 119        struct page *page;
 120        unsigned long pfn, addr;
 121        int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
 122
 123        pfn = pte_pfn(pte);
 124        if (unlikely(!pfn_valid(pfn)))
 125                return;
 126        page = pfn_to_page(pfn);
 127        if (page_mapping(page) && Page_dcache_dirty(page)) {
 128                addr = (unsigned long) page_address(page);
 129                if (exec || pages_do_alias(addr, address & PAGE_MASK))
 130                        flush_data_cache_page(addr);
 131                ClearPageDcacheDirty(page);
 132        }
 133}
 134
/*
 * Default cacheability attribute for page protections.
 * NOTE(review): assigned outside this file (presumably by the cache
 * init code) and consumed by the PAGE_* macros -- confirm against the
 * pgtable headers.
 */
unsigned long _page_cachable_default;
EXPORT_SYMBOL_GPL(_page_cachable_default);
 137
 138static inline void setup_protection_map(void)
 139{
 140        protection_map[0] = PAGE_NONE;
 141        protection_map[1] = PAGE_READONLY;
 142        protection_map[2] = PAGE_COPY;
 143        protection_map[3] = PAGE_COPY;
 144        protection_map[4] = PAGE_READONLY;
 145        protection_map[5] = PAGE_READONLY;
 146        protection_map[6] = PAGE_COPY;
 147        protection_map[7] = PAGE_COPY;
 148        protection_map[8] = PAGE_NONE;
 149        protection_map[9] = PAGE_READONLY;
 150        protection_map[10] = PAGE_SHARED;
 151        protection_map[11] = PAGE_SHARED;
 152        protection_map[12] = PAGE_READONLY;
 153        protection_map[13] = PAGE_READONLY;
 154        protection_map[14] = PAGE_SHARED;
 155        protection_map[15] = PAGE_SHARED;
 156}
 157
/*
 * Run the cache initialization routine matching this CPU's cache
 * variant, then build the protection map.  Each branch declares its
 * init function __weak so the reference still links when that variant
 * is not compiled in.  NOTE(review): the r*_cache_init() routines
 * presumably install the flush_* function pointers declared above --
 * confirm against the per-variant cache files.
 */
void __devinit cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}
 194
 195int __weak __uncached_access(struct file *file, unsigned long addr)
 196{
 197        if (file->f_flags & O_SYNC)
 198                return 1;
 199
 200        return addr >= __pa(high_memory);
 201}
 202