linux/arch/mips/mm/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

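/*
 * Cache maintenance goes through the function pointers below; each CPU
 * family's cache init code (r3k/r4k/tx39/octeon, see cpu_cache_init() at
 * the bottom of this file) installs its own implementations.
 */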
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
        unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
        unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

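/*
 * cacheflush(2): make a range of user virtual addresses coherent between
 * the data and instruction caches.  Regardless of the cache argument
 * (ICACHE, DCACHE or BCACHE) the range is handled by the icache
 * user-range flush, which on most implementations also writes back the
 * corresponding dcache lines.
 */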
/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        if (bytes == 0)
                return 0;
        if (!access_ok((void __user *) addr, bytes))
                return -EFAULT;

        __flush_icache_user_range(addr, addr + bytes);

        return 0;
}

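/*
 * Write back the kernel-mapping view of a page cache page.  If the page
 * belongs to a mapping that is not currently mapped into user space, the
 * flush is deferred: the page is only marked dcache-dirty and
 * __update_cache() will do the work once a PTE is actually installed.
 */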
void __flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping_file(page);
        unsigned long addr;

        if (mapping && !mapping_mapped(mapping)) {
                SetPageDcacheDirty(page);
                return;
        }

        /*
         * We could delay the flush for the !page_mapping case too.  But that
         * case is for exec env/arg pages and those are 99% certain to get
         * faulted into the TLB (and thus flushed) anyway.
         */
        if (PageHighMem(page))
                addr = (unsigned long)kmap_atomic(page);
        else
                addr = (unsigned long)page_address(page);

        flush_data_cache_page(addr);

        if (PageHighMem(page))
                kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

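/*
 * Make the kernel view of an anonymous page coherent with the user
 * mapping at vmaddr.  This is only necessary when the two virtual
 * addresses can alias in the data cache.  If the page is mapped and the
 * kernel side is not marked dirty, flush the user-coloured lines through
 * a temporary kmap_coherent() mapping; otherwise flush the kernel
 * mapping.
 */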
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (page_mapcount(page) && !Page_dcache_dirty(page)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        flush_data_cache_page((unsigned long)kaddr);
                        kunmap_coherent();
                } else
                        flush_data_cache_page(addr);
        }
}

EXPORT_SYMBOL(__flush_anon_page);

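/*
 * Called when a PTE for this page is installed: perform any dcache flush
 * that __flush_dcache_page() deferred, either because the new mapping is
 * executable (and the icache does not snoop the dcache) or because the
 * kernel and user addresses alias in the cache.
 */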
void __update_cache(unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;
        int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

        pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
        if (Page_dcache_dirty(page)) {
                if (PageHighMem(page))
                        addr = (unsigned long)kmap_atomic(page);
                else
                        addr = (unsigned long)page_address(page);

                if (exec || pages_do_alias(addr, address & PAGE_MASK))
                        flush_data_cache_page(addr);

                if (PageHighMem(page))
                        kunmap_atomic((void *)addr);

                ClearPageDcacheDirty(page);
        }
}

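/*
 * Cache coherency attribute ORed into the protection bits of every page;
 * it is normally chosen by the CPU-specific cache setup code invoked from
 * cpu_cache_init() below.
 */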
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p)   __pgprot(_page_cachable_default | (p))

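/*
 * protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of a VMA: entries 0-7 cover private mappings, entries 8-15 shared
 * ones.  Only shared writable mappings get _PAGE_WRITE directly; private
 * writable mappings rely on copy-on-write through the fault path.
 */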
static inline void setup_protection_map(void)
{
        protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
        protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
        protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
        protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
        protection_map[4]  = PM(_PAGE_PRESENT);
        protection_map[5]  = PM(_PAGE_PRESENT);
        protection_map[6]  = PM(_PAGE_PRESENT);
        protection_map[7]  = PM(_PAGE_PRESENT);

        protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
        protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
        protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
                                _PAGE_NO_READ);
        protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
        protection_map[12] = PM(_PAGE_PRESENT);
        protection_map[13] = PM(_PAGE_PRESENT);
        protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
        protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}

#undef PM

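/*
 * Run the cache initialisation routine matching this CPU's cache type,
 * which fills in the function pointers above and sets
 * _page_cachable_default, then derive protection_map[] from it.
 */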
void cpu_cache_init(void)
{
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);

                r3k_cache_init();
        }
        if (cpu_has_4k_cache) {
                extern void __weak r4k_cache_init(void);

                r4k_cache_init();
        }
        if (cpu_has_tx39_cache) {
                extern void __weak tx39_cache_init(void);

                tx39_cache_init();
        }

        if (cpu_has_octeon_cache) {
                extern void __weak octeon_cache_init(void);

                octeon_cache_init();
        }

        setup_protection_map();
}