linux/arch/mips/mm/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE,
 * but that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

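/*
 * Illustrative sketch, not part of the original file: userspace reaches
 * this syscall through the MIPS libc wrapper declared in <sys/cachectl.h>.
 * A JIT that has just emitted instructions would typically do:
 *
 *	#include <sys/cachectl.h>
 *
 *	unsigned char buf[64];
 *	// ... write machine code into buf ...
 *	if (cacheflush(buf, sizeof(buf), BCACHE) < 0)
 *		perror("cacheflush");
 *
 * BCACHE requests both caches; as the comment above notes, the kernel
 * simply calls flush_icache_range(), which writes back the dcache and
 * invalidates the icache for the range regardless of the cache argument.
 */
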
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

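/*
 * Sketch of the aliasing test, an assumption drawn from the usual MIPS
 * <asm/page.h> definition rather than from this file:
 *
 *	#define pages_do_alias(addr1, addr2) \
 *		((addr1 ^ addr2) & shm_align_mask)
 *
 * Two virtual addresses alias when they differ inside the span indexed
 * by a VIPT cache way, so the same physical page can occupy two cache
 * lines at once; that is the case __flush_anon_page() must clean up.
 */
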
void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long addr;

	if (PageHighMem(page))
		return;

	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
EXPORT_SYMBOL_GPL(__flush_icache_page);

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

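/*
 * Sketch, assuming the conventional MIPS wiring in <asm/pgtable.h> (not
 * shown in this file): __update_cache() is the deferred half of the lazy
 * dcache flush started by __flush_dcache_page().  TLB faults funnel
 * through something like
 *
 *	static inline void update_mmu_cache(struct vm_area_struct *vma,
 *		unsigned long address, pte_t *ptep)
 *	{
 *		pte_t pte = *ptep;
 *		__update_cache(vma, address, pte);
 *		__update_tlb(vma, address, pte);
 *	}
 *
 * so a page marked with SetPageDcacheDirty() is flushed the first time
 * it is actually mapped into a user address space.
 */
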
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

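/*
 * Sketch of the indexing, an assumption from generic mm behaviour rather
 * than anything in this file: vm_get_page_prot() selects an entry with
 *
 *	protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]
 *
 * so entries 0-7 are the private combinations and 8-15 the VM_SHARED
 * ones.  That is why, in the RIXI table above, only the shared writable
 * entries (10, 11, 14, 15) carry _PAGE_WRITE: private writable mappings
 * stay read-only until copy-on-write makes them writable.
 */
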
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

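/*
 * Note on the __weak externs above (an inference, not stated in this
 * file): declaring each r*_cache_init() weak lets this translation unit
 * link even when only one cache flavour is compiled in; the cpu_has_*
 * predicates are constant per platform, so only the variant that
 * actually exists in the image is ever called.
 */
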
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}
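/*
 * Usage sketch, assuming the /dev/mem conventions of drivers/char/mem.c
 * rather than a caller shown here: a phys_mem_access_prot() style helper
 * could consult this to pick a page protection, e.g.
 *
 *	if (__uncached_access(file, addr))
 *		prot = pgprot_noncached(prot);
 *
 * O_DSYNC opens and physical addresses at or above __pa(high_memory),
 * i.e. outside the kernel's cached lowmem mapping, are mapped uncached.
 */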