linux/arch/mips/mm/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
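/*
 * These are function pointers rather than direct calls: each CPU
 * family's *_cache_init() routine (selected in cpu_cache_init() at the
 * bottom of this file) installs its own implementations, so generic
 * kernel code can call flush_cache_all() and friends without knowing
 * which cache variant it is running on.
 */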
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
        unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
        unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
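/*
 * These back the streaming DMA API on platforms where DMA is not cache
 * coherent: _dma_cache_wback() writes dirty lines back before a device
 * reads from memory, _dma_cache_inv() discards stale lines before the
 * CPU reads data a device has written, and _dma_cache_wback_inv() does
 * both for bidirectional transfers.
 */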
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE,
 * but that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        if (bytes == 0)
                return 0;
        if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
                return -EFAULT;

        flush_icache_range(addr, addr + bytes);

        return 0;
}
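
/*
 * Illustrative userspace usage (not part of this file): a JIT that has
 * just written instructions into a buffer must push them past the
 * D-cache and out of the I-cache before jumping to them.  Assuming the
 * MIPS libc wrapper declared in <sys/cachectl.h>, that might look like:
 *
 *	#include <sys/cachectl.h>
 *
 *	memcpy(code, insns, len);
 *	if (cacheflush(code, len, BCACHE) < 0)
 *		perror("cacheflush");
 *
 * Note that the cache argument is currently ignored here; both caches
 * are handled via flush_icache_range() regardless.
 */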
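/*
 * Flush the D-cache lines of a page, or defer the flush: if the page
 * belongs to an address_space that has no userspace mappings yet, we
 * only mark it PG_dcache_dirty and let __update_cache() do the real
 * flush once the first user mapping is established.
 */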
void __flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long addr;

        if (PageHighMem(page))
                return;
        if (mapping && !mapping_mapped(mapping)) {
                SetPageDcacheDirty(page);
                return;
        }

        /*
         * We could delay the flush for the !page_mapping case too.  But that
         * case is for exec env/arg pages and those are 99% certain to get
         * faulted into the TLB (and thus flushed) anyway.
         */
        addr = (unsigned long) page_address(page);
        flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

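/*
 * On VIPT caches, the kernel's mapping of a page and a user mapping at
 * vmaddr can occupy different cache lines whenever the two virtual
 * addresses differ in the bits that index the cache (pages_do_alias()).
 * If the page is currently mapped and not already marked dirty, flush
 * it through kmap_coherent(), which sets up a temporary kernel mapping
 * with the same cache colour as vmaddr so the flush hits the user's
 * lines; otherwise the ordinary kernel address will do.
 */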
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (page_mapped(page) && !Page_dcache_dirty(page)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        flush_data_cache_page((unsigned long)kaddr);
                        kunmap_coherent();
                } else
                        flush_data_cache_page(addr);
        }
}

EXPORT_SYMBOL(__flush_anon_page);

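/*
 * Called via update_mmu_cache() when a PTE is installed.  This is where
 * a flush deferred by __flush_dcache_page() finally happens: if the
 * page is marked PG_dcache_dirty and the new mapping either is
 * executable on a CPU whose I-cache does not fill from the D-cache
 * (!cpu_has_ic_fills_f_dc) or aliases the kernel mapping, write the
 * kernel view back before userspace can see stale data.
 */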
void __update_cache(struct vm_area_struct *vma, unsigned long address,
        pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;
        int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

        pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
                addr = (unsigned long) page_address(page);
                if (exec || pages_do_alias(addr, address & PAGE_MASK))
                        flush_data_cache_page(addr);
                ClearPageDcacheDirty(page);
        }
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

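/*
 * protection_map[] is indexed by the low four vm_flags bits: VM_READ
 * (1), VM_WRITE (2), VM_EXEC (4) and VM_SHARED (8).  Entries 0-7 are
 * private mappings, where a write must trigger copy-on-write, which is
 * why the private-writable entries carry no _PAGE_WRITE (the non-RIXI
 * table expresses the same thing with PAGE_COPY); entries 8-15 are
 * shared mappings.  With the SmartMIPS RIXI extension, read and execute
 * permission are controlled independently via _PAGE_NO_READ and
 * _PAGE_NO_EXEC, so the table is built bit by bit instead of from the
 * generic PAGE_* constants.  For example, a PROT_READ|PROT_WRITE
 * MAP_SHARED mapping lands at index 11.
 */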
static inline void setup_protection_map(void)
{
        if (kernel_uses_smartmips_rixi) {
                protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

                protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
                protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
                protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE  | _PAGE_NO_READ);
                protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

        } else {
                protection_map[0] = PAGE_NONE;
                protection_map[1] = PAGE_READONLY;
                protection_map[2] = PAGE_COPY;
                protection_map[3] = PAGE_COPY;
                protection_map[4] = PAGE_READONLY;
                protection_map[5] = PAGE_READONLY;
                protection_map[6] = PAGE_COPY;
                protection_map[7] = PAGE_COPY;
                protection_map[8] = PAGE_NONE;
                protection_map[9] = PAGE_READONLY;
                protection_map[10] = PAGE_SHARED;
                protection_map[11] = PAGE_SHARED;
                protection_map[12] = PAGE_READONLY;
                protection_map[13] = PAGE_READONLY;
                protection_map[14] = PAGE_SHARED;
                protection_map[15] = PAGE_SHARED;
        }
}

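/*
 * Pick the cache flavour at boot.  The cpu_has_* predicates are
 * compile-time constants on most platform configurations, so all but
 * the matching branch is optimized away; the __weak extern references
 * let this file link even when a given variant's *_cache_init() is not
 * built in.
 */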
void __cpuinit cpu_cache_init(void)
{
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);

                r3k_cache_init();
        }
        if (cpu_has_6k_cache) {
                extern void __weak r6k_cache_init(void);

                r6k_cache_init();
        }
        if (cpu_has_4k_cache) {
                extern void __weak r4k_cache_init(void);

                r4k_cache_init();
        }
        if (cpu_has_8k_cache) {
                extern void __weak r8k_cache_init(void);

                r8k_cache_init();
        }
        if (cpu_has_tx39_cache) {
                extern void __weak tx39_cache_init(void);

                tx39_cache_init();
        }

        if (cpu_has_octeon_cache) {
                extern void __weak octeon_cache_init(void);

                octeon_cache_init();
        }

        setup_protection_map();
}

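/*
 * Default policy for deciding whether access to a physical address
 * should go through an uncached mapping, e.g. for mmap() of device
 * files such as /dev/mem: O_DSYNC opens and addresses beyond the end
 * of RAM (high_memory) get uncached access.  Being __weak, this can be
 * overridden by platform code.
 */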
int __weak __uncached_access(struct file *file, unsigned long addr)
{
        if (file->f_flags & O_DSYNC)
                return 1;

        return addr >= __pa(high_memory);
}