linux/arch/mips/mm/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>

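/*
 * Each function pointer below is installed at boot by the CPU-specific
 * cache_init() routine that cpu_cache_init() selects for the detected
 * cache variant, so generic code can flush caches without knowing which
 * of the many MIPS cache implementations is present.
 */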
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
        unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
        unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

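/*
 * On platforms without cache-coherent DMA these hooks maintain the
 * D-cache around transfers: _dma_cache_wback() writes dirty lines back
 * before a device reads the buffer, _dma_cache_inv() discards stale
 * lines before the CPU reads data a device has written, and
 * _dma_cache_wback_inv() does both for bidirectional buffers.
 */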
#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

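/*
 * sys_cacheflush(): the MIPS cacheflush(2) system call.  Code that
 * generates instructions at run time (JITs, trampolines) must call it
 * after writing them, since most MIPS CPUs do not keep the I-cache
 * coherent with D-cache writes.
 */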
/*
 * We could optimize the case where the cache argument is not BCACHE,
 * but that seems like a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        if (bytes == 0)
                return 0;
        if (!access_ok((void __user *) addr, bytes))
                return -EFAULT;

        __flush_icache_user_range(addr, addr + bytes);

        return 0;
}

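/*
 * Flush the D-cache for a page, or defer the flush: a page-cache page
 * that is not currently mapped into user space cannot be stale there
 * yet, so just mark it dcache-dirty and let __update_cache() do the
 * work if and when the page is faulted into a user mapping.
 */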
void __flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping_file(page);
        unsigned long addr;

        if (mapping && !mapping_mapped(mapping)) {
                SetPageDcacheDirty(page);
                return;
        }

        /*
         * We could delay the flush for the !page_mapping case too.  But that
         * case is for exec env/arg pages and those are 99% certain to get
         * faulted into the TLB (and thus flushed) anyway.
         */
        if (PageHighMem(page))
                addr = (unsigned long)kmap_atomic(page);
        else
                addr = (unsigned long)page_address(page);

        flush_data_cache_page(addr);

        if (PageHighMem(page))
                __kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

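/*
 * An anonymous page may be mapped at a user address whose cache colour
 * differs from that of the kernel's linear mapping of the page.  When
 * the two addresses alias in the virtually indexed D-cache, flush the
 * view that may hold stale data: the user colour via a temporary
 * coherent mapping while the page is mapped and clean, otherwise the
 * kernel address.
 */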
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (page_mapcount(page) && !Page_dcache_dirty(page)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        flush_data_cache_page((unsigned long)kaddr);
                        kunmap_coherent();
                } else
                        flush_data_cache_page(addr);
        }
}

EXPORT_SYMBOL(__flush_anon_page);

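/*
 * Called when a PTE is installed, typically via update_mmu_cache():
 * perform any flush that __flush_dcache_page() deferred.  The flush is
 * needed only if the page is executable (and the I-cache does not fill
 * straight from the D-cache) or if the kernel and user views of the
 * page alias in the cache.
 */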
void __update_cache(unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;
        int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

        pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
        if (Page_dcache_dirty(page)) {
                if (PageHighMem(page))
                        addr = (unsigned long)kmap_atomic(page);
                else
                        addr = (unsigned long)page_address(page);

                if (exec || pages_do_alias(addr, address & PAGE_MASK))
                        flush_data_cache_page(addr);

                if (PageHighMem(page))
                        __kunmap_atomic((void *)addr);

                ClearPageDcacheDirty(page);
        }
}

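/*
 * Default cache coherency attribute (the _CACHE_* bits) ORed into page
 * protections; the CPU-specific cache_init() code sets it to match the
 * coherency mode the platform runs with.
 */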
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

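/*
 * protection_map[] is indexed by
 * vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED); entries 0-7
 * cover private mappings and 8-15 shared ones.  With RIXI (the Read
 * Inhibit / eXecute Inhibit PTE bits) every combination can be encoded
 * exactly; without it we fall back to the classic PAGE_* protections,
 * which cannot express no-read or no-exec.
 */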
static inline void setup_protection_map(void)
{
        if (cpu_has_rixi) {
                protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

                protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
                protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
                protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

        } else {
                protection_map[0] = PAGE_NONE;
                protection_map[1] = PAGE_READONLY;
                protection_map[2] = PAGE_COPY;
                protection_map[3] = PAGE_COPY;
                protection_map[4] = PAGE_READONLY;
                protection_map[5] = PAGE_READONLY;
                protection_map[6] = PAGE_COPY;
                protection_map[7] = PAGE_COPY;
                protection_map[8] = PAGE_NONE;
                protection_map[9] = PAGE_READONLY;
                protection_map[10] = PAGE_SHARED;
                protection_map[11] = PAGE_SHARED;
                protection_map[12] = PAGE_READONLY;
                protection_map[13] = PAGE_READONLY;
                protection_map[14] = PAGE_SHARED;
                protection_map[15] = PAGE_SHARED;
        }
}

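/*
 * Probe the CPU's cache type and hand control to the matching init
 * routine, which sizes the caches and installs the flush function
 * pointers declared above.  The extern declarations are __weak so the
 * kernel still links when a particular variant's implementation is not
 * built; its cpu_has_*_cache test then never fires.
 */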
void cpu_cache_init(void)
{
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);

                r3k_cache_init();
        }
        if (cpu_has_6k_cache) {
                extern void __weak r6k_cache_init(void);

                r6k_cache_init();
        }
        if (cpu_has_4k_cache) {
                extern void __weak r4k_cache_init(void);

                r4k_cache_init();
        }
        if (cpu_has_8k_cache) {
                extern void __weak r8k_cache_init(void);

                r8k_cache_init();
        }
        if (cpu_has_tx39_cache) {
                extern void __weak tx39_cache_init(void);

                tx39_cache_init();
        }

        if (cpu_has_octeon_cache) {
                extern void __weak octeon_cache_init(void);

                octeon_cache_init();
        }

        setup_protection_map();
}

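/*
 * Default policy for whether a mapping of physical memory (e.g. via
 * /dev/mem) must be uncached: yes if the caller requested synchronous
 * writes with O_DSYNC, or if the address lies beyond the kernel's
 * cached view of RAM.  Platforms may override this weak default.
 */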
int __weak __uncached_access(struct file *file, unsigned long addr)
{
        if (file->f_flags & O_DSYNC)
                return 1;

        return addr >= __pa(high_memory);
}