/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

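/*
 * Per-CPU cache operations. Everything defaults to cache_noop; the
 * CPU-family hooks called from cpu_cache_init() install the real
 * implementations for each part at boot.
 */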
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

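/*
 * Low-level region operations: write back dirty lines, purge
 * (write back + invalidate), and invalidate without write-back.
 */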
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

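/* Used while the cache is disabled or not yet set up; see cpu_cache_init(). */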
static inline void noop__flush_region(void *start, int size)
{
}

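/*
 * Run a cache operation on every online CPU: smp_call_function()
 * covers the remote CPUs while func() is invoked directly on the
 * local one, with preemption disabled across both.
 */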
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
        preempt_disable();
        smp_call_function(func, info, wait);
        func(info);
        preempt_enable();
}

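/*
 * Write into a user page on behalf of e.g. ptrace. With d-cache
 * aliases, write through a kernel mapping of the same cache colour as
 * the user address so userspace sees the new data; otherwise write
 * the kernel view and mark the page so the alias is resolved later
 * (see __update_cache()). Executable mappings also get their i-cache
 * flushed.
 */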
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

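/*
 * Read from a user page. The colour-matched mapping is only needed to
 * observe userspace's most recent writes; reading does not dirty the
 * kernel-side mapping, so no PG_dcache_dirty marking is done here.
 */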
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else
                memcpy(dst, src, len);
}

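/*
 * Copy a user page (COW break, fork). The source is read through a
 * colour-matched mapping when its kernel view may be stale; the
 * destination is purged when its kernel address aliases the user one.
 */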
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            !test_bit(PG_dcache_dirty, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is copied on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

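/*
 * Zero a user page, purging the kernel mapping afterwards if it
 * aliases the user address so the zeroes reach memory first.
 */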
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

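/*
 * Called when a PTE is established. If a deferred flush was recorded
 * via PG_dcache_dirty, resolve it now by purging the aliasing kernel
 * mapping.
 */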
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
                        unsigned long addr = (unsigned long)page_address(page);

                        if (pages_do_alias(addr, address & PAGE_MASK))
                                __flush_purge_region((void *)addr, PAGE_SIZE);
                }
        }
}

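/*
 * Resolve aliasing for an anonymous page about to be accessed through
 * a user mapping. When a coherent mapping can be used, the purge is
 * currently handled by kunmap_coherent(); otherwise the default
 * kernel mapping is purged directly.
 */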
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    !test_bit(PG_dcache_dirty, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

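/*
 * SMP-safe entry points: each wrapper broadcasts the corresponding
 * local_flush_*() operation to all CPUs via cacheop_on_each_cpu().
 * Multi-argument operations are marshalled through a flusher_data
 * struct, since only a single info pointer can be passed along.
 */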
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

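/*
 * A cache way larger than a page can hold the same physical page at
 * several indices: alias_mask selects the index bits above PAGE_SHIFT
 * and n_aliases is the number of distinct page colours (0 when the
 * cache cannot alias).
 *
 * Worked example (illustrative values): a 2-way cache with 256 sets
 * and 32-byte lines (entry_shift = 5) gives alias_mask =
 * (255 << 5) & ~0xfff = 0x1000, hence n_aliases = 2 with 4KB pages.
 */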
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

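/*
 * Boot-time setup: compute the alias parameters, then let the
 * matching CPU-family initializer install the real flush operations.
 * The *_cache_init() hooks are declared __weak so that kernels built
 * without a given family's support still link.
 */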
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region            = noop__flush_region;
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

skip:
        emit_cache_params();
}