// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

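/*
 * Per-CPU cache maintenance hooks. These all default to cache_noop
 * and are pointed at the appropriate CPU-specific implementations by
 * the family cache_init() routines invoked from cpu_cache_init().
 */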
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

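/*
 * Region-based flush primitives: write-back, write-back + invalidate
 * (purge), and invalidate-only. Likewise filled in by the CPU-specific
 * cache initialization code.
 */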
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

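/* Placeholder used while the caches are disabled or not yet set up. */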
static inline void noop__flush_region(void *start, int size)
{
}

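/*
 * Run a cache operation on every online CPU, including the local one,
 * with preemption disabled so we cannot migrate mid-flush.
 */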
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
        preempt_disable();

        /* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
        /*
         * It's possible that this gets called early on when IRQs are
         * still disabled due to ioremapping by the boot CPU, so don't
         * even attempt IPIs unless there are other CPUs online.
         */
        if (num_online_cpus() > 1)
                smp_call_function(func, info, wait);
#endif

        func(info);

        preempt_enable();
}

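/*
 * Write into a user page while keeping the D-cache coherent. If the
 * page is mapped and its cached state is clean, write through a
 * coherent kernel mapping congruent with the user address; otherwise
 * write through the plain kernel mapping and mark the cached state
 * stale. Executable mappings additionally get the page flushed so the
 * I-cache will see the new instructions.
 */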
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

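/*
 * Read from a user page, the mirror of copy_to_user_page(): when a
 * clean coherent view exists, read through it so any aliased cache
 * lines are observed.
 */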
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }
}

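/*
 * Copy a page on behalf of a user mapping (e.g. for copy-on-write).
 * The source is read through a coherent mapping when it may have
 * aliased cache lines, and the destination is purged whenever its
 * kernel address aliases the user address or the VMA is executable.
 */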
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);

        if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
            test_bit(PG_dcache_clean, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto);
        /* Make sure the copy is visible to other CPUs before use */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

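/*
 * Zero a page for a user mapping, purging the kernel-side cache lines
 * when they alias the user address.
 */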
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

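/*
 * Called on PTE updates for aliasing caches: if the backing page still
 * has potentially dirty kernel-side cache lines (PG_dcache_clean not
 * yet set), purge them so the new user mapping starts out coherent.
 */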
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        if (pfn_valid(pfn)) {
                int dirty;

                /* Only translate the pfn once it is known to be valid. */
                page = pfn_to_page(pfn);
                dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}

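/*
 * Flush an anonymous page before its contents are accessed through the
 * kernel mapping, but only when the kernel and user addresses actually
 * alias in the cache.
 */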
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
                    test_bit(PG_dcache_clean, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

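/*
 * The generic flush_*() entry points below fan each operation out to
 * all CPUs via cacheop_on_each_cpu(), packing multi-argument calls
 * into a struct flusher_data.
 */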
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

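/*
 * Derive the aliasing parameters of one cache: alias_mask covers the
 * virtual address bits that select a cache set but lie above the page
 * offset, and n_aliases is the number of cache colours a page can
 * take. For example, 512 sets of 32-byte lines (entry_shift == 5)
 * with 4 KiB pages gives alias_mask == 0x3000 and n_aliases == 4.
 * Without an MMU there is no virtual aliasing, so both are zero.
 */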
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
        c->alias_mask = 0;
#endif
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

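/* Log the probed cache geometry during boot. */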
static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

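/*
 * Boot-time cache setup: compute the aliasing parameters, then chain
 * into the CPU family's cache_init() to install the real flush
 * handlers. If the caches are disabled in CCR, the noop handlers
 * remain in place and only the parameters are reported.
 */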
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef SH_CCR
        cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region            = noop__flush_region;
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.type == CPU_J2) {
                extern void __weak j2_cache_init(void);

                j2_cache_init();
        } else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();

                if ((boot_cpu_data.type == CPU_SH7786) ||
                    (boot_cpu_data.type == CPU_SHX3)) {
                        extern void __weak shx3_cache_init(void);

                        shx3_cache_init();
                }
        }

skip:
        emit_cache_params();
}