/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>

#define CR_L2   (1 << 26)

#define CACHE_LINE_SIZE         32
#define CACHE_LINE_SHIFT        5
#define CACHE_WAY_PER_SET       8

#define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

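/*
 * Probe for an L2 cache by reading the L2 cache type register:
 * the relevant bits read as zero when no L2 is fitted.
 */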
static inline int xsc3_l2_present(void)
{
        unsigned long l2ctype;

        __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

        return !!(l2ctype & 0xf8);
}

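/* Clean a single L2 cache line by MVA (CP15 c7, c11, 1). */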
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

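/* Invalidate a single L2 cache line by MVA (CP15 c7, c7, 1). */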
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

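/*
 * Invalidate the entire L2 cache by walking every set/way index:
 * the way number goes in bits [31:29] of the operand and the set
 * index starts at the cache line boundary (bit CACHE_LINE_SHIFT).
 */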
static inline void xsc3_l2_inv_all(void)
{
        unsigned long l2ctype, set_way;
        int set, way;

        __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

        for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
                for (way = 0; way < CACHE_WAY_PER_SET; way++) {
                        set_way = (way << 29) | (set << 5);
                        __asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
                }
        }

        dsb();
}

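/*
 * The MVA-based operations above need a virtual mapping for the
 * target physical address.  With CONFIG_HIGHMEM the page may not be
 * permanently mapped, so the helpers below create (and tear down) a
 * temporary atomic mapping, reusing it while successive addresses
 * stay within the same page.
 */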
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
        if (va != -1)
                kunmap_atomic((void *)va);
#endif
}

static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
        unsigned long va = prev_va & PAGE_MASK;
        unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
        if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
                /*
                 * Switching to a new page.  Because the cache ops
                 * take virtual addresses only, we must put a mapping
                 * in place for it first.
                 */
                l2_unmap_va(prev_va);
                va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
        }
        return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
        return __phys_to_virt(pa);
#endif
}

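/*
 * Invalidate [start, end).  Partial cache lines at either end are
 * cleaned before being invalidated, so that dirty data sharing the
 * line but outside the range is not lost.
 */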
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
        unsigned long vaddr;

        if (start == 0 && end == -1ul) {
                xsc3_l2_inv_all();
                return;
        }

        vaddr = -1;  /* to force the first mapping */

        /*
         * Clean and invalidate partial first cache line.
         */
        if (start & (CACHE_LINE_SIZE - 1)) {
                vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
                xsc3_l2_clean_mva(vaddr);
                xsc3_l2_inv_mva(vaddr);
                start = (start | (CACHE_LINE_SIZE - 1)) + 1;
        }

        /*
         * Invalidate all full cache lines between 'start' and 'end'.
         */
        while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
                vaddr = l2_map_va(start, vaddr);
                xsc3_l2_inv_mva(vaddr);
                start += CACHE_LINE_SIZE;
        }

        /*
         * Clean and invalidate partial last cache line.
         */
        if (start < end) {
                vaddr = l2_map_va(start, vaddr);
                xsc3_l2_clean_mva(vaddr);
                xsc3_l2_inv_mva(vaddr);
        }

        l2_unmap_va(vaddr);

        dsb();
}

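/* Clean (write back) every cache line in [start, end). */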
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
        unsigned long vaddr;

        vaddr = -1;  /* to force the first mapping */

        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                vaddr = l2_map_va(start, vaddr);
                xsc3_l2_clean_mva(vaddr);
                start += CACHE_LINE_SIZE;
        }

        l2_unmap_va(vaddr);

        dsb();
}

/*
 * Optimized L2 flush-all operation, addressed by set/way rather
 * than MVA.
 */
static inline void xsc3_l2_flush_all(void)
{
        unsigned long l2ctype, set_way;
        int set, way;

        __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

        for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
                for (way = 0; way < CACHE_WAY_PER_SET; way++) {
                        set_way = (way << 29) | (set << 5);
                        __asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
                }
        }

        dsb();
}

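/*
 * Clean and invalidate [start, end); a whole-address-space request
 * is turned into the cheaper set/way flush above.
 */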
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
        unsigned long vaddr;

        if (start == 0 && end == -1ul) {
                xsc3_l2_flush_all();
                return;
        }

        vaddr = -1;  /* to force the first mapping */

        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                vaddr = l2_map_va(start, vaddr);
                xsc3_l2_clean_mva(vaddr);
                xsc3_l2_inv_mva(vaddr);
                start += CACHE_LINE_SIZE;
        }

        l2_unmap_va(vaddr);

        dsb();
}

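/*
 * Hook the range operations into the outer_cache vector, but only
 * if we are on an XScale3 with an L2 cache present and the L2
 * enable bit is set in the control register.
 */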
static int __init xsc3_l2_init(void)
{
        if (!cpu_is_xsc3() || !xsc3_l2_present())
                return 0;

        if (get_cr() & CR_L2) {
                pr_info("XScale3 L2 cache enabled.\n");
                xsc3_l2_inv_all();

                outer_cache.inv_range = xsc3_l2_inv_range;
                outer_cache.clean_range = xsc3_l2_clean_range;
                outer_cache.flush_range = xsc3_l2_flush_range;
        }

        return 0;
}
core_initcall(xsc3_l2_init);