linux/arch/score/mm/cache.c
/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>

/*
 * Write back and invalidate the D-cache lines covering one page.
 * The caller must ensure the page contains no instructions, because
 * this function does not touch the I-cache.
 * addr must be cache-line aligned.
 */
static void flush_data_cache_page(unsigned long addr)
{
        unsigned int i;

        /* Write back and invalidate every D-cache line in the page. */
        for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
                __asm__ __volatile__(
                "cache 0x0e, [%0, 0]\n"
                "cache 0x1a, [%0, 0]\n"
                "nop\n"
                : : "r" (addr));
                addr += L1_CACHE_BYTES;
        }
}
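
/*
 * Illustrative usage sketch (not part of the original file, hence kept
 * under "#if 0"): a caller that wants the D-cache contents of a whole
 * kernel-mapped data page written back would pass its page-aligned
 * kernel address, which satisfies the alignment requirement above.
 * The helper name is hypothetical.
 */
#if 0
static void example_flush_data_page(struct page *page)
{
        /* page_address() is page aligned, hence also cache-line aligned. */
        flush_data_cache_page((unsigned long) page_address(page));
}
#endif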

/* called by update_mmu_cache. */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
                pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;
        int exec = (vma->vm_flags & VM_EXEC);

        pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
        if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
                addr = (unsigned long) page_address(page);
                if (exec)
                        flush_data_cache_page(addr);
                clear_bit(PG_arch_1, &page->flags);
        }
}
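
/*
 * For context, a hedged sketch (kept under "#if 0") of the deferred-flush
 * pattern __update_cache() completes: an architecture's flush_dcache_page()
 * can mark a page-cache page as dirtied in the D-cache by setting PG_arch_1,
 * and __update_cache() above then performs the real flush the first time the
 * page is mapped into an executable user mapping.  The body below is
 * illustrative only; it is not necessarily what this port's
 * flush_dcache_page() actually does.
 */
#if 0
void flush_dcache_page(struct page *page)
{
        /* Defer the flush; __update_cache() will perform it lazily. */
        if (page_mapping(page))
                set_bit(PG_arch_1, &page->flags);
}
#endif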

static inline void setup_protection_map(void)
{
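        /*
         * protection_map[] is indexed by the low four bits of vm_flags:
         * bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
         * bit 3 = VM_SHARED.  Entries 0-7 are the private (copy-on-write)
         * mappings, entries 8-15 the shared ones.
         */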
        protection_map[0] = PAGE_NONE;
        protection_map[1] = PAGE_READONLY;
        protection_map[2] = PAGE_COPY;
        protection_map[3] = PAGE_COPY;
        protection_map[4] = PAGE_READONLY;
        protection_map[5] = PAGE_READONLY;
        protection_map[6] = PAGE_COPY;
        protection_map[7] = PAGE_COPY;
        protection_map[8] = PAGE_NONE;
        protection_map[9] = PAGE_READONLY;
        protection_map[10] = PAGE_SHARED;
        protection_map[11] = PAGE_SHARED;
        protection_map[12] = PAGE_READONLY;
        protection_map[13] = PAGE_READONLY;
        protection_map[14] = PAGE_SHARED;
        protection_map[15] = PAGE_SHARED;
}
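
/*
 * For reference, a simplified sketch (under "#if 0") of how the core mm
 * consumes this table: vm_get_page_prot() in mm/mmap.c looks the pgprot_t
 * up by the access bits of vm_flags.  The real helper also merges in
 * architecture-specific bits; this shows only the essential idea.
 */
#if 0
pgprot_t example_vm_get_page_prot(unsigned long vm_flags)
{
        return protection_map[vm_flags &
                        (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
#endif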

void __devinit cpu_cache_init(void)
{
        setup_protection_map();
}

void flush_icache_all(void)
{
        __asm__ __volatile__(
        "la r8, flush_icache_all\n"
        "cache 0x10, [r8, 0]\n"
        "nop\nnop\nnop\nnop\nnop\nnop\n"
        : : : "r8");
}

void flush_dcache_all(void)
{
        __asm__ __volatile__(
        "la r8, flush_dcache_all\n"
        "cache 0x1f, [r8, 0]\n"
        "nop\nnop\nnop\nnop\nnop\nnop\n"
        "cache 0x1a, [r8, 0]\n"
        "nop\nnop\nnop\nnop\nnop\nnop\n"
        : : : "r8");
}

void flush_cache_all(void)
{
        __asm__ __volatile__(
        "la r8, flush_cache_all\n"
        "cache 0x10, [r8, 0]\n"
        "nop\nnop\nnop\nnop\nnop\nnop\n"
        "cache 0x1f, [r8, 0]\n"
        "nop\nnop\nnop\nnop\nnop\nnop\n"
        "cache 0x1a, [r8, 0]\n"
        "nop\nnop\nnop\nnop\nnop\nnop\n"
        : : : "r8");
}

void flush_cache_mm(struct mm_struct *mm)
{
        if (!(mm->context))
                return;
        flush_cache_all();
}

/*
 * Flushing a range precisely can take a long time.  We check whether
 * each page in the range is present; if it is, we flush the part of
 * the range that lies within that page.  Be careful: the range may
 * cross a page boundary, with one page present and the other not.
 */
/*
 * The interface is provided in hopes that the port can find
 * a suitably efficient method for removing multiple page
 * sized regions from the cache.
 */
void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int exec = vma->vm_flags & VM_EXEC;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        if (!(mm->context))
                return;

        while (start <= end) {
                unsigned long tmpend;

                pgdp = pgd_offset(mm, start);
                pudp = pud_offset(pgdp, start);
                pmdp = pmd_offset(pudp, start);
                ptep = pte_offset(pmdp, start);

                if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
                        start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
                        continue;
                }
                tmpend = (start | (PAGE_SIZE - 1)) > end ?
                                 end : (start | (PAGE_SIZE - 1));

                flush_dcache_range(start, tmpend);
                if (exec)
                        flush_icache_range(start, tmpend);
                start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
        }
}
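
/*
 * Worked example of the per-page clipping above (added for illustration,
 * assuming PAGE_SIZE == 4096): flushing start = 0x1ff0, end = 0x2010 takes
 * two passes:
 *   pass 1: tmpend = (0x1ff0 | 0xfff) = 0x1fff, not past end -> flush
 *           0x1ff0..0x1fff, then start = (0x1ff0 + 0x1000) & ~0xfff = 0x2000
 *   pass 2: tmpend = (0x2000 | 0xfff) = 0x2fff, past end -> flush
 *           0x2000..0x2010, then start = 0x3000 and the loop ends
 * so a range that crosses a page boundary is handled one page at a time,
 * and pages whose PTE is not present are simply skipped.
 */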

void flush_cache_page(struct vm_area_struct *vma,
                unsigned long addr, unsigned long pfn)
{
        int exec = vma->vm_flags & VM_EXEC;
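        /*
         * 0xa0000000 appears to be the base of the kernel's direct-mapped
         * segment on this port, so kaddr is the kernel-space alias of the
         * page identified by pfn.
         */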
        unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

        flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

        if (exec)
                flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}

/*
 * Make a freshly written signal trampoline at addr visible to instruction
 * fetch: the cache ops cover the lines containing the two instruction
 * words at addr and addr + 4.
 */
void flush_cache_sigtramp(unsigned long addr)
{
        __asm__ __volatile__(
        "cache 0x02, [%0, 0]\n"
        "nop\nnop\nnop\nnop\nnop\n"
        "cache 0x02, [%0, 0x4]\n"
        "nop\nnop\nnop\nnop\nnop\n"

        "cache 0x0d, [%0, 0]\n"
        "nop\nnop\nnop\nnop\nnop\n"
        "cache 0x0d, [%0, 0x4]\n"
        "nop\nnop\nnop\nnop\nnop\n"

        "cache 0x1a, [%0, 0]\n"
        "nop\nnop\nnop\nnop\nnop\n"
        : : "r" (addr));
}

/*
 * 1. Write back and invalidate the D-cache lines in the range.
 * 2. Drain the write buffer.
 * The range must be smaller than PAGE_SIZE.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
        int size, i;

        /* Round the range out to whole cache lines. */
        start = start & ~(L1_CACHE_BYTES - 1);
        end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
        size = end - start;
        /* Flush the D-cache lines back to RAM and invalidate them. */
        for (i = 0; i < size; i += L1_CACHE_BYTES) {
                __asm__ __volatile__(
                "cache 0x0e, [%0, 0]\n"
                "nop\nnop\nnop\nnop\nnop\n"
                "cache 0x1a, [%0, 0]\n"
                "nop\nnop\nnop\nnop\nnop\n"
                : : "r" (start));
                start += L1_CACHE_BYTES;
        }
}
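
/*
 * Worked example (added for illustration, assuming L1_CACHE_BYTES == 32):
 * flush_dcache_range(0x1004, 0x1058) rounds the range out to 0x1000..0x1060,
 * so the three lines at 0x1000, 0x1020 and 0x1040 are written back,
 * invalidated, and the write buffer drained after each.
 */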

void flush_icache_range(unsigned long start, unsigned long end)
{
        int size, i;

        /* Round the range out to whole cache lines. */
        start = start & ~(L1_CACHE_BYTES - 1);
        end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

        size = end - start;
        /* Invalidate the I-cache lines in the range. */
        for (i = 0; i < size; i += L1_CACHE_BYTES) {
                __asm__ __volatile__(
                "cache 0x02, [%0, 0]\n"
                "nop\nnop\nnop\nnop\nnop\n"
                : : "r" (start));
                start += L1_CACHE_BYTES;
        }
}
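
/*
 * Illustrative usage sketch (under "#if 0", not part of the original file):
 * code that writes an instruction into memory must push the new bytes out
 * of the D-cache and then discard any stale copy from the I-cache before
 * executing them; flush_icache_range() here only does the latter.  The
 * helper below is hypothetical and patches a single instruction word, which
 * keeps the range well under PAGE_SIZE as flush_dcache_range() requires.
 */
#if 0
static void example_patch_insn(unsigned long addr, unsigned int insn)
{
        *(unsigned int *) addr = insn;
        /* Write the new instruction back to memory... */
        flush_dcache_range(addr, addr + sizeof(insn));
        /* ...then drop any stale copy from the I-cache. */
        flush_icache_range(addr, addr + sizeof(insn));
}
#endif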