linux/arch/score/mm/cache.c
/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>

#include <asm/mmu_context.h>

/*
 * Just flush the entire D-cache for this page.
 * The caller must ensure the page contains no instructions, because
 * this function does not touch the I-cache.
 * addr must be cache-line aligned.
 */
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	/* Write back and invalidate every D-cache line in the page. */
	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\n"
		: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

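/*
 * flush_dcache_page - keep the D-cache coherent with a page-cache page.
 *
 * If the page lives only in the page cache and has no user mappings,
 * defer the work: mark it PG_dcache_dirty and let __update_cache() flush
 * it when it is actually mapped.  Otherwise flush the D-cache lines that
 * cover the page's kernel mapping right away.
 */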
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

/*
 * Called by update_mmu_cache() when a PTE is installed.  If a deferred
 * D-cache flush is pending for the page (PG_dcache_dirty), flush its
 * D-cache lines when the new mapping is executable, and clear the
 * pending flag.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
		pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_dcache_dirty, &(page)->flags);
	}
}

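/*
 * Fill in protection_map[], which vm_get_page_prot() uses to translate a
 * VMA's VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits into a page protection.
 * Entries 0-7 cover private (copy-on-write) mappings, entries 8-15 the
 * shared ones.
 */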
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

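/*
 * Cache initialization hook; on this port the only work needed is to
 * set up protection_map[].
 */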
void cpu_cache_init(void)
{
	setup_protection_map();
}

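/*
 * Invalidate the whole I-cache with a single cache op (0x10), using the
 * function's own address as the operand.
 */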
void flush_icache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_icache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}

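/*
 * Flush the entire D-cache (cache op 0x1f), then drain the write
 * buffer (0x1a).
 */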
void flush_dcache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_dcache_all\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}

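/*
 * Flush both caches: invalidate the I-cache (0x10), flush the D-cache
 * (0x1f) and drain the write buffer (0x1a).
 */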
void flush_cache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_cache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}

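/*
 * Flush the caches for a whole address space.  An mm that was never
 * assigned an MMU context has not run yet, so there is nothing cached
 * for it; otherwise fall back to flushing everything.
 */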
void flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	flush_cache_all();
}

/*
 * Flushing a range precisely can take a long time, and parts of the
 * range may not be mapped.  So walk the range page by page: check
 * whether each page is present, and only flush the portion of the
 * range that falls within a present page.  Note that the range may
 * span pages where one is present and the next is not.
 *
 * The interface is provided in the hope that the port can find a
 * suitably efficient method for removing multiple page-sized regions
 * from the cache.
 */
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	while (start <= end) {
		unsigned long tmpend;

		/* Walk the page tables to see whether this page is present. */
		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}

		/* Flush only up to the end of this page, or to 'end'. */
		tmpend = (start | (PAGE_SIZE - 1)) > end ?
				 end : (start | (PAGE_SIZE - 1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}

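/*
 * Flush one page of a user mapping, addressed through its kernel
 * address (0xa0000000 | physical address of the page).  The D-cache
 * range is always flushed; the I-cache range only for executable
 * mappings.
 */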
void flush_cache_page(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}

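/*
 * Make a just-written signal trampoline at addr visible to instruction
 * fetch.  Cache ops are issued for the two words at addr and addr + 4:
 * 0x02 invalidates the I-cache line, 0x0d appears to be the matching
 * D-cache operation, and 0x1a then drains the write buffer.
 */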
void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
	"cache 0x02, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x02, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x0d, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x0d, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x1a, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	: : "r" (addr));
}

/*
 * For each D-cache line in the range:
 *  1. write it back and invalidate it
 *  2. drain the write buffer
 * The range must be smaller than PAGE_SIZE.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* flush dcache to ram, and invalidate dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}

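/*
 * Invalidate the I-cache lines for the given range; start and end are
 * both rounded down to a cache-line boundary first.
 */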
void flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);

	size = end - start;
	/* invalidate icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}