linux/arch/xtensa/mm/cache.c
/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

//#define printd(x...) printk(x)
#define printd(x...) do { } while (0)

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags
 * that can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a
 * dirty page.
 */
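/*
 * A worked example of the alias check used throughout this file
 * (assuming, say, an 8 KiB direct-mapped cache way and 4 KiB pages):
 * DCACHE_ALIAS_MASK then selects bit 12, so one physical page mapped
 * at user address 0x1000 and at kernel address 0x2000 occupies two
 * distinct sets of cache lines, because
 *
 *	((0x1000 ^ 0x2000) & 0x1000) != 0
 *
 * DCACHE_ALIAS_EQ(a, b) is true exactly when no such conflict exists.
 */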

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
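/*
 * Invalidate any cache lines the kernel holds for 'page' when the
 * kernel mapping has a different cache color than the user address
 * 'vaddr': lowmem pages are invalidated through the direct mapping,
 * highmem pages through a TLBTEMP alias of the page's physical color.
 */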
static inline void kmap_invalidate_coherent(struct page *page,
                                            unsigned long vaddr)
{
        if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
                unsigned long kvaddr;

                if (!PageHighMem(page)) {
                        kvaddr = (unsigned long)page_to_virt(page);

                        __invalidate_dcache_page(kvaddr);
                } else {
                        kvaddr = TLBTEMP_BASE_1 +
                                (page_to_phys(page) & DCACHE_ALIAS_MASK);

                        __invalidate_dcache_page_alias(kvaddr,
                                                       page_to_phys(page));
                }
        }
}

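/*
 * Return a kernel virtual address through which 'page' can be accessed
 * with the same cache color as the user mapping at 'vaddr'.  If a
 * TLBTEMP remap is needed, *paddr is set to the physical address to
 * map there; otherwise *paddr is zero and the direct mapping is used
 * as-is (clear_page_alias()/copy_page_alias() skip the remap then).
 */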
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
                                    unsigned long vaddr, unsigned long *paddr)
{
        if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
                *paddr = page_to_phys(page);
                return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
        } else {
                *paddr = 0;
                return page_to_virt(page);
        }
}

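/*
 * Clear a page that is about to be mapped at user address 'vaddr',
 * working through a kernel mapping of the user's cache color so the
 * zeroes are visible through the user mapping.
 */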
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long paddr;
        void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

        preempt_disable();
        kmap_invalidate_coherent(page, vaddr);
        set_bit(PG_arch_1, &page->flags);
        clear_page_alias(kvaddr, paddr);
        preempt_enable();
}

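/*
 * Copy 'src' to 'dst' for a user mapping at 'vaddr' (the copy-on-write
 * path, among others), again using color-matched kernel mappings for
 * both pages so no stale aliased lines are left behind.
 */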
void copy_user_highpage(struct page *dst, struct page *src,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long dst_paddr, src_paddr;
        void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
                                          &dst_paddr);
        void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
                                          &src_paddr);

        preempt_disable();
        kmap_invalidate_coherent(dst, vaddr);
        set_bit(PG_arch_1, &dst->flags);
        copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
        preempt_enable();
}

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

/*
 * This routine is called any time the kernel writes to a page-cache
 * page, or is about to read from one.
 */

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        /*
         * If we have a mapping but the page is not mapped to user-space
         * yet, we simply mark this page dirty and defer flushing the
         * caches until update_mmu_cache().
         */

        if (mapping && !mapping_mapped(mapping)) {
                if (!test_bit(PG_arch_1, &page->flags))
                        set_bit(PG_arch_1, &page->flags);
                return;

        } else {

                unsigned long phys = page_to_phys(page);
                unsigned long temp = page->index << PAGE_SHIFT;
                unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
                unsigned long virt;

                /*
                 * Flush the page in kernel space and user space.
                 * Note that we can omit this step if aliasing is not
                 * an issue, but we do have to synchronize I$ and D$
                 * if we have a mapping.
                 */

                if (!alias && !mapping)
                        return;

                virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(virt, phys);

                virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

                if (alias)
                        __flush_invalidate_dcache_page_alias(virt, phys);

                if (mapping)
                        __invalidate_icache_page_alias(virt, phys);
        }

        /* There shouldn't be an entry in the cache for this page anymore. */
}

/*
 * For now, flush the whole cache. FIXME: flush only the affected range?
 */

void local_flush_cache_range(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        __flush_invalidate_dcache_all();
        __invalidate_icache_all();
}

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
                            unsigned long pfn)
{
        /* Note that we have to use the 'alias' address to avoid multi-hit */

        unsigned long phys = page_to_phys(pfn_to_page(pfn));
        unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

        __flush_invalidate_dcache_page_alias(virt, phys);
        __invalidate_icache_page_alias(virt, phys);
}

#endif

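/*
 * Called by the generic MM code after a PTE has been installed for
 * 'addr'.  This is where the work deferred via PG_arch_1 happens:
 * with D-cache aliasing, a page marked dirty is flushed under both
 * its kernel and user colors; without aliasing, executable pages get
 * their I$ and D$ synchronized and are then marked clean.
 */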
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);

        /* Invalidate old entry in TLBs */

        flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

        if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
                unsigned long phys = page_to_phys(page);
                unsigned long tmp;

                tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(tmp, phys);
                tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(tmp, phys);
                __invalidate_icache_page_alias(tmp, phys);

                clear_bit(PG_arch_1, &page->flags);
        }
#else
        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
            && (vma->vm_flags & VM_EXEC) != 0) {
                /* kmap_atomic() returns a kernel virtual address. */
                unsigned long vaddr = (unsigned long)kmap_atomic(page);
                __flush_dcache_page(vaddr);
                __invalidate_icache_page(vaddr);
                set_bit(PG_arch_1, &page->flags);
                kunmap_atomic((void *)vaddr);
        }
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

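/*
 * Write into a user page on behalf of another process (e.g. ptrace
 * via access_process_vm()): invalidate the user's aliased view first,
 * copy through the kernel mapping, write the result back to memory,
 * and resynchronize the I-cache for executable mappings.
 */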
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long vaddr, void *dst, const void *src,
                unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /* Flush and invalidate user page if aliased. */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(t, phys);
        }

        /* Copy data */

        memcpy(dst, src, len);

        /*
         * Flush and invalidate kernel page if aliased and synchronize
         * data and instruction caches for executable pages.
         */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                __flush_invalidate_dcache_range((unsigned long) dst, len);
                if ((vma->vm_flags & VM_EXEC) != 0)
                        __invalidate_icache_page_alias(t, phys);

        } else if ((vma->vm_flags & VM_EXEC) != 0) {
                __flush_dcache_range((unsigned long) dst, len);
                __invalidate_icache_range((unsigned long) dst, len);
        }
}

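/*
 * Read from a user page on behalf of another process: write back the
 * user's aliased lines so the kernel mapping sees current data, then
 * copy through the kernel mapping.  No I-cache work is needed for a
 * read.
 */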
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long vaddr, void *dst, const void *src,
                unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /*
         * Flush user page if aliased.
         * (Note: a simple flush would be sufficient.)
         */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
                __flush_invalidate_dcache_page_alias(t, phys);
        }

        memcpy(dst, src, len);
}

#endif