linux/arch/nds32/mm/cacheflush.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/highmem.h>	/* kmap_atomic()/kunmap_atomic() below */
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

#ifndef CONFIG_CPU_CACHE_ALIASING
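/*
 * With a non-aliasing (physically indexed) D-cache, a newly installed
 * PTE only needs a TLB pre-load, plus a writeback/invalidate of the
 * kernel mapping when the page was marked PG_dcache_dirty or the VMA
 * is executable.
 */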
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(*pte);
        unsigned long flags;

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }
        page = pfn_to_page(pfn);

        if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
            (vma->vm_flags & VM_EXEC)) {
                if (!PageHighMem(page)) {
                        cpu_cache_wbinval_page((unsigned long)page_address(page),
                                               vma->vm_flags & VM_EXEC);
                } else {
                        unsigned long kaddr = (unsigned long)kmap_atomic(page);

                        cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
                        kunmap_atomic((void *)kaddr);
                }
        }
}
#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

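/*
 * Nonzero when the two addresses differ in the cache-colour bits below
 * SHMLBA, i.e. when user and kernel mappings of the same page can land
 * in different D-cache sets and alias.
 */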
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
        return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

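/*
 * kremap0()/kremap1() build temporary kernel mappings in two fixed
 * windows (BASE_ADDR0/BASE_ADDR1) whose cache colour matches the user
 * address, by writing a locked TLB entry directly; kunmap01() unlocks
 * and invalidates such an entry again.  Callers keep interrupts
 * disabled between map and unmap, as the locked entry is per-CPU.
 */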
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
        kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
        __nds32__tlbop_unlk(kaddr);
        __nds32__tlbop_inv(kaddr);
        __nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
        kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

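/* With an aliasing D-cache, flushing a whole mm means flushing everything. */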
void flush_cache_mm(struct mm_struct *mm)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
        local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}

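/*
 * For ranges beyond eight pages a full flush is assumed cheaper than
 * walking the range page by page.
 */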
void flush_cache_range(struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
{
        unsigned long flags;

        if ((end - start) > 8 * PAGE_SIZE) {
                cpu_dcache_wbinval_all();
                if (vma->vm_flags & VM_EXEC)
                        cpu_icache_inval_all();
                return;
        }
        local_irq_save(flags);
        while (start < end) {
                if (va_present(vma->vm_mm, start))
                        cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
                start += PAGE_SIZE;
        }
        local_irq_restore(flags);
}

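/*
 * Flush a single user page through a colour-matched temporary mapping,
 * so the lines indexed by the user address are the ones written back.
 */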
void flush_cache_page(struct vm_area_struct *vma,
                      unsigned long addr, unsigned long pfn)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(addr, pfn << PAGE_SHIFT);
        cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
        kunmap01(vto);
        local_irq_restore(flags);
}

void flush_cache_vmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

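/*
 * copy_user_page()/clear_user_page() work on kernel mappings supplied
 * by the caller: flush the user alias before touching the data and the
 * kernel alias afterwards, so neither side keeps stale lines.
 */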
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        copy_page(vto, vfrom);
        cpu_dcache_wbinval_page((unsigned long)vto);
        cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        clear_page(addr);
        cpu_dcache_wbinval_page((unsigned long)addr);
        cpu_icache_inval_page((unsigned long)addr);
}

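/*
 * Copy a page for a user mapping: write back the source's kernel lines
 * if they alias the user address, then copy through two colour-matched
 * temporary mappings so no stale aliases remain.
 */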
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long vto, vfrom, flags, kfrom, pfrom, pto;

        kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
        pto = page_to_phys(to);
        pfrom = page_to_phys(from);

        local_irq_save(flags);
        if (aliasing(vaddr, kfrom))
                cpu_dcache_wb_page(kfrom);
        vto = kremap0(vaddr, pto);
        vfrom = kremap1(vaddr, pfrom);
        copy_page((void *)vto, (void *)vfrom);
        kunmap01(vfrom);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);

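/*
 * Zero a page for a user mapping: invalidate any aliased kernel lines
 * first, then clear the page through a colour-matched mapping.
 */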
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long vto, flags, kto;

        kto = ((unsigned long)page_address(page) & PAGE_MASK);

        local_irq_save(flags);
        if (kto != 0 && aliasing(kto, vaddr)) {
                cpu_dcache_inval_page(kto);
                cpu_icache_inval_page(kto);
        }
        vto = kremap0(vaddr, page_to_phys(page));
        clear_page((void *)vto);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);

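/*
 * If the page sits only in the page cache and has no user mappings,
 * defer the flush: mark it PG_dcache_dirty and let update_mmu_cache()
 * do the work when a mapping appears.  Otherwise flush the kernel
 * alias now and, if the user colour differs, the user alias as well.
 */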
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
        } else {
                unsigned long kaddr, flags;

                kaddr = (unsigned long)page_address(page);
                local_irq_save(flags);
                cpu_dcache_wbinval_page(kaddr);
                if (mapping) {
                        unsigned long vaddr, kto;

                        vaddr = page->index << PAGE_SHIFT;
                        if (aliasing(vaddr, kaddr)) {
                                kto = kremap0(vaddr, page_to_phys(page));
                                cpu_dcache_wbinval_page(kto);
                                kunmap01(kto);
                        }
                }
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);

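/*
 * Write into a user page (e.g. via ptrace): copy through a
 * colour-matched mapping and, for executable VMAs, flush the touched
 * lines so the I-cache sees the new instructions.
 */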
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long line_size, start, end, vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC) {
                line_size = L1_cache_info[DCACHE].line_size;
                start = (unsigned long)dst & ~(line_size - 1);
                end = ((unsigned long)dst + len + line_size - 1) &
                      ~(line_size - 1);
                cpu_cache_wbinval_range(start, end, 1);
        }
        kunmap01(vto);
        local_irq_restore(flags);
}

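/* Read from a user page through a colour-matched temporary mapping. */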
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        kunmap01(vto);
        local_irq_restore(flags);
}

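/*
 * Anonymous pages never reach flush_dcache_page(), so flush the user
 * alias here before the kernel accesses the page on its behalf.
 */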
void flush_anon_page(struct vm_area_struct *vma,
                     struct page *page, unsigned long vaddr)
{
        unsigned long kaddr, flags, ktmp;

        if (!PageAnon(page))
                return;

        if (vma->vm_mm != current->active_mm)
                return;

        local_irq_save(flags);
        if (vma->vm_flags & VM_EXEC)
                cpu_icache_inval_page(vaddr & PAGE_MASK);
        kaddr = (unsigned long)page_address(page);
        if (aliasing(vaddr, kaddr)) {
                ktmp = kremap0(vaddr, page_to_phys(page));
                cpu_dcache_wbinval_page(ktmp);
                kunmap01(ktmp);
        }
        local_irq_restore(flags);
}

void flush_kernel_dcache_page(struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

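/*
 * Make freshly written instructions visible: write back the D-cache
 * and invalidate the I-cache over the line-aligned range.
 */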
void flush_icache_range(unsigned long start, unsigned long end)
{
        unsigned long line_size, flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        local_irq_save(flags);
        cpu_cache_wbinval_range(start, end, 1);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_cache_wbinval_page((unsigned long)page_address(page),
                               vma->vm_flags & VM_EXEC);
        local_irq_restore(flags);
}

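/*
 * Aliasing variant of update_mmu_cache(): pre-load the TLB entry for
 * the new translation, then flush the kernel alias if the page was
 * dirtied through the page cache or the VMA is executable.
 */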
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long flags;
        unsigned long pfn = pte_pfn(*pte);

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }

        page = pfn_to_page(pfn);
        if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
            (vma->vm_flags & VM_EXEC)) {
                local_irq_save(flags);
                cpu_dcache_wbinval_page((unsigned long)page_address(page));
                local_irq_restore(flags);
        }
}
#endif