   1/* SPDX-License-Identifier: GPL-2.0 */
   2
   3#ifndef __ABI_CSKY_CACHEFLUSH_H
   4#define __ABI_CSKY_CACHEFLUSH_H
   5
   6#include <linux/mm.h>
   7#include <asm/string.h>
   8#include <asm/cache.h>
   9
/* This arch provides its own flush_dcache_page() (no generic no-op). */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

/*
 * No per-mm/per-page flush is implemented here: each of these hooks
 * falls back to a whole-cache write-back + invalidate.
 */
#define flush_cache_mm(mm)			dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

/* Serialize against page-cache updates of mapping->i_pages. */
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/*
 * Make kernel stores through a vmap alias visible before the pages are
 * accessed via their other mapping.  addr/size are ignored: the whole
 * D-cache is written back and invalidated (no ranged flush available).
 */
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
/*
 * Discard any stale D-cache lines for a vmap range before reading data
 * written via another mapping.  addr/size are ignored: the whole
 * D-cache is written back and invalidated (wbinv, not a pure
 * invalidate, so no dirty data is lost).
 */
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
  29
#define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Flush an anonymous page before the kernel accesses it via get_user_pages.
 * Only anonymous pages need work here; the flush is a whole-cache
 * write-back + invalidate (vma/vmaddr are unused — no ranged flush).
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}
  37
/*
 * cache_wbinv_range(start, end) operates on the current MM context and
 * would therefore be wrong whenever current->mm != vma->vm_mm.  Fall
 * back to cache_wbinv_all() for now; TODO: replace with a proper
 * ranged flush in the future.
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vunmap(start, end)		cache_wbinv_all()
  46#define flush_icache_page(vma, page)            do {} while (0);
  47#define flush_icache_range(start, end)          cache_wbinv_range(start, end)
  48#define flush_icache_mm_range(mm, start, end)   cache_wbinv_range(start, end)
  49#define flush_icache_deferred(mm)               do {} while (0);
  50
/*
 * Copy data out of a user page (kernel-side read); no cache
 * maintenance is performed for the read path.
 */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)

/*
 * Copy data into a user page, then write back + invalidate the whole
 * cache so the user mapping (including the I-cache) sees the new data.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)
  61
  62#endif /* __ABI_CSKY_CACHEFLUSH_H */
  63