/* linux/include/asm-generic/cacheflush.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_GENERIC_CACHEFLUSH_H
   3#define _ASM_GENERIC_CACHEFLUSH_H
   4
   5struct mm_struct;
   6struct vm_area_struct;
   7struct page;
   8struct address_space;
   9
/*
 * The cache does not need to be flushed when TLB entries change, because
 * the cache is mapped to physical memory, not virtual memory.
 */
#ifndef flush_cache_all
/*
 * Flush the entire CPU cache.  Generic no-op fallback; an architecture
 * overrides it by defining flush_cache_all before this header is seen.
 */
static inline void flush_cache_all(void)
{
}
#endif
  19
#ifndef flush_cache_mm
/*
 * Flush caches for a whole address space.  No-op in the generic case
 * (physically mapped caches need no maintenance here).
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif
  25
#ifndef flush_cache_dup_mm
/*
 * Cache maintenance when an address space is duplicated
 * (presumably at fork — see Documentation/core-api/cachetlb.rst).
 * Generic no-op.
 */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif
  31
#ifndef flush_cache_range
/*
 * Flush caches for the user virtual address range [start, end) of @vma.
 * Generic no-op.
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif
  39
#ifndef flush_cache_page
/*
 * Flush caches for one user page: @vmaddr is its user virtual address,
 * @pfn its physical frame number.  Generic no-op.
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif
  47
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/*
 * No-op fallback for flush_dcache_page().  ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 * is defined to 0 so generic code can test whether the architecture
 * provides a real implementation.
 */
static inline void flush_dcache_page(struct page *page)
{
}
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
  54
  55
#ifndef flush_dcache_mmap_lock
/*
 * Generic no-op; paired with flush_dcache_mmap_unlock().  Architectures
 * with dcache aliasing take a real lock on @mapping here —
 * NOTE(review): exact serialization semantics are arch-defined.
 */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif
  61
#ifndef flush_dcache_mmap_unlock
/* Generic no-op; releases what flush_dcache_mmap_lock() took. */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif
  67
#ifndef flush_icache_range
/*
 * Make instructions written to [start, end) visible to the icache.
 * Generic no-op for coherent architectures.
 */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif
  73
#ifndef flush_icache_user_range
/* By default a user-space icache flush is the same as the kernel one. */
#define flush_icache_user_range flush_icache_range
#endif
  77
#ifndef flush_icache_page
/* Per-page icache maintenance for @page mapped into @vma; generic no-op. */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif
  84
#ifndef flush_icache_user_page
/*
 * Icache maintenance after writing @len bytes at user address @addr in
 * @page (used by copy_to_user_page() below).  Generic no-op.
 */
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif
  92
#ifndef flush_cache_vmap
/*
 * Cache maintenance for the range [start, end) around establishing a
 * kernel virtual mapping.  Generic no-op.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif
  98
#ifndef flush_cache_vunmap
/*
 * Counterpart of flush_cache_vmap() for tearing a mapping down.
 * Generic no-op.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif
 104
#ifndef copy_to_user_page
/*
 * Copy @len bytes from @src into a user page at @dst, then flush the
 * icache for that user range so any newly written instructions become
 * visible.  Multi-statement macro wrapped in do/while(0).
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif
 112
#ifndef copy_from_user_page
/*
 * Copy @len bytes out of a user page; a plain memcpy — no cache flush
 * is needed in the generic (physically mapped cache) case.
 */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif
 117
 118#endif /* _ASM_GENERIC_CACHEFLUSH_H */
 119