/* linux/include/asm-generic/cacheflush.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

/*
 * The cache doesn't need to be flushed when TLB entries change because
 * the cache is mapped to physical memory, not virtual memory.
 */
  14static inline void flush_cache_all(void)
  15{
  16}
  17
  18static inline void flush_cache_mm(struct mm_struct *mm)
  19{
  20}
  21
  22static inline void flush_cache_dup_mm(struct mm_struct *mm)
  23{
  24}
  25
  26static inline void flush_cache_range(struct vm_area_struct *vma,
  27                                     unsigned long start,
  28                                     unsigned long end)
  29{
  30}
  31
  32static inline void flush_cache_page(struct vm_area_struct *vma,
  33                                    unsigned long vmaddr,
  34                                    unsigned long pfn)
  35{
  36}
  37
  38static inline void flush_dcache_page(struct page *page)
  39{
  40}
  41
  42static inline void flush_dcache_mmap_lock(struct address_space *mapping)
  43{
  44}
  45
  46static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
  47{
  48}
  49
  50static inline void flush_icache_range(unsigned long start, unsigned long end)
  51{
  52}
  53
  54static inline void flush_icache_page(struct vm_area_struct *vma,
  55                                     struct page *page)
  56{
  57}
  58
  59static inline void flush_icache_user_range(struct vm_area_struct *vma,
  60                                           struct page *page,
  61                                           unsigned long addr, int len)
  62{
  63}
  64
  65static inline void flush_cache_vmap(unsigned long start, unsigned long end)
  66{
  67}
  68
  69static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
  70{
  71}
  72
  73#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
  74        do { \
  75                memcpy(dst, src, len); \
  76                flush_icache_user_range(vma, page, vaddr, len); \
  77        } while (0)
  78#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
  79        memcpy(dst, src, len)

#endif /* __ASM_CACHEFLUSH_H */