#ifndef __NDS32_CACHEFLUSH_H__
#define __NDS32_CACHEFLUSH_H__

#include <linux/mm.h>

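/*
 * Arch-private page flag, aliasing the generic PG_arch_1 bit.  It is
 * typically set to mark a page whose kernel-side D-cache lines may still
 * be dirty, so that the actual flush can be deferred until the page is
 * mapped into user space.
 */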
#define PG_dcache_dirty PG_arch_1

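/*
 * With CONFIG_CPU_CACHE_ALIASING the D-cache is virtually indexed and can
 * alias (the same physical page may live in several cache locations), so
 * this architecture provides real implementations of the cache
 * maintenance hooks below.  Without it, the mostly no-op generic versions
 * from <asm-generic/cacheflush.h> are used instead (see the #else branch
 * at the end of this file).
 */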
#ifdef CONFIG_CPU_CACHE_ALIASING
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_dup_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn);
void flush_cache_kmaps(void);
void flush_cache_vmap(unsigned long start, unsigned long end);
void flush_cache_vunmap(unsigned long start, unsigned long end);

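/*
 * Advertise to the generic code that flush_dcache_page() is a real
 * operation here rather than the default no-op, together with the user
 * page copy helpers below.
 */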
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len);

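/*
 * Anonymous pages can alias in the D-cache as well, so flush_anon_page()
 * is provided and announced via ARCH_HAS_FLUSH_ANON_PAGE.
 */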
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr);

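/*
 * Likewise for kernel-only mappings: flush_kernel_dcache_page() and the
 * vmap flush/invalidate helpers do real work here instead of the generic
 * no-ops.
 */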
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page(struct page *page);
void flush_kernel_vmap_range(void *addr, int size);
void invalidate_kernel_vmap_range(void *addr, int size);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
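/*
 * Illustrative sketch (not part of this header): after the kernel writes
 * instructions into memory, the range must be made coherent before it is
 * executed; dst, insns and len below are hypothetical names.
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */

/*
 * flush_dcache_mmap_lock()/unlock() wrap the IRQ-safe lock of the
 * mapping's i_pages xarray; the aliasing flush code is expected to hold
 * it while walking the user mappings of a page-cache page.
 */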
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)

#else
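/*
 * Non-aliasing caches need none of the above; fall back to the generic,
 * mostly no-op definitions.
 */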
#include <asm-generic/cacheflush.h>
#endif /* CONFIG_CPU_CACHE_ALIASING */

#endif /* __NDS32_CACHEFLUSH_H__ */