#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>
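
/*
 * Page-granular cache maintenance helpers, implemented in the
 * architecture's cache handling code: flush a page from the data cache
 * and invalidate a page in the instruction cache on the local cpu.
 */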
extern void local_dcache_page_flush(struct page *page);
extern void local_icache_page_inv(struct page *page);
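
/*
 * Data cache flushes always run on the local cpu.  Instruction cache
 * invalidation has to reach every cpu on SMP, so it is routed through
 * the smp_icache_page_inv() broadcast helper there.
 */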
#ifndef CONFIG_SMP
#define dcache_page_flush(page) local_dcache_page_flush(page)
#define icache_page_inv(page) local_icache_page_inv(page)
#else  /* CONFIG_SMP */
#define dcache_page_flush(page) local_dcache_page_flush(page)
#define icache_page_inv(page) smp_icache_page_inv(page)
extern void smp_icache_page_inv(struct page *page);
#endif /* CONFIG_SMP */
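
/*
 * Bring the instruction cache back in sync after code has been written
 * to a page.  The data cache flush is skipped when the data cache is
 * configured write-through (memory is already up to date in that case);
 * the instruction cache invalidation is always performed.
 */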
static inline void sync_icache_dcache(struct page *page)
{
	if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))
		dcache_page_flush(page);
	icache_page_inv(page);
}
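
/*
 * Pages with PG_dc_clean set are considered already synchronized and
 * need no further flushing until they are dirtied again;
 * flush_dcache_page() below clears the bit to force a fresh sync.
 */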
#define PG_dc_clean PG_arch_1

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	clear_bit(PG_dc_clean, &page->flags);
}
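
/*
 * A minimal sketch (not taken from this header) of how a caller might
 * combine PG_dc_clean with sync_icache_dcache() when a page is mapped
 * executable, assuming it has the vma and page at hand:
 *
 *	if ((vma->vm_flags & VM_EXEC) &&
 *	    !test_bit(PG_dc_clean, &page->flags)) {
 *		sync_icache_dcache(page);
 *		set_bit(PG_dc_clean, &page->flags);
 *	}
 */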
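
/*
 * The remaining generic cache maintenance interfaces are no-ops on this
 * architecture; only the page-level helpers above do real work.
 */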
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
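
/*
 * Used by generic code (e.g. ptrace via access_process_vm) when the
 * kernel writes into a user page: after copying, make sure the
 * instruction cache sees the new contents if the mapping is executable.
 * Reads need no extra cache maintenance.
 */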
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		if (vma->vm_flags & VM_EXEC)			\
			sync_icache_dcache(page);		\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)

#endif /* __ASM_CACHEFLUSH_H */