#include <linux/highmem.h>
#include <linux/unistd.h>

#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/syscalls.h>

/*
 * Flushing more than this in a single cacheflush() call requires
 * CAP_SYS_ADMIN.
 */
#define CACHEFLUSH_MAX_LEN	1024

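/*
 * D-cache maintenance helpers below: "invalidate" discards cached
 * lines without writing them back, "clean" writes dirty lines back to
 * memory but keeps them cached, and "flush" does both (write back,
 * then discard).
 */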
void invalidate_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz, mask;

	linesz = boot_cpu_data.dcache.linesz;
	mask = linesz - 1;

	/*
	 * If the region does not start or end on a cache line boundary,
	 * flush (write back and invalidate) the partial boundary lines
	 * instead of just invalidating them, so that valid data sharing
	 * those lines is never discarded.
	 */
	begin = (unsigned long)start;
	end = begin + size;

	if (begin & mask) {
		flush_dcache_line(start);
		begin += linesz;
	}
	if (end & mask) {
		flush_dcache_line((void *)end);
		end &= ~mask;
	}

	/* The remaining lines are fully covered and may simply be invalidated. */
	for (v = begin; v < end; v += linesz)
		invalidate_dcache_line((void *)v);
	flush_write_buffer();
}

void clean_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		clean_dcache_line((void *)v);
	flush_write_buffer();
}

void flush_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		flush_dcache_line((void *)v);
	flush_write_buffer();
}

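/*
 * Discard any icache lines covering [start, start + size).  The icache
 * never holds dirty data, so rounding the region out to whole lines is
 * always safe and no write buffer flush is needed.
 */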
void invalidate_icache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.icache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		invalidate_icache_line((void *)v);
}

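/*
 * Write back the dcache and invalidate the icache for [start, end) so
 * that newly written instructions become visible to instruction
 * fetches.  Both addresses are expected to be cache line aligned; the
 * dcache line size is used as the step for both operations.
 */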
static inline void __flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long v, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	for (v = start; v < end; v += linesz) {
		clean_dcache_line((void *)v);
		invalidate_icache_line((void *)v);
	}

	flush_write_buffer();
}

/*
 * Make the instruction stream coherent with recent data writes to
 * [start, end), e.g. after new kernel code such as a freshly loaded
 * module has been written to memory.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long linesz;

	linesz = boot_cpu_data.dcache.linesz;
	__flush_icache_range(start & ~(linesz - 1),
			     (end + linesz - 1) & ~(linesz - 1));
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * Called when a page is about to be mapped into a process; if the
 * mapping is executable, make sure the icache holds no stale
 * instructions for it.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	if (vma->vm_flags & VM_EXEC) {
		void *v = page_address(page);

		__flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE);
	}
}

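/*
 * cacheflush(2): cache maintenance on behalf of userspace.  Only
 * CACHE_IFLUSH is implemented, which makes the icache coherent with
 * recent data writes to [addr, addr + len).
 *
 * A rough userspace sketch (assuming the CACHE_IFLUSH constant from
 * <asm/cachectl.h> and the __NR_cacheflush syscall number), run after
 * copying instructions into an executable mapping at buf and before
 * jumping to them:
 *
 *	syscall(__NR_cacheflush, CACHE_IFLUSH, buf, code_len);
 */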
asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
{
	int ret;

	if (len > CACHEFLUSH_MAX_LEN) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	}

	ret = -EFAULT;
	if (!access_ok(VERIFY_WRITE, addr, len))
		goto out;

	switch (operation) {
	case CACHE_IFLUSH:
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + len);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	return ret;
}

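/*
 * Used by access_process_vm() and friends (e.g. ptrace writing a
 * breakpoint into another task) to copy data into a user page through
 * a kernel mapping.  If the page may be executed, the icache must be
 * brought up to date as well.
 */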
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + len);
}