1
2#ifndef _LINUX_HIGHMEM_H
3#define _LINUX_HIGHMEM_H
4
5#include <linux/fs.h>
6#include <linux/kernel.h>
7#include <linux/bug.h>
8#include <linux/cacheflush.h>
9#include <linux/mm.h>
10#include <linux/uaccess.h>
11#include <linux/hardirq.h>
12
13#include "highmem-internal.h"
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
/**
 * kmap - Map a page for long term use
 * @page: Pointer to the page to be mapped
 *
 * Returns: The kernel virtual address of the mapping.
 *
 * NOTE(review): the implementation is provided by "highmem-internal.h"
 * (included above). Per the kernel highmem documentation kmap() may sleep
 * and the mapping must be released with kunmap() — confirm against that
 * file for this configuration.
 */
static inline void *kmap(struct page *page);
37
38
39
40
41
42
43
44
/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(); implementation in "highmem-internal.h".
 */
static inline void kunmap(struct page *page);
46
47
48
49
50
51
52
/*
 * kmap_to_page - return the struct page backing a kmap'ed kernel address.
 * @addr: address obtained from a kmap-family mapping function.
 * Implementation in "highmem-internal.h".
 */
static inline struct page *kmap_to_page(void *addr);
54
55
56
57
58
/*
 * kmap_flush_unused - flush stale kmap entries that are no longer in use.
 * Implementation in "highmem-internal.h" (no-op without HIGHMEM).
 */
static inline void kmap_flush_unused(void);
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The kernel virtual address of the mapping.
 *
 * Per the kernel highmem documentation the mapping is CPU-local, nestable,
 * and must be released with kunmap_local() in reverse (LIFO) order.
 * Implementation in "highmem-internal.h".
 */
static inline void *kmap_local_page(struct page *page);
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
/**
 * kmap_local_folio - Map a byte offset within a folio for temporary usage
 * @folio: The folio containing the data to map
 * @offset: Byte offset within the folio
 *
 * Returns: The kernel virtual address of @offset within @folio.
 *
 * Same CPU-local, LIFO-ordered semantics as kmap_local_page(); release
 * with kunmap_local(). Implementation in "highmem-internal.h".
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
/**
 * kmap_atomic - Atomically map a page for temporary usage (deprecated
 * in favor of kmap_local_page() per kernel highmem documentation)
 * @page: Pointer to the page to be mapped
 *
 * Returns: The kernel virtual address of the mapping.
 * Implementation in "highmem-internal.h"; pair with kunmap_atomic().
 */
static inline void *kmap_atomic(struct page *page);
181
182
/* Highmem statistics; implementations in "highmem-internal.h". */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);
185
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Default no-op for architectures that do not define
 * ARCH_HAS_FLUSH_ANON_PAGE: no cache maintenance is required before the
 * kernel accesses an anonymous page mapped at user address @vmaddr.
 */
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif
191
#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
/*
 * Default no-ops for architectures that do not implement vmap-range
 * cache maintenance (ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE unset).
 */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
200
201
#ifndef clear_user_highpage
/*
 * Zero a (possibly highmem) page that will be mapped into userspace at
 * virtual address @vaddr, via a short-lived local kernel mapping.
 */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_local_page(page);

	clear_user_page(kaddr, vaddr, page);
	kunmap_local(kaddr);
}
#endif
210
211#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226static inline struct page *
227alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
228 unsigned long vaddr)
229{
230 struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
231
232 if (page)
233 clear_user_highpage(page, vaddr);
234
235 return page;
236}
237#endif
238
/* Zero an entire page through a temporary local kernel mapping. */
static inline void clear_highpage(struct page *page)
{
	void *vaddr = kmap_local_page(page);

	clear_page(vaddr);
	kunmap_local(vaddr);
}
245
#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

/*
 * Default no-op for architectures without a tag-aware page clear
 * (__HAVE_ARCH_TAG_CLEAR_HIGHPAGE unset).
 */
static inline void tag_clear_highpage(struct page *page)
{
}

#endif
253
254
255
256
257
#ifdef CONFIG_HIGHMEM
/* With HIGHMEM the out-of-line implementation is used. */
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
/**
 * zero_user_segments - zero two byte ranges within a (possibly compound) page
 * @page: the page; may be a compound page (see compound_nr() use below)
 * @start1: start of the first range
 * @end1: one past the end of the first range
 * @start2: start of the second range
 * @end2: one past the end of the second range
 *
 * Empty ranges (end <= start) are skipped; pass 0, 0 for an unused range.
 * Without HIGHMEM the whole compound page is addressable through one
 * local mapping.
 */
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	/* Neither range may extend past the (compound) page size. */
	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	/* Keep the D-cache coherent for every sub-page possibly written. */
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif
282
/* Zero the single byte range [start, end) within @page. */
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
288
/* Zero @size bytes of @page beginning at byte offset @start. */
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
294
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * Copy a userspace page: map both pages locally, delegate to
 * copy_user_page() (which may perform cache maintenance keyed on @vaddr
 * and @to), then drop the mappings in reverse (LIFO) order as required
 * by the kmap_local API.
 */
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vdst = kmap_local_page(to);
	char *vsrc = kmap_local_page(from);

	copy_user_page(vdst, vsrc, vaddr, to);
	kunmap_local(vsrc);
	kunmap_local(vdst);
}

#endif
310
#ifndef __HAVE_ARCH_COPY_HIGHPAGE

/*
 * Copy one full page to another through temporary local mappings,
 * releasing the mappings in reverse (LIFO) order.
 */
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vdst = kmap_local_page(to);
	char *vsrc = kmap_local_page(from);

	copy_page(vdst, vsrc);
	kunmap_local(vsrc);
	kunmap_local(vdst);
}

#endif
325
326static inline void memcpy_page(struct page *dst_page, size_t dst_off,
327 struct page *src_page, size_t src_off,
328 size_t len)
329{
330 char *dst = kmap_local_page(dst_page);
331 char *src = kmap_local_page(src_page);
332
333 VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
334 memcpy(dst + dst_off, src + src_off, len);
335 kunmap_local(src);
336 kunmap_local(dst);
337}
338
339static inline void memmove_page(struct page *dst_page, size_t dst_off,
340 struct page *src_page, size_t src_off,
341 size_t len)
342{
343 char *dst = kmap_local_page(dst_page);
344 char *src = kmap_local_page(src_page);
345
346 VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
347 memmove(dst + dst_off, src + src_off, len);
348 kunmap_local(src);
349 kunmap_local(dst);
350}
351
352static inline void memset_page(struct page *page, size_t offset, int val,
353 size_t len)
354{
355 char *addr = kmap_local_page(page);
356
357 VM_BUG_ON(offset + len > PAGE_SIZE);
358 memset(addr + offset, val, len);
359 kunmap_local(addr);
360}
361
362static inline void memcpy_from_page(char *to, struct page *page,
363 size_t offset, size_t len)
364{
365 char *from = kmap_local_page(page);
366
367 VM_BUG_ON(offset + len > PAGE_SIZE);
368 memcpy(to, from + offset, len);
369 kunmap_local(from);
370}
371
372static inline void memcpy_to_page(struct page *page, size_t offset,
373 const char *from, size_t len)
374{
375 char *to = kmap_local_page(page);
376
377 VM_BUG_ON(offset + len > PAGE_SIZE);
378 memcpy(to + offset, from, len);
379 flush_dcache_page(page);
380 kunmap_local(to);
381}
382
383static inline void memzero_page(struct page *page, size_t offset, size_t len)
384{
385 char *addr = kmap_local_page(page);
386
387 VM_BUG_ON(offset + len > PAGE_SIZE);
388 memset(addr + offset, 0, len);
389 flush_dcache_page(page);
390 kunmap_local(addr);
391}
392
393
394
395
396
397
398
399
400
/**
 * folio_zero_segments - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}
406
407
408
409
410
411
412
/**
 * folio_zero_segment - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}
418
419
420
421
422
423
424
/**
 * folio_zero_range - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
430
431#endif
432