1
2#ifndef _LINUX_MMU_NOTIFIER_H
3#define _LINUX_MMU_NOTIFIER_H
4
5#include <linux/list.h>
6#include <linux/spinlock.h>
7#include <linux/mm_types.h>
8#include <linux/srcu.h>
9
10struct mmu_notifier;
11struct mmu_notifier_ops;
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/*
 * Why an invalidation range was generated; lets a listener tailor its
 * response (e.g. protection-only changes need not drop device mappings).
 * NOTE(review): per-event meanings below are inferred from the names and
 * typical callers — confirm against the mm/ call sites.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,		/* pages are being unmapped (munmap/truncate-like) */
	MMU_NOTIFY_CLEAR,		/* page-table entries are being cleared */
	MMU_NOTIFY_PROTECTION_VMA,	/* protection change covering a whole VMA range */
	MMU_NOTIFY_PROTECTION_PAGE,	/* protection change for an individual page */
	MMU_NOTIFY_SOFT_DIRTY,		/* soft-dirty tracking is write-protecting PTEs */
};
42
43#ifdef CONFIG_MMU_NOTIFIER
44
45
46
47
48
49
50
/*
 * Per-mm notifier state; hung off mm_struct (mm->mmu_notifier_mm is
 * tested by mm_has_notifiers() below).
 */
struct mmu_notifier_mm {
	/* All registered subscribers; entries are mmu_notifier.hlist. */
	struct hlist_head list;
	/*
	 * Protects mutation of @list.  NOTE(review): lookups likely run
	 * under SRCU (<linux/srcu.h> is included) — confirm in
	 * mm/mmu_notifier.c before relying on this.
	 */
	spinlock_t lock;
};
57
/*
 * Set in range->flags when invalidate callbacks are allowed to block;
 * set by mmu_notifier_invalidate_range_start() and cleared by the
 * _nonblock variant (see the wrappers below).
 */
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

/*
 * Describes one invalidation: the affected [start, end) span of @mm,
 * the originating vma, the reason (@event) and behavior flags
 * (MMU_NOTIFIER_RANGE_BLOCKABLE).
 * NOTE(review): whether @vma may be NULL is not visible here — check
 * callers of mmu_notifier_range_init().
 */
struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
};
68
/*
 * Callback table a subscriber implements to track changes to a process
 * address space — typically to keep a secondary TLB or device page
 * tables coherent with the CPU page tables.  Every hook receives the
 * registered mmu_notifier and the mm it is attached to.
 */
struct mmu_notifier_ops {
	/*
	 * The mm this notifier is registered against is going away; no
	 * further callbacks will be delivered for it afterwards.
	 * NOTE(review): exact teardown ordering is enforced in
	 * mm/mmu_notifier.c — confirm there.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * Clear the "recently accessed" (young) state for [start, end)
	 * in the secondary MMU and return non-zero if any page was
	 * young; used by page aging/reclaim.  Presumably also flushes
	 * the secondary TLB (hence "_flush") — verify in implementers.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/* As clear_flush_young, minus the (presumed) TLB flush. */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * Report whether the page at @address is marked accessed in the
	 * secondary MMU, without clearing that state.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * The PTE at @address is being replaced with @pte; reached via
	 * the set_pte_at_notify() macro below, which invokes this
	 * before writing the primary PTE.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * Bracket an invalidation of @range.  start may fail (return
	 * non-zero) only when MMU_NOTIFIER_RANGE_BLOCKABLE is clear in
	 * range->flags (see the _nonblock wrapper below, which is the
	 * only caller that propagates the error); end marks completion.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range);

	/*
	 * Flush [start, end) from the secondary TLB right away.
	 * NOTE(review): upstream requires this hook to be callable
	 * under the page-table spinlock — confirm the locking context
	 * before adding blocking work here.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
215
216
217
218
219
220
221
222
223
224
225
226
/*
 * One registered subscriber: a node on mmu_notifier_mm.list plus the
 * callback table.  NOTE(review): conventionally embedded in a larger
 * driver-private structure — not visible from this header.
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};
231
/*
 * Non-zero once notifier state has been allocated for @mm.  Wrapped in
 * unlikely() so the common no-notifier fast paths stay cheap.
 */
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}
236
237extern int mmu_notifier_register(struct mmu_notifier *mn,
238 struct mm_struct *mm);
239extern int __mmu_notifier_register(struct mmu_notifier *mn,
240 struct mm_struct *mm);
241extern void mmu_notifier_unregister(struct mmu_notifier *mn,
242 struct mm_struct *mm);
243extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
244 struct mm_struct *mm);
245extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
246extern void __mmu_notifier_release(struct mm_struct *mm);
247extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
248 unsigned long start,
249 unsigned long end);
250extern int __mmu_notifier_clear_young(struct mm_struct *mm,
251 unsigned long start,
252 unsigned long end);
253extern int __mmu_notifier_test_young(struct mm_struct *mm,
254 unsigned long address);
255extern void __mmu_notifier_change_pte(struct mm_struct *mm,
256 unsigned long address, pte_t pte);
257extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
258extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
259 bool only_end);
260extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
261 unsigned long start, unsigned long end);
262extern bool
263mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
264
265static inline bool
266mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
267{
268 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
269}
270
/* Fan the mm-teardown event out to subscribers, if any exist. */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_release(mm);
}
276
/*
 * Clear+flush "young" state in secondary MMUs for [start, end);
 * returns the accumulated young status (0 when nothing registered).
 */
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	int young = 0;

	if (mm_has_notifiers(mm))
		young = __mmu_notifier_clear_flush_young(mm, start, end);
	return young;
}
285
/*
 * Clear "young" state in secondary MMUs for [start, end) without a
 * flush; returns the accumulated young status (0 with no notifiers).
 */
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int young = 0;

	if (mm_has_notifiers(mm))
		young = __mmu_notifier_clear_young(mm, start, end);
	return young;
}
294
/* Non-destructive "young" probe for @address; 0 when no notifiers. */
static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (!mm_has_notifiers(mm))
		return 0;
	return __mmu_notifier_test_young(mm, address);
}
302
303static inline void mmu_notifier_change_pte(struct mm_struct *mm,
304 unsigned long address, pte_t pte)
305{
306 if (mm_has_notifiers(mm))
307 __mmu_notifier_change_pte(mm, address, pte);
308}
309
310static inline void
311mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
312{
313 if (mm_has_notifiers(range->mm)) {
314 range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
315 __mmu_notifier_invalidate_range_start(range);
316 }
317}
318
319static inline int
320mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
321{
322 if (mm_has_notifiers(range->mm)) {
323 range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
324 return __mmu_notifier_invalidate_range_start(range);
325 }
326 return 0;
327}
328
329static inline void
330mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
331{
332 if (mm_has_notifiers(range->mm))
333 __mmu_notifier_invalidate_range_end(range, false);
334}
335
336static inline void
337mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
338{
339 if (mm_has_notifiers(range->mm))
340 __mmu_notifier_invalidate_range_end(range, true);
341}
342
/* Immediate secondary-TLB flush of [start, end) for @mm. */
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_invalidate_range(mm, start, end);
}
349
/* Fresh mm: no notifier state until the first registration allocates it. */
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}
354
/* Release the per-mm notifier state, if it was ever allocated. */
static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_mm_destroy(mm);
}
360
361
362static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
363 enum mmu_notifier_event event,
364 unsigned flags,
365 struct vm_area_struct *vma,
366 struct mm_struct *mm,
367 unsigned long start,
368 unsigned long end)
369{
370 range->vma = vma;
371 range->event = event;
372 range->mm = mm;
373 range->start = start;
374 range->end = end;
375 range->flags = flags;
376}
377
/*
 * ptep_clear_flush_young() plus secondary-MMU notification; the result
 * is the OR of the primary and secondary "young" state for the page.
 * Arguments are captured once into locals to avoid double evaluation.
 */
#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})
390
/*
 * PMD-sized analogue of ptep_clear_flush_young_notify(): clears+flushes
 * the huge-PMD young state and ORs in the secondary-MMU state for the
 * PMD_SIZE range.
 */
#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})
403
/*
 * ptep_test_and_clear_young() plus notification — the no-flush variant
 * (routes through mmu_notifier_clear_young()).
 */
#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})
414
/* PMD-sized, no-flush young test+clear with notification. */
#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
425
/*
 * ptep_clear_flush() followed by an immediate secondary-TLB flush of
 * the containing page; evaluates to the old PTE value.
 */
#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})
438
/*
 * Huge-PMD clear+flush with an immediate secondary-TLB flush of the
 * HPAGE_PMD_SIZE range; evaluates to the old PMD value.
 */
#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})
451
/*
 * Huge-PUD clear+flush with an immediate secondary-TLB flush of the
 * HPAGE_PUD_SIZE range; evaluates to the old PUD value.
 */
#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})
464
465
466
467
468
469
470
471
472
473
474
/*
 * Write a PTE, notifying subscribers (ops->change_pte) BEFORE the
 * primary PTE changes, so a secondary MMU can switch its mapping
 * without a window where it points at the old page.
 * NOTE(review): upstream documents ordering constraints relative to
 * invalidate_range_start/end — confirm before reordering these calls.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
484
485extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
486 void (*func)(struct rcu_head *rcu));
487
488#else
489
/*
 * CONFIG_MMU_NOTIFIER=n: keep only the fields generic mm code actually
 * touches, so struct users compile without the notifier machinery.
 */
struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

/* Records just the span; there are no subscribers to inform. */
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

/* Drops the notifier-only arguments (event, flags, vma, mm). */
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
	_mmu_notifier_range_init(range, start, end)
505
/* With no notifiers compiled in, an invalidation can always block. */
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

/* Never any notifiers when CONFIG_MMU_NOTIFIER=n. */
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}
516
/*
 * No-op stubs for CONFIG_MMU_NOTIFIER=n: callers in generic mm code can
 * invoke these unconditionally and the compiler discards them.
 */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

/* Always succeeds: there is no subscriber that could refuse. */
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}
572
/* No notifier means the range can never need downgrading. */
#define mmu_notifier_range_update_to_read_only(r) false

/* The _notify variants collapse to the plain page-table helpers. */
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define	ptep_clear_young_notify ptep_test_and_clear_young
#define	pmdp_clear_young_notify pmdp_test_and_clear_young
#define	ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at
583
584#endif
585
586#endif
587