/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either munmap() that unmaps the range or a mremap() that
 * moves the range.
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, ie using the vma access permission (vm_page_prot) to update the
 * whole range is enough, no need to inspect changes to the CPU page table
 * (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in read/write flag
 * for pages in the range, so to mirror those changes the user must inspect
 * the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
 * access flags).
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister() or when the mm is
	 * being destroyed by exit_mmap(), always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM is
	 * test-and-clearing the referenced flag in the primary pte.
	 * The secondary MMU should likewise clear and flush its copy
	 * of the young/accessed bit for the range, and return non-zero
	 * if the range was recently referenced through it.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young.
	 * Like the latter, it is supposed to test-and-clear the
	 * young/accessed bitflag in the secondary pte, but it may omit
	 * flushing the secondary TLB.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or
	 * tearing down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases that the pte mapping to a page
	 * is changed: for example, when ksm remaps a pte to point to a
	 * new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. They bracket a
	 * critical section during which pages in [range->start,
	 * range->end) are about to be (start) or have been (end)
	 * unmapped, freed, or had their protection changed. The
	 * secondary MMU must not establish new mappings in the range
	 * between the two calls, and must stop using its existing ones.
	 *
	 * If mmu_notifier_range_blockable(range) is false the callback
	 * must not sleep and has to return -EAGAIN if progress would
	 * require sleeping; non-blocking callers such as the OOM
	 * reaper will then back off. When the range is blockable,
	 * invalidate_range_start() must not fail and should return 0.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary anyway.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() is called under the same page table lock;
	 * the secondary TLB must then be flushed before that lock is
	 * released.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to
	 * manage the lifetime of the mmu_notifier memory.
	 * alloc_notifier() returns a new notifier to be used by
	 * mmu_notifier_get(). free_notifier() is only called after the
	 * last reference from mmu_notifier_put() is dropped and the
	 * notifier has been fully removed from the mm.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};

/*
 * The notifier chain is protected by mmap_sem and srcu: notifiers are
 * added while holding mmap_sem for write, and removal is deferred
 * through srcu (via the rcu_head below) so that readers traversing the
 * chain never see freed memory.
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;
};
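
/*
 * Illustrative sketch only: a subscriber typically embeds struct
 * mmu_notifier in its own per-mm object and recovers it in the callbacks
 * with container_of(). The names below (struct my_mirror,
 * my_mirror_release) are hypothetical, not part of this API:
 *
 *	struct my_mirror {
 *		struct mmu_notifier notifier;
 *		// ...driver state for this mm...
 *	};
 *
 *	static void my_mirror_release(struct mmu_notifier *subscription,
 *				      struct mm_struct *mm)
 *	{
 *		struct my_mirror *m = container_of(subscription,
 *						   struct my_mirror, notifier);
 *		// tear down all secondary mappings held for mm
 *	}
 */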

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	down_write(&mm->mmap_sem);
	ret = mmu_notifier_get_locked(ops, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
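
/*
 * Illustrative sketch only: with the get/put interface, a driver that has a
 * global "const struct mmu_notifier_ops my_ops" (hypothetical name)
 * providing alloc_notifier()/free_notifier() obtains a per-mm notifier
 * like this:
 *
 *	struct mmu_notifier *sub = mmu_notifier_get(&my_ops, current->mm);
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	// ...mirror the address space...
 *	mmu_notifier_put(sub);
 *
 * Repeated gets on the same mm return the same notifier with an elevated
 * user count; free_notifier() runs only after the last put.
 */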
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
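
/*
 * Illustrative sketch only: registering an interval notifier for one VA
 * range. The callback and lock names (my_invalidate, my_lock, my_ops) are
 * hypothetical driver names, not part of this API:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&my_lock);
 *		else if (!mutex_trylock(&my_lock))
 *			return false;
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		// ...invalidate the driver's mappings for the range...
 *		mutex_unlock(&my_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops my_ops = {
 *		.invalidate = my_invalidate,
 *	};
 *
 *	ret = mmu_interval_notifier_insert(&sub, current->mm, start, length,
 *					   &my_ops);
 */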

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub - The subscription passed to invalidate
 * @cur_seq - The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry().
 *
 * If the subscriber does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA
 * range
 * @interval_sub - The subscription
 * @seq - The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
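
/*
 * Illustrative sketch only: the collision-retry pattern a reader of the
 * interval notifier is expected to follow. my_lock and the elided helpers
 * are hypothetical; the lock must be the one held by the invalidate()
 * callback around mmu_interval_set_seq():
 *
 *	again:
 *		seq = mmu_interval_read_begin(&sub);
 *		// ...read the CPU page table for the range...
 *		mutex_lock(&my_lock);
 *		if (mmu_interval_read_retry(&sub, seq)) {
 *			mutex_unlock(&my_lock);
 *			goto again;
 *		}
 *		// ...program the device page table from the result...
 *		mutex_unlock(&my_lock);
 */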

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub - The subscription
 * @seq - The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}
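
/*
 * Illustrative sketch only: the usual shape of an invalidating caller in
 * the core mm. A range is initialized, then start/end bracket the page
 * table update:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ...clear or modify the ptes in [start, end)...
 *	mmu_notifier_invalidate_range_end(&range);
 */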

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary
 * MMU pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() has been invoked.  Updating the secondary MMUs
 * first is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (like during copy on write page
 * faults). Otherwise the old page would remain mapped readonly in the
 * secondary MMUs after the new page is already writable by some CPU through
 * the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)	\
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */