1
2#ifndef _LINUX_MMU_NOTIFIER_H
3#define _LINUX_MMU_NOTIFIER_H
4
5#include <linux/list.h>
6#include <linux/spinlock.h>
7#include <linux/mm_types.h>
8#include <linux/mmap_lock.h>
9#include <linux/srcu.h>
10#include <linux/interval_tree.h>
11#include <linux/rh_kabi.h>
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
/*
 * RHEL kABI versioning for the mmu_notifier callback interface.
 *
 * When RH_MMU_NOTIFIER_V2 is selected (and we are not computing kABI
 * checksums with genksyms), the range-based v2 callbacks keep the
 * unsuffixed names and the legacy start/end callbacks are renamed with
 * a _v1 suffix; otherwise the legacy names stay unsuffixed and the v2
 * names get a _v2 suffix.  RH_MN_V1()/RH_MN_V2() perform that renaming
 * by token pasting.
 */
#if !defined(RH_MMU_NOTIFIER_V2) || defined(__GENKSYMS__)
# define RH_MMU_NOTIFIER_VERSION 1
# define RH_MN_V1(__x) __x
# define RH_MN_V2(__x) __x ## _v2
#else
# define RH_MMU_NOTIFIER_VERSION 2
# define RH_MN_V1(__x) __x ## _v1
# define RH_MN_V2(__x) __x
#endif
49
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_notifier_ops;

/*
 * Set in mmu_notifier_ops.flags to advertise that this notifier's
 * invalidate callbacks never block.
 */
#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
/*
 * enum mmu_notifier_event - the reason a range notification is happening
 *
 * @MMU_NOTIFY_UNMAP: the range is being unmapped (e.g. munmap(), or the
 *	source range of a mremap()).
 * @MMU_NOTIFY_CLEAR: ptes in the range are being cleared while the vma
 *	itself stays (unmap, migration, reclaim, ...).
 * @MMU_NOTIFY_PROTECTION_VMA: the protection of the whole vma is changing.
 * @MMU_NOTIFY_PROTECTION_PAGE: protection change for individual pages.
 * @MMU_NOTIFY_SOFT_DIRTY: soft-dirty tracking is write-protecting the range.
 * @MMU_NOTIFY_RELEASE: the mm (or interval) is going away entirely.
 * @MMU_NOTIFY_MIGRATE: migration initiated by a device driver; see
 *	migrate_pgmap_owner in struct mmu_notifier_range.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
};
95
96#ifdef CONFIG_LOCKDEP
97extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
98#endif
99
100
101
102
103
104
105
#ifdef __GENKSYMS__
/*
 * RHEL: the original structure name/layout is kept visible to genksyms
 * only, so the kABI checksum stays unchanged; the live implementation
 * uses the opaque struct mmu_notifier_subscriptions instead.
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued here */
	struct hlist_head list;
	/* serializes list modifications */
	spinlock_t lock;
};
#else
struct mmu_notifier_subscriptions;
#endif
struct mmu_interval_notifier;

/*
 * Note: a second, byte-identical definition of
 * MMU_INVALIDATE_DOES_NOT_BLOCK used to sit here; it was redundant (the
 * flag is already defined once earlier in this header) and has been
 * removed.
 */

/* Flag in mmu_notifier_range.flags: callbacks are allowed to block. */
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
120
struct mmu_notifier_ops {
	/*
	 * Behavioral flags for this set of callbacks.  The only flag
	 * defined by this header is MMU_INVALIDATE_DOES_NOT_BLOCK,
	 * which advertises that the invalidate callbacks never block.
	 */
	int flags;

	/*
	 * Called either by mmu_notifier_unregister() or when the mm is
	 * being destroyed, before all pages are freed.  Per the upstream
	 * mmu_notifier documentation this can run while the address
	 * space is still live, so the callee must tolerate concurrent
	 * operations until it returns.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * Called after the VM test-and-clears the young/accessed bit on
	 * ptes covering [start, end) and flushes the TLB.  Return
	 * nonzero if the secondary MMU also saw the range as recently
	 * accessed (feeds page aging decisions).
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * Lighter-weight variant of clear_flush_young(): the primary
	 * TLB is not flushed, so the secondary MMU need not flush
	 * either.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * Check, without clearing, whether the secondary MMU has the
	 * page at @address marked accessed.  Nonzero means young.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * A pte at @address is being updated in place (e.g. COW or KSM
	 * replacing the backing page); @pte is the new value the
	 * secondary MMU should follow.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * Legacy (v1) invalidation pair taking raw start/end addresses.
	 * RH_MN_V1() renames these to *_v1 when the v2 interface is
	 * active (see RH_MMU_NOTIFIER_VERSION above).  Between _start
	 * and _end the kernel may free pages in the range, so the
	 * secondary MMU must stop using them until _end has run.
	 */
	void (*RH_MN_V1(invalidate_range_start))(struct mmu_notifier *subscription,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end);
	void (*RH_MN_V1(invalidate_range_end))(struct mmu_notifier *subscription,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);

	/*
	 * Immediate shoot-down of an external TLB (IOMMU/ATS style)
	 * when pages are unmapped; per upstream documentation this may
	 * be called under the page table lock and must not sleep.  It
	 * can run inside an invalidate_range_start()/_end() section or
	 * on its own.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * RHEL kABI extension slots:
	 *  1-2: alloc_notifier()/free_notifier() back the
	 *       mmu_notifier_get()/mmu_notifier_put() lifetime API;
	 *       free_notifier() runs after a grace period (see the rcu
	 *       machinery in struct mmu_notifier_rh below).
	 *  3-4: range-based (v2) invalidate_range_start()/_end() taking
	 *       a struct mmu_notifier_range.  The v2 start callback
	 *       returns int so a non-blockable invalidation can fail.
	 */
	RH_KABI_USE(1, struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm))
	RH_KABI_USE(2, void (*free_notifier)(struct mmu_notifier *subscription))
	RH_KABI_USE(3,
		int (*RH_MN_V2(invalidate_range_start))(struct mmu_notifier *subscription,
					const struct mmu_notifier_range *range)
	)
	RH_KABI_USE(4,
		void (*RH_MN_V2(invalidate_range_end))(struct mmu_notifier *subscription,
					const struct mmu_notifier_range *range)
	)
};
305
306
307
308
309
310
311
312
313
314
315
316
/*
 * RHEL: out-of-line extension of struct mmu_notifier, reached through
 * the kABI auxiliary pointer (RH_KABI_USE_AUX_PTR below).  Holds the
 * state that upstream later grew inside struct mmu_notifier itself.
 */
struct mmu_notifier_rh {
	struct mm_struct *mm;		/* mm this subscription is attached to */
	struct rcu_head rcu;		/* for deferred (RCU) freeing */
	unsigned int users;		/* presumably the get/put refcount — confirm in mmu_notifier.c */
	unsigned int version;		/* RH_MMU_NOTIFIER_VERSION of the ops */
	struct mmu_notifier *back_ptr;	/* back-pointer to the wrapping notifier */
};
324
/*
 * One registered subscription to mmu events on a given mm.  The layout
 * is frozen for kABI; extra fields live in struct mmu_notifier_rh via
 * the auxiliary pointer.
 */
struct mmu_notifier {
	struct hlist_node hlist;		/* node on the mm's notifier list */
	const struct mmu_notifier_ops *ops;	/* callbacks; never NULL once registered */
	/* RHEL kABI: aux pointer to struct mmu_notifier_rh */
	RH_KABI_USE_AUX_PTR(1, 2, mmu_notifier)
};
330
331
332
333
334
335
336
/*
 * Callback for interval (range-scoped) notifiers.  invalidate() must
 * return true if it handled the event; cur_seq is passed back to
 * mmu_interval_set_seq() under the driver's lock.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};
342
/*
 * A notifier scoped to one address interval of an mm, tracked in an
 * interval tree.  invalidate_seq implements the collision-retry scheme
 * used by mmu_interval_read_begin()/_retry() below.
 */
struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;	/* [start, last] in the mm's tree */
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;	/* queued work while an invalidation is running */
	unsigned long invalidate_seq;		/* sequence lock-like retry counter */
};
350
351#ifdef CONFIG_MMU_NOTIFIER
352
/*
 * Describes one invalidation: which vma/mm, the [start, end) span, why
 * it is happening (event) and whether callbacks may block (flags &
 * MMU_NOTIFIER_RANGE_BLOCKABLE).
 */
struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;		/* exclusive */
	unsigned flags;			/* MMU_NOTIFIER_RANGE_* */
	enum mmu_notifier_event event;
	void *migrate_pgmap_owner;	/* only valid for MMU_NOTIFY_MIGRATE */
};
362
363static inline int mm_has_notifiers(struct mm_struct *mm)
364{
365 return unlikely(mm->notifier_subscriptions);
366}
367
368struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
369 struct mm_struct *mm);
/*
 * Find-or-create a notifier with @ops on @mm, taking the mmap write
 * lock around mmu_notifier_get_locked().  Returns the subscription (or
 * an error pointer propagated from the locked variant).
 */
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *subscription;

	mmap_write_lock(mm);
	subscription = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);

	return subscription;
}
380void mmu_notifier_put(struct mmu_notifier *subscription);
381void mmu_notifier_synchronize(void);
382
383extern int mmu_notifier_register(struct mmu_notifier *subscription,
384 struct mm_struct *mm);
385extern int __mmu_notifier_register(struct mmu_notifier *subscription,
386 struct mm_struct *mm);
387extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
388 struct mm_struct *mm);
389extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
390 struct mm_struct *mm);
391
392unsigned long
393mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
394int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
395 struct mm_struct *mm, unsigned long start,
396 unsigned long length,
397 const struct mmu_interval_notifier_ops *ops);
398int mmu_interval_notifier_insert_locked(
399 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
400 unsigned long start, unsigned long length,
401 const struct mmu_interval_notifier_ops *ops);
402void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
403
404
405
406
407
408
409
410
411
412
413
414
415
416
/*
 * Record @cur_seq (the value handed to the invalidate() callback) in
 * the interval notifier.  Intended to be called from inside the
 * invalidate() callback under the driver's lock, so a plain
 * WRITE_ONCE() suffices.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
/*
 * End-of-critical-section check paired with mmu_interval_read_begin():
 * returns true if an invalidation ran since @seq was sampled and the
 * caller must retry.  Uses a plain load — the caller is expected to
 * hold the lock that serializes against mmu_interval_set_seq()
 * (upstream requires the driver lock here).
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
/*
 * Lockless variant of mmu_interval_read_retry(): may be called without
 * the driver lock as a cheap early-out.  Because it provides no
 * ordering it can race and must only be used as a hint; the locked
 * mmu_interval_read_retry() is still required before committing.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE() in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
469
470extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
471extern void __mmu_notifier_release(struct mm_struct *mm);
472extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
473 unsigned long start,
474 unsigned long end);
475extern int __mmu_notifier_clear_young(struct mm_struct *mm,
476 unsigned long start,
477 unsigned long end);
478extern int __mmu_notifier_test_young(struct mm_struct *mm,
479 unsigned long address);
480extern void __mmu_notifier_change_pte(struct mm_struct *mm,
481 unsigned long address, pte_t pte);
482extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
483extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
484 bool only_end);
485extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
486 unsigned long start, unsigned long end);
487extern bool
488mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
489
490static inline bool
491mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
492{
493 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
494}
495
/* Run the ->release() callbacks, if any notifier is registered on @mm. */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}
501
/*
 * Clear+flush young state in secondary MMUs for [start, end); returns
 * nonzero if any notifier reported the range as recently accessed, 0
 * when no notifiers are registered.
 */
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}
510
/* Non-flushing variant of mmu_notifier_clear_flush_young(). */
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}
519
/* Ask secondary MMUs whether @address is young; 0 when no notifiers. */
static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}
527
/* Propagate an in-place pte update (new value @pte) to secondary MMUs. */
static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}
534
/*
 * Begin an invalidation with blocking callbacks permitted (the
 * BLOCKABLE flag is forced on).  The lockdep map lets lockdep catch
 * sleeping-lock inversions against the notifier callbacks even when no
 * notifier is currently registered.
 */
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
545
/*
 * Non-blocking variant: the BLOCKABLE flag is cleared, and the return
 * value from __mmu_notifier_invalidate_range_start() is propagated so
 * the caller can back off when a notifier cannot proceed without
 * sleeping (0 when nothing is registered).
 */
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}
559
/* Finish an invalidation; runs both invalidate_range() and _end() hooks. */
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}
566
/*
 * Finish an invalidation running only the _end() hooks (only_end=true),
 * for callers that already issued mmu_notifier_invalidate_range().
 */
static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}
573
/* Immediate secondary-TLB shoot-down for [start, end) on @mm. */
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}
580
/* Called at mm creation: start with no notifier subscriptions. */
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}
585
/* Called at mm teardown: free the subscription bookkeeping, if any. */
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}
591
592
593static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
594 enum mmu_notifier_event event,
595 unsigned flags,
596 struct vm_area_struct *vma,
597 struct mm_struct *mm,
598 unsigned long start,
599 unsigned long end)
600{
601 range->vma = vma;
602 range->event = event;
603 range->mm = mm;
604 range->start = start;
605 range->end = end;
606 range->flags = flags;
607}
608
/*
 * Like mmu_notifier_range_init() but for MMU_NOTIFY_MIGRATE events:
 * additionally records @pgmap so the driver that triggered the
 * migration can recognise (and skip) its own invalidations.
 */
static inline void mmu_notifier_range_init_migrate(
			struct mmu_notifier_range *range, unsigned int flags,
			struct vm_area_struct *vma, struct mm_struct *mm,
			unsigned long start, unsigned long end, void *pgmap)
{
	mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
				start, end);
	range->migrate_pgmap_owner = pgmap;
}
618
/*
 * *_clear_{flush_,}young_notify() - clear the accessed bit in the
 * primary MMU and also clear/query young state in any secondary MMUs
 * via the clear_flush_young/clear_young notifier callbacks.  The
 * result is nonzero if either level saw the page/pmd as young.  The
 * _flush_ variants flush the primary TLB; the plain variants do not.
 */
#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
666
/*
 * *_clear_flush_notify() - clear+flush a pte/pmd/pud in the primary
 * MMU, then immediately shoot down the corresponding page/huge-page
 * span in secondary MMUs via mmu_notifier_invalidate_range().  Each
 * evaluates to the old entry value, like the underlying primitive.
 */
#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
					___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})
705
706
707
708
709
710
711
712
713
714
715
/*
 * set_pte_at_notify() - run the change_pte() notifier callback before
 * installing the new pte, so secondary MMUs can switch to the new page
 * atomically with the primary MMU (used by in-place pte replacement
 * paths such as COW/KSM — confirm against callers).
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})

/* Queue @func to run on @rcu after an SRCU grace period. */
extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
728
729#else
730
/*
 * !CONFIG_MMU_NOTIFIER: the range carries only the [start, end) span;
 * event/flags/vma/mm/pgmap arguments are accepted and discarded so
 * callers compile unchanged.
 */
struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

/* Record just the address span; notifiers are compiled out. */
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->end = end;
	range->start = start;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)	\
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
					pgmap) \
	_mmu_notifier_range_init(range, start, end)
749
/*
 * !CONFIG_MMU_NOTIFIER stubs: with notifiers compiled out, every range
 * is blockable, no mm has notifiers, nothing is ever young in a
 * secondary MMU, and the notification hooks are no-ops.
 */
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}
782
/* Invalidation start/end and mm init/teardown: all no-ops without
 * CONFIG_MMU_NOTIFIER; the nonblock start trivially succeeds. */
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
816
/*
 * Without CONFIG_MMU_NOTIFIER the *_notify helpers collapse to the
 * plain page-table primitives and read-only conversion reports false.
 */
#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

/* Nothing to wait for when notifiers are compiled out. */
static inline void mmu_notifier_synchronize(void)
{
}
831
/*
 * RHEL: when the v2 (range-based) notifier interface is selected,
 * redirect mmu_notifier_register()/__mmu_notifier_register() to the
 * _v2 entry points so callers built against v2 bind to the matching
 * kABI symbols.
 *
 * NOTE(review): in this header chunk the remap sits inside the
 * !CONFIG_MMU_NOTIFIER branch; confirm against the full file that this
 * placement (rather than the CONFIG_MMU_NOTIFIER branch, or outside
 * both) is intended.
 */
#ifdef RH_MMU_NOTIFIER_V2
extern int mmu_notifier_register_v2(struct mmu_notifier *subscription,
				    struct mm_struct *mm);
extern int __mmu_notifier_register_v2(struct mmu_notifier *subscription,
				      struct mm_struct *mm);
# define mmu_notifier_register(__a, __b) \
	mmu_notifier_register_v2(__a, __b)
# define __mmu_notifier_register(__a, __b) \
	__mmu_notifier_register_v2(__a, __b)
#endif
842#endif
843
844#endif
845