1
2#ifndef _LINUX_MMU_NOTIFIER_H
3#define _LINUX_MMU_NOTIFIER_H
4
5#include <linux/list.h>
6#include <linux/spinlock.h>
7#include <linux/mm_types.h>
8#include <linux/mmap_lock.h>
9#include <linux/srcu.h>
10#include <linux/interval_tree.h>
11
12struct mmu_notifier_subscriptions;
13struct mmu_notifier;
14struct mmu_notifier_range;
15struct mmu_interval_notifier;
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
/**
 * enum mmu_notifier_event - the reason an address range is being invalidated
 *
 * @MMU_NOTIFY_UNMAP: the range is being unmapped (munmap() or mm teardown);
 * the pages are still valid when the event is reported but will be freed
 * once the invalidation completes.
 *
 * @MMU_NOTIFY_CLEAR: page table entries in the range are being cleared,
 * e.g. zap, page migration or swap-out.
 *
 * @MMU_NOTIFY_PROTECTION_VMA: the access protection of a whole vma is
 * changing (e.g. mprotect()).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: the access protection of individual pages
 * in the range is changing.
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft-dirty tracking is write-protecting the
 * range to catch future writes.
 *
 * @MMU_NOTIFY_RELEASE: used with interval notifiers: the underlying memory
 * is going away (owner of the range is tearing it down).
 *
 * @MMU_NOTIFY_MIGRATE: device-driven migration of the range
 * (migrate_vma-style); @owner identifies the migrating driver.
 *
 * @MMU_NOTIFY_EXCLUSIVE: the range is being made exclusively accessible to
 * a device for atomic access; @owner identifies that device driver.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
	MMU_NOTIFY_EXCLUSIVE,
};
61
/* Range flag: the invalidation callbacks for this range are allowed to sleep. */
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
63
struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister() or when the mm is
	 * being destroyed by exit_mmap(), always before all pages are
	 * freed.  The subscription must stop all secondary-MMU use of the
	 * address space; no further callbacks will be delivered for this
	 * mm afterwards.  Allowed to sleep.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * Mirrors ptep_clear_flush_young(): the accessed (young) bit is
	 * being cleared and the primary TLB flushed for [start, end).
	 * Return non-zero if any secondary-MMU mapping in the range was
	 * recently referenced; the secondary TLB entries for the range
	 * must be invalidated as well.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * Like clear_flush_young() except the secondary TLB does not have
	 * to be flushed; only the young state is cleared and reported.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * Report whether @address was recently accessed through the
	 * secondary MMU, without clearing the young/accessed state.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * A pte at @address is being replaced with @pte pointing at a new
	 * page (e.g. COW resolution, KSM merging via set_pte_at_notify()).
	 * NOTE(review): callers appear to bracket this with an
	 * invalidate_range_start/end pair — confirm against the users of
	 * set_pte_at_notify().
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start()/invalidate_range_end() bracket every
	 * operation that changes the mapping of [range->start, range->end).
	 * Between the two calls the subscription must not establish new
	 * secondary mappings of the range.
	 *
	 * If mmu_notifier_range_blockable(range) is false the start
	 * callback must not sleep and may return a non-zero errno (e.g.
	 * -EAGAIN) to make the caller back off and retry; for blockable
	 * ranges it must return 0.  invalidate_range_end() is always
	 * called when start was, even if start reported an error.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * Called when pages in [start, end) are actually being unmapped or
	 * freed, possibly under the page-table lock and possibly inside an
	 * active invalidate_range_start/end pair.  The secondary TLB must
	 * be flushed before the primary pages can be reused.  Must not
	 * sleep.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * Backend for mmu_notifier_get()/mmu_notifier_put():
	 * alloc_notifier() allocates a subscription for @mm;
	 * free_notifier() releases it after the last reference is dropped.
	 * NOTE(review): the free is deferred past an RCU/SRCU grace
	 * period (see struct mmu_notifier::rcu) — the object may still be
	 * referenced by readers until then.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};
225
226
227
228
229
230
231
232
233
234
235
236
/*
 * A single subscription to MMU events of one address space.  Embedded (or
 * allocated) by the driver and registered against an mm.
 */
struct mmu_notifier {
	struct hlist_node hlist;		/* link in the mm's subscription list */
	const struct mmu_notifier_ops *ops;	/* driver callbacks */
	struct mm_struct *mm;			/* address space subscribed to */
	struct rcu_head rcu;			/* deferred free after grace period */
	unsigned int users;			/* get()/put() reference count */
};
244
245
246
247
248
249
250
struct mmu_interval_notifier_ops {
	/*
	 * Called when @range collides with the subscribed interval.
	 * @cur_seq is the new invalidation sequence number and should be
	 * stored via mmu_interval_set_seq().  Return false to refuse a
	 * non-blockable invalidation (caller will retry); must return
	 * true for blockable ranges.
	 */
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};
256
/*
 * A subscription covering a single [start, last] virtual-address interval
 * of one mm, tracked in an interval tree.
 */
struct mmu_interval_notifier {
	struct interval_tree_node interval_tree; /* node keyed by the VA range */
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;	/* pending insert/remove while
						 * an invalidation is running */
	unsigned long invalidate_seq;		/* collision-retry sequence count */
};
264
265#ifdef CONFIG_MMU_NOTIFIER
266
267#ifdef CONFIG_LOCKDEP
268extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
269#endif
270
/*
 * Describes one invalidation: which mapping is affected, the VA span
 * [start, end), why it happens (@event) and whether callbacks may sleep
 * (@flags & MMU_NOTIFIER_RANGE_BLOCKABLE).  @owner lets a driver
 * recognise invalidations it triggered itself (see
 * mmu_notifier_range_init_owner()).
 */
struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
	void *owner;
};
280
/*
 * Fast check whether any subscription exists for @mm.  The common case is
 * "no notifiers", hence the unlikely() branch hint; all notifier hooks
 * below test this before taking the slow __mmu_notifier_* path.
 */
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}
285
286struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
287 struct mm_struct *mm);
/*
 * mmu_notifier_get - find or create a subscription of @ops on @mm
 *
 * Convenience wrapper that takes the mmap write lock around
 * mmu_notifier_get_locked().  The returned reference must be dropped
 * with mmu_notifier_put().  NOTE(review): failure reporting follows
 * mmu_notifier_get_locked() (presumably an ERR_PTR) — confirm there.
 */
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *subscription;

	mmap_write_lock(mm);
	subscription = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);

	return subscription;
}
298void mmu_notifier_put(struct mmu_notifier *subscription);
299void mmu_notifier_synchronize(void);
300
301extern int mmu_notifier_register(struct mmu_notifier *subscription,
302 struct mm_struct *mm);
303extern int __mmu_notifier_register(struct mmu_notifier *subscription,
304 struct mm_struct *mm);
305extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
306 struct mm_struct *mm);
307
308unsigned long
309mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
310int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
311 struct mm_struct *mm, unsigned long start,
312 unsigned long length,
313 const struct mmu_interval_notifier_ops *ops);
314int mmu_interval_notifier_insert_locked(
315 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
316 unsigned long start, unsigned long length,
317 const struct mmu_interval_notifier_ops *ops);
318void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
319
320
321
322
323
324
325
326
327
328
329
330
331
332
/*
 * mmu_interval_set_seq - record the new invalidation sequence number
 *
 * To be called from the ops->invalidate() callback with the @cur_seq it
 * was handed.  The WRITE_ONCE() pairs with the READ_ONCE() in
 * mmu_interval_check_retry(), which may run without the driver's lock.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
/*
 * mmu_interval_read_retry - end a read side critical section
 *
 * Returns true if an invalidation has collided with the interval since
 * the matching mmu_interval_read_begin() returned @seq; the caller must
 * then discard its work and retry.  NOTE(review): the plain (non-
 * READ_ONCE) load relies on the caller holding the same driver lock the
 * invalidate() callback takes — confirm with the locking scheme of the
 * driver using this.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
/*
 * mmu_interval_check_retry - test if a collision has already happened
 *
 * Like mmu_interval_read_retry() but callable without the driver lock:
 * the READ_ONCE() (paired with mmu_interval_set_seq()) makes the racy
 * read safe.  A false result can become stale immediately, so this is
 * only an optimistic early-out; the locked retry check is still needed.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
385
386extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
387extern void __mmu_notifier_release(struct mm_struct *mm);
388extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
389 unsigned long start,
390 unsigned long end);
391extern int __mmu_notifier_clear_young(struct mm_struct *mm,
392 unsigned long start,
393 unsigned long end);
394extern int __mmu_notifier_test_young(struct mm_struct *mm,
395 unsigned long address);
396extern void __mmu_notifier_change_pte(struct mm_struct *mm,
397 unsigned long address, pte_t pte);
398extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
399extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
400 bool only_end);
401extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
402 unsigned long start, unsigned long end);
403extern bool
404mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
405
406static inline bool
407mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
408{
409 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
410}
411
/* Notify all subscriptions that @mm is being torn down (no-op if none). */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_release(mm);
}
417
/*
 * Clear + flush the young state of [start, end) in all secondary MMUs.
 * Returns non-zero if any subscription reported the range as young,
 * 0 when there are no subscriptions.
 */
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return 0;
	return __mmu_notifier_clear_flush_young(mm, start, end);
}
426
/*
 * Like mmu_notifier_clear_flush_young() but without requiring a
 * secondary-TLB flush.  Returns 0 when there are no subscriptions.
 */
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return 0;
	return __mmu_notifier_clear_young(mm, start, end);
}
435
/*
 * Query (without clearing) whether @address is young in any secondary
 * MMU.  Returns 0 when there are no subscriptions.
 */
static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (!mm_has_notifiers(mm))
		return 0;
	return __mmu_notifier_test_young(mm, address);
}
443
444static inline void mmu_notifier_change_pte(struct mm_struct *mm,
445 unsigned long address, pte_t pte)
446{
447 if (mm_has_notifiers(mm))
448 __mmu_notifier_change_pte(mm, address, pte);
449}
450
/*
 * Begin a blockable invalidation of @range.  Marks the range
 * MMU_NOTIFIER_RANGE_BLOCKABLE so the subscriptions' callbacks may sleep;
 * cannot fail (callbacks must return 0 for blockable ranges).  The lockdep
 * map teaches lockdep that start/end form a critical section even when no
 * subscription is registered.
 */
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
463
/*
 * Begin a non-blockable invalidation of @range (callbacks must not
 * sleep).  Clears MMU_NOTIFIER_RANGE_BLOCKABLE and returns whatever the
 * subscriptions report — non-zero (e.g. -EAGAIN) means a callback
 * refused and the caller must back off and retry; 0 otherwise.
 */
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}
477
/*
 * Finish the invalidation started by one of the _start variants.
 * only_end=false: the ->invalidate_range() callback is also delivered
 * (unless it already ran for this range).  May sleep only if the range
 * was blockable, hence the conditional might_sleep().
 */
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}
487
488static inline void
489mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
490{
491 if (mm_has_notifiers(range->mm))
492 __mmu_notifier_invalidate_range_end(range, true);
493}
494
/* Flush [start, end) from all secondary TLBs (no-op without subscribers). */
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_invalidate_range(mm, start, end);
}
501
/* Start a new mm with no subscriptions (called on mm creation). */
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}
506
/* Free the per-mm subscription state on mm teardown, if any was allocated. */
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}
512
513
514static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
515 enum mmu_notifier_event event,
516 unsigned flags,
517 struct vm_area_struct *vma,
518 struct mm_struct *mm,
519 unsigned long start,
520 unsigned long end)
521{
522 range->vma = vma;
523 range->event = event;
524 range->mm = mm;
525 range->start = start;
526 range->end = end;
527 range->flags = flags;
528}
529
/*
 * Like mmu_notifier_range_init() but additionally tags the range with
 * @owner, so the driver that triggered the invalidation can recognise
 * (and skip) its own events.
 */
static inline void mmu_notifier_range_init_owner(
			struct mmu_notifier_range *range,
			enum mmu_notifier_event event, unsigned int flags,
			struct vm_area_struct *vma, struct mm_struct *mm,
			unsigned long start, unsigned long end, void *owner)
{
	mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
	range->owner = owner;
}
539
/*
 * Clear+flush the young bit of one pte on the primary MMU, then OR in
 * the young state reported by the secondary MMUs for the same page.
 */
#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})
552
/* As ptep_clear_flush_young_notify() but for a PMD-sized (huge) mapping. */
#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})
565
/*
 * Test-and-clear the young bit of one pte (no TLB flush), then OR in the
 * secondary-MMU young state via the non-flushing clear_young notifier.
 */
#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})
576
/* As ptep_clear_young_notify() but for a PMD-sized (huge) mapping. */
#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
587
/*
 * Clear+flush one pte on the primary MMU and immediately invalidate the
 * covering page in the secondary TLBs.  Evaluates to the old pte value.
 */
#define	ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
					___addr + PAGE_SIZE);		\
									\
	___pte;								\
})
600
/*
 * Huge-PMD variant of ptep_clear_flush_notify(): clears+flushes the pmd
 * and invalidates the HPAGE_PMD_SIZE range in the secondary TLBs.
 * Evaluates to the old pmd value.
 */
#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})
613
/*
 * Huge-PUD variant: clears+flushes the pud and invalidates the
 * HPAGE_PUD_SIZE range in the secondary TLBs.  Evaluates to the old pud.
 */
#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})
626
627
628
629
630
631
632
633
634
635
636
/*
 * set_pte_at_notify() runs the change_pte notifier *before* installing
 * the new pte on the primary MMU.  This ordering is safe because the old
 * primary pte must already have been cleared and flushed (e.g. via
 * ptep_clear_flush()) by the caller, so updating the secondary MMUs
 * first cannot expose a stale mapping.  NOTE(review): callers are
 * expected to hold the invalidate_range_start/end bracket around this —
 * confirm at the call sites.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
646
647#else
648
/* !CONFIG_MMU_NOTIFIER: only the address span is kept around. */
struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};
653
/* Record just the span; event/flags/vma/mm have no meaning without notifiers. */
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

/* The full-argument initializers collapse to the span-only stub above. */
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
					end, owner)	\
	_mmu_notifier_range_init(range, start, end)
667
/* With no subscriptions there is never anything that cannot block. */
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}
673
/*
 * !CONFIG_MMU_NOTIFIER: all hooks compile away; the young-bit queries
 * report "not young" (0).
 */
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}
700
/* Invalidation and lifetime hooks: all no-ops without notifier support. */
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

/* Nothing can refuse an invalidation when there are no subscribers. */
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
734
/*
 * Without notifiers every *_notify helper collapses to the plain
 * primary-MMU operation it wraps.
 */
#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define	ptep_clear_young_notify ptep_test_and_clear_young
#define	pmdp_clear_young_notify pmdp_test_and_clear_young
#define	ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at
745
/* Nothing to wait for: there are never any in-flight subscriptions. */
static inline void mmu_notifier_synchronize(void)
{
}
749
750#endif
751
752#endif
753