1
2#ifndef _LINUX_MMU_NOTIFIER_H
3#define _LINUX_MMU_NOTIFIER_H
4
5#include <linux/list.h>
6#include <linux/spinlock.h>
7#include <linux/mm_types.h>
8#include <linux/mmap_lock.h>
9#include <linux/srcu.h>
10#include <linux/interval_tree.h>
11
12struct mmu_notifier_subscriptions;
13struct mmu_notifier;
14struct mmu_notifier_range;
15struct mmu_interval_notifier;
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
/*
 * enum mmu_notifier_event - why a range invalidation is happening.
 *
 * Carried in struct mmu_notifier_range so listeners (e.g. drivers
 * mirroring a process address space into a device MMU) can react
 * differently to an unmap versus a mere protection change.  See
 * Documentation/vm/mmu_notifier.rst for the authoritative semantics
 * of each event.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	/* Pairs with mmu_notifier_range_init_migrate()/migrate_pgmap_owner. */
	MMU_NOTIFY_MIGRATE,
};

/* Range flag: the invalidation callbacks are allowed to sleep. */
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
57
struct mmu_notifier_ops {
	/*
	 * release() is invoked when the mm this subscription is attached to
	 * is going away, so the listener can drop its references before the
	 * page tables are freed.  NOTE(review): the exact ordering
	 * guarantees vs. the invalidation callbacks are specified in
	 * Documentation/vm/mmu_notifier.rst, not visible from this header.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young() is the secondary-MMU counterpart of
	 * ptep_clear_flush_young(): test-and-clear the accessed state for
	 * [start, end) and return non-zero if the range was recently used
	 * (see the ptep_clear_flush_young_notify() wrapper below).
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young() is like clear_flush_young() but pairs with
	 * ptep_test_and_clear_young(), i.e. aging without a mandatory
	 * TLB flush (see ptep_clear_young_notify() below).
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young() checks the accessed state for @address without
	 * clearing it; returns non-zero if recently used.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte() tells the listener the pte for @address is being
	 * replaced with @pte; the set_pte_at_notify() wrapper below calls
	 * this immediately before set_pte_at().
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start()/invalidate_range_end() bracket an
	 * operation that changes the mapping of the span described by
	 * @range.  start() may return an error when the range is flagged
	 * non-blockable (!mmu_notifier_range_blockable()) and the listener
	 * would have to sleep; see
	 * mmu_notifier_invalidate_range_start_nonblock().
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() handles flushes not bracketed by start/end,
	 * e.g. the ptep/pmdp/pudp_*_clear_flush_notify() wrappers below.
	 * NOTE(review): when this is required versus optional is spelled
	 * out in Documentation/vm/mmu_notifier.rst — confirm there.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * alloc_notifier()/free_notifier() support the get/put lifetime
	 * model (mmu_notifier_get()/mmu_notifier_put()): allocate a
	 * subscription for @mm, and free one once it is safe to do so
	 * (an rcu_head is embedded in struct mmu_notifier for deferral).
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};
219
220
221
222
223
224
225
226
227
228
229
230
/*
 * One registered subscription on an mm.  Embed this (or allocate it via
 * ops->alloc_notifier) and register with mmu_notifier_register() or
 * mmu_notifier_get().
 */
struct mmu_notifier {
	struct hlist_node hlist;		/* membership in the mm's subscription list */
	const struct mmu_notifier_ops *ops;	/* listener callbacks */
	struct mm_struct *mm;			/* address space being watched */
	struct rcu_head rcu;			/* for deferred free (see ops->free_notifier) */
	unsigned int users;			/* get/put reference count — see mmu_notifier_get()/put() */
};
238
239
240
241
242
243
244
/*
 * Callback for interval notifiers: invoked when @range overlaps the
 * subscribed interval.  @cur_seq is the new sequence value to store via
 * mmu_interval_set_seq().  NOTE(review): returning false presumably
 * signals inability to proceed for non-blockable ranges — confirm in
 * Documentation/vm/hmm.rst.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};
250
/*
 * A notifier attached to an address range rather than a whole mm.
 * Consumers use the mmu_interval_read_begin()/mmu_interval_read_retry()
 * sequence-count pattern against invalidate_seq to detect collisions
 * with concurrent invalidation.
 */
struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;	/* [start, last] range being watched */
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;		/* NOTE(review): deferred-work linkage — see mmu_notifier.c */
	unsigned long invalidate_seq;			/* sequence for the read_begin/retry protocol */
};
258
259#ifdef CONFIG_MMU_NOTIFIER
260
261#ifdef CONFIG_LOCKDEP
262extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
263#endif
264
/*
 * Describes one invalidation: the span [start, end) of @mm being
 * changed, why (@event), and whether callbacks may sleep
 * (MMU_NOTIFIER_RANGE_BLOCKABLE in @flags).  Initialized by
 * mmu_notifier_range_init()/mmu_notifier_range_init_migrate().
 */
struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
	void *migrate_pgmap_owner;	/* only meaningful for MMU_NOTIFY_MIGRATE */
};
274
/*
 * Fast check used to skip all notifier work in the common case where no
 * subscription exists; unlikely() biases codegen for that fast path.
 */
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}
279
280struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
281 struct mm_struct *mm);
/*
 * Find-or-create a subscription for @ops on @mm using the get/put
 * lifetime model.  Convenience wrapper that takes the mmap write lock
 * around mmu_notifier_get_locked().
 */
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *subscription;

	mmap_write_lock(mm);
	subscription = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);

	return subscription;
}
292void mmu_notifier_put(struct mmu_notifier *subscription);
293void mmu_notifier_synchronize(void);
294
295extern int mmu_notifier_register(struct mmu_notifier *subscription,
296 struct mm_struct *mm);
297extern int __mmu_notifier_register(struct mmu_notifier *subscription,
298 struct mm_struct *mm);
299extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
300 struct mm_struct *mm);
301
302unsigned long
303mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
304int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
305 struct mm_struct *mm, unsigned long start,
306 unsigned long length,
307 const struct mmu_interval_notifier_ops *ops);
308int mmu_interval_notifier_insert_locked(
309 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
310 unsigned long start, unsigned long length,
311 const struct mmu_interval_notifier_ops *ops);
312void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
313
314
315
316
317
318
319
320
321
322
323
324
325
326
/*
 * Record the sequence value handed to ops->invalidate() as @cur_seq.
 * WRITE_ONCE pairs with the READ_ONCE in mmu_interval_check_retry() so
 * concurrent readers see a coherent value.  NOTE(review): intended to be
 * called only from within the invalidate() callback — confirm against
 * mmu_notifier.c.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
/*
 * End of the read-side critical section begun by
 * mmu_interval_read_begin(): returns true if an invalidation ran in
 * between (sequence changed) and the caller must retry.  The plain
 * (non-READ_ONCE) load is deliberate — presumably the caller holds the
 * lock that excludes writers at this point; confirm against callers.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
/*
 * Lock-free variant of mmu_interval_read_retry(): may be called without
 * the driver lock, hence READ_ONCE (pairs with the WRITE_ONCE in
 * mmu_interval_set_seq()).  A "no collision" answer here can go stale
 * immediately, so it is only useful as an early-out hint.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
379
380extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
381extern void __mmu_notifier_release(struct mm_struct *mm);
382extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
383 unsigned long start,
384 unsigned long end);
385extern int __mmu_notifier_clear_young(struct mm_struct *mm,
386 unsigned long start,
387 unsigned long end);
388extern int __mmu_notifier_test_young(struct mm_struct *mm,
389 unsigned long address);
390extern void __mmu_notifier_change_pte(struct mm_struct *mm,
391 unsigned long address, pte_t pte);
392extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
393extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
394 bool only_end);
395extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
396 unsigned long start, unsigned long end);
397extern bool
398mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
399
400static inline bool
401mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
402{
403 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
404}
405
/* Notify all subscriptions that @mm is being torn down. */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (!mm_has_notifiers(mm))
		return;

	__mmu_notifier_release(mm);
}
411
/*
 * Age [start, end) in all secondary MMUs (clear + flush the accessed
 * state); returns non-zero if any listener saw the range recently used.
 */
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return 0;

	return __mmu_notifier_clear_flush_young(mm, start, end);
}
420
/*
 * Like mmu_notifier_clear_flush_young() but without the mandatory
 * flush semantics; returns non-zero if the range was recently used.
 */
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return 0;

	return __mmu_notifier_clear_young(mm, start, end);
}
429
/*
 * Query (without clearing) the accessed state for @address in all
 * secondary MMUs; returns non-zero if recently used.
 */
static inline int mmu_notifier_test_young(struct mm_struct *mm,
				   unsigned long address)
{
	if (!mm_has_notifiers(mm))
		return 0;

	return __mmu_notifier_test_young(mm, address);
}
437
438static inline void mmu_notifier_change_pte(struct mm_struct *mm,
439 unsigned long address, pte_t pte)
440{
441 if (mm_has_notifiers(mm))
442 __mmu_notifier_change_pte(mm, address, pte);
443}
444
/*
 * Blockable open of an invalidation bracket: marks the range blockable
 * and invokes the start callbacks.  The lockdep acquire/release pair
 * teaches lockdep the "inside invalidate_range_start" context so
 * sleeping-lock misuse inside listeners is caught.
 */
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
457
/*
 * Non-blocking open of an invalidation bracket: clears the blockable
 * flag, so listeners may fail with an error instead of sleeping.
 * Returns 0 on success or the listener's error code.
 */
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}
471
472static inline void
473mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
474{
475 if (mmu_notifier_range_blockable(range))
476 might_sleep();
477
478 if (mm_has_notifiers(range->mm))
479 __mmu_notifier_invalidate_range_end(range, false);
480}
481
482static inline void
483mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
484{
485 if (mm_has_notifiers(range->mm))
486 __mmu_notifier_invalidate_range_end(range, true);
487}
488
/* Unbracketed flush of [start, end) to all secondary MMUs. */
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return;

	__mmu_notifier_invalidate_range(mm, start, end);
}
495
/* Called at mm creation: start with no subscriptions. */
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}
500
/* Called at mm teardown: release the subscription bookkeeping, if any. */
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (!mm_has_notifiers(mm))
		return;

	__mmu_notifier_subscriptions_destroy(mm);
}
506
507
508static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
509 enum mmu_notifier_event event,
510 unsigned flags,
511 struct vm_area_struct *vma,
512 struct mm_struct *mm,
513 unsigned long start,
514 unsigned long end)
515{
516 range->vma = vma;
517 range->event = event;
518 range->mm = mm;
519 range->start = start;
520 range->end = end;
521 range->flags = flags;
522}
523
/*
 * Variant of mmu_notifier_range_init() for migration: forces the event
 * to MMU_NOTIFY_MIGRATE and records @pgmap so listeners can identify
 * (and e.g. skip) invalidations triggered by their own pgmap.
 */
static inline void mmu_notifier_range_init_migrate(
			struct mmu_notifier_range *range, unsigned int flags,
			struct vm_area_struct *vma, struct mm_struct *mm,
			unsigned long start, unsigned long end, void *pgmap)
{
	mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
				start, end);
	range->migrate_pgmap_owner = pgmap;
}
533
/*
 * *_notify() wrappers: perform the primary-MMU page-table operation and
 * forward the event to registered mmu notifiers so secondary MMUs stay
 * coherent.  Each evaluates its address argument once into a local to
 * avoid double evaluation.
 */

/* Clear+flush the pte accessed bit, then age the secondary MMUs too. */
#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

/* As above, for a pmd-sized (huge) entry. */
#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

/* Test-and-clear young (no mandatory flush) in primary + secondary MMUs. */
#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

/* As above, for a pmd-sized (huge) entry. */
#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

/* Clear+flush a pte, then issue an unbracketed secondary-MMU flush. */
#define	ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
					___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

/* Clear+flush a huge pmd, then flush the secondary MMUs for the range. */
#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

/* Clear+flush a huge pud, then flush the secondary MMUs for the range. */
#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})
620
621
622
623
624
625
626
627
628
629
630
/*
 * Install a pte and notify listeners via change_pte().  Note the
 * ordering: change_pte() runs BEFORE set_pte_at(), so the secondary
 * MMU is updated before the primary pte becomes visible.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
640
641#else
642
/*
 * !CONFIG_MMU_NOTIFIER: no-op stubs so callers compile unchanged.
 * struct mmu_notifier_range shrinks to just the span, and every
 * notification becomes a no-op (young-clearing reports 0, blockable
 * reports true).
 */
struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

/* Only the span survives; event/flags/vma/mm are dropped by the macros. */
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
					pgmap) \
	_mmu_notifier_range_init(range, start, end)

/* With no listeners there is never a reason not to block. */
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
				   unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
				    unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

/* The *_notify() wrappers collapse to the plain primary-MMU helpers. */
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define	ptep_clear_young_notify ptep_test_and_clear_young
#define	pmdp_clear_young_notify pmdp_test_and_clear_young
#define	ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}
743
744#endif
745
746#endif
747