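// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmu_notifier.c
 *
 * Core infrastructure that lets secondary MMUs (KVM, IOMMUs, GPU and RDMA
 * drivers, ...) subscribe to invalidation events on a process address space,
 * either through the classic hlist-based struct mmu_notifier or through the
 * interval-tree-based struct mmu_interval_notifier.
 */
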
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

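/*
 * The mmu_notifier_subscriptions structure is allocated and installed on
 * mm->notifier_subscriptions while mm_take_all_locks() is held, and is only
 * freed once the mm itself is torn down (see
 * __mmu_notifier_subscriptions_destroy()).
 */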
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};
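/*
 * 'invalidate_seq' implements a collision-retry scheme for the interval tree,
 * much like a seqcount, except that several invalidations (write sides) may
 * be in flight at once.  While any invalidate_range_start()/end() pair
 * overlaps the tree the sequence is odd; the last mn_itree_inv_end() makes it
 * even again and wakes waiters.  mmu_interval_read_begin() samples the
 * sequence so that readers can detect a collision and retry.
 *
 * Tree insertions and removals that race with a running invalidation are
 * queued on 'deferred_list' and applied by the final mn_itree_inv_end(), so
 * that invalidate_range_start() never needs to take a sleeping lock.
 */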
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		/* An odd sequence means at least one range is invalidating */
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even again */
	subscriptions->invalidate_seq++;

	/*
	 * Adds and removes that raced with the invalidation were queued on
	 * the deferred list; apply them now that the last invalidation has
	 * finished.  Deferring the tree updates avoids taking a sleeping lock
	 * during invalidate_range_start().
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}
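/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription.  If the mm invokes an invalidation during the critical
 * section then mmu_interval_read_retry() will return true.
 *
 * Sketch of the typical driver usage, where 'driver_lock' stands for whatever
 * driver-private lock the ops->invalidate() callback also takes before it
 * calls mmu_interval_set_seq():
 *
 *   seq = mmu_interval_read_begin(interval_sub);
 * again:
 *   <obtain the CPU mapping, e.g. with hmm_range_fault()>
 *   driver_lock();
 *   if (mmu_interval_read_retry(interval_sub, seq)) {
 *           driver_unlock();
 *           seq = mmu_interval_read_begin(interval_sub);
 *           goto again;
 *   }
 *   <program the device page tables from the result>
 *   driver_unlock();
 */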
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has the same sequence as the subscriptions then
	 * an invalidation touching this range is currently in progress and
	 * the caller must wait for it to complete.  Any race here is closed
	 * by an eventual mmu_interval_read_retry(), which ops->invalidate()
	 * makes true by calling mmu_interval_set_seq() under the driver lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * Tell lockdep this may sleep in the same way that
	 * invalidate_range_start() may sleep, then wait for any in-progress
	 * invalidation of this range to finish.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Note that mmu_interval_read_retry() can already be true at this
	 * point; not looping here lets the caller provide a global time
	 * bound.
	 */
	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}
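/*
 * This function can't run concurrently against mmu_notifier_register()
 * because mm->mm_users > 0 during mmu_notifier_register() and exit_mmap()
 * runs with mm_users == 0.  Other tasks may still invoke mmu notifiers in
 * parallel through vmas outside the exit_mmap() context, such as with
 * vmtruncate().  This serializes against mmu_notifier_unregister() with the
 * subscriptions->lock in addition to SRCU, so the subscription list stays
 * valid to traverse here.
 */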
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister() until the ->release
	 * callbacks have finished running.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any
		 * more sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning
	 * to exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister (the "exit_mmap" locking would not apply
	 * there).
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

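/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */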
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

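/*
 * Walk every interval subscription overlapping the range.  A subscription
 * whose ->invalidate() callback cannot make progress in a non-blockable
 * context returns false, which aborts the walk with -EAGAIN.
 */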
static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blockable caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range() here as well so that a subsystem
		 * that registers invalidate_range() does not also need an
		 * invalidate_range_end() callback.  This is skipped when the
		 * call site used mmu_notifier_invalidate_range_only_end(),
		 * which is only safe when invalidate_range() has already been
		 * issued under the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}
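/*
 * Same as mmu_notifier_register() but here the caller must hold the mmap_lock
 * in write mode.  A NULL @subscription signals that only the interval tree
 * infrastructure is being set up.
 */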
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister.
	 * mmu_notifier_release can't run concurrently with us because we hold
	 * the mm_users pin (either implicitly as current->mm or explicitly
	 * with get_task_mm() or similar), and no other notifier method can
	 * race with us thanks to mm_take_all_locks().
	 *
	 * The release semantics of the store pair with unlocked readers that
	 * use smp_load_acquire(); once created, notifier_subscriptions is not
	 * freed until the mm is destroyed, so holders of the mmap_lock or of
	 * the mm_take_all_locks() set do not need acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop() in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
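/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling this
 * registration function.  The caller must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release(), so mm
 * has to be current->mm or the mm should be pinned safely such as with
 * get_task_mm().  If the mm is not current->mm, the mm_users pin should be
 * released by calling mmput() after mmu_notifier_register() returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 */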
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}
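/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via ops->alloc_notifier()
 * or returns an already existing notifier on the list.  The value of the ops
 * pointer determines when two notifiers are considered the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put().  The caller must hold the write side of mm->mmap_lock.
 */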
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
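/* This is called after the last mmu_notifier_unregister() returned */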
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1;
}
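/*
 * This releases the mm_count pin automatically and frees the mm structure if
 * it was the last user of it.  It serializes against running mmu notifiers
 * with SRCU and against mmu_notifier_unregister() with the subscriptions
 * spinlock + SRCU.  All sptes must be dropped before calling
 * mmu_notifier_unregister().  ->release or any other notifier method may
 * still be invoked concurrently with mmu_notifier_unregister(); only after
 * mmu_notifier_unregister() returns is it guaranteed that no notifier method
 * will run anymore.
 */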
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, including ->release if it
	 * was run by __mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the mmgrab() in __mmu_notifier_register() */
	mmdrop(mm);
}
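/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get.  If this is the last reference then the
 * notifier is removed from the mm and ops->free_notifier() is deferred to an
 * SRCU callback.  Modules calling this must call mmu_notifier_synchronize()
 * in their exit path to ensure the callback has completed before the module
 * text is unloaded.
 */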
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop() in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is running in parallel we
	 * don't know which VA ranges are affected, so we must assume this new
	 * range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change it.
	 * Retrying until invalidation is done risks live lock, so instead
	 * defer the add to mn_itree_inv_end().
	 *
	 * In all cases interval_sub->invalidate_seq ends up odd, see
	 * mmu_interval_read_begin().
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd and not equal to the current invalidate_seq,
		 * which must not 'wrap' to the new seq any time soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}
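/**
 * mmu_interval_notifier_insert - Insert an interval notifier in a mm
 * @interval_sub: Interval subscription to register
 * @mm: mm_struct to attach to
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier to invalidation events on
 * the mm.  Upon return, the ops callbacks will be invoked for any event that
 * intersects the given range.
 *
 * Upon return the notifier may not yet be present in the interval tree; the
 * caller must use the normal read flow via mmu_interval_read_begin() before
 * establishing SPTEs for this range.
 */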
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
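/**
 * mmu_interval_notifier_remove - Remove an interval subscription
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert().  It
 * cannot be called from any ops callback.
 *
 * Once this returns, ops callbacks are no longer running on other CPUs and
 * will not be called in the future.
 */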
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/* pairs with mmgrab() in __mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
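/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function waits for all outstanding SRCU work queued by
 * mmu_notifier_put() to complete.  After it returns, any
 * ops->free_notifier() callback requested before this call has finished
 * running, so modules calling mmu_notifier_put() from their exit paths can
 * use it to make sure no callback will touch module text after unload.
 */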
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;

	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);