/*
 * mm/mmu_notifier.c - infrastructure that lets secondary MMUs (KVM, device
 * drivers, etc.) track invalidations of a process address space.
 */
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * the mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * The write side has two states, fully excluded:
 *  - subscriptions->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - subscriptions->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on subscriptions->invalidate_seq (under the spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The even state avoids the expensive work in mn_itree_inv_end() in the
 * common case of no mmu_interval_notifier monitoring the VA.
 */
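
/*
 * Worked example of the counter scheme (illustrative only): while the itree
 * is idle, invalidate_seq is even, say 2.  The first invalidation that
 * overlaps a subscription ORs in bit 0, making it 3 (odd: "writer active");
 * further overlapping invalidations only bump active_invalidate_ranges.
 * When the last of them reaches mn_itree_inv_end(), the counter is
 * incremented to 4 (even again), deferred itree edits are applied and
 * sleepers are woken.  A reader that sampled 3 in mmu_interval_read_begin()
 * therefore waits/retries until it observes a value other than 3.
 */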

static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin() - Begin a read side critical section against a VA
 *                             range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * requires a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * No barriers are needed here: any race is closed by an eventual
	 * mmu_interval_read_retry(), which is ordered by the caller's
	 * 'user_lock' that also serializes mmu_interval_set_seq() in the
	 * invalidate() callback.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point; avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
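
/*
 * Typical read side usage (an illustrative sketch; "my_sub", "my_lock",
 * my_collect_ptes() and my_establish_sptes() are hypothetical driver names):
 *
 *	unsigned long seq;
 *
 * again:
 *	seq = mmu_interval_read_begin(&my_sub);
 *	my_collect_ptes(...);			// may fault and sleep
 *
 *	mutex_lock(&my_lock);
 *	if (mmu_interval_read_retry(&my_sub, seq)) {
 *		mutex_unlock(&my_lock);
 *		goto again;
 *	}
 *	my_establish_sptes(...);		// program the secondary MMU
 *	mutex_unlock(&my_lock);
 *
 * The same "my_lock" must be taken by the subscription's invalidate()
 * callback around mmu_interval_set_seq() so a concurrent invalidation is
 * never missed.
 */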

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel through the vmas outside of the exit_mmap context, such
 * as with vmtruncate. This serializes against mmu_notifier_unregister
 * with the notifier_subscriptions->lock in addition to SRCU, and it
 * serializes against the other mmu notifiers with SRCU. struct
 * mmu_notifier_subscriptions can't go away from under us as exit_mmap
 * holds an mm_count pin itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until the ->release
	 * callbacks below have finished running.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any
		 * more sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than wait
		 * for ->release to finish and then return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning
	 * to exit_mmap (which would proceed with freeing all pages in the mm)
	 * until any running ->release methods have finished, including one
	 * invoked concurrently by mmu_notifier_unregister. The mm itself
	 * can't go away from under us as exit_mmap holds an mm_count pin.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end().
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);

				/*
				 * There is no way for a notifier to know
				 * whether its own start method failed, so the
				 * cleanup below calls every registered
				 * invalidate_range_end(). A notifier whose
				 * start can fail must therefore not implement
				 * invalidate_range_end(), otherwise it would
				 * see an unbalanced end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here. If there are multiple
		 * notifiers and one or more failed start, any that succeeded
		 * start are expecting their end to be called. Do so now.
		 */
		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
					 hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}
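
/*
 * Core mm call sites drive this through the
 * mmu_notifier_invalidate_range_start()/end() wrappers, roughly like the
 * following sketch (details such as the event type vary by call site):
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ... modify or unmap the primary PTEs ...
 *	mmu_notifier_invalidate_range_end(&range);
 */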

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range() here as well, so a subsystem that
		 * already registers invalidate_range() does not also need an
		 * invalidate_range_end() callback. Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead besides the
		 * pointer check.
		 *
		 * The call to invalidate_range() is skipped when the call
		 * site used mmu_notifier_invalidate_range_only_end(), which
		 * is safe when invalidate_range() was already issued under
		 * the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register() but here the caller must hold the
 * mmap_lock in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A side note:
	 * mmu_notifier_release can't run concurrently with us because the
	 * caller holds an mm_users pin and exit_mmap runs with
	 * mm_users == 0. We can't race against any other mmu notifier method
	 * either thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers. acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop() in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold the mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down to
 * zero while this runs to avoid races with mmu_notifier_release, so mm has
 * to be current->mm or the mm should be pinned safely such as with
 * get_task_mm(). If the mm is not current->mm, the mm_users pin should be
 * released by calling mmput after mmu_notifier_register returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller has a mmu_notifier get, the subscription->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
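
/*
 * Registration example (an illustrative sketch; the ops struct and the
 * embedding "my_mirror" structure are hypothetical driver names):
 *
 *	static const struct mmu_notifier_ops my_mirror_ops = {
 *		.invalidate_range_start	= my_mirror_invalidate_start,
 *		.invalidate_range_end	= my_mirror_invalidate_end,
 *		.release		= my_mirror_release,
 *	};
 *
 *	my_mirror->notifier.ops = &my_mirror_ops;
 *	ret = mmu_notifier_register(&my_mirror->notifier, current->mm);
 *	...
 *	mmu_notifier_unregister(&my_mirror->notifier, current->mm);
 *
 * The callbacks run under SRCU and may still be executing until
 * mmu_notifier_unregister() returns.
 */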

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller has a mmu_notifier get, the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
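
/*
 * Example of the get/put flow (an illustrative sketch; "my_ops" and
 * "struct my_notifier" are hypothetical): a driver that wants a single
 * shared notifier per mm provides alloc_notifier()/free_notifier() in its
 * ops and then does:
 *
 *	struct mmu_notifier *mn = mmu_notifier_get(&my_ops, current->mm);
 *
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	my = container_of(mn, struct my_notifier, notifier);
 *	...
 *	mmu_notifier_put(mn);
 *
 * mmu_notifier_get() is the mmap_lock taking wrapper around
 * mmu_notifier_get_locked(); repeated gets on the same mm return the same
 * notifier with an elevated user count.
 */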

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returns are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, including ->release if it
	 * was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the mmgrab() in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the
 * freeing of the notifier is run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop() in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should
	 * be odd, see mmu_interval_read_begin().
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @mm: mm_struct to attach to
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the notifier may not be present in the interval tree yet. The
 * caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
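
/*
 * A matching invalidate() callback (an illustrative sketch; the names are
 * hypothetical) takes the same driver lock used on the read side and records
 * the sequence count handed in by the core:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct my_object *obj = container_of(interval_sub,
 *						     struct my_object, sub);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;	// only if the driver must sleep here
 *		mutex_lock(&obj->my_lock);
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		// tear down device mappings covering range->start..range->end
 *		mutex_unlock(&obj->my_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops my_interval_ops = {
 *		.invalidate = my_invalidate,
 *	};
 *
 *	mmu_interval_notifier_insert(&obj->sub, current->mm, start, length,
 *				     &my_interval_ops);
 */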

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns, ops callbacks are no longer running on other CPUs and
 * will not be called in future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not to hold any locks taken by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/* pairs with mmgrab() in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using, the caller must ensure that all of its mmu_notifiers have
 * been fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
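
/*
 * Example (an illustrative sketch; "my_driver_exit" is a hypothetical module
 * exit function): modules relying on mmu_notifier_put() flush the deferred
 * SRCU free work before their code is unloaded:
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		// all notifiers were already released with mmu_notifier_put()
 *		mmu_notifier_synchronize();
 *	}
 */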

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;

	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);