// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
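
/*
 * Example (hypothetical, not part of this file): a subsystem whose
 * ->release callback must free its notifier without blocking can embed
 * an rcu_head in its private structure and defer the kfree through the
 * same SRCU domain that protects the notifier list. A minimal sketch,
 * assuming a driver-defined struct my_notifier:
 *
 *	struct my_notifier {
 *		struct mmu_notifier mn;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_free(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct my_notifier, rcu));
 *	}
 *
 *	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		struct my_notifier *p = container_of(mn, struct my_notifier, mn);
 *
 *		mmu_notifier_call_srcu(&p->rcu, my_free);  // quick, non-blocking
 *	}
 */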

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and otherwise
 * mmu_notifier_unregister will wait for ->release to finish and for any
 * srcu_read_lock sections to be finished: the notifier structures
 * therefore can't be freed from under us while we walk the list.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending if the mapping previously
 * existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
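
/*
 * Example (hypothetical): a secondary MMU that mirrors accessed bits
 * could implement the young callbacks roughly as below. clear_young
 * differs from clear_flush_young only in that the caller will not
 * flush the primary TLB afterwards; my_hw_clear_accessed() stands in
 * for whatever the hardware actually provides:
 *
 *	static int my_clear_young(struct mmu_notifier *mn,
 *				  struct mm_struct *mm,
 *				  unsigned long start, unsigned long end)
 *	{
 *		// test-and-clear the accessed bits in the secondary
 *		// page tables for [start, end); return 1 if any was set
 *		return my_hw_clear_accessed(mn, start, end);
 *	}
 */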

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret = mn->ops->invalidate_range_start(mn, range);

			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
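
/*
 * Example (hypothetical): a callback that may need to sleep should
 * consult mmu_notifier_range_blockable() and back off with -EAGAIN in
 * non-blockable context (e.g. the OOM reaper) instead of blocking.
 * A sketch, assuming a driver-private my_lock mutex:
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *mn,
 *					const struct mmu_notifier_range *range)
 *	{
 *		if (!mmu_notifier_range_blockable(range) &&
 *		    !mutex_trylock(&my_lock))
 *			return -EAGAIN;	// caller retries in blockable context
 *		...
 *	}
 */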

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem of having to register an invalidate_range_end
		 * call-back when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip call to invalidate_range() if we know it is safe ie
		 * call site use mmu_notifier_invalidate_range_only_end() which
		 * is safe to do when we know that a call to invalidate_range()
		 * already happen under page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, range);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
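
/*
 * Example (hypothetical): a subsystem whose secondary TLB is flushed
 * synchronously while the primary page tables change (e.g. an IOMMU
 * sharing the CPU page tables) typically implements only this
 * callback, while a subsystem that mirrors the page tables in software
 * registers the start/end pair instead, so it can stop establishing
 * new mappings for the whole duration of the invalidation:
 *
 *	static const struct mmu_notifier_ops my_iommu_ops = {
 *		.invalidate_range = my_iommu_flush_tlb,	// hypothetical
 *	};
 */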

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_unregister
 * returns.
 *
 * mmu_notifier_unregister must always be called to unregister the
 * notifier. mm_count is automatically pinned to allow the mm to be
 * freed after exit_mmap, and the mm_count pin is released by
 * mmu_notifier_unregister.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
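
/*
 * Example (hypothetical): typical registration from a driver ioctl,
 * where mm is current->mm and mm_users is therefore implicitly pinned.
 * my_ops and the callbacks it names are driver-defined placeholders:
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *	};
 *
 *	my->mn.ops = &my_ops;
 *	ret = mmu_notifier_register(&my->mn, current->mm);
 */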

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Unlink with the _init variant so that a concurrent
		 * __mmu_notifier_release sees an unhashed entry and a
		 * second removal stays a no-op.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
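
/*
 * Example (hypothetical): the teardown matching the registration
 * sketch above, assuming the mm was pinned with get_task_mm(). All
 * secondary mappings must be dropped first; my_flush_all_sptes() is a
 * placeholder for that step:
 *
 *	my_flush_all_sptes(my);
 *	mmu_notifier_unregister(&my->mn, mm);
 *	mmput(mm);			// release the mm_users pin
 */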

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Unlink without invoking ->release and without waiting for an
	 * SRCU grace period: the caller must guarantee that no notifier
	 * method can still be running or about to run.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;

	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
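
/*
 * Example (hypothetical): an invalidate_range_start callback can use
 * this helper to downgrade its secondary mappings to read-only rather
 * than tearing them down when a protection change merely removes write
 * permission. my_make_readonly() and my_unmap() are placeholders:
 *
 *	if (mmu_notifier_range_update_to_read_only(range))
 *		my_make_readonly(mn, range->start, range->end);
 *	else
 *		my_unmap(mn, range->start, range->end);
 */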