/*
 *  linux/mm/mmu_notifier.c
 *
 *  Core infrastructure for mmu notifiers: lets secondary MMUs
 *  (KVM, GRU and similar) track changes to a process address space.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * This function can't run concurrently against mmu_notifier_register
 * because registration requires mm_users > 0, while this runs from
 * exit_mmap once the last mm_users reference is gone.  It gives every
 * registered notifier a final ->release callback so the driver can
 * flush its secondary TLB state before the pages are freed.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so it
		 * will do nothing beyond waiting for ->release to
		 * finish and for mmu_notifier_unregister to return.
		 */
		hlist_del_init_rcu(&mn->hlist);
		/*
		 * Holding the RCU read lock here blocks the
		 * synchronize_rcu in mmu_notifier_unregister until
		 * ->release returns.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * If ->release runs here rather than from
		 * mmu_notifier_unregister, it is the driver's only
		 * chance to flush all existing sptes and to stop
		 * establishing new ones before the pages in this mm
		 * are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * The synchronize_rcu keeps us from returning to exit_mmap
	 * (which would proceed to free all pages in the mm) until any
	 * ->release invoked by mmu_notifier_unregister has returned:
	 * the notifier itself may be freed as soon as
	 * mmu_notifier_unregister returns.
	 */
	synchronize_rcu();
}

/*
 * If the hardware supports no young bitflag, ->clear_flush_young may
 * simply unmap the address in the secondary MMU and return 1 or 0
 * depending on whether the mapping previously existed.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	rcu_read_unlock();

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	rcu_read_unlock();

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
		/*
		 * Some drivers don't implement change_pte; for those
		 * the invalidate_page callback is the fallback.
		 */
		else if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	rcu_read_unlock();
}
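
/*
 * Callers in the core VM are expected to bracket page-table updates
 * with a start/end pair, along the lines of this simplified sketch
 * (the real call sites live in mm/ and go through the wrappers in
 * include/linux/mmu_notifier.h):
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	... unmap or rewrite the page tables covering [start, end) ...
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 */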

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_cleanup;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister.
	 * mmu_notifier_release can't run concurrently with us because
	 * the caller holds the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm()), and no other
	 * notifier method can race with us thanks to
	 * mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_cleanup:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree() does nothing if mmu_notifier_mm is NULL */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function.  The caller must also ensure mm_users
 * can't go down to zero while this runs, to avoid races with
 * mmu_notifier_release: mm has to be current->mm or the mm should be
 * pinned safely such as with get_task_mm().  If the mm is not
 * current->mm, the mm_users pin should be released by calling mmput
 * after mmu_notifier_register returns.
 *
 * mmu_notifier_unregister must always be called to unregister the
 * notifier.  mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap.  ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
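
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * secondary-MMU driver embeds a struct mmu_notifier in its own
 * per-mm state, fills in an mmu_notifier_ops and registers while it
 * holds an mm_users pin.  struct my_mmu, my_ops, my_attach() and
 * my_flush_secondary_tlb() are invented names for illustration only:
 *
 *	struct my_mmu {
 *		struct mmu_notifier mn;
 *	};
 *
 *	static void my_invalidate_range_start(struct mmu_notifier *mn,
 *					      struct mm_struct *mm,
 *					      unsigned long start,
 *					      unsigned long end)
 *	{
 *		struct my_mmu *m = container_of(mn, struct my_mmu, mn);
 *		my_flush_secondary_tlb(m, start, end);
 *	}
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.invalidate_range_start = my_invalidate_range_start,
 *	};
 *
 *	int my_attach(struct my_mmu *m)
 *	{
 *		m->mn.ops = &my_ops;
 *		return mmu_notifier_register(&m->mn, current->mm);
 *	}
 */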

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it.  It serializes against
 * running mmu notifiers with RCU and against mmu_notifier_release
 * with the unregister lock + RCU.  All the sptes must be dropped
 * before calling mmu_notifier_unregister.  ->release or any other
 * notifier method may be invoked concurrently with
 * mmu_notifier_unregister, and only after mmu_notifier_unregister
 * returns is it guaranteed that no notifier method is still running.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		hlist_del_rcu(&mn->hlist);
		/*
		 * RCU here forces exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before the pages are
		 * freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, including ->release
	 * if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_rcu();

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
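
/*
 * Teardown sketch (hypothetical driver code, matching the
 * registration example above): once mmu_notifier_unregister returns,
 * no notifier method can still be running, so the embedding structure
 * may be freed.  The mm_count pin taken at registration time is
 * dropped internally via mmdrop().  my_detach() and struct my_mmu are
 * invented names for illustration only:
 *
 *	void my_detach(struct my_mmu *m, struct mm_struct *mm)
 *	{
 *		mmu_notifier_unregister(&m->mn, mm);
 *		kfree(m);
 *	}
 */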