/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2.
 *  See the file COPYING for details.
 */
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users holds a reference while mmu_notifier_register
 * runs, whereas exit_mmap (which leads here) runs with mm_users == 0.
 * Other tasks may still invoke mmu notifiers in parallel despite there
 * being no task using this mm any more, through the vmas outside of
 * the exit_mmap context, such as with vmtruncate. This serializes
 * against mmu_notifier_unregister with the mmu_notifier_mm->lock in
 * addition to RCU, and it serializes against the other mmu notifiers
 * with RCU. struct mmu_notifier_mm can't go away from under us as
 * exit_mmap holds an mm_count pin itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
        struct mmu_notifier *mn;

        spin_lock(&mm->mmu_notifier_mm->lock);
        while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
                mn = hlist_entry(mm->mmu_notifier_mm->list.first,
                                 struct mmu_notifier,
                                 hlist);
                /*
                 * We arrived before mmu_notifier_unregister, so
                 * mmu_notifier_unregister will do nothing except wait
                 * for ->release to finish and then return.
                 */
                hlist_del_init_rcu(&mn->hlist);
                /*
                 * Holding the RCU read lock makes the synchronize_rcu
                 * in mmu_notifier_unregister block until ->release
                 * returns.
                 */
                rcu_read_lock();
                spin_unlock(&mm->mmu_notifier_mm->lock);
                /*
                 * If ->release runs before mmu_notifier_unregister it
                 * must be handled here: it is the only way for the
                 * driver to flush all existing sptes and stop creating
                 * new ones before all the pages in the mm are freed.
                 */
                if (mn->ops->release)
                        mn->ops->release(mn, mm);
                rcu_read_unlock();
                spin_lock(&mm->mmu_notifier_mm->lock);
        }
        spin_unlock(&mm->mmu_notifier_mm->lock);

        /*
         * synchronize_rcu here prevents this function from returning
         * to exit_mmap (which would proceed to free all pages in the
         * mm) until any ->release invoked by mmu_notifier_unregister
         * has returned, so "mn" stays valid memory for as long as
         * mmu_notifier_unregister runs. ->release or any other mmu
         * notifier method may be invoked concurrently while
         * mmu_notifier_release runs.
         */
        synchronize_rcu();
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young
 * can unmap the address and return 1 or 0 depending on whether the
 * mapping previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                     unsigned long address)
{
        struct mmu_notifier *mn;
        struct hlist_node *n;
        int young = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->clear_flush_young)
                        young |= mn->ops->clear_flush_young(mn, mm, address);
        }
        rcu_read_unlock();

        return young;
}
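
/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * invented): a driver whose hardware lacks a young/accessed bit can
 * implement ->clear_flush_young by zapping its translation and
 * reporting whether one existed, as the comment above describes.
 */
#if 0
static int foo_mn_clear_flush_young(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    unsigned long address)
{
        /* returns 1 if a secondary-MMU mapping existed and was zapped */
        return foo_zap_translation(mm, address);
}
#endif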

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                               pte_t pte)
{
        struct mmu_notifier *mn;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->change_pte)
                        mn->ops->change_pte(mn, mm, address, pte);
                /*
                 * Some drivers don't have change_pte; in that case
                 * fall back to invalidate_page so the secondary MMU
                 * drops the stale translation instead of updating it.
                 */
                else if (mn->ops->invalidate_page)
                        mn->ops->invalidate_page(mn, mm, address);
        }
        rcu_read_unlock();
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
                                    unsigned long address)
{
        struct mmu_notifier *mn;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_page)
                        mn->ops->invalidate_page(mn, mm, address);
        }
        rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        struct mmu_notifier *mn;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_start)
                        mn->ops->invalidate_range_start(mn, mm, start, end);
        }
        rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                                         unsigned long start,
                                         unsigned long end)
{
        struct mmu_notifier *mn;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_end)
                        mn->ops->invalidate_range_end(mn, mm, start, end);
        }
        rcu_read_unlock();
}
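
/*
 * Usage note (illustrative; not a function in this file): core-mm
 * callers bracket page-table updates with the two range hooks above,
 * roughly:
 *
 *      mmu_notifier_invalidate_range_start(mm, start, end);
 *      ... clear or rewrite the ptes covering [start, end) ...
 *      mmu_notifier_invalidate_range_end(mm, start, end);
 *
 * so a secondary MMU tears down its translations at _start and cannot
 * re-establish stale ones until _end signals that the primary page
 * tables are stable again.
 */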

static int do_mmu_notifier_register(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    int take_mmap_sem)
{
        struct mmu_notifier_mm *mmu_notifier_mm;
        int ret;

        BUG_ON(atomic_read(&mm->mm_users) <= 0);

        ret = -ENOMEM;
        mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
        if (unlikely(!mmu_notifier_mm))
                goto out;

        if (take_mmap_sem)
                down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
                goto out_cleanup;

        if (!mm_has_notifiers(mm)) {
                INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                spin_lock_init(&mmu_notifier_mm->lock);
                mm->mmu_notifier_mm = mmu_notifier_mm;
                mmu_notifier_mm = NULL;
        }
        atomic_inc(&mm->mm_count);

        /*
         * Serialize the update against mmu_notifier_unregister. A
         * side note: mmu_notifier_release can't run concurrently with
         * us because we hold the mm_users pin (either implicitly as
         * current->mm or explicitly with get_task_mm() or similar).
         * We can't race against any other mmu notifier method either,
         * thanks to mm_take_all_locks().
         */
        spin_lock(&mm->mmu_notifier_mm->lock);
        hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);

        mm_drop_all_locks(mm);
out_cleanup:
        if (take_mmap_sem)
                up_write(&mm->mmap_sem);
        /* kfree() does nothing if mmu_notifier_mm is NULL */
        kfree(mmu_notifier_mm);
out:
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return ret;
}

/*
 * Must not hold mmap_sem nor any other VM-related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs, to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
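
/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * invented): minimal registration by a driver for a hypothetical
 * "foo" secondary MMU.
 */
#if 0
static void foo_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        /* last chance to flush all sptes and stop establishing new ones */
}

static const struct mmu_notifier_ops foo_mn_ops = {
        .release = foo_mn_release,
};

static struct mmu_notifier foo_mn = {
        .ops = &foo_mn_ops,
};

static int foo_attach(void)
{
        /* current->mm keeps mm_users elevated, as required above */
        return mmu_notifier_register(&foo_mn, current->mm);
}
#endif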

/*
 * Same as mmu_notifier_register but here the caller must already hold
 * the mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
        BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
        kfree(mm->mmu_notifier_mm);
        mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with RCU and against mmu_notifier_unregister
 * with the unregister lock + RCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may still be invoked concurrently with
 * mmu_notifier_unregister; only after mmu_notifier_unregister returns
 * is it guaranteed that ->release or any other method can't run any
 * more.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        spin_lock(&mm->mmu_notifier_mm->lock);
        if (!hlist_unhashed(&mn->hlist)) {
                hlist_del_rcu(&mn->hlist);

                /*
                 * Holding the RCU read lock forces exit_mmap to wait
                 * for ->release to finish before freeing the pages.
                 */
                rcu_read_lock();
                spin_unlock(&mm->mmu_notifier_mm->lock);
                /*
                 * exit_mmap will block in mmu_notifier_release to
                 * guarantee ->release is called before the pages are
                 * freed.
                 */
                if (mn->ops->release)
                        mn->ops->release(mn, mm);
                rcu_read_unlock();
        } else
                spin_unlock(&mm->mmu_notifier_mm->lock);

        /*
         * Wait for any running notifier method to finish, including
         * ->release if it was run by mmu_notifier_release instead of
         * us.
         */
        synchronize_rcu();

        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
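
/*
 * Illustrative sketch (not part of this file; foo_* names invented):
 * teardown counterpart for the registration example above. All sptes
 * must already be dropped; once this returns, no foo_mn callback can
 * still be running.
 */
#if 0
static void foo_detach(struct mm_struct *mm)
{
        mmu_notifier_unregister(&foo_mn, mm);
        /* the mm_count pin taken at register time is dropped inside */
}
#endif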