#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

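/*
 * Per-registration state: an interval tree of registered ranges plus an LRU
 * list, both protected by 'lock'.  Nodes torn down by the MMU notifier are
 * collected on 'del_list' and handed to the ops->remove callback later from
 * 'wq' via 'del_work', rather than from notifier context.
 */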
struct mmu_rb_handler {
	struct mmu_notifier mn;
	struct rb_root_cached root;
	void *ops_arg;
	spinlock_t lock;
	struct mmu_rb_ops *ops;
	struct mm_struct *mm;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
				    struct mm_struct *,
				    unsigned long, unsigned long, bool);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

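/*
 * Allocate a handler, register it as an MMU notifier on 'mm' and return it
 * through 'handler'.  Deferred node removals will be queued on 'wq'.
 */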
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *handlr;
	int ret;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = RB_ROOT_CACHED;
	handlr->ops = ops;
	handlr->ops_arg = ops_arg;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	handlr->mm = mm;
	INIT_WORK(&handlr->del_work, handle_remove);
	INIT_LIST_HEAD(&handlr->del_list);
	INIT_LIST_HEAD(&handlr->lru_list);
	handlr->wq = wq;

	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
	if (ret) {
		kfree(handlr);
		return ret;
	}

	*handler = handlr;
	return 0;
}

void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}

int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list);
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

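/* Caller must hold handler->lock. */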
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

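/*
 * Look up a node matching [addr, addr + len).  If one is found and it is not
 * an exact match for the range, take it off the tree and the LRU list.  The
 * found node, if any, is returned through 'rb_node'; the return value
 * indicates whether it was removed.
 */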
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list);
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}

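/*
 * Walk the LRU list from its tail (oldest entries first), letting ops->evict
 * decide which nodes to evict and when to stop.  Evicted nodes are unlinked
 * under the lock and then passed to ops->remove with the lock dropped.
 */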
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

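/*
 * Nothing here protects against a concurrent invalidate notifier tearing
 * down the same node; it is up to the caller to make sure this does not race
 * with the notifier calling ops->remove on 'node'.
 */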
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list);
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}

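/*
 * MMU notifier callback.  Any node overlapping [start, end) whose
 * ops->invalidate callback returns true is moved from the LRU list to
 * 'del_list', and the workqueue is kicked to do the actual removal outside
 * notifier context.
 */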
static int mmu_notifier_range_start(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end,
				    bool blockable)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* fetch the next node before 'node' is possibly removed */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

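/*
 * Call ops->remove for each node on 'del_list'.  The list is expected to
 * have already been detached from the handler, and the caller should not be
 * holding the handler lock.
 */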
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

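/*
 * Workqueue function that removes all nodes queued for deletion.  Running
 * from the workqueue means the ops->remove callback is not invoked from MMU
 * notifier context and may block if it needs to.
 */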
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;

	/* detach everything queued on del_list under the lock, then remove it */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}