/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
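
/*
 * Overview of the accounting scheme implemented below:
 *
 * Each ODP umem keeps per-umem notifier accounting (notifiers_count,
 * notifiers_seq and a completion object) under umem_mutex, while the owning
 * ib_ucontext tracks the number of currently running notifiers in
 * notifier_count. Umems created while a notifier is running cannot safely
 * activate their private counters, so they are parked on the
 * no_private_counters list until the last notifier finishes. The interval
 * tree (umem_tree, protected by umem_rwsem) maps an invalidated address
 * range back to the umems that cover it.
 */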

static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem.
	 */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifier_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem.
	 */
	if (item->odp_data->mn_counters_active) {
		/*
		 * This sequence increase will notify the QP page fault that
		 * the page that is going to be mapped in the spte could have
		 * been freed.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}

static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie)
{
	/*
	 * Increase the number of notifiers running, to prevent any possible
	 * future dereferences of the umem's notifier state while it dies.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
				      address + PAGE_SIZE,
				      invalidate_page_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_start_trampoline, NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                = ib_umem_notifier_release,
	.invalidate_page        = ib_umem_notifier_invalidate_page,
	.invalidate_range_start = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end   = ib_umem_notifier_invalidate_range_end,
};
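
/*
 * These callbacks are registered against the owning process' mm in
 * ib_umem_odp_get() below (one mmu_notifier per ib_ucontext, registered when
 * the first ODP MR is created). invalidate_range_start/_end bracket an
 * invalidation: the start callback bumps notifiers_count and calls the
 * device's invalidate_range handler for every overlapping umem, and the end
 * callback drops the count and advances notifiers_seq, so that racing page
 * faults can detect the invalidation and retry.
 */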

int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
	int ret_val;
	struct pid *our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->hugetlb = 0;
	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
	if (!umem->odp_data->page_list) {
		ret_val = -ENOMEM;
		goto out_odp_data;
	}

	umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					   sizeof(*umem->odp_data->dma_list));
	if (!umem->odp_data->dma_list) {
		ret_val = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lock-dep detects a false positive for mmap_sem vs.
		 * umem_rwsem, due to not grasping downgrade_write correctly.
		 */
		lockdep_off();
		ret_val = mmu_notifier_register(&context->mn, mm);
		lockdep_on();
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_rwsem, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}
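
/*
 * Note: drivers do not call ib_umem_odp_get() directly; in this tree it is
 * reached from ib_umem_get() when userspace registers a memory region with
 * the IB_ACCESS_ON_DEMAND flag, roughly (illustrative sketch, see umem.c for
 * the authoritative code):
 *
 *	if (access & IB_ACCESS_ON_DEMAND) {
 *		ret = ib_umem_odp_get(context, umem);
 *		if (ret)
 *			goto err;
 *		return umem;
 *	}
 */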

void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (who lock the mutex for reading) will be able to finish, and we
	 * will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm        = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead, delay the release of
			 * the MMU notifiers.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead, delay the release
			 * of the MMU notifiers.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @base_virt_addr: the virtual address of the first page in the umem range.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem,
		int page_index,
		u64 base_virt_addr,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		invalidate_page_trampoline(
			umem,
			base_virt_addr + (page_index * PAGE_SIZE),
			base_virt_addr + ((page_index + 1) * PAGE_SIZE),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages is updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 *
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual pages mapped is returned in
 *        the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;
	struct page **local_page_list = NULL;
	u64 off;
	int j, k, ret = 0, start_idx, npages = 0;
	u64 base_virt_addr;
	unsigned int flags = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	off = user_virt & (~PAGE_MASK);
	user_virt = user_virt & PAGE_MASK;
	base_virt_addr = user_virt;
	bcnt += off;

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages =
			min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
			      PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
					       user_virt, gup_num_pages,
					       flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		user_virt += npages << PAGE_SHIFT;
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; ++j) {
			ret = ib_umem_odp_map_dma_single_page(
				umem, k, base_virt_addr, local_page_list[j],
				access_mask, current_seq);
			if (ret < 0)
				break;
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
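
/*
 * Driver-side usage pattern (illustrative sketch only, modeled on how a
 * driver such as mlx5 consumes this API from its page-fault handler; names
 * like "hw_update_translation" are placeholders, not APIs defined here):
 *
 *	unsigned long current_seq;
 *
 *	current_seq = READ_ONCE(umem->odp_data->notifiers_seq);
 *	// Pairs with the notifier-side updates; see
 *	// ib_umem_mmu_notifier_retry() in <rdma/ib_umem_odp.h>.
 *	smp_rmb();
 *	npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
 *					   access_mask, current_seq);
 *	if (npages == -EAGAIN)
 *		goto retry;	// a concurrent invalidation raced with us
 *	mutex_lock(&umem->odp_data->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem, current_seq))
 *		hw_update_translation(umem);	// hypothetical device hook
 *	mutex_unlock(&umem->odp_data->umem_mutex);
 */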

void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt  = max_t(u64, virt,  ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));

	/* Note that during the run of this function, the notifiers_count of
	 * the MR is > 0, preventing any racing faults from completion. We
	 * might be racing with other invalidations, so we must make sure we
	 * free each page only once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);