1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/kref.h>
34#include <linux/slab.h>
35#include <rdma/ib_umem.h>
36
37#include "mlx5_ib.h"
38
/*
 * One pinned page of a user process's doorbell area, cached on
 * context->db_page_list so that doorbell records falling in the same
 * page share a single pinned umem.
 */
struct mlx5_ib_user_db_page {
	struct list_head list;		/* entry in context->db_page_list */
	struct ib_umem *umem;		/* pinned user memory backing the page */
	unsigned long user_virt;	/* page-aligned user virtual address */
	int refcnt;			/* users of this page; protected by
					 * context->db_page_mutex */
	struct mm_struct *mm;		/* owning mm, part of the cache key */
};
46
/*
 * Map the user page containing the doorbell record at @virt and fill in
 * @db->dma / @db->u.user_page.
 *
 * Pages are cached per-ucontext: a hit on (mm, page-aligned virt) in
 * db_page_list just takes another reference; a miss pins a fresh page
 * with ib_umem_get() and adds it to the cache.
 *
 * @context: user context whose db_page_list/db_page_mutex are used
 * @virt:    user virtual address of the doorbell record (any offset
 *           within the page)
 * @db:      output; dma address and cache-entry pointer are stored here
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * from ib_umem_get().
 */
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db)
{
	struct mlx5_ib_user_db_page *page;
	int err = 0;

	mutex_lock(&context->db_page_mutex);

	/* Cache lookup: same mm and same page-aligned user address. */
	list_for_each_entry(page, &context->db_page_list, list)
		if ((current->mm == page->mm) &&
		    (page->user_virt == (virt & PAGE_MASK)))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	page->user_virt = (virt & PAGE_MASK);
	page->refcnt = 0;	/* bumped to 1 at "found" below */
	page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
				 PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}
	/*
	 * Pin the mm_struct itself: page->mm is compared against
	 * current->mm in the cache lookup above, so it must stay valid
	 * for the lifetime of the cache entry.
	 */
	mmgrab(current->mm);
	page->mm = current->mm;

	list_add(&page->list, &context->db_page_list);

found:
	/* DMA address = mapped page base + offset of @virt within the page. */
	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
		  (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}
91
92void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
93{
94 mutex_lock(&context->db_page_mutex);
95
96 if (!--db->u.user_page->refcnt) {
97 list_del(&db->u.user_page->list);
98 mmdrop(db->u.user_page->mm);
99 ib_umem_release(db->u.user_page->umem);
100 kfree(db->u.user_page);
101 }
102
103 mutex_unlock(&context->db_page_mutex);
104}
105