1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/slab.h>
34#include <rdma/uverbs_ioctl.h>
35
36#include "mlx4_ib.h"
37
/*
 * One pinned user page that backs one or more doorbell records.
 * Pages are kept on the per-ucontext db_page_list and shared by
 * reference count, so mapping two doorbells in the same user page
 * pins it only once.
 */
struct mlx4_ib_user_db_page {
	struct list_head list;		/* entry in mlx4_ib_ucontext::db_page_list */
	struct ib_umem *umem;		/* pinned user memory for this page */
	unsigned long user_virt;	/* page-aligned user virtual address */
	int refcnt;			/* doorbells mapped in this page; protected by db_page_mutex */
};
44
45int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
46 struct mlx4_db *db)
47{
48 struct mlx4_ib_user_db_page *page;
49 int err = 0;
50 struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
51 udata, struct mlx4_ib_ucontext, ibucontext);
52
53 mutex_lock(&context->db_page_mutex);
54
55 list_for_each_entry(page, &context->db_page_list, list)
56 if (page->user_virt == (virt & PAGE_MASK))
57 goto found;
58
59 page = kmalloc(sizeof *page, GFP_KERNEL);
60 if (!page) {
61 err = -ENOMEM;
62 goto out;
63 }
64
65 page->user_virt = (virt & PAGE_MASK);
66 page->refcnt = 0;
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
68 PAGE_SIZE, 0);
69 if (IS_ERR(page->umem)) {
70 err = PTR_ERR(page->umem);
71 kfree(page);
72 goto out;
73 }
74
75 list_add(&page->list, &context->db_page_list);
76
77found:
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
79 (virt & ~PAGE_MASK);
80 db->u.user_page = page;
81 ++page->refcnt;
82
83out:
84 mutex_unlock(&context->db_page_mutex);
85
86 return err;
87}
88
89void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
90{
91 mutex_lock(&context->db_page_mutex);
92
93 if (!--db->u.user_page->refcnt) {
94 list_del(&db->u.user_page->list);
95 ib_umem_release(db->u.user_page->umem);
96 kfree(db->u.user_page);
97 }
98
99 mutex_unlock(&context->db_page_mutex);
100}
101