#include <linux/kref.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "mlx5_ib.h"

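/*
 * A pinned page of user memory holding doorbell records.  Each page is
 * tracked on context->db_page_list and shared between all doorbell
 * records that fall within the same user page; refcnt counts the users.
 */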
struct mlx5_ib_user_db_page {
	struct list_head	list;
	struct ib_umem		*umem;
	unsigned long		user_virt;
	int			refcnt;
};

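/*
 * Map the doorbell record at @virt for @context.  If the user page
 * containing @virt is already pinned, reuse it; otherwise pin it with
 * ib_umem_get() and add it to the per-context list.  On success the DMA
 * address of the doorbell record is returned in @db->dma.
 */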
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db)
{
	struct mlx5_ib_user_db_page *page;
	int err = 0;

	mutex_lock(&context->db_page_mutex);

	/* Reuse an already-pinned page covering this doorbell, if any. */
	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	page->user_virt = (virt & PAGE_MASK);
	page->refcnt = 0;
	/* Pin the user page that holds the doorbell record. */
	page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				 PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->db_page_list);

found:
	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}

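/*
 * Drop a reference on the doorbell page taken by mlx5_ib_db_map_user().
 * The last unmap releases the umem and frees the page tracking structure.
 */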
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
{
	mutex_lock(&context->db_page_mutex);

	if (!--db->u.user_page->refcnt) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->db_page_mutex);
}