#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

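/*
 * rxe_mmap_release - kref release function for an mmap region: remove it
 * from the device's pending mmap list, if still queued, and free it.
 */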
void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);
	kfree(ip);
}
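
/*
 * open and close keep track of how many times the memory region is
 * mapped, so that it is not released while a mapping is still in use.
 */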
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};
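
/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return: zero if the mmap is OK, otherwise an errno.
 */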
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

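	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * This list is normally short, since a call to create a CQ, QP,
	 * or SRQ is soon followed by the matching mmap().
	 */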
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

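		/* Don't allow a mmap larger than the object. */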
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}
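
/*
 * Allocate the mmap info that a subsequent rxe_mmap() call will look up
 * by its offset.
 */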
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

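	/*
	 * Reserve a unique, SHMLBA-aligned offset range for this object.
	 * Userspace passes this offset back in its mmap() call, and
	 * rxe_mmap() uses it to find the matching pending entry.
	 */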
	spin_lock_bh(&rxe->mmap_offset_lock);

	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;
	ip->context = container_of(udata, struct uverbs_attr_bundle,
				   driver_udata)->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}