/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA only
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct qib_mregion *tmr;

		/* the lock, not an RCU read section, protects this lookup */
		tmr = rcu_dereference_protected(dev->dma_mr,
				lockdep_is_held(&rkt->lock));
		if (!tmr) {
			qib_get_mr(mr);
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero, which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
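	/*
	 * Resulting layout, assuming the default ib_qib_lkey_table_size
	 * of 16 (a module parameter, not fixed in this file): bits
	 * 31..16 hold the table index r, bits 15..8 hold the low bits
	 * of rkt->gen, and bits 7..0 stay zero.  Advancing gen on each
	 * allocation means a stale lkey that lands on a reused slot
	 * fails the mr->lkey != sge->lkey comparison in qib_lkey_ok().
	 */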
	qib_get_mr(mr);
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}
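
/*
 * Minimal usage sketch (hypothetical caller; the real callers live
 * elsewhere in the driver, e.g. the MR registration paths):
 *
 *	if (qib_alloc_lkey(&mr->mr, 0))		// -ENOMEM if table is full
 *		goto err;
 *	...
 *	qib_free_lkey(&mr->mr);			// unpublish from the table
 *	qib_put_mr(&mr->mr);			// drop the caller's reference
 */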

/**
 * qib_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
void qib_free_lkey(struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0)
		rcu_assign_pointer(dev->dma_mr, NULL);
	else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		rcu_assign_pointer(rkt->table[r], NULL);
	}
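	/*
	 * Unpublishing only clears the table entry.  A reader that has
	 * already won the atomic_inc_not_zero() race in qib_lkey_ok()
	 * or qib_rkey_ok() still holds a reference, so the mregion is
	 * not freed until the last qib_put_mr() drops the refcount.
	 */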
	qib_put_mr(mr);
	mr->lkey_published = 0;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
}

/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * Increments the reference count upon success.
 *
 * Check the IB SGE for validity and initialize our internal
 * version of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
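	/*
	 * atomic_inc_not_zero() refuses to resurrect an mregion whose
	 * refcount has already hit zero (i.e. one being torn down);
	 * losing that race simply makes the key look invalid.
	 */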
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary: entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
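		/*
		 * Worked example, assuming 4 KiB pages (page_shift ==
		 * 12) and QIB_SEGSZ == 256 (neither is fixed by this
		 * file): off == 0x5234 spans five whole pages, so
		 * entries_spanned_by_off == 5, off becomes 0x234,
		 * m == 0 and n == 5.
		 */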
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}

/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
 *
 * Increments the reference count upon success.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

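	/*
	 * Note the asymmetry with qib_lkey_ok(): an rkey is checked
	 * against mr->iova rather than mr->user_base, and any one
	 * matching access bit suffices, whereas an lkey must carry
	 * every requested bit.
	 */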
	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary: entries_spanned_by_off is the number of
		 * times the loop below would have executed (the same
		 * direct-index computation as in qib_lkey_ok() above).
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}

/*
 * Initialize the memory region specified by the work request.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || rkey == 0)
		goto bail;

	mr = rcu_dereference_protected(
		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))],
		lockdep_is_held(&rkt->lock));
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
	m = 0;
	n = 0;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
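	/*
	 * The ++n/m++ stepping above scatters the flat page list into
	 * map chunks of QIB_SEGSZ segments each, mirroring how
	 * qib_lkey_ok() and qib_rkey_ok() walk the chunks back out.
	 */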

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
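
/*
 * Posting-side sketch for reference (hypothetical; based on the
 * generic IB fast-register flow of this kernel era, not on code in
 * this file):
 *
 *	struct ib_send_wr wr = { .opcode = IB_WR_FAST_REG_MR };
 *
 *	wr.wr.fast_reg.rkey = mr->rkey;
 *	wr.wr.fast_reg.iova_start = iova;
 *	wr.wr.fast_reg.length = len;
 *	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	wr.wr.fast_reg.page_list_len = npages;
 *	wr.wr.fast_reg.page_list = pl;	// from ib_alloc_fast_reg_page_list()
 *	wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
 *	ib_post_send(qp, &wr, &bad_wr);	// the driver lands in qib_fast_reg_mr()
 */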