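/* frwr_ops.c: Memory registration for the client-side RPC/RDMA transport
 * using Fast Registration Work Requests (FRWR).
 *
 * FRWR registers and invalidates arbitrarily-sized memory regions
 * asynchronously, by posting REG_MR and LOCAL_INV Work Requests on the
 * transport's send queue.
 */
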
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
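
/**
 * frwr_is_supported - Check whether a device supports FRWR
 * @ia: interface adapter to check
 *
 * Returns true if the device advertises the memory management
 * extensions needed for Fast Registration Work Requests.
 */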
bool frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}
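
/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 *
 * Deregisters the ib_mr, then frees the MR's scatterlist and the
 * MR itself.
 */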
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}
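
/* MRs are dynamically allocated, so simply DMA-unmap the MR, remove it
 * from the transport's list, and release it. A replacement MR is
 * allocated later, on demand.
 */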
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_release_mr(mr);
}
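
/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno is returned.
 */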
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->frwr.fr_state = FRWR_IS_INVALID;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frmr);
	return -ENOMEM;
}
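
/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 * @cdata: transport parameters
 *
 * On success, sets ia->ri_mrtype, ia->ri_max_frwr_depth, ia->ri_max_segs,
 * cdata->max_requests, and the endpoint's send and receive WR limits.
 *
 * On failure, a negative errno is returned.
 */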
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	      struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Prefer the device's max_sge_rd as the registration depth when
	 * it is advertised; otherwise fall back on the fast-registration
	 * page list limit. In either case, cap the depth at the
	 * transport's maximum number of data segments.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC: %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for FRWR register and invalidate WRs:
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device's max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2;	/* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}

	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		cdata->max_requests = max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}
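
/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns the maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */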
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}
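
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */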
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}
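
/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */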
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}
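
/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awakens anyone waiting for this MR to finish being fenced.
 */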
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}
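
/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of the RPC that will use the registered region
 * @out: initialized MR
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, the prepared MR is planted in @out.
 */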
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, u32 xid,
				struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

	/* Recycle MRs whose invalidation has not yet completed, and
	 * keep trying until a reusable (invalid) MR is found.
	 */
	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_recycle(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

	/* Encode the RPC's XID in the upper half of the MR's virtual
	 * address so on-the-wire RDMA offsets can be matched to RPCs
	 * during debugging, then bump the rkey's key portion.
	 */
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)cpu_to_be32(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	*out = mr;
	return seg;

out_dmamap_err:
	frwr->fr_state = FRWR_IS_INVALID;
	trace_xprtrdma_frwr_sgerr(mr, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	rpcrdma_mr_recycle(mr);
	return ERR_PTR(-EIO);
}
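
/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a single
 * ib_post_send call is needed to register memory and then post
 * the Send WR.
 *
 * Returns the result of ib_post_send.
 */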
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* Post the chain of FastReg WRs followed by the Send WR with a
	 * single ib_post_send call.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}
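
/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: received reply
 * @mrs: list of MRs to check
 *
 * An MR whose rkey matches the rkey the responder invalidated is
 * unmapped and returned to the free list here, since it no longer
 * needs a LOCAL_INV.
 */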
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}
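
/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport
 * @mrs: list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously
 * mapped memory regions. The caller ensures @mrs is not empty before
 * the call; this function empties the list.
 */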
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first.
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_mr_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the last WR
	 * in the chain completes, all WRs in the chain are complete,
	 * so only the last WR needs to be signaled.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

out_release:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Unmap and release the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		rpcrdma_mr_recycle(mr);
	}
}