// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
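
/*
 * Globals/Macros
 */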
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
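
/*
 * internal functions
 */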
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mr_free(struct rpcrdma_mr *mr);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
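
/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */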
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(ia->ri_id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(ia->ri_id->qp);
}
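
/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context. The QP event is simply traced for observability.
 */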
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
}
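
/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */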
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked(sc);
}
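
/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */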
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep.rep_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_post_recvs(r_xprt, false);
	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_recv_buffer_put(rep);
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < r_xprt->rx_ep.rep_inline_recv)
		r_xprt->rx_ep.rep_inline_recv = rsize;
	if (wsize < r_xprt->rx_ep.rep_inline_send)
		r_xprt->rx_ep.rep_inline_send = wsize;
	dprintk("RPC: %s: max send %u, max recv %u\n", __func__,
		r_xprt->rx_ep.rep_inline_send,
		r_xprt->rx_ep.rep_inline_recv);
	rpcrdma_set_max_header_sizes(r_xprt);
}
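
/**
 * rpcrdma_cm_event_handler - Handle one RDMA CM event
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Returns 1 if caller should destroy @id, otherwise 0.
 */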
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_id->device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: %s:%s on %s/frwr: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_id->device->name, rdma_event_msg(event->event));
	return 0;
}
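
/* Create a connection-management ID for this transport, then
 * synchronously resolve the server's address and a route to it.
 * Returns the new rdma_cm_id, or an ERR_PTR.
 */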
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
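
/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * ia is not available.
 */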
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia->ri_id->device))
			break;
		/* fall through */
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}
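
/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */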
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	cancel_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		rpcrdma_xprt_drain(r_xprt);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	rpcrdma_reps_destroy(buf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}
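
/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */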
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}
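
/**
 * rpcrdma_ep_create - Create unconnected endpoint
 * @r_xprt: transport to instantiate
 *
 * Returns zero on success, or a negative errno.
 */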
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	ep->rep_max_requests = xprt_rdma_slot_table_entries;
	ep->rep_inline_send = xprt_rdma_max_inline_write;
	ep->rep_inline_recv = xprt_rdma_max_inline_read;

	max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = frwr_open(ia, ep);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	ep->rep_send_batch = ep->rep_max_requests >> 3;
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	ep->rep_receive_count = 0;

	sendcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
				 ep->rep_attr.cap.max_send_wr + 1,
				 IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		goto out1;
	}

	recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
				 ep->rep_attr.cap.max_recv_wr + 1,
				 IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}
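
/**
 * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
 * @r_xprt: transport instance to shut down
 *
 */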
void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}
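
/* Re-establish a connection after a device removal. Unlike a normal
 * reconnection, a fresh PD and a new set of MRs and buffers is needed.
 */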
static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
				    struct ib_qp_init_attr *qp_init_attr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(r_xprt);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(r_xprt);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_id->device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
	if (err)
		goto out_destroy;

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}
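
/*
 * Connect unconnected endpoint.
 */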
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct ib_qp_init_attr qp_init_attr;
	int rc;

retry:
	memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
		if (rc) {
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	xprt_clear_connected(xprt);

	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc)
		goto out;

	if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
		xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC: %s: connected\n", __func__);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}
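
/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */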
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);
	int rc;

	/* returns without wait if ID is not connected */
	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
}
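
/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * dequeues a sendctx that was previously handed to a Send operation,
 * and releases it for re-use. Multiple producer threads are
 * serialized by the ib_poll_cq() function.
 */

/* rpcrdma_sendctxs_destroy() assumes the caller has already quiesced
 * the transport: no Sends can be outstanding when it runs.
 */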
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			return -ENOMEM;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;
}
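
/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */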
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
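
/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */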
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}
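
/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */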
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	xprt_write_space(&sc->sc_xprt->rx_xprt);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;

	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_NOFS);
		if (!mr)
			break;

		rc = frwr_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		spin_lock(&buf->rb_lock);
		list_add(&mr->mr_list, &buf->rb_mrs);
		list_add(&mr->mr_all, &buf->rb_all_mrs);
		spin_unlock(&buf->rb_lock);
	}

	r_xprt->rx_stats.mrs_allocated += count;
	trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
	xprt_write_space(&r_xprt->rx_xprt);
}
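
/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */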
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t maxhdrsize;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	/* Compute maximum header buffer size in bytes */
	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
		     r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz;
	maxhdrsize *= sizeof(__be32);
	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
				  DMA_TO_DEVICE, flags);
	if (!rb)
		goto out2;
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out3;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out4;

	INIT_LIST_HEAD(&req->rl_free_mrs);
	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out4:
	kfree(req->rl_sendbuf);
out3:
	kfree(req->rl_rdmabuf);
out2:
	kfree(req);
out1:
	return NULL;
}

static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
					      bool temp)
{
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;

	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;
	return rep;

out_free:
	kfree(rep);
out:
	return NULL;
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
	struct llist_node *node;

	/* Calls to llist_del_first are required to be serialized */
	node = llist_del_first(&buf->rb_free_reps);
	if (!node)
		return NULL;
	return llist_entry(node, struct rpcrdma_rep, rr_node);
}

static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
			    struct rpcrdma_rep *rep)
{
	if (!rep->rr_temp)
		llist_add(&rep->rr_node, &buf->rb_free_reps);
	else
		rpcrdma_rep_destroy(rep);
}

static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
		rpcrdma_rep_destroy(rep);
}
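
/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */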
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all_mrs);
	INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);

	rc = -ENOMEM;
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	buf->rb_credits = 1;
	init_llist_head(&buf->rb_free_reps);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}
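
/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * This function assumes that the caller prevents concurrent device
 * unload and transport tear-down.
 */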
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
	list_del(&req->rl_all);

	while (!list_empty(&req->rl_free_mrs))
		rpcrdma_mr_free(rpcrdma_mr_pop(&req->rl_free_mrs));

	rpcrdma_regbuf_free(req->rl_recvbuf);
	rpcrdma_regbuf_free(req->rl_sendbuf);
	rpcrdma_regbuf_free(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_lock);
	while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
					      struct rpcrdma_mr,
					      mr_all)) != NULL) {
		list_del(&mr->mr_all);
		spin_unlock(&buf->rb_lock);

		frwr_release_mr(mr);
		count++;
		spin_lock(&buf->rb_lock);
	}
	spin_unlock(&buf->rb_lock);
	r_xprt->rx_stats.mrs_allocated = 0;
}
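
/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain :
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */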
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);
	rpcrdma_reps_destroy(buf);

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}

	rpcrdma_mrs_destroy(buf);
}
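
/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */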
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_lock);
	mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_lock);
	return mr;
}
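
/**
 * rpcrdma_mr_put - DMA unmap an MR and release it
 * @mr: MR to release
 *
 */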
void rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

static void rpcrdma_mr_free(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	mr->mr_req = NULL;
	spin_lock(&buf->rb_lock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_lock);
}
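
/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */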
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}
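
/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */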
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	if (req->rl_reply)
		rpcrdma_rep_put(buffers, req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	spin_unlock(&buffers->rb_lock);
}
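
/**
 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
 * @rep: rep to release
 *
 * Used after error conditions.
 */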
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}
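
/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */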
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}
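
/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */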
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}
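
/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device.
 */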
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ia.ri_id->device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
	return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}
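
/**
 * rpcrdma_ep_post - Post WRs to a transport's Send Queue
 * @ia: transport's device information
 * @ep: transport's RDMA endpoint information
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */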
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	int rc;

	if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = frwr_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}

static void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct ib_recv_wr *i, *wr, *bad_wr;
	struct rpcrdma_rep *rep;
	int needed, count, rc;

	rc = 0;
	count = 0;

	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (likely(ep->rep_receive_count > needed))
		goto out;
	needed -= ep->rep_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	/* fast path: all needed reps can be found on the free list */
	wr = NULL;
	while (needed) {
		rep = rpcrdma_rep_get_locked(buf);
		if (!rep)
			rep = rpcrdma_rep_create(r_xprt, temp);
		if (!rep)
			break;

		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
	}
	if (!wr)
		goto out;

	for (i = wr; i; i = i->next) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);

		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
			goto release_wrs;

		trace_xprtrdma_post_recv(rep);
		++count;
	}

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
	if (rc) {
		for (wr = bad_wr; wr;) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			wr = wr->next;
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->rep_receive_count += count;
	return;

release_wrs:
	for (i = wr; i;) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
		i = i->next;
		rpcrdma_recv_buffer_put(rep);
	}
}