/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <asm/bitops.h>
#include <linux/module.h>	/* try_module_get()/module_put() */

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */

static struct workqueue_struct *rpcrdma_receive_wq;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("rpcrdma: %s on device %s ep %p\n",
	       ib_event_msg(event->event), event->device->name, context);

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}
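
/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 */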
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}
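
/* Perform basic sanity checking to avoid using garbage
 * to update the credit grant value.
 */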
static void
rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
	struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
	u32 credits;

	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		return;

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buffer->rb_max_requests)
		credits = buffer->rb_max_requests;

	atomic_set(&buffer->rb_credits, credits);
}
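
/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 */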
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);

	rpcrdma_update_granted_credits(rep);

out_schedule:
	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rep->rr_len = RPCRDMA_BAD_LEN;
	goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_reminv_expected = false;
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_reminv_expected = true;
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC: %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}
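
/*
 * CM event handler for this transport's rdma_cm_id.
 *
 * Address and route resolution results are recorded in ri_async_rc
 * and signalled via ri_done; connection state changes are recorded
 * in ep->rep_connected and wake up waiters on rep_connect_wait.
 */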
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC: %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		atomic_set(&xprt->rx_buf.rb_credits, 1);
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
	if (id) {
		module_put(id->device->owner);
		rdma_destroy_id(id);
	}
}
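
/*
 * Create an rdma_cm_id for this transport, then resolve the peer's
 * address and route synchronously (bounded by RDMA_RESOLVE_TIMEOUT).
 * On success the device module is pinned; rpcrdma_destroy_id()
 * releases it.
 */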
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
		  struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC: %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}

	/* Pin the provider module while the transport holds this
	 * rdma_cm_id; rpcrdma_destroy_id() drops the reference.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC: %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC: %s: wait() exited: %i\n",
			__func__, rc);
		goto put;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
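
/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: controlling transport
 * @addr: IP address of remote peer
 * @memreg: memory registration mode requested
 *
 * Returns 0 on success, or a negative errno if the Interface
 * Adapter could not be opened.
 */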
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Unsupported memory registration mode: %d\n",
		       memreg);
		rc = -EINVAL;
		goto out3;
	}

	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}
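
/*
 * Create an unconnected endpoint: allocate completion queues and
 * fill in the QP and connection parameters used at connect time.
 */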
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	unsigned int max_qp_wr, max_sge;
	struct ib_cq *sendcq, *recvcq;
	int rc;

	max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC: %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* always signal? */
	rpcrdma_init_cqcount(ep, 0);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						ia->ri_device->attrs.max_qp_rd_atom;

	/* Limit transport retries so the upper layer can detect a
	 * failed connection quickly; the RPC layer handles
	 * re-establishing the transport and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control, so disable
	 * HCA-level flow control and RNR retries.
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated).
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);
}
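
/*
 * Connect an unconnected endpoint.
 *
 * On reconnect, a fresh rdma_cm_id and QP are created before the
 * old ones are torn down; reconnecting on a different device is
 * not supported.
 */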
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC: %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}

		/* The PD, QP attributes, and registered buffers are all
		 * tied to the original device, so a reconnect that lands
		 * on a different device cannot be handled here.
		 */
		if (ia->ri_device != id->device) {
			printk("RPC: %s: can't reconnect on "
				"different device!\n", __func__);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		old = ia->ri_id;
		ia->ri_id = id;

		rdma_destroy_qp(old);
		rpcrdma_destroy_id(old);
	} else {
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/* A connection refusal may only mean that no listener is up
	 * yet, which can be a transient state, so retry a bounded
	 * number of times before giving up.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Some peers connect reliably only when initiator depth
		 * and responder resources are equal and nonzero.
		 */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		struct rpcrdma_xprt *r_xprt;
		unsigned int extras;

		dprintk("RPC: %s: connected\n", __func__);

		r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		extras = r_xprt->rx_buf.rb_bc_srv_max_requests;

		if (extras) {
			rc = rpcrdma_ep_post_extra_recv(r_xprt, extras);
			if (rc) {
				pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n",
					__func__, rc);
				rc = 0;	/* not a fatal error */
			}
		}
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}
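
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */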
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}

	ib_drain_qp(ia->ri_id->qp);
}

static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mw *mw;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mw = list_first_entry(&buf->rb_stale_mrs,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
		spin_unlock(&buf->rb_recovery_lock);

		dprintk("RPC: %s: recovering MR %p\n", __func__, mw);
		mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	list_add(&mw->mw_list, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

static void
rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 32; count++) {
		struct rpcrdma_mw *mw;
		int rc;

		mw = kzalloc(sizeof(*mw), GFP_KERNEL);
		if (!mw)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mw);
		if (rc) {
			kfree(mw);
			break;
		}

		mw->mw_xprt = r_xprt;

		list_add(&mw->mw_list, &free);
		list_add(&mw->mw_all, &all);
	}

	spin_lock(&buf->rb_mwlock);
	list_splice(&free, &buf->rb_mws);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mwlock);

	dprintk("RPC: %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_create_mrs(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&req->rl_free);
	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_cqe.done = rpcrdma_wc_send;
	req->rl_buffer = &r_xprt->rx_buf;
	INIT_LIST_HEAD(&req->rl_registered);
	req->rl_send_wr.next = NULL;
	req->rl_send_wr.wr_cqe = &req->rl_cqe;
	req->rl_send_wr.sg_list = req->rl_send_sge;
	req->rl_send_wr.opcode = IB_WR_SEND;
	return req;
}
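
/*
 * Allocate an rpcrdma_rep and its receive buffer, and wire up the
 * Receive completion handler and reply-processing work item.
 */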
struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}
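
/*
 * Allocate the transport's buffer pools: MRs, request (send)
 * buffers, and reply (receive) buffers.
 */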
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	atomic_set(&buf->rb_credits, 1);
	spin_lock_init(&buf->rb_mwlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_create_mrs(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		req->rl_backchannel = false;
		list_add(&req->rl_free, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC: %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		list_add(&rep->rr_list, &buf->rb_recv_bufs);
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_free);
	list_del(&req->rl_free);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mw *mw;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mwlock);
	while (!list_empty(&buf->rb_all)) {
		mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&mw->mw_all);

		spin_unlock(&buf->rb_mwlock);
		ia->ri_ops->ro_release_mr(mw);
		count++;
		spin_lock(&buf->rb_mwlock);
	}
	spin_unlock(&buf->rb_mwlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC: %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_destroy_mrs(buf);
}

struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		goto out_nomws;
	return mw;

out_nomws:
	dprintk("RPC: %s: no MWs available\n", __func__);
	schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	list_add_tail(&mw->mw_list, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
	/* If more Receives have been posted than outstanding requests
	 * (for example, an RPC completed without a reply), hold off on
	 * supplying another Receive buffer until the send count catches
	 * up with the receive count.
	 */
	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
		return NULL;

	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
		return NULL;
	buffers->rb_recv_count++;
	return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	buffers->rb_send_count++;
	req = rpcrdma_buffer_get_req_locked(buffers);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC: %s: out of request buffers\n", __func__);
	return NULL;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_send_wr.num_sge = 0;
	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	buffers->rb_send_count--;
	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
	if (rep) {
		buffers->rb_recv_count--;
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	}
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	buffers->rb_recv_count--;
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}
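
/**
 * rpcrdma_alloc_regbuf - allocate and prepare a DMA-mappable buffer
 * @size: size of the buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf: a buffer that can be
 * DMA-mapped persistently for RDMA SEND and RECV operations.
 */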
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
		return false;

	rb->rg_device = ia->ri_device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}
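
/*
 * Post a Send (and possibly a Receive) work request.
 *
 * If a Receive buffer is attached to the request, it is posted
 * first; the Receive buffer is donated to the hardware and is
 * reclaimed upon receive completion.
 */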
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_send_wr;
	struct ib_send_wr *send_wr_fail;
	int rc;

	if (req->rl_reply) {
		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
		if (rc)
			return rc;
		req->rl_reply = NULL;
	}

	dprintk("RPC: %s: posting %d s/g entries\n",
		__func__, send_wr->num_sge);

	rpcrdma_set_signaled(ep, send_wr);
	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
	if (rc)
		goto out_postsend_err;
	return 0;

out_postsend_err:
	pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
	return -ENOTCONN;
}

int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr *recv_wr_fail;
	int rc;

	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
		goto out_map;
	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
	if (rc)
		goto out_postrecv;
	return 0;

out_map:
	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
	return -EIO;

out_postrecv:
	pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
	return -ENOTCONN;
}
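
/**
 * rpcrdma_ep_post_extra_recv - Post extra receive buffers
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */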
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}
1398