#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

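/*
 * Allocate a per-WR context that tracks an RDMA operation. The
 * allocation may sleep and simply retries until the slab allocator
 * succeeds, so callers always get a context back.
 */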
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

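/*
 * Unmap the DMA addresses held in a context's SGE list before the
 * context is released.
 */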
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Only unmap SGEs that were mapped with the transport's
		 * local DMA lkey; SGEs registered through an FRMR are
		 * unmapped when the FRMR itself is released.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

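/*
 * Request mappings are allocated from a slab cache; like op contexts,
 * allocation retries until it succeeds rather than failing the caller.
 */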
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

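/*
 * CQ event handler: an asynchronous CQ error is fatal for the
 * transport, so mark it for closing.
 */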
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

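/*
 * Data Transfer Operation (DTO) tasklet: drains the global dto_xprt_q,
 * reaping the receive and send CQs of each queued transport. The
 * transport reference taken when the transport was queued is dropped
 * here.
 */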
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

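/*
 * Receive-queue completion handler, called in interrupt context. It
 * marks the receive queue as pending, queues the transport on the DTO
 * list (taking a reference if it is not already queued), and schedules
 * the DTO tasklet to do the actual CQ processing.
 */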
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against a reference count that has already dropped to zero */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/* Note that the RQ needs reaping, whether or not the transport is
	 * already queued for the tasklet. */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/* If this transport is not already on the DTO transport queue,
	 * add it. */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* The tasklet does all of the actual completion processing */
	tasklet_schedule(&dto_tasklet);
}

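/*
 * Drain the receive CQ: successful receive completions are moved to the
 * transport's sc_rq_dto_q for svc_rdma_recvfrom() to consume; failed
 * completions close the transport. The CQ is re-armed before polling so
 * that no completion is missed.
 */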
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n",
				ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before the connection-established event, defer
	 * enqueuing the transport until the connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

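/*
 * Process a completion context taken from the send CQ. Handling depends
 * on the opcode of the original work request: SEND and RDMA WRITE
 * completions simply release the context, while the final RDMA READ
 * completion hands the saved receive header to svc_rdma_recvfrom() via
 * sc_read_complete_q and wakes the transport.
 */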
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (ctxt->frmr)
			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		if (ctxt->frmr)
			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		svc_rdma_put_frmr(xprt, ctxt->frmr);
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

			if (read_hdr) {
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
			} else {
				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
			}
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

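/*
 * Drain the send CQ in small batches. Each completion releases an SQ
 * slot (waking any sender blocked in svc_rdma_send()), drops the
 * transport reference taken when the WR was posted, and dispatches the
 * context to process_context().
 */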
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %d\n",
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

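/*
 * Send-queue completion handler, called in interrupt context. Like the
 * receive handler, it only flags the SQ as pending and schedules the
 * DTO tasklet, which performs the actual reaping.
 */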
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against a reference count that has already dropped to zero */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/* Note that the SQ needs reaping, whether or not the transport is
	 * already queued for the tasklet. */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/* If this transport is not already on the DTO transport queue,
	 * add it. */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* The tasklet does all of the actual completion processing */
	tasklet_schedule(&dto_tasklet);
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* Retry the allocation rather than failing the caller */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}

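/*
 * Post a receive WR on the transport's QP. Each receive buffer is built
 * from enough pages to hold the maximum inline request size; a reference
 * is held on the transport until the receive completes.
 */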
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

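/*
 * Handle an RDMA_CM_EVENT_CONNECT_REQUEST on the listening endpoint:
 * create a new transport for the connection, record the client's
 * requested initiator depth and its addresses, queue the new transport
 * on the listener's accept queue, and wake the listener so that
 * svc_rdma_accept() can finish the handshake.
 */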
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save the client's advertised inbound read limit for use in accept */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport.
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

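/*
 * CM event handler for the listening endpoint. Only connect requests
 * need real work; device removal closes the listener.
 */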
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

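/*
 * CM event handler for connected (per-client) endpoints. An established
 * connection clears the connection-pending flag and enqueues the
 * transport; disconnects and device removal mark it for closing.
 */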
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

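/*
 * Create a listening RDMA service endpoint: allocate the listener
 * transport, create and bind an RDMA CM id, and start listening for
 * connect requests.
 */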
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n",
			sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(mr))
		goto err_free_frmr;

	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					 RPCSVC_MAXPAGES);
	if (IS_ERR(pl))
		goto err_free_mr;

	frmr->mr = mr;
	frmr->page_list = pl;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		ib_dereg_mr(frmr->mr);
		ib_free_fast_reg_page_list(frmr->page_list);
		kfree(frmr);
	}
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->map_len = 0;
		frmr->page_list_len = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
			   struct svc_rdma_fastreg_mr *frmr)
{
	int page_no;

	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		dma_addr_t addr = frmr->page_list->page_list[page_no];

		if (ib_dma_mapping_error(frmr->mr->device, addr))
			continue;
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
				  frmr->direction);
	}
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		frmr_unmap_dma(rdma, frmr);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

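/*
 * Accept a pending connection: dequeue the new transport from the
 * listener's accept queue, query device attributes, allocate the PD,
 * CQs and QP, decide how memory will be registered, post the initial
 * receive buffers, and finally call rdma_accept() to complete the
 * handshake.
 */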
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on the client's advertised IRD, the device
	 * limit, and the configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		" cm_id->device=%p, sc_pd->device=%p\n"
		" cap.max_send_wr = %d\n"
		" cap.max_recv_wr = %d\n"
		" cap.max_send_sge = %d\n"
		" cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

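	/*
	 * Select the RDMA read strategy and decide whether a DMA MR is
	 * needed. Devices with memory management extensions use fast
	 * registration (FRMR) work requests for RDMA reads; otherwise
	 * the local DMA lkey is used directly.
	 */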
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			devattr.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IWARP:
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc =
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE);
		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	case RDMA_TRANSPORT_IB:
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else if (!(devattr.device_cap_flags &
			     IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	default:
		goto errout;
	}

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey =
			newxprt->sc_cm_id->device->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message.
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		" local_ip : %pI4\n"
		" local_port : %d\n"
		" remote_ip : %pI4\n"
		" remote_port : %d\n"
		" max_sge : %d\n"
		" sq_depth : %d\n"
		" max_requests : %d\n"
		" ord : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
		  route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
		  route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

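/*
 * Break the transport's association with the connection by disconnecting
 * the CM id; resource teardown happens later in __svc_rdma_free() once
 * all references to the transport are gone.
 */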
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

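/*
 * Final transport teardown, run from a workqueue so that it may sleep.
 * Contexts still queued for svc_rdma_recvfrom() are released, the FRMR
 * pool is torn down, and the QP, CQs, MR, PD and CM id are destroyed
 * before the transport structure itself is freed.
 */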
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);

	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource */
	if (atomic_read(&rdma->sc_ctxt_used) != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       atomic_read(&rdma->sc_ctxt_used));
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return 0.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return 1. */
	return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

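/*
 * Post a fast-registration work request for an FRMR. The MR key is
 * bumped before posting so that the new registration cannot be confused
 * with a previous use of the same MR.
 */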
int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
		     struct svc_rdma_fastreg_mr *frmr)
{
	struct ib_send_wr fastreg_wr;
	u8 key;

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof fastreg_wr);
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	return svc_rdma_send(xprt, &fastreg_wr);
}

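/*
 * Post a chain of send work requests. The SQ is flow-controlled: if
 * there is not enough room for the whole chain, the caller reaps the
 * send CQ opportunistically and then sleeps on sc_send_wait until
 * completions free up SQ slots. A transport reference is held for every
 * WR posted.
 */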
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}

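/*
 * Send an RPC/RDMA error reply to the client. The error message is
 * built into a single freshly allocated page, DMA-mapped, and posted
 * as a signaled SEND; on failure the context and its page are released.
 */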
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		put_page(p);
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
	ctxt->sge[0].length = length;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}