#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
                                        sc_list);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;
        dma_addr_t addr;
        void *buffer;
        size_t size;
        int i;

        size = sizeof(*ctxt);
        size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
        ctxt = kmalloc(size, GFP_KERNEL);
        if (!ctxt)
                goto fail0;
        buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;

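        /* Set up the Send WR once: completions are signaled so that
         * svc_rdma_wc_send() can recycle this ctxt, and sc_sges[0]
         * permanently maps the transport header buffer allocated above.
         */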
        ctxt->sc_send_wr.next = NULL;
        ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
        ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
        ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
        ctxt->sc_cqe.done = svc_rdma_wc_send;
        ctxt->sc_xprt_buf = buffer;
        ctxt->sc_sges[0].addr = addr;

        for (i = 0; i < rdma->sc_max_send_sges; i++)
                ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
        return ctxt;

fail2:
        kfree(buffer);
fail1:
        kfree(ctxt);
fail0:
        return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxts for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
                list_del(&ctxt->sc_list);
                ib_dma_unmap_single(rdma->sc_pd->device,
                                    ctxt->sc_sges[0].addr,
                                    rdma->sc_max_req_size,
                                    DMA_TO_DEVICE);
                kfree(ctxt->sc_xprt_buf);
                kfree(ctxt);
        }
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;

        spin_lock(&rdma->sc_send_lock);
        ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
        if (!ctxt)
                goto out_empty;
        list_del(&ctxt->sc_list);
        spin_unlock(&rdma->sc_send_lock);

out:
        ctxt->sc_send_wr.num_sge = 0;
        ctxt->sc_cur_sge_no = 0;
        ctxt->sc_page_count = 0;
        return ctxt;

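        /* The free list is empty: allocate a fresh ctxt outside the lock. */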
out_empty:
        spin_unlock(&rdma->sc_send_lock);
        ctxt = svc_rdma_send_ctxt_alloc(rdma);
        if (!ctxt)
                return NULL;
        goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: ctxt to release
 *
 * SGEs mapped for this Send (other than the transport header) are
 * DMA unmapped, and any pages left in sc_pages are released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
                            struct svc_rdma_send_ctxt *ctxt)
{
        struct ib_device *device = rdma->sc_cm_id->device;
        unsigned int i;

        /* The first SGE contains the transport header, which
         * remains mapped until the ctxt itself is destroyed.
         */
        for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
                ib_dma_unmap_page(device,
                                  ctxt->sc_sges[i].addr,
                                  ctxt->sc_sges[i].length,
                                  DMA_TO_DEVICE);

        for (i = 0; i < ctxt->sc_page_count; ++i)
                put_page(ctxt->sc_pages[i]);

        spin_lock(&rdma->sc_send_lock);
        list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
        spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * The transport reference taken in svc_rdma_send() keeps the
 * svcxprt_rdma pinned while this handler can still run.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_send_ctxt *ctxt;

        trace_svcrdma_wc_send(wc);

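        /* Return the SQ credit taken in svc_rdma_send() and wake up
         * any sender waiting for SQ space.
         */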
        atomic_inc(&rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
        svc_rdma_send_ctxt_put(rdma, ctxt);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_xprt_enqueue(&rdma->sc_xprt);
        }

        svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send Work Request
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the WR was posted successfully. Otherwise, a
 * negative errno is returned and the connection is marked closed.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
        int ret;

        might_sleep();

        /* If the SQ is full, wait until an SQ entry is available. */
        while (1) {
                if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
                        atomic_inc(&rdma_stat_sq_starve);
                        trace_svcrdma_sq_full(rdma);
                        atomic_inc(&rdma->sc_sq_avail);
                        wait_event(rdma->sc_send_wait,
                                   atomic_read(&rdma->sc_sq_avail) > 1);
                        if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
                                return -ENOTCONN;
                        trace_svcrdma_sq_retry(rdma);
                        continue;
                }

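                /* Pin the transport across the Send; svc_rdma_wc_send()
                 * drops this reference when the WR completes.
                 */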
                svc_xprt_get(&rdma->sc_xprt);
                ret = ib_post_send(rdma->sc_qp, wr, NULL);
                trace_svcrdma_post_send(wr, ret);
                if (ret) {
                        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                        svc_xprt_put(&rdma->sc_xprt);
                        wake_up(&rdma->sc_send_wait);
                }
                break;
        }
        return ret;
}

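/* XDR items are padded to a 4-byte boundary; return the number of
 * pad bytes needed for an item of @len bytes.
 */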
static u32 xdr_padsize(u32 len)
{
        return (len & 3) ? (4 - (len & 3)) : 0;
}

/* Returns the length of the Reply's transport header, in bytes.
 */
static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
{
        unsigned int nsegs;
        __be32 *p;

        p = rdma_resp;

        /* RPC-over-RDMA V1 replies never have a Read list. */
        p += rpcrdma_fixed_maxsz + 1;

        /* Skip the Write list. */
        while (*p++ != xdr_zero) {
                nsegs = be32_to_cpup(p++);
                p += nsegs * rpcrdma_segment_maxsz;
        }

        /* Skip the Reply chunk. */
        if (*p++ != xdr_zero) {
                nsegs = be32_to_cpup(p++);
                p += nsegs * rpcrdma_segment_maxsz;
        }

        return (unsigned long)p - (unsigned long)rdma_resp;
}

/* One Write chunk is copied from the Call transport header to the
 * Reply transport header. Each segment's length field is updated
 * to reflect the number of bytes written into that segment.
 *
 * Returns the number of segments in this chunk.
 */
static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
                                           unsigned int remaining)
{
        unsigned int i, nsegs;
        u32 seg_len;

        /* Write chunk discriminator */
        *dst++ = *src++;

        /* number of segments in this chunk */
        nsegs = be32_to_cpup(src);
        *dst++ = *src++;

        for (i = nsegs; i; i--) {
                /* segment's RDMA handle */
                *dst++ = *src++;

                /* bytes returned in this segment */
                seg_len = be32_to_cpu(*src);
                if (remaining >= seg_len) {
                        /* entire segment was consumed */
                        *dst = *src;
                        remaining -= seg_len;
                } else {
                        /* segment only partly filled */
                        *dst = cpu_to_be32(remaining);
                        remaining = 0;
                }
                dst++; src++;

                /* segment's RDMA offset */
                *dst++ = *src++;
                *dst++ = *src++;
        }

        return nsegs;
}

/* The client provided a Write list in the Call message. Fill in
 * the segments of the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 */
static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
                                           unsigned int consumed)
{
        unsigned int nsegs;
        __be32 *p, *q;

        /* RPC-over-RDMA V1 replies never have a Read list. */
        p = rdma_resp + rpcrdma_fixed_maxsz + 1;

        q = wr_ch;
        while (*q != xdr_zero) {
                nsegs = xdr_encode_write_chunk(p, q, consumed);
                q += 2 + nsegs * rpcrdma_segment_maxsz;
                p += 2 + nsegs * rpcrdma_segment_maxsz;
                consumed = 0;
        }

        /* Terminate the Write list */
        *p++ = xdr_zero;

        /* Reply chunk discriminator; may be replaced later */
        *p = xdr_zero;
}

/* The client provided a Reply chunk in the Call message. Fill in
 * the segments of the Reply chunk in the Reply message with the
 * number of bytes consumed in each segment.
 *
 * Assumptions:
 * - Reply can always fit in the provided Reply chunk
 */
static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
                                            unsigned int consumed)
{
        __be32 *p;

        /* Find the Reply chunk in the Reply's transport header.
         * RPC-over-RDMA V1 replies never have a Read list.
         */
        p = rdma_resp + rpcrdma_fixed_maxsz + 1;

        /* Skip past the Write list */
        while (*p++ != xdr_zero)
                p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;

        xdr_encode_write_chunk(p, rp_ch, consumed);
}

/* Parse the RPC Call's transport header to find the client-provided
 * Write list and Reply chunk, if any.
 */
static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
                                      __be32 **write, __be32 **reply)
{
        __be32 *p;

        p = rdma_argp + rpcrdma_fixed_maxsz;

        /* Skip the Read list */
        while (*p++ != xdr_zero)
                p += 5;

        /* Write list */
        if (*p != xdr_zero) {
                *write = p;
                while (*p++ != xdr_zero)
                        p += 1 + be32_to_cpu(*p) * 4;
        } else {
                *write = NULL;
                p++;
        }

        /* Reply chunk */
        if (*p != xdr_zero)
                *reply = p;
        else
                *reply = NULL;
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_send_ctxt *ctxt,
                                 struct page *page,
                                 unsigned long offset,
                                 unsigned int len)
{
        struct ib_device *dev = rdma->sc_cm_id->device;
        dma_addr_t dma_addr;

        dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, dma_addr))
                goto out_maperr;

        ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
        ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
        ctxt->sc_send_wr.num_sge++;
        return 0;

out_maperr:
        trace_svcrdma_dma_map_page(rdma, page);
        return -EIO;
}

/* Map a kernel virtual address range for Send. The buffer is mapped
 * page-wise so that svc_rdma_send_ctxt_put() can unmap every SGE
 * after sc_sges[0] with ib_dma_unmap_page().
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
                                struct svc_rdma_send_ctxt *ctxt,
                                unsigned char *base,
                                unsigned int len)
{
        return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
                                     offset_in_page(base), len);
}

/**
 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @len: length of transport header, in bytes
 *
 */
void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
                             struct svc_rdma_send_ctxt *ctxt,
                             unsigned int len)
{
        ctxt->sc_sges[0].length = len;
        ctxt->sc_send_wr.num_sge++;
        ib_dma_sync_single_for_device(rdma->sc_pd->device,
                                      ctxt->sc_sges[0].addr, len,
                                      DMA_TO_DEVICE);
}

/* If the xdr_buf has more elements than the device can
 * transmit in a single RDMA Send, then the reply will
 * have to be copied into a bounce buffer.
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
                                    struct xdr_buf *xdr,
                                    __be32 *wr_lst)
{
        int elements;

        /* xdr->head */
        elements = 1;

        /* xdr->pages */
        if (!wr_lst) {
                unsigned int remaining;
                unsigned long pageoff;

                pageoff = xdr->page_base & ~PAGE_MASK;
                remaining = xdr->page_len;
                while (remaining) {
                        ++elements;
                        remaining -= min_t(u32, PAGE_SIZE - pageoff,
                                           remaining);
                        pageoff = 0;
                }
        }

        /* xdr->tail */
        if (xdr->tail[0].iov_len)
                ++elements;

        /* assume 1 SGE is needed for the transport header */
        return elements >= rdma->sc_max_send_sges;
}

/* The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header
 * buffer.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
                                      struct svc_rdma_send_ctxt *ctxt,
                                      struct xdr_buf *xdr, __be32 *wr_lst)
{
        unsigned char *dst, *tailbase;
        unsigned int taillen;

        dst = ctxt->sc_xprt_buf;
        dst += ctxt->sc_sges[0].length;

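        /* The RPC message is copied immediately after the transport
         * header already built in sc_xprt_buf.
         */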
        memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
        dst += xdr->head[0].iov_len;

        tailbase = xdr->tail[0].iov_base;
        taillen = xdr->tail[0].iov_len;
        if (wr_lst) {
                u32 xdrpad;

                xdrpad = xdr_padsize(xdr->page_len);
                if (taillen && xdrpad) {
                        tailbase += xdrpad;
                        taillen -= xdrpad;
                }
        } else {
                unsigned int len, remaining;
                unsigned long pageoff;
                struct page **ppages;

                ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
                pageoff = xdr->page_base & ~PAGE_MASK;
                remaining = xdr->page_len;
                while (remaining) {
                        len = min_t(u32, PAGE_SIZE - pageoff, remaining);

                        /* copy from the current page, then advance
                         * to the next page in the list
                         */
                        memcpy(dst, page_address(*ppages) + pageoff, len);
                        remaining -= len;
                        dst += len;
                        pageoff = 0;
                        ppages++;
                }
        }

        if (taillen)
                memcpy(dst, tailbase, taillen);

        ctxt->sc_sges[0].length += xdr->len;
        ib_dma_sync_single_for_device(rdma->sc_pd->device,
                                      ctxt->sc_sges[0].addr,
                                      ctxt->sc_sges[0].length,
                                      DMA_TO_DEVICE);

        return 0;
}

/* svc_rdma_map_reply_msg - Map the buffer holding the RPC message
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @xdr: prepared xdr_buf containing the RPC message
 * @wr_lst: pointer to the Call header's Write list, or NULL
 *
 * Load the xdr_buf into the ctxt's SGE array, DMA-mapping each
 * element as it is added.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
                           struct svc_rdma_send_ctxt *ctxt,
                           struct xdr_buf *xdr, __be32 *wr_lst)
{
        unsigned int len, remaining;
        unsigned long page_off;
        struct page **ppages;
        unsigned char *base;
        u32 xdr_pad;
        int ret;

        if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
                return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);

        ++ctxt->sc_cur_sge_no;
        ret = svc_rdma_dma_map_buf(rdma, ctxt,
                                   xdr->head[0].iov_base,
                                   xdr->head[0].iov_len);
        if (ret < 0)
                return ret;

        /* If a Write chunk is present, the xdr_buf's page list
         * is not included inline. However the Upper Layer may
         * have added XDR padding in the tail buffer, and that
         * should not be included inline.
         */
        if (wr_lst) {
                base = xdr->tail[0].iov_base;
                len = xdr->tail[0].iov_len;
                xdr_pad = xdr_padsize(xdr->page_len);

                if (len && xdr_pad) {
                        base += xdr_pad;
                        len -= xdr_pad;
                }

                goto tail;
        }

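        /* No Write chunk: DMA-map each page of the xdr_buf's page list
         * as its own SGE.
         */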
        ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
        page_off = xdr->page_base & ~PAGE_MASK;
        remaining = xdr->page_len;
        while (remaining) {
                len = min_t(u32, PAGE_SIZE - page_off, remaining);

                ++ctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
                                            page_off, len);
                if (ret < 0)
                        return ret;

                remaining -= len;
                page_off = 0;
        }

        base = xdr->tail[0].iov_base;
        len = xdr->tail[0].iov_len;
tail:
        if (len) {
                ++ctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto() returns. Transfer pages under I/O to the ctxt
 * so that they are released only by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
                                   struct svc_rdma_send_ctxt *ctxt)
{
        int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

        ctxt->sc_page_count += pages;
        for (i = 0; i < pages; i++) {
                ctxt->sc_pages[i] = rqstp->rq_respages[i];
                rqstp->rq_respages[i] = NULL;
        }

        /* Prevent svc_xprt_release() from releasing pages in rq_pages */
        rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in the SGEs
 * that follow it.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter cases, RDMA Writes have already been queued to
 * carry the payload.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are transferred out of the
 * rqstp and into the sctxt's page array; they are released only
 * when the Send completes.
 *
 * Remote Invalidation is requested when the client has advertised
 * an rkey that may be invalidated.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
                                   struct svc_rdma_send_ctxt *sctxt,
                                   struct svc_rdma_recv_ctxt *rctxt,
                                   struct svc_rqst *rqstp,
                                   __be32 *wr_lst, __be32 *rp_ch)
{
        int ret;

        if (!rp_ch) {
                ret = svc_rdma_map_reply_msg(rdma, sctxt,
                                             &rqstp->rq_res, wr_lst);
                if (ret < 0)
                        return ret;
        }

        svc_rdma_save_io_pages(rqstp, sctxt);

        if (rctxt->rc_inv_rkey) {
                sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
                sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
        } else {
                sctxt->sc_send_wr.opcode = IB_WR_SEND;
        }
        dprintk("svcrdma: posting Send WR with %u sge(s)\n",
                sctxt->sc_send_wr.num_sge);
        return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was
 * not able to form a complete reply. Send an RDMA_ERROR message so
 * the client can retire this RPC transaction. As above, the Send
 * completion routine releases payload pages that were queued for a
 * previous RDMA Write.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
                                   struct svc_rdma_send_ctxt *ctxt,
                                   struct svc_rqst *rqstp)
{
        __be32 *p;
        int ret;

        p = ctxt->sc_xprt_buf;
        trace_svcrdma_err_chunk(*p);

        /* Skip the already-encoded XID, vers, and credits fields,
         * then replace the message type with an error report.
         */
        p += 3;
        *p++ = rdma_error;
        *p = err_chunk;
        svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);

        svc_rdma_save_io_pages(rqstp, ctxt);

        ctxt->sc_send_wr.opcode = IB_WR_SEND;
        ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
        if (ret) {
                svc_rdma_send_ctxt_put(rdma, ctxt);
                return ret;
        }

        return 0;
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *      %0 if an RPC reply has been successfully posted,
 *      %-ENOTCONN if posting failed (the connection is then closed).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
        __be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
        struct xdr_buf *xdr = &rqstp->rq_res;
        struct svc_rdma_send_ctxt *sctxt;
        int ret;

        rdma_argp = rctxt->rc_recv_buf;
        svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);

        /* Build the RPC-over-RDMA reply header in the ctxt's
         * transport header buffer, then queue any RDMA Writes
         * needed to carry the payload.
         */
        ret = -ENOMEM;
        sctxt = svc_rdma_send_ctxt_get(rdma);
        if (!sctxt)
                goto err0;
        rdma_resp = sctxt->sc_xprt_buf;

        p = rdma_resp;
        *p++ = *rdma_argp;              /* XID */
        *p++ = *(rdma_argp + 1);        /* RPC-over-RDMA version */
        *p++ = rdma->sc_fc_credits;     /* credit grant */
        *p++ = rp_ch ? rdma_nomsg : rdma_msg;

        /* Start with empty chunk lists */
        *p++ = xdr_zero;
        *p++ = xdr_zero;
        *p = xdr_zero;

        if (wr_lst) {
                /* XXX: Presume the client sent only one Write chunk */
                ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
                if (ret < 0)
                        goto err2;
                svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
        }
        if (rp_ch) {
                ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
                if (ret < 0)
                        goto err2;
                svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
        }

        svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
        ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp,
                                      wr_lst, rp_ch);
        if (ret < 0)
                goto err1;
        ret = 0;

out:
        rqstp->rq_xprt_ctxt = NULL;
        svc_rdma_recv_ctxt_put(rdma, rctxt);
        return ret;

 err2:
        /* Chunk encoding errors (-E2BIG, -EINVAL) are reported to the
         * client as ERR_CHUNK; any other error closes the connection.
         */
        if (ret != -E2BIG && ret != -EINVAL)
                goto err1;

        ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
        if (ret < 0)
                goto err1;
        ret = 0;
        goto out;

 err1:
        svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
        trace_svcrdma_send_failed(rqstp, ret);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        ret = -ENOTCONN;
        goto out;
}