// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * Server-side RPC-over-RDMA transport: the Send path. This file
 * assembles RPC-over-RDMA Reply headers, DMA-maps or pulls up each
 * Reply's message body, and posts the Send Work Requests that
 * transmit Replies back to clients.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

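/* Build a distinct completion ID for each Send: the Send CQ's
 * resource ID plus a per-transport counter. Tracepoints use these
 * IDs to match a Send WR with its completion.
 */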
static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

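/* Allocate one Send context, including its inline array of SGEs and
 * a buffer for the transport header. The header buffer is DMA-mapped
 * here and remains mapped until the transport itself is torn down
 * (see svc_rdma_send_ctxts_destroy).
 */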
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	init_completion(&ctxt->sc_done);
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
		ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

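	/* llist_del_first() must not be called concurrently by
	 * multiple consumers; sc_send_lock serializes them.
	 */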
	spin_lock(&rdma->sc_send_lock);
	node = llist_del_first(&rdma->sc_send_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until the transport instance is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
}

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
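	/* The explicit barrier orders the sc_sq_avail update before
	 * the lockless waitqueue_active() check, as waitqueue_active()
	 * requires (see its kerneldoc).
	 */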
	atomic_add(avail, &rdma->sc_sq_avail);
	smp_mb__after_atomic();
	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
		wake_up(&rdma->sc_send_wait);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	svc_rdma_wake_send_waiters(rdma, 1);
	complete(&ctxt->sc_done);

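	/* A flushed or failed Send means the connection is no longer
	 * usable; schedule the transport for close.
	 */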
	if (unlikely(wc->status != IB_WC_SUCCESS))
		svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	reinit_completion(&ctxt->sc_done);

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes consumed in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

struct svc_rdma_map_data {
	struct svcxprt_rdma *md_rdma;
	struct svc_rdma_send_ctxt *md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   On success, returns the number of bytes in this portion of
 *   the message
 *   %-EIO if DMA mapping failed
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}

struct svc_rdma_pullup_data {
	u8 *pd_dest;
	unsigned int pd_length;
	unsigned int pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs are needed
 * @xdr: xdr_buf containing portion of an RPC message
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero; the running SGE count is accumulated in @data.
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

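	/* The head kvec, each page fragment, and the tail kvec
	 * each consume one SGE.
	 */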
	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length = sctxt->sc_hdrbuf.len,
		.pd_num_sges = 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

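	/* Pulling short messages up into the header buffer is cheaper
	 * than DMA-mapping each element separately. Pull-up is also
	 * mandatory when the message would need more SGEs than the
	 * device supports in a single Send.
	 */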
	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *	pull_up_needed has determined that @xdr will fit in the buffer
 *
 * Returns:
 *	%0 if pull-up was successful
 *	%-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *	%0 if DMA mapping was successful.
 *	%-EMSGSIZE if a buffer manipulation problem occurred
 *	%-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma = rdma,
		.md_ctxt = sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

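	/* If the client provided an rkey it is willing to have
	 * invalidated remotely, use Send With Invalidate so the
	 * invalidation rides along with the Reply.
	 */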
	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}

	ret = svc_rdma_send(rdma, sctxt);
	if (ret < 0)
		return ret;

	ret = wait_for_completion_killable(&sctxt->sc_done);
	svc_rdma_send_ctxt_put(rdma, sctxt);
	return ret;
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for the request being responded to
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was unable to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * this function.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;

	wait_for_completion_killable(&sctxt->sc_done);

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	unsigned int rc_size;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;

	ret = -EMSGSIZE;
	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto reply_chunk;
	rc_size = ret;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	ret = svc_rdma_encode_read_list(sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_write_list(rctxt, sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
	if (ret < 0)
		goto put_ctxt;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;

	/* Prevent svc_xprt_release() from releasing the page backing
	 * rq_res.head[0].iov_base. It's no longer being accessed by
	 * the I/O device.
	 */
	rqstp->rq_respages++;
	return 0;

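/* Sending the Reply chunk failed. For transient errors (anything
 * other than -E2BIG or -EINVAL), drop the connection; otherwise,
 * send an RDMA_ERROR response so the client can retire the RPC
 * transaction cleanly.
 */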
reply_chunk:
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;

	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in @rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc.)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

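	/* Carve the payload out of rq_res and push it to the client
	 * with RDMA Writes, using the segments of this Write chunk.
	 */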
	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}