// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 */
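
/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a
 * client.
 *
 * The passed-in svc_rqst contains a prepared xdr_buf holding the
 * XDR-encoded RPC Reply message. sendto constructs the RPC-over-RDMA
 * transport header in a persistently DMA-mapped buffer, DMA maps the
 * xdr_buf (or pulls it up into the header buffer for small messages),
 * and posts a Send Work Request (and, when Write or Reply chunks are
 * present, RDMA Write Work Requests) to convey the Reply.
 *
 * Each of these operations is associated with a svc_rdma_send_ctxt,
 * which tracks the SGE array, the header buffer, and the pages that
 * back the Reply while it is under I/O. The Send completion handler
 * unmaps the SGEs, releases those pages, and returns the ctxt to a
 * free list on the transport.
 */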

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}
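
/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */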
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}
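
/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */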
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}
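
/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * All DMA-mapped SGEs other than the persistently-mapped transport
 * header are unmapped, and any response pages still attached to
 * @ctxt are released.
 */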
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}
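
/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */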
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}
}
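
/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned and the connection is marked for close.
 */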
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	might_sleep();

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	wake_up(&rdma->sc_send_wait);
	return ret;
}
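
/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */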
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}
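
/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: remaining bytes of the payload left in the Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   consumed by the Write segment
 *   %-EMSGSIZE on XDR buffer overflow
 */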
static ssize_t svc_rdma_encode_write_segment(__be32 *src,
					     struct svc_rdma_send_ctxt *sctxt,
					     unsigned int *remaining)
{
	__be32 *p;
	const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
	u32 handle, length;
	u64 offset;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	xdr_decode_rdma_segment(src, &handle, &length, &offset);

	if (*remaining < length) {
		/* segment only partly filled */
		length = *remaining;
		*remaining = 0;
	} else {
		/* entire segment was consumed */
		*remaining -= length;
	}
	xdr_encode_rdma_segment(p, handle, length, offset);

	trace_svcrdma_encode_wseg(handle, length, offset);
	return len;
}
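
/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: size in bytes of the payload in the Write chunk
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */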
static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
					   struct svc_rdma_send_ctxt *sctxt,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	ssize_t len, ret;

	len = 0;
	trace_svcrdma_encode_write_chunk(remaining);

	src++;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	nsegs = be32_to_cpup(src++);
	ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	for (i = nsegs; i; i--) {
		ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
		if (ret < 0)
			return -EMSGSIZE;
		src += rpcrdma_segment_maxsz;
		len += ret;
	}

	return len;
}
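
/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the first Write chunk
 *
 * The client provides a Write chunk list in the Call message. Fill
 * in the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */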
static ssize_t
svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
			   struct svc_rdma_send_ctxt *sctxt,
			   unsigned int length)
{
	ssize_t len, ret;

	ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
	if (ret < 0)
		return ret;
	len = ret;

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}
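
/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Assumptions:
 * - Reply can always fit in the client-provided Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */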
static ssize_t
svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
					   length);
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	return -EIO;
}
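
/* Map a kernel-virtual buffer as an SGE. The Send completion handler
 * unmaps SGEs with ib_dma_unmap_page(), which is why the buffer is
 * mapped here via svc_rdma_dma_map_page() rather than with
 * ib_dma_map_single().
 */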
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}
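
/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *	%true if pull-up must be used
 *	%false otherwise
 */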
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    struct xdr_buf *xdr)
{
	int elements;

	/* For small messages, copying bytes is cheaper than DMA mapping.
	 */
	if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
		return true;

	/* Check whether the xdr_buf has more elements than can fit
	 * in a single RDMA Send SGE array.
	 */
	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!rctxt || !rctxt->rc_write_list) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}
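
/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Returns zero on success.
 */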
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (rctxt && rctxt->rc_write_list) {
		u32 xdrpad;

		xdrpad = xdr_pad_size(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			/* Copy from the page's starting offset, and
			 * advance to the next page on each pass.
			 */
			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			ppages++;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	sctxt->sc_sges[0].length += xdr->len;
	trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
	return 0;
}
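
/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added. The Send WR's num_sge field is set.
 *
 * Returns zero on success, or a negative errno on failure.
 */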
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   struct xdr_buf *xdr)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (rctxt && rctxt->rc_reply_chunk)
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	++sctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, sctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list is
	 * not included inline. However, the Upper Layer may have
	 * added XDR padding in the tail buffer, and that should
	 * not be included inline.
	 */
	if (rctxt && rctxt->rc_write_list) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_pad_size(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}
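
/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto() returns. Transfer pages under I/O to the ctxt
 * so that they are instead released by the Send completion handler.
 */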
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}
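
/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */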
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	return svc_rdma_send(rdma, sctxt);
}
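
/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was unable to parse the Call or form a complete RPC
 * Reply. Return an RDMA_ERROR message so the client can retire
 * the RPC transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */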
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}
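
/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */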
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *wr_lst = rctxt->rc_write_list;
	__be32 *rp_ch = rctxt->rc_reply_chunk;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto err0;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err1;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rp_ch ? rdma_nomsg : rdma_msg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err1;
	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
			goto err1;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err1;
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
			goto err1;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err1;
	}

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_err(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}
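
/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */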
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			  unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}