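/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
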
#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

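/* Returns the size of the largest RPC-over-RDMA header in a Call
 * message.
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */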
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Read list size */
        maxsegs += 2;   /* segment for head and tail buffers */
        size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

        /* Minimal Read chunk size */
        size += sizeof(__be32); /* segment count */
        size += rpcrdma_segment_maxsz * sizeof(__be32);
        size += sizeof(__be32); /* list discriminator */

        dprintk("RPC: %s: max call header size = %u\n",
                __func__, size);
        return size;
}

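/* Returns the size of the largest RPC-over-RDMA header in a Reply
 * message.
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */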
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Write list size */
        maxsegs += 2;   /* segment for head and tail buffers */
        size = sizeof(__be32);  /* segment count */
        size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
        size += sizeof(__be32); /* list discriminator */

        dprintk("RPC: %s: max reply header size = %u\n",
                __func__, size);
        return size;
}

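/* The client's maximum inline send and receive payload sizes are the
 * connection's inline thresholds less the size of the largest
 * possible transport header that could accompany each payload.
 */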
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        unsigned int maxsegs = ia->ri_max_segs;

        ia->ri_max_inline_write = cdata->inline_wsize -
                                  rpcrdma_max_call_header_size(maxsegs);
        ia->ri_max_inline_read = cdata->inline_rsize -
                                 rpcrdma_max_reply_header_size(maxsegs);
}

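/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */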
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
                                struct rpc_rqst *rqst)
{
        struct xdr_buf *xdr = &rqst->rq_snd_buf;
        unsigned int count, remaining, offset;

        if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
                return false;

        if (xdr->page_len) {
                remaining = xdr->page_len;
                offset = offset_in_page(xdr->page_base);
                count = RPCRDMA_MIN_SEND_SGES;
                while (remaining) {
                        remaining -= min_t(unsigned int,
                                           PAGE_SIZE - offset, remaining);
                        offset = 0;
                        if (++count > r_xprt->rx_ia.ri_max_send_sges)
                                return false;
                }
        }

        return true;
}

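/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */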
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
                                   struct rpc_rqst *rqst)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

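/* Split @vec on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 *
 * Returns a pointer to the next available segment, and bumps the
 * total number of segments consumed via *@n.
 */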
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
                     unsigned int *n)
{
        u32 remaining, page_offset;
        char *base;

        base = vec->iov_base;
        page_offset = offset_in_page(base);
        remaining = vec->iov_len;
        while (remaining) {
                seg->mr_page = NULL;
                seg->mr_offset = base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
                remaining -= seg->mr_len;
                base += seg->mr_len;
                ++seg;
                ++(*n);
                page_offset = 0;
        }
        return seg;
}

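/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns a positive number of SGEs consumed, or a negative errno.
 */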
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
                     unsigned int pos, enum rpcrdma_chunktype type,
                     struct rpcrdma_mr_seg *seg)
{
        unsigned long page_base;
        unsigned int len, n;
        struct page **ppages;

        n = 0;
        if (pos == 0)
                seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

        len = xdrbuf->page_len;
        ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
        page_base = offset_in_page(xdrbuf->page_base);
        while (len) {
                if (unlikely(!*ppages)) {
                        /* XXX: Certain upper layer operations do
                         *      not provide receive buffer pages.
                         */
                        *ppages = alloc_page(GFP_ATOMIC);
                        if (!*ppages)
                                return -ENOBUFS;
                }
                seg->mr_page = *ppages;
                seg->mr_offset = (char *)page_base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
                len -= seg->mr_len;
                ++ppages;
                ++seg;
                ++n;
                page_base = 0;
        }

        /* When encoding a Read chunk, the tail iovec contains only
         * an XDR pad and may be omitted.
         */
        if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        /* When encoding a Write chunk, some servers need to see an
         * extra segment for non-XDR-aligned Write chunks. The upper
         * layer provides space in the tail iovec that may be used
         * for this purpose.
         */
        if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        if (xdrbuf->tail[0].iov_len)
                seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
        if (unlikely(n > RPCRDMA_MAX_SEGS))
                return -EIO;
        return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_one;
        return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_zero;
        return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
        *iptr++ = cpu_to_be32(mr->mr_handle);
        *iptr++ = cpu_to_be32(mr->mr_length);
        xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        xdr_encode_rdma_segment(p, mr);
        return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
                    u32 position)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 6 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p++ = xdr_one;                 /* Item present */
        *p++ = cpu_to_be32(position);
        xdr_encode_rdma_segment(p, mr);
        return 0;
}

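/* Register and XDR encode the Read list. Supports encoding a list of
 * read segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */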
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                         struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        unsigned int pos;
        int nsegs;

        pos = rqst->rq_snd_buf.head[0].iov_len;
        if (rtype == rpcrdma_areadch)
                pos = 0;
        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
                                     rtype, seg);
        if (nsegs < 0)
                return nsegs;

        do {
                seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                   false, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);
                rpcrdma_mr_push(mr, &req->rl_registered);

                if (encode_read_segment(xdr, mr, pos) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
                r_xprt->rx_stats.read_chunk_count++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        return 0;
}

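/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */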
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                          struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        int nsegs, nchunks;
        __be32 *segcount;

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
                                     rqst->rq_rcv_buf.head[0].iov_len,
                                     wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                   true, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);
                rpcrdma_mr_push(mr, &req->rl_registered);

                if (encode_rdma_segment(xdr, mr) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
                r_xprt->rx_stats.write_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += mr->mr_length;
                nchunks++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        /* Update count of segments in this Write chunk */
        *segcount = cpu_to_be32(nchunks);

        return 0;
}

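/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */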
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                           struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        int nsegs, nchunks;
        __be32 *segcount;

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                   true, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);
                rpcrdma_mr_push(mr, &req->rl_registered);

                if (encode_rdma_segment(xdr, mr) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
                r_xprt->rx_stats.reply_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += mr->mr_length;
                nchunks++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        /* Update count of segments in the Reply chunk */
        *segcount = cpu_to_be32(nchunks);

        return 0;
}

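/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffer SGEs
 * @sc: sendctx containing SGEs to unmap
 *
 */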
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
        struct ib_sge *sge;
        unsigned int count;

        /* The first two SGEs contain the transport header and
         * the inline buffer. These are always left DMA-mapped
         * so they can be cheaply re-used.
         */
        sge = &sc->sc_sges[2];
        for (count = sc->sc_unmap_count; count; ++sge, --count)
                ib_dma_unmap_page(ia->ri_device,
                                  sge->addr, sge->length, DMA_TO_DEVICE);

        if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
                smp_mb__after_atomic();
                wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
        }
}

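/* Prepare an SGE for the RPC-over-RDMA transport header.
 */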
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                        u32 len)
{
        struct rpcrdma_sendctx *sc = req->rl_sendctx;
        struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
        struct ib_sge *sge = sc->sc_sges;

        if (!rpcrdma_dma_map_regbuf(ia, rb))
                goto out_regbuf;
        sge->addr = rdmab_addr(rb);
        sge->length = len;
        sge->lkey = rdmab_lkey(rb);

        ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
                                      sge->length, DMA_TO_DEVICE);
        sc->sc_wr.num_sge++;
        return true;

out_regbuf:
        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
        return false;
}

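/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */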
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                         struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
        struct rpcrdma_sendctx *sc = req->rl_sendctx;
        unsigned int sge_no, page_base, len, remaining;
        struct rpcrdma_regbuf *rb = req->rl_sendbuf;
        struct ib_device *device = ia->ri_device;
        struct ib_sge *sge = sc->sc_sges;
        u32 lkey = ia->ri_pd->local_dma_lkey;
        struct page *page, **ppages;

        /* The head iovec is straightforward, as it is already
         * DMA-mapped. Sync the content that has changed.
         */
        if (!rpcrdma_dma_map_regbuf(ia, rb))
                goto out_regbuf;
        sge_no = 1;
        sge[sge_no].addr = rdmab_addr(rb);
        sge[sge_no].length = xdr->head[0].iov_len;
        sge[sge_no].lkey = rdmab_lkey(rb);
        ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
                                      sge[sge_no].length, DMA_TO_DEVICE);

        /* If there is a Read chunk, the page list is being handled
         * via explicit RDMA, and thus is skipped here. However, the
         * tail iovec may include an XDR pad for the page list, as
         * well as additional content, and may not reside in the
         * same page as the head iovec.
         */
        if (rtype == rpcrdma_readch) {
                len = xdr->tail[0].iov_len;

                /* Do not include the tail if it is only an XDR pad */
                if (len < 4)
                        goto out;

                page = virt_to_page(xdr->tail[0].iov_base);
                page_base = offset_in_page(xdr->tail[0].iov_base);

                /* If the content in the page list is an odd length,
                 * xdr_write_pages() adds a pad at the beginning of
                 * the tail iovec. Skip it so the tail's non-pad
                 * content lands at the next XDR position in the
                 * Send message.
                 */
                page_base += len & 3;
                len -= len & 3;
                goto map_tail;
        }

        /* If there is a page list present, temporarily DMA map
         * and prepare an SGE for each page to be sent.
         */
        if (xdr->page_len) {
                ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
                page_base = offset_in_page(xdr->page_base);
                remaining = xdr->page_len;
                while (remaining) {
                        sge_no++;
                        if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
                                goto out_mapping_overflow;

                        len = min_t(u32, PAGE_SIZE - page_base, remaining);
                        sge[sge_no].addr = ib_dma_map_page(device, *ppages,
                                                           page_base, len,
                                                           DMA_TO_DEVICE);
                        if (ib_dma_mapping_error(device, sge[sge_no].addr))
                                goto out_mapping_err;
                        sge[sge_no].length = len;
                        sge[sge_no].lkey = lkey;

                        sc->sc_unmap_count++;
                        ppages++;
                        remaining -= len;
                        page_base = 0;
                }
        }

        /* The tail iovec is not always constructed in the same
         * page as the head iovec, so DMA map and prepare a
         * separate SGE for it.
         */
        if (xdr->tail[0].iov_len) {
                page = virt_to_page(xdr->tail[0].iov_base);
                page_base = offset_in_page(xdr->tail[0].iov_base);
                len = xdr->tail[0].iov_len;

map_tail:
                sge_no++;
                sge[sge_no].addr = ib_dma_map_page(device, page,
                                                   page_base, len,
                                                   DMA_TO_DEVICE);
                if (ib_dma_mapping_error(device, sge[sge_no].addr))
                        goto out_mapping_err;
                sge[sge_no].length = len;
                sge[sge_no].lkey = lkey;
                sc->sc_unmap_count++;
        }

out:
        sc->sc_wr.num_sge += sge_no;
        if (sc->sc_unmap_count)
                __set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
        return true;

out_regbuf:
        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
        return false;

out_mapping_overflow:
        rpcrdma_unmap_sendctx(sc);
        pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
        return false;

out_mapping_err:
        rpcrdma_unmap_sendctx(sc);
        pr_err("rpcrdma: Send mapping error\n");
        return false;
}

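/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */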
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
                          struct rpcrdma_req *req, u32 hdrlen,
                          struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
        req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
        if (!req->rl_sendctx)
                return -EAGAIN;
        req->rl_sendctx->sc_wr.num_sge = 0;
        req->rl_sendctx->sc_unmap_count = 0;
        req->rl_sendctx->sc_req = req;
        __clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

        if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
                return -EIO;

        if (rtype != rpcrdma_areadch)
                if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
                        return -EIO;

        return 0;
}

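/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Prepares the Send SGEs carrying the transport header and request
 *
 * Returns:
 *      %0 if the RPC was marshaled successfully,
 *      %-EAGAIN if the caller should call again with the same arguments,
 *      %-ENOBUFS if the caller should call again after a delay,
 *      %-EMSGSIZE if the transport header is too small,
 *      %-EIO if a permanent problem occurred while marshaling.
 */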
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct xdr_stream *xdr = &req->rl_stream;
        enum rpcrdma_chunktype rtype, wtype;
        bool ddp_allowed;
        __be32 *p;
        int ret;

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(xdr, &req->rl_hdrbuf,
                        req->rl_rdmabuf->rg_base);

        /* Fixed header fields */
        ret = -EMSGSIZE;
        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (!p)
                goto out_err;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

        /* When the ULP employs a GSS flavor that guarantees integrity
         * or privacy, direct data placement of individual data items
         * is not allowed.
         */
        ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
                        RPCAUTH_AUTH_DATATOUCH);

        /*
         * Chunks needed for results?
         *
         * o If the expected result is under the inline threshold, all ops
         *   return as inline.
         * o Large read ops return data as write chunk(s), header as
         *   inline.
         * o Large non-read ops return as a single reply chunk.
         */
        if (rpcrdma_results_inline(r_xprt, rqst))
                wtype = rpcrdma_noch;
        else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
                wtype = rpcrdma_writech;
        else
                wtype = rpcrdma_replych;

        /*
         * Chunks needed for arguments?
         *
         * o If the total request is under the inline threshold, all ops
         *   are sent as inline.
         * o Large write ops transmit data as read chunk(s), header as
         *   inline.
         * o Large non-write ops are sent with the entire message as a
         *   single read chunk (protocol 0-position special case).
         *
         * This assumes that the upper layer does not present a request
         * that both has a data payload, and whose non-data arguments
         * are larger than the inline threshold.
         */
        if (rpcrdma_args_inline(r_xprt, rqst)) {
                *p++ = rdma_msg;
                rtype = rpcrdma_noch;
        } else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
                *p++ = rdma_msg;
                rtype = rpcrdma_readch;
        } else {
                r_xprt->rx_stats.nomsg_call_count++;
                *p++ = rdma_nomsg;
                rtype = rpcrdma_areadch;
        }

        /* If this is a retransmit, discard previously registered
         * chunks. Very likely the connection has been replaced,
         * so these registrations are invalid and unusable.
         */
        while (unlikely(!list_empty(&req->rl_registered))) {
                struct rpcrdma_mr *mr;

                mr = rpcrdma_mr_pop(&req->rl_registered);
                rpcrdma_mr_defer_recovery(mr);
        }

        /* This implementation supports the following combinations
         * of chunk lists in one RPC-over-RDMA Call message:
         *
         *   - Read list
         *   - Write list
         *   - Reply chunk
         *   - Read list + Reply chunk
         *
         * It might not yet support the following combinations:
         *
         *   - Read list + Write list
         *
         * It does not support the following combinations:
         *
         *   - Write list + Reply chunk
         *   - Read list + Write list + Reply chunk
         *
         * This implementation supports only a single chunk in each
         * Read or Write list. Thus for example the client cannot
         * send a Call message with a Position Zero Read chunk and a
         * regular Read chunk at the same time.
         */
        if (rtype != rpcrdma_noch) {
                ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
                if (ret)
                        goto out_err;
        }
        ret = encode_item_not_present(xdr);
        if (ret)
                goto out_err;

        if (wtype == rpcrdma_writech) {
                ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
                if (ret)
                        goto out_err;
        }
        ret = encode_item_not_present(xdr);
        if (ret)
                goto out_err;

        if (wtype != rpcrdma_replych)
                ret = encode_item_not_present(xdr);
        else
                ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
        if (ret)
                goto out_err;

        trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

        ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
                                        &rqst->rq_snd_buf, rtype);
        if (ret)
                goto out_err;
        return 0;

out_err:
        switch (ret) {
        case -EAGAIN:
                xprt_wait_for_buffer_space(rqst->rq_task, NULL);
                break;
        case -ENOBUFS:
                break;
        default:
                r_xprt->rx_stats.failed_marshal_count++;
        }
        return ret;
}

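/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */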
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
        unsigned long fixup_copy_count;
        int i, npages, curlen;
        char *destp;
        struct page **ppages;
        int page_base;

        /* The head iovec is redirected to the RPC reply message
         * in the receive buffer, to avoid a memcopy.
         */
        rqst->rq_rcv_buf.head[0].iov_base = srcp;
        rqst->rq_private_buf.head[0].iov_base = srcp;

        /* The contents of the receive buffer that follow
         * head.iov_len bytes are copied into the page list.
         */
        curlen = rqst->rq_rcv_buf.head[0].iov_len;
        if (curlen > copy_len)
                curlen = copy_len;
        trace_xprtrdma_fixup(rqst, copy_len, curlen);
        srcp += curlen;
        copy_len -= curlen;

        ppages = rqst->rq_rcv_buf.pages +
                (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
        page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
        fixup_copy_count = 0;
        if (copy_len && rqst->rq_rcv_buf.page_len) {
                int pagelist_len;

                pagelist_len = rqst->rq_rcv_buf.page_len;
                if (pagelist_len > copy_len)
                        pagelist_len = copy_len;
                npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
                for (i = 0; i < npages; i++) {
                        curlen = PAGE_SIZE - page_base;
                        if (curlen > pagelist_len)
                                curlen = pagelist_len;

                        trace_xprtrdma_fixup_pg(rqst, i, srcp,
                                                copy_len, curlen);
                        destp = kmap_atomic(ppages[i]);
                        memcpy(destp + page_base, srcp, curlen);
                        flush_dcache_page(ppages[i]);
                        kunmap_atomic(destp);
                        srcp += curlen;
                        copy_len -= curlen;
                        fixup_copy_count += curlen;
                        pagelist_len -= curlen;
                        if (!pagelist_len)
                                break;
                        page_base = 0;
                }

                /* Implicit padding for the last segment in a Write
                 * chunk is inserted inline at the front of the tail
                 * iovec. The upper layer ignores the content of
                 * the pad. Simply ensure inline content in the tail
                 * that follows the Write chunk is properly aligned.
                 */
                if (pad)
                        srcp -= pad;
        }

        /* The tail iovec is redirected to the remaining data
         * in the receive buffer, to avoid a memcopy.
         */
        if (copy_len || pad) {
                rqst->rq_rcv_buf.tail[0].iov_base = srcp;
                rqst->rq_private_buf.tail[0].iov_base = srcp;
        }

        return fixup_copy_count;
}

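/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */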
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        if (rep->rr_proc != rdma_msg)
                return false;

        /* Peek at stream contents without advancing. */
        p = xdr_inline_decode(xdr, 0);

        /* Chunk lists */
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;

        /* RPC header */
        if (*p++ != rep->rr_xid)
                return false;
        if (*p != cpu_to_be32(RPC_CALL))
                return false;

        /* Now that we are sure this is a backchannel call,
         * advance to the RPC header.
         */
        p = xdr_inline_decode(xdr, 3 * sizeof(*p));
        if (unlikely(!p))
                goto out_short;

        rpcrdma_bc_receive_call(r_xprt, rep);
        return true;

out_short:
        pr_warn("RPC/RDMA short backward direction call\n");
        return true;
}
#else
{
        return false;
}
#endif

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
        u32 handle;
        u64 offset;
        __be32 *p;

        p = xdr_inline_decode(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        handle = be32_to_cpup(p++);
        *length = be32_to_cpup(p++);
        xdr_decode_hyper(p, &offset);

        trace_xprtrdma_decode_seg(handle, *length, offset);
        return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
        u32 segcount, seglength;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        segcount = be32_to_cpup(p);
        while (segcount--) {
                if (decode_rdma_segment(xdr, &seglength))
                        return -EIO;
                *length += seglength;
        }

        return 0;
}

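/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns a transport header
 * error if a Read list is present.
 */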
static int decode_read_list(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;
        if (unlikely(*p != xdr_zero))
                return -EIO;
        return 0;
}

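/* Supports exactly one Write chunk in the Write list
 */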
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
        u32 chunklen;
        bool first;
        __be32 *p;

        *length = 0;
        first = true;
        do {
                p = xdr_inline_decode(xdr, sizeof(*p));
                if (unlikely(!p))
                        return -EIO;
                if (*p == xdr_zero)
                        break;
                if (!first)
                        return -EIO;

                if (decode_write_chunk(xdr, &chunklen))
                        return -EIO;
                *length += chunklen;
                first = false;
        } while (true);
        return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        if (*p != xdr_zero)
                if (decode_write_chunk(xdr, length))
                        return -EIO;
        return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                   struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk, rpclen;
        char *base;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_MSG sanity checks */
        if (unlikely(replychunk))
                return -EIO;

        /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
        base = (char *)xdr_inline_decode(xdr, 0);
        rpclen = xdr_stream_remaining(xdr);
        r_xprt->rx_stats.fixup_copy_count +=
                rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

        r_xprt->rx_stats.total_rdma_reply += writelist;
        return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_NOMSG sanity checks */
        if (unlikely(writelist))
                return -EIO;
        if (unlikely(!replychunk))
                return -EIO;

        /* Reply chunk buffer already is the reply vector */
        r_xprt->rx_stats.total_rdma_reply += replychunk;
        return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                     struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        switch (*p) {
        case err_vers:
                p = xdr_inline_decode(xdr, 2 * sizeof(*p));
                if (!p)
                        break;
                dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
                        rqst->rq_task->tk_pid, __func__,
                        be32_to_cpup(p), be32_to_cpu(*(p + 1)));
                break;
        case err_chunk:
                dprintk("RPC: %5u: %s: server reports header decoding error\n",
                        rqst->rq_task->tk_pid, __func__);
                break;
        default:
                dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
                        rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
        }

        r_xprt->rx_stats.bad_reply_count++;
        return -EREMOTEIO;
}

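/* Finish the deferred portion of Receive completion: decode the
 * incoming transport header, then complete the RPC that is waiting
 * for this reply.
 */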
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpc_rqst *rqst = rep->rr_rqst;
        unsigned long cwnd;
        int status;

        xprt->reestablish_timeout = 0;

        switch (rep->rr_proc) {
        case rdma_msg:
                status = rpcrdma_decode_msg(r_xprt, rep, rqst);
                break;
        case rdma_nomsg:
                status = rpcrdma_decode_nomsg(r_xprt, rep);
                break;
        case rdma_error:
                status = rpcrdma_decode_error(r_xprt, rep, rqst);
                break;
        default:
                status = -EIO;
        }
        if (status < 0)
                goto out_badheader;

out:
        spin_lock(&xprt->recv_lock);
        cwnd = xprt->cwnd;
        xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
        if (xprt->cwnd > cwnd)
                xprt_release_rqst_cong(rqst->rq_task);

        xprt_complete_rqst(rqst->rq_task, status);
        xprt_unpin_rqst(rqst);
        spin_unlock(&xprt->recv_lock);
        return;

        /* The received transport header was corrupt or could not be
         * decoded. Terminate the waiting RPC with -EIO so the upper
         * layer can recover.
         */
out_badheader:
        trace_xprtrdma_reply_hdr(rep);
        r_xprt->rx_stats.bad_reply_count++;
        status = -EIO;
        goto out;
}

void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        /* Invalidate and unmap the data payloads before waking
         * the waiting application. This guarantees the memory
         * regions are properly fenced from the server before the
         * application accesses the data.
         */
        if (!list_empty(&req->rl_registered))
                r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
                                                    &req->rl_registered);

        /* Ensure that any DMA mapped pages associated with
         * the Send of the RPC Call have been unmapped before
         * allowing the RPC to complete. This protects argument
         * memory not controlled by the RPC client from being
         * re-used before we're done with it.
         */
        if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
                r_xprt->rx_stats.reply_waits_for_send++;
                out_of_line_wait_on_bit(&req->rl_flags,
                                        RPCRDMA_REQ_F_TX_RESOURCES,
                                        bit_wait,
                                        TASK_UNINTERRUPTIBLE);
        }
}

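/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */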
void rpcrdma_deferred_completion(struct work_struct *work)
{
        struct rpcrdma_rep *rep =
                        container_of(work, struct rpcrdma_rep, rr_work);
        struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

        trace_xprtrdma_defer_cmp(rep);
        if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
                r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
        rpcrdma_release_rqst(r_xprt, req);
        rpcrdma_complete_rqst(rep);
}

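/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */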
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        u32 credits;
        __be32 *p;

        --buf->rb_posted_receives;

        if (rep->rr_hdrbuf.head[0].iov_len == 0)
                goto out_badstatus;

        /* Fixed transport header fields */
        xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
                        rep->rr_hdrbuf.head[0].iov_base);
        p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
        if (unlikely(!p))
                goto out_shortreply;
        rep->rr_xid = *p++;
        rep->rr_vers = *p++;
        credits = be32_to_cpu(*p++);
        rep->rr_proc = *p++;

        if (rep->rr_vers != rpcrdma_version)
                goto out_badversion;

        if (rpcrdma_is_bcall(r_xprt, rep))
                return;

        /* Match incoming rpcrdma_rep to an rpcrdma_req to
         * get context for handling any incoming chunks.
         */
        spin_lock(&xprt->recv_lock);
        rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
        if (!rqst)
                goto out_norqst;
        xprt_pin_rqst(rqst);

        if (credits == 0)
                credits = 1;    /* don't deadlock */
        else if (credits > buf->rb_max_requests)
                credits = buf->rb_max_requests;
        buf->rb_credits = credits;

        spin_unlock(&xprt->recv_lock);

        req = rpcr_to_rdmar(rqst);
        req->rl_reply = rep;
        rep->rr_rqst = rqst;
        clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

        trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

        rpcrdma_post_recvs(r_xprt, false);
        queue_work(rpcrdma_receive_wq, &rep->rr_work);
        return;

out_badversion:
        trace_xprtrdma_reply_vers(rep);
        goto repost;

        /* The RPC transaction has already been terminated, or the
         * transport header is corrupt.
         */
out_norqst:
        spin_unlock(&xprt->recv_lock);
        trace_xprtrdma_reply_rqst(rep);
        goto repost;

out_shortreply:
        trace_xprtrdma_reply_short(rep);

        /* No pending RPC transaction was matched. Post a fresh
         * Receive buffer and drop this reply.
         */
repost:
        rpcrdma_post_recvs(r_xprt, false);
out_badstatus:
        rpcrdma_recv_buffer_put(rep);
}