#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

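/* Returns the size of the largest RPC-over-RDMA header in a Call
 * message: the fixed header fields, a maximally-sized Read list,
 * and one more segment count, segment, and list discriminator.
 */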
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	size += sizeof(__be32);		/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);	/* one segment */
	size += sizeof(__be32);		/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}

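/* Returns the size of the largest RPC-over-RDMA header in a Reply
 * message: the fixed header fields plus a maximally-sized Reply chunk.
 */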
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Reply chunk size */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);		/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

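/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields are set to the maximum inline payload sizes
 * after the transport header sizes are subtracted from the inline
 * thresholds.
 */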
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

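/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's limit on the number of Send SGEs.
 */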
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

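/* The client can't know how large the actual reply will be, so it
 * provisions for the largest possible reply for this particular ULP
 * operation. If the expected reply does not fit inline, a Write list
 * or a Reply chunk is required for this request.
 */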
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}

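/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */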
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}

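/* Split @vec on page boundaries into segments. Each segment covers
 * no more than one page.
 *
 * Returns a pointer to the next free segment, and bumps the segment
 * count in @n.
 */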
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}

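/* Convert @xdrbuf into segments no larger than a page each. As they
 * are registered, these segments are coalesced into RDMA segments
 * when the underlying memory registration mode supports it.
 *
 * Returns the number of segments converted, or a negative errno.
 */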
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* Some upper layers defer allocating receive pages;
		 * provide one here if the buffer is marked sparse.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains only
	 * an XDR pad and may be omitted if the peer performs
	 * implicit roundup.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The tail
	 * iovec provides that extra segment unless the peer performs
	 * implicit roundup.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}

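/* Register and XDR encode the Read list. Supports encoding a list of
 * read segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */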
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;
}

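/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */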
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

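/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */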
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

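/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer SGEs
 * @sc: sendctx containing SGEs to unmap
 *
 */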
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct ib_sge *sge;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
				  DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES,
			       &sc->sc_req->rl_flags))
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
}

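/* Prepare an SGE for the RPC-over-RDMA transport header.
 */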
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

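/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */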
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_sge *sge = sc->sc_sges;
	struct page *page, **ppages;

	/* The head iovec resides in the already-mapped Send buffer;
	 * just sync the content that has changed.
	 */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sc->sc_device = rdmab_device(rb);
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is handled via
	 * explicit RDMA and is skipped here. However, the tail iovec
	 * may include an XDR pad for the page list as well as
	 * additional content, and may not reside in the same page
	 * as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* Skip the XDR pad bytes at the start of the tail so
		 * that only the tail's non-pad content is mapped.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list, DMA map and prepare an SGE for
	 * each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr =
				ib_dma_map_page(rdmab_device(rb), *ppages,
						page_base, len, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdmab_device(rb),
						 sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = rdmab_lkey(rb);

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec may not reside in the same page as the
	 * head iovec, so DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr =
			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = rdmab_lkey(rb);
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_sendctx_unmap(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_sendctx_unmap(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}

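/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */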
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		return -EAGAIN;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			return -EIO;

	return 0;
}

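/**
 * rpcrdma_marshal_req - Marshal one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Prepares the Send SGEs for the transport header and payload
 *
 * Returns:
 *	%0 if the request was marshaled successfully,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	or another negative errno if a permanent problem occurred
 *	while marshaling.
 */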
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/* Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all
	 *   ops return as inline.
	 * o Large read ops return data as a Write chunk, header inline.
	 * o Large non-read ops return the entire reply via a Reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/* Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all
	 *   ops are sent as inline.
	 * o Large write ops transmit data as a Read chunk, header inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single Position Zero Read chunk.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_recycle(mr);
	}

	/* This implementation generates at most one Read list and one
	 * Write list or Reply chunk per Call message, and supports
	 * only a single chunk in each Read or Write list.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	switch (ret) {
	case -EAGAIN:
		xprt_wait_for_buffer_space(rqst->rq_xprt);
		break;
	case -ENOBUFS:
		break;
	default:
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}

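/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in the receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * In many cases this function simply updates the iov_base pointers in
 * rq_rcv_buf to point directly at the received reply data, to avoid
 * copying; only the page list content is copied.
 *
 * Returns the count of bytes which had to be memcopied.
 */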
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. Back up over the pad so that tail content
		 * that follows the Write chunk stays XDR-aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

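/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */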
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else
{
	return false;
}
#endif

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

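/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder rejects a reply that carries one.
 */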
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

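/* Supports exactly one Write chunk in the Write list. Returns the
 * number of payload bytes the Write list covers in @length.
 */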
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* The Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC: %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC: %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

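/**
 * rpcrdma_complete_rqst - Pass completed rqst back to RPC
 * @rep: RPC/RDMA Receive context
 *
 * Reconstruct the RPC reply and complete the transaction
 * while @rqst is still pinned to ensure the rep, rqst, and
 * rq_task pointers remain stable.
 */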
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

	/* The rqst is still completed, with the error status, so
	 * that the RPC task can discover the failure.
	 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}

void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application invokes the ULP's callback.
	 */
	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, &req->rl_registered);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not owned by the RPC client from being written
	 * after the Send completes.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}

/* Reply processing that might block (invalidation, waiting for the
 * matching Send to complete) is deferred to this work item, which
 * runs from a workqueue rather than from the Receive completion
 * handler.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}

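/* Process a received RPC/RDMA message.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the error at that time.
 */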
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match the incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits) {
		spin_lock_bh(&xprt->transport_lock);
		buf->rb_credits = credits;
		xprt->cwnd = credits << RPC_CWNDSHIFT;
		spin_unlock_bh(&xprt->transport_lock);
	}

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;
	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
	queue_work(buf->rb_completion_wq, &rep->rr_work);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}