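/*
 * rpc_rdma.c
 *
 * RPC/RDMA protocol marshaling and unmarshaling for the client-side
 * transport: constructing RPC-over-RDMA transport headers, registering
 * chunk lists, preparing Send SGEs, and decoding the transport headers
 * of replies received from the server.
 */
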
#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
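
/* Returns the size of the largest RPC-over-RDMA header in a Call
 * message: the fixed header fields, a full-size Read list of @maxsegs
 * segments, and a minimal (single-segment) Reply chunk.
 */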
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	size = RPCRDMA_HDRLEN_MIN;

	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	size += sizeof(__be32);
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);

	return size;
}
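
/* Returns the size of the largest RPC-over-RDMA header in a Reply
 * message. A Reply carries at most one Write list or one Reply chunk;
 * the Write list is the larger of the two, so size for a full Write
 * list of @maxsegs segments.
 */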
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	size = RPCRDMA_HDRLEN_MIN;

	size += sizeof(__be32);
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);

	return size;
}
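
/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @ep: endpoint to initialize
 *
 * Compute the maximum number of bytes of RPC payload that can be
 * sent or received inline, after reserving room for the largest
 * possible transport header.
 */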
void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep)
{
	unsigned int maxsegs = ep->re_max_rdma_segs;

	ep->re_max_inline_send =
		ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->re_max_inline_recv =
		ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}
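
/* The client can send a request inline as long as the RPC-over-RDMA
 * header plus the RPC call fit under the transport's inline limit.
 * If the combined call message size exceeds that limit, the client
 * must use Read chunks for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_send_sge limit.
 */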
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int count, remaining, offset;

	if (xdr->len > ep->re_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > ep->re_attr.cap.max_send_sge)
				return false;
		}
	}

	return true;
}
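
/* The client can't know how large the actual reply will be, so it
 * plans for the largest possible reply for that particular operation.
 * If the maximum combined reply message size exceeds the inline
 * threshold, the client must provide a Write list or a Reply chunk
 * for this request.
 */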
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep->re_max_inline_recv;
}

/* Split @vec on page boundaries into segments. Returns a pointer to
 * the next available segment and bumps *@n by the number of segments
 * consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
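
/* Convert @xdrbuf into rpcrdma_mr_seg entries no larger than a page
 * each. When the selected memory registration mode supports it, these
 * are later coalesced into RDMA segments.
 *
 * Returns the positive number of segments consumed, or a negative errno.
 */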
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* A sparse page buffer may have unallocated pages;
		 * allocate them on demand.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains only an
	 * XDR pad and may be omitted when the responder handles XDR
	 * roundup implicitly.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup)
		goto out;

	/* Likewise, the tail iovec of a Write chunk exists only to
	 * receive XDR roundup, and can be left unregistered when the
	 * responder performs implicit roundup.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;
	xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length,
				mr->mr_offset);
	return 0;
}

static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
						 struct rpcrdma_req *req,
						 struct rpcrdma_mr_seg *seg,
						 int nsegs, bool writing,
						 struct rpcrdma_mr **mr)
{
	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
	if (!*mr) {
		*mr = rpcrdma_mr_get(r_xprt);
		if (!*mr)
			goto out_getmr_err;
		trace_xprtrdma_mr_get(req);
		(*mr)->mr_req = req;
	}

	rpcrdma_mr_push(*mr, &req->rl_registered);
	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
	trace_xprtrdma_nomrs(req);
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	rpcrdma_mrs_refresh(r_xprt);
	return ERR_PTR(-EAGAIN);
}
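
/* Register and XDR encode the Read list. A Read list is a series of
 * read segments that all share the same position value; the list is
 * terminated by an "absent" item.
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */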
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct rpc_rqst *rqst,
				    enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch_pullup || rtype == rpcrdma_noch_mapped)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	if (xdr_stream_encode_item_absent(xdr) < 0)
		return -EMSGSIZE;
	return 0;
}
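
/* Register and XDR encode the Write list. The Write list carries at
 * most one chunk: an array of handle/length/offset segments covering
 * the portion of the receive buffer the server may write directly.
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */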
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct rpc_rqst *rqst,
				     enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	if (xdr_stream_encode_item_absent(xdr) < 0)
		return -EMSGSIZE;
	return 0;
}
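
/* Register and XDR encode the Reply chunk. Like a Write chunk, the
 * Reply chunk is an array of segments, but it describes the whole
 * receive buffer rather than just the payload.
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */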
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
				      struct rpcrdma_req *req,
				      struct rpc_rqst *rqst,
				      enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		if (xdr_stream_encode_item_absent(xdr) < 0)
			return -EMSGSIZE;
		return 0;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}
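
/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */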
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}
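
/* Prepare an SGE for the RPC-over-RDMA transport header.
 */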
static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
}

/* The head iovec is straightforward, as it is usually already
 * DMA-mapped. Sync the content that has changed.
 */
static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		return false;

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	return true;
}
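
/* If there is a page list present, DMA map and prepare an
 * SGE for each page to be sent.
 */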
static bool rpcrdma_prepare_pagelist(struct rpcrdma_req *req,
				     struct xdr_buf *xdr)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	unsigned int page_base, len, remaining;
	struct page **ppages;
	struct ib_sge *sge;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		sge = &sc->sc_sges[req->rl_wr.num_sge++];
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
					    page_base, len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
			goto out_mapping_err;

		sge->length = len;
		sge->lkey = rdmab_lkey(rb);

		sc->sc_unmap_count++;
		ppages++;
		remaining -= len;
		page_base = 0;
	}

	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* The tail iovec may include an XDR pad for the page list,
 * as well as additional content, and may not reside in the
 * same page as the head iovec.
 */
static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     unsigned int page_base, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct page *page = virt_to_page(xdr->tail[0].iov_base);

	sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
		goto out_mapping_err;

	sge->length = len;
	sge->lkey = rdmab_lkey(rb);
	++sc->sc_unmap_count;
	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* Copy the tail to the end of the head buffer.
 */
static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned char *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len + xdr->page_len;
	memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
	r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
}

/* Copy pagelist content into the head buffer.
 */
static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned int len, page_base, remaining;
	struct page **ppages;
	unsigned char *src, *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len;
	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		src = page_address(*ppages);
		src += page_base;
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		memcpy(dst, src, len);
		r_xprt->rx_stats.pullup_copy_count += len;

		ppages++;
		dst += len;
		remaining -= len;
		page_base = 0;
	}
}
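
/* Copy the RPC Call's page list and tail into the head buffer, so
 * the whole message can be conveyed with a single Send SGE.
 */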
static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	if (unlikely(xdr->tail[0].iov_len))
		rpcrdma_pullup_tail_iov(r_xprt, req, xdr);

	if (unlikely(xdr->page_len))
		rpcrdma_pullup_pagelist(r_xprt, req, xdr);

	/* The whole RPC message resides in the head iovec now */
	return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
}

static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	struct kvec *tail = &xdr->tail[0];

	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;
	if (xdr->page_len)
		if (!rpcrdma_prepare_pagelist(req, xdr))
			return false;
	if (tail->iov_len)
		if (!rpcrdma_prepare_tail_iov(req, xdr,
					      offset_in_page(tail->iov_base),
					      tail->iov_len))
			return false;

	if (req->rl_sendctx->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;
}

static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
				   struct rpcrdma_req *req,
				   struct xdr_buf *xdr)
{
	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;

	/* When a Read chunk is used, the page list is moved by
	 * explicit RDMA and is not sent inline. The tail iovec,
	 * however, may carry content beyond the XDR pad and must
	 * still be sent.
	 */
	if (xdr->tail[0].iov_len > 3) {
		unsigned int page_base, len;

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() adds a pad at the beginning of
		 * the tail iovec. Skip that pad so the tail's non-pad
		 * content lands at the next XDR position in the Send
		 * message.
		 */
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;
		page_base += len & 3;
		len -= len & 3;
		if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
			return false;
		kref_get(&req->rl_kref);
	}

	return true;
}
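
/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */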
inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, u32 hdrlen,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto out_nosc;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);
	req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe;
	req->rl_wr.sg_list = req->rl_sendctx->sc_sges;
	req->rl_wr.num_sge = 0;
	req->rl_wr.opcode = IB_WR_SEND;

	rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);

	ret = -EIO;
	switch (rtype) {
	case rpcrdma_noch_pullup:
		if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_noch_mapped:
		if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_readch:
		if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_areadch:
		break;
	default:
		goto out_unmap;
	}

	return 0;

out_unmap:
	rpcrdma_sendctx_unmap(req->rl_sendctx);
out_nosc:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}
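
/**
 * rpcrdma_marshal_req - Marshal one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Prepares the Send SGEs for the transport header and RPC Call
 *
 * Returns zero on success, or a negative errno if marshaling failed.
 */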
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	struct xdr_buf *buf = &rqst->rq_snd_buf;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !test_bit(RPCAUTH_AUTH_DATATOUCH,
				&rqst->rq_cred->cr_auth->au_flags);

	/* Chunks needed for results:
	 *
	 * o If the expected result is under the inline threshold, all
	 *   ops return as inline.
	 * o Large read ops return data as Write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single Reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/* Chunks needed for arguments:
	 *
	 * o If the total request is under the inline threshold, all
	 *   ops are sent as inline.
	 * o Large write ops transmit data as Read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single Read chunk (protocol 0-position special case).
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = buf->len < rdmab_length(req->rl_sendbuf) ?
			rpcrdma_noch_pullup : rpcrdma_noch_mapped;
	} else if (ddp_allowed && buf->flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports only a single chunk of each
	 * type per Call. Encode the Read list, then the Write list,
	 * then the Reply chunk, in that order.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}

static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
					 struct rpcrdma_buffer *buf,
					 u32 grant)
{
	buf->rb_credits = grant;
	xprt->cwnd = grant << RPC_CWNDSHIFT;
}

static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
	spin_unlock(&xprt->transport_lock);
}
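
/**
 * rpcrdma_reset_cwnd - Reset the xprt's congestion window
 * @r_xprt: controlling transport instance
 *
 * Prepare @r_xprt for the next connection by reinitializing
 * its credit grant to one.
 */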
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	xprt->cong = 0;
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
	spin_unlock(&xprt->transport_lock);
}
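
/* rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to the RPC message payload in the receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * In many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly at the received reply data, to avoid
 * copying it. Only the page list, when present, must be copied.
 *
 * Returns the count of bytes that had to be memcopied.
 */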
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	if (fixup_copy_count)
		trace_xprtrdma_fixup(rqst, fixup_copy_count);
	return fixup_copy_count;
}
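
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */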
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (xdr_item_is_present(p++))
		return false;
	if (xdr_item_is_present(p++))
		return false;
	if (xdr_item_is_present(p++))
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	xdr_decode_rdma_segment(p, &handle, length, &offset);
	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present in a reply.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(xdr_item_is_present(p)))
		return -EIO;
	return 0;
}

/* Supports exactly one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (xdr_item_is_absent(p))
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (xdr_item_is_present(p))
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC: %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC: %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	return -EIO;
}
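
/**
 * rpcrdma_complete_rqst - Pass a completed rqst back to the RPC layer
 * @rep: RPC/RDMA Receive context containing the reply
 *
 * Decode the reply's transport header, rebuild the RPC reply in
 * rqst->rq_rcv_buf, and hand the request back to the RPC layer.
 */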
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	rqst->rq_task->tk_status = status;
	status = 0;
	goto out;
}

static void rpcrdma_reply_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	rpcrdma_complete_rqst(req->rl_reply);
}
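
/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */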
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Any data means we had a useful conversation, so
	 * there is no need to delay the next reconnect.
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match the incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling the reply.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_ep->re_max_requests)
		credits = r_xprt->rx_ep->re_max_requests;
	if (buf->rb_credits != credits)
		rpcrdma_update_cwnd(r_xprt, credits);
	rpcrdma_post_recvs(r_xprt, false);

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	if (!list_empty(&req->rl_registered))
		frwr_unmap_async(r_xprt, req);
		/* LocalInv completion will complete the RPC */
	else
		kref_put(&req->rl_kref, rpcrdma_reply_done);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}