/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif
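/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * the read chunk list for this operation.
 */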
static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
{
	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;

	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
}
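/* The client can't know how large the actual reply will be. Thus it
 * must provision for the largest possible reply for that particular
 * ULP operation. If the maximum combined reply message size exceeds
 * the inline threshold, the client must provide a write list or a
 * reply chunk for this request.
 */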
static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
{
	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;

	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
}
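/* If the tail iovec starts with an XDR pad, shift its real content
 * so it lands at the next XDR position after the head. Returns the
 * number of tail bytes that must be sent inline.
 */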
static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
	size_t tlen = buf->tail[0].iov_len;
	size_t skip = tlen & 3;

	/* Do not include the tail if it is only an XDR pad */
	if (tlen < 4)
		return 0;

	/* xdr_write_pages() adds a pad at the beginning of the tail
	 * if the content in "buf->pages" is unaligned. Force the
	 * tail's actual content to land at the next XDR position
	 * after the head instead.
	 */
	if (skip) {
		unsigned char *src, *dst;
		unsigned int count;

		src = buf->tail[0].iov_base;
		dst = buf->head[0].iov_base;
		dst += buf->head[0].iov_len;

		src += skip;
		tlen -= skip;

		dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
			__func__, skip, dst, src, tlen);

		for (count = tlen; count; count--)
			*dst++ = *src++;
	}

	return tlen;
}
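/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */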
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     int n, int nsegs)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < nsegs) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}
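/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf to be used for RDMA. It is split on
 * page boundaries into a list of segments, starting at write position
 * "pos" within the buffer. Returns the number of segments converted,
 * or a negative errno.
 */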
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n, nsegs);
		if (n == nsegs)
			return -EIO;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	/* When encoding the read list, the tail is always sent inline */
	if (type == rpcrdma_readch)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n, nsegs);
		if (n == nsegs)
			return -EIO;
	}

	return n;
}
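/*
 * Create read/write chunk lists, and reply chunks, for RDMA.
 *
 * Assume the check against the inline thresholds has already been
 * done, so chunks are required. Each segment returned by
 * rpcrdma_convert_iovs() is registered via the transport's ro_map
 * method, then encoded into the header as an (rkey, length, offset)
 * triplet.
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */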
static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
				cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
				cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
				cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
				cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}
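/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * by RPC via iovecs in the request is copied directly into the
 * pre-registered send buffer, so no chunks need to be marshaled.
 */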
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp + page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}
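/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Prepares up to two IOVs per Call message:
 *
 *  [0] -- the RPC/RDMA header
 *  [1] -- the RPC header and data
 *
 * Returns zero on success, otherwise a negative errno.
 */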
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return via reply chunk or client must
	 *   provide a reply chunk.
	 */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else if (rpcrdma_results_inline(rqst))
		wtype = rpcrdma_noch;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(rqst)) {
		rtype = rpcrdma_noch;
	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it into the header.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (rtype == rpcrdma_readch)
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: the RPC/RDMA header
	 * and the single preregistered RPC buffer. An RDMA_NOMSG call
	 * sends the header alone.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_niovs = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}
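/*
 * Walk a received write or reply chunk list, summing the chunk
 * lengths and advancing *iptrp past the list. Returns the total
 * RDMA'd length, or -1 if the list is malformed or overruns the
 * receive buffer.
 */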
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
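/*
 * Scatter inline received data back into provided iov's.
 */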
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
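/* Runs from a work queue: update the transport's connection state
 * under the transport lock, and wake any tasks waiting on it.
 */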
void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
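/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */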
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *p = (__be32 *)headerp;

	if (headerp->rm_type != rdma_msg)
		return false;
	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
		return false;

	/* sanity */
	if (p[7] != headerp->rm_xid)
		return false;
	/* call direction */
	if (p[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
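/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */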
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}
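/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discourage denial-of-service attacks.
 */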
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status, rmerr;
	unsigned long cwnd;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_nomatch;

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply)
		goto out_duplicate;

	/* Sanity checking has passed. We are now committed
	 * to complete this transaction.
	 */
	list_del_init(&rqst->rq_list);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

	case rdma_error:
		goto out_rdmaerr;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

out:
	/* Invalidate and flush the data payloads before waking the
	 * waiting application. This guarantees the memory region is
	 * properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow
	 * control: waking the next RPC waits until this RPC has
	 * relinquished all its Send Queue entries.
	 */
	if (req->rl_nchunks)
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

	spin_lock_bh(&xprt->transport_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	status = -EIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

out_rdmaerr:
	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
	switch (rmerr) {
	case ERR_VERS:
		pr_err("%s: server reports header version error (%u-%u)\n",
		       __func__,
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
		break;
	case ERR_CHUNK:
		pr_err("%s: server reports header decoding error\n",
		       __func__);
		break;
	default:
		pr_err("%s: server reports unknown error %d\n",
		       __func__, rmerr);
	}
	status = -EREMOTEIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);
	goto repost;

out_nomatch:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		rpcrdma_recv_buffer_put(rep);
}