/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * is responsible for marshaling/unmarshaling, and hooking up
 * to the rpc xprt code.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif
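
/*
 * Layout note: the RPC-over-RDMA transport header marshaled and parsed
 * below (see RFC 5666) is four 32-bit words -- XID, version, credits,
 * and message type -- followed by three chunk lists: the read list,
 * the write list, and the reply chunk. When all three lists are empty,
 * each is a single xdr_zero discriminator, giving the minimal 28-byte
 * header (16 + 12) assumed by the hdrlen and rr_len arithmetic below.
 */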

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}
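
/*
 * Worked example: with PAGE_SIZE of 4096, a page list carrying 10000
 * bytes that starts at page_base 100 converts to three segments:
 * 3996 bytes at offset 100 in the first page, then 4096 bytes and
 * 1908 bytes at offset 0 in the next two pages.
 */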

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *     protocol is simple enough to allow this as it only has a single "bulk
 *     XDR" argument in its headers, and the upper layers only use write
 *     chunks in one place (read replies).
 *
 *   When used for a single reply chunk (which is a special write
 *     chunk used for the entire reply, rather than just the data), it
 *     is used primarily for READDIR and READLINK which would otherwise
 *     be severely write-chunked.
 *
 *   Returns positive RPC/RDMA header size, or negative errno.
 */
static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	do {
		n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	/* failure: unwind any registrations made so far (not needed
	 * for FRMR) */
	if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) {
		for (pos = 0; nchunks--;)
			pos += rpcrdma_deregister_external(
					&req->rl_segments[pos], r_xprt);
	}
	return n;
}
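
/*
 * Sizing example: each read chunk list entry is 24 bytes (discriminator,
 * position, handle, length, 64-bit offset), so a header carrying one
 * read chunk is 16 fixed bytes + 24 bytes + three terminating xdr_zero
 * words = 52 bytes, the value rpcrdma_create_chunks() returns above.
 */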

/*
 * Marshal chunks. This routine returns the header length
 * consumed by marshaling.
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
ssize_t
rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;

	if (req->rl_rtype != rpcrdma_noch)
		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, req->rl_rtype);
	else if (req->rl_wtype != rpcrdma_noch)
		result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, req->rl_wtype);
	return result;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
	return pad;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen, padlen;
	ssize_t hdrlen;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		req->rl_wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		req->rl_wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		req->rl_wtype = rpcrdma_writech;
	else
		req->rl_wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		req->rl_rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		req->rl_rtype = rpcrdma_areadch;
	else
		req->rl_rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
		req->rl_wtype = rpcrdma_noch;
	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (req->rl_rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
			if (req->rl_wtype != rpcrdma_noch) {
				dprintk("RPC:       %s: invalid chunk list\n",
					__func__);
				return -EIO;
			}
		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (req->rl_wtype == rpcrdma_noch)
				req->rl_wtype = rpcrdma_replych;
		}
	}

	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}
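
/*
 * Sizing note: for RDMA_MSGP the header grows by exactly the two
 * padding words rm_align and rm_thresh (28 + 2 * 4 = 36 bytes), which
 * is what the "hdrlen += 2 * sizeof(u32)" above accounts for; the
 * three empty chunk lists are the same size in both message types.
 */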

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk,
		     __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
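
/*
 * Worked example: a write list whose counted array holds two segments
 * of 4096 and 1500 bytes returns total_len 5596; *iptrp is advanced
 * past the array (and past the xdr_zero that terminates a write chunk
 * list) so the caller can continue decoding the header.
 */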

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/*
 * Called as a tasklet to do req/reply match and complete a request
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			headerp->rm_xid);
		goto repost;
	}

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, ntohl(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}