1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48#include "xprt_rdma.h"
49
50#include <linux/highmem.h>
51
52#ifdef RPC_DEBUG
53# define RPCDBG_FACILITY RPCDBG_TRANS
54#endif
55
/*
 * Transfer mode chosen for an RPC: how the argument/result payload
 * is represented on the wire (inline data vs. RDMA chunk lists).
 */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* no chunks: entire message sent inline */
	rpcrdma_readch,		/* payload moved via read chunk list */
	rpcrdma_areadch,	/* entire request moved via read chunk list */
	rpcrdma_writech,	/* reply data returned via write chunk list */
	rpcrdma_replych		/* entire reply returned via reply chunk */
};
63
#ifdef RPC_DEBUG
/* Human-readable names, indexed by enum rpcrdma_chunktype, for dprintk */
static const char transfertypes[][12] = {
	"pure inline",	/* rpcrdma_noch */
	" read chunk",	/* rpcrdma_readch */
	"*read chunk",	/* rpcrdma_areadch */
	"write chunk",	/* rpcrdma_writech */
	"reply chunk"	/* rpcrdma_replych */
};
#endif
73
74
75
76
77
78
79
80
81
82
83
84
85static int
86rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
87 enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
88{
89 int len, n = 0, p;
90
91 if (pos == 0 && xdrbuf->head[0].iov_len) {
92 seg[n].mr_page = NULL;
93 seg[n].mr_offset = xdrbuf->head[0].iov_base;
94 seg[n].mr_len = xdrbuf->head[0].iov_len;
95 ++n;
96 }
97
98 if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
99 if (n == nsegs)
100 return 0;
101 seg[n].mr_page = xdrbuf->pages[0];
102 seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
103 seg[n].mr_len = min_t(u32,
104 PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
105 len = xdrbuf->page_len - seg[n].mr_len;
106 ++n;
107 p = 1;
108 while (len > 0) {
109 if (n == nsegs)
110 return 0;
111 seg[n].mr_page = xdrbuf->pages[p];
112 seg[n].mr_offset = NULL;
113 seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
114 len -= seg[n].mr_len;
115 ++n;
116 ++p;
117 }
118 }
119
120 if (xdrbuf->tail[0].iov_len) {
121
122
123 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
124 return n;
125 if (n == nsegs)
126 return 0;
127 seg[n].mr_page = NULL;
128 seg[n].mr_offset = xdrbuf->tail[0].iov_base;
129 seg[n].mr_len = xdrbuf->tail[0].iov_len;
130 ++n;
131 }
132
133 return n;
134}
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
/*
 * Create read/write chunk lists, and reply chunks, for RDMA.
 *
 * Encodes the chunk list for "target" into the transport header at
 * "headerp" according to "type", registering each segment run with
 * the hardware as it goes. Returns the number of header bytes
 * consumed by the encoding, or 0 on failure (in which case all
 * registrations made so far are torn down).
 */
static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
	struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
	int nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	/* Whole-message transfers start at XDR position zero; otherwise
	 * the chunked data begins after the head iovec. */
	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* Register this run of segments, then encode the result
		 * as one chunk element in the header. */
		int n = rpcrdma_register_external(seg, nsegs,
				cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks in a request share one position */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return via the "out" teardown path */
	req->rl_nchunks = nchunks;

	BUG_ON(nchunks == 0);
	/* NOTE(review): FRMR appears limited to 3 chunks here -- confirm
	 * against the registration strategy's actual capacity. */
	BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
	       && (nchunks > 3));

	/*
	 * Finish off the header. If write, terminate the chunk list and
	 * the (already counted) write array.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size: distance from start of header to the
	 * first byte past the encoded chunk lists.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	/* Registration failed partway: deregister everything done so far. */
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}
284
285
286
287
288
289
290
291
/*
 * Copy write data inline.
 * Used for "small" requests: data from the snd_buf page list (and
 * tail) is copied directly after the existing contents of the first
 * send iovec (rq_svec[0]), so the whole request goes out inline.
 *
 * Returns the pad amount to insert before the data, or 0 when no
 * padding should be applied to this request.
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;	/* append after existing header data */

	/*
	 * Decide whether padding applies. The "36" is a presumed
	 * upper-layer header size beyond this iovec -- TODO confirm
	 * its derivation. Padding is skipped when the remaining data
	 * is below RPCRDMA_INLINE_PAD_THRESH.
	 */
	pad -= (curlen + 36);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		/* Move the tail to sit right after where the page data
		 * will land, unless it is already in place. */
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}

	r_xprt->rx_stats.pullup_copy_count += copy_len;
	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		/* First page may begin at a non-zero offset (page_base). */
		if (i == 0)
			curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
		else
			curlen = PAGE_SIZE;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
				KM_SKB_SUNRPC_DATA);
		if (i == 0)
			memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
		else
			memcpy(destp, srcp, curlen);
		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
	}
	/* rq_svec[0] now contains the entire send message */
	return pad;
}
353
354
355
356
357
358
359
360
361
362
363
364
365
366
/*
 * Marshal a request: choose the transfer mode for arguments (rtype)
 * and results (wtype), encode the RPC/RDMA header, and set up the
 * send iovecs.
 *
 * Send iovec layout:
 *  [0] -- the RPC/RDMA header, built in req->rl_base;
 *  [1] -- the RPC call message already marshaled into rq_svec[0];
 *  [2] -- optional preregistered pad buffer (RDMA_MSGP only);
 *  [3] -- when padded, the remaining send data after the pad.
 *
 * Returns 0 on success, -1 on failure.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen is the amount of RPC data already marshaled into the
	 * first send iovec.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* Build the RDMA header in the private area at the front. */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* rq_xid is already in network byte order -- do not htonl */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result fits under the inline threshold, no
	 *   chunks are needed (but see the pullup path below).
	 * o Large replies without page data use a single reply chunk.
	 * o Large read-class replies (XDRBUF_READ) return the data as
	 *   write chunk(s), with the header inline.
	 * o Anything else large uses a reply chunk.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request fits under the inline threshold, send
	 *   everything inline.
	 * o Large requests without page data send the entire message as
	 *   a single read chunk at XDR position zero.
	 * o Large requests with page data send the payload as read
	 *   chunk(s), with the header inline.
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* Simplification: never use read and write chunks in the same
	 * request (drop the reply chunk when read chunks are present). */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* bounce buffers are "pure inline" only - cannot chunk */
		dprintk("RPC: %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

	hdrlen = 28;	/* fixed RPC/RDMA header size, sans chunk lists */
	padlen = 0;

	/*
	 * For inline sends, pull any extra send data up into the
	 * preregistered buffer. When padding applies, switch the
	 * message type to RDMA_MSGP and encode the pad parameters.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			/* no padding: encode three empty chunk lists */
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * NOTE(review): for strategies above
			 * RPCRDMA_REGISTER a reply chunk is forced here
			 * even though the reply was predicted to fit
			 * inline -- presumably those strategies require
			 * it; confirm against the verbs/provider code.
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal chunk lists. rpcrdma_create_chunks() returns the
	 * resulting header length, or zero on failure.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk below */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	if (hdrlen == 0)
		return -1;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * Initialize send iovecs: normally just the RDMA header and the
	 * RPC message; with padding, the pad buffer and trailing data
	 * are added as two more iovecs.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}
557
558
559
560
561
562static int
563rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
564{
565 unsigned int i, total_len;
566 struct rpcrdma_write_chunk *cur_wchunk;
567
568 i = ntohl(**iptrp);
569 if (i > max)
570 return -1;
571 cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
572 total_len = 0;
573 while (i--) {
574 struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
575 ifdebug(FACILITY) {
576 u64 off;
577 xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
578 dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
579 __func__,
580 ntohl(seg->rs_length),
581 (unsigned long long)off,
582 ntohl(seg->rs_handle));
583 }
584 total_len += ntohl(seg->rs_length);
585 ++cur_wchunk;
586 }
587
588 if (wrchunk) {
589 __be32 *w = (__be32 *) cur_wchunk;
590 if (*w++ != xdr_zero)
591 return -1;
592 cur_wchunk = (struct rpcrdma_write_chunk *) w;
593 }
594 if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
595 return -1;
596
597 *iptrp = (__be32 *) cur_wchunk;
598 return total_len;
599}
600
601
602
603
/*
 * Scatter inline received data back into the provided receive buffer:
 * point the head iovec at srcp, copy the remainder into the page list
 * and tail, then append "pad" zero bytes to the tail.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* received data shorter than head */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* The head segment is not copied: just repoint it at srcp. */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;	/* remember for page_len fixup and stats */
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			/* First page may start at a non-zero offset. */
			if (i == 0)
				curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
			else
				curlen = PAGE_SIZE;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
					KM_SKB_SUNRPC_DATA);
			if (i == 0)
				memcpy(destp + rqst->rq_rcv_buf.page_base,
						srcp, curlen);
			else
				memcpy(destp, srcp, curlen);
			flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		/* source and destination may overlap -- use memmove */
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk: zero-fill the tail */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* Keep the private copy in sync with the fixed-up receive buffer. */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
686
687
688
689
690
691
692
693void
694rpcrdma_conn_func(struct rpcrdma_ep *ep)
695{
696 struct rpc_xprt *xprt = ep->rep_xprt;
697
698 spin_lock_bh(&xprt->transport_lock);
699 if (++xprt->connect_cookie == 0)
700 ++xprt->connect_cookie;
701 if (ep->rep_connected > 0) {
702 if (!xprt_test_and_set_connected(xprt))
703 xprt_wake_pending_tasks(xprt, 0);
704 } else {
705 if (xprt_test_and_clear_connected(xprt))
706 xprt_wake_pending_tasks(xprt, -ENOTCONN);
707 }
708 spin_unlock_bh(&xprt->transport_lock);
709}
710
711
712
713
714
/*
 * Completion callback installed on a rep during memory-window
 * deregistration: wake whoever is waiting on rr_unbind.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}
720
721
722
723
724
725
/*
 * Handle a received RPC/RDMA reply: validate the header, match it to
 * the outstanding request by XID, account for any chunked data, fix
 * up inline data, start memory-window deregistration if needed, and
 * complete the RPC. Errors either wake the task with a status or
 * repost the receive buffer and let the request time out.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* A length of ~0 marks a failed receive: signal disconnect and
	 * return the rep to the pool. */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	/* Must hold at least the fixed 28-byte RPC/RDMA header. */
	if (rep->rr_len < 28) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Match the reply to a pending request by XID. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:		/* shared error path: count, then repost the receive */
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	req = rpcr_to_rdmar(rqst);

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		" RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	/* A request must never already have a reply attached. */
	BUG_ON(!req || req->rl_reply);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* Compute "status" (bytes of reply data) per message type.
	 * The switch matches rm_type in network byte order. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* Reject: any read chunks; a reply chunk without write
		 * chunks; or write chunks when we offered none. */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* Count the write chunks, starting at the write
			 * chunk array count. */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* must be valid, with no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - the last chunk may omit XDR
			 * padding; round the length up to 4 bytes */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* ordinary inline reply: data follows the header */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28;
			status = rep->rr_len;
		}
		/* Scatter the inline portion into the receive buffer. */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* no read or write chunks allowed; a reply chunk is
		 * mandatory and we must have offered one */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* reply data already landed via RDMA - no inline fixup */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, ntohl(headerp->rm_type),
			headerp->rm_body.rm_chunks[0],
			headerp->rm_body.rm_chunks[1],
			headerp->rm_body.rm_chunks[2],
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* For memory-window strategies, start deregistration now.
	 * NOTE(review): this runs in interrupt/tasklet context --
	 * presumably why only MEMWINDOWS strategies are handled here. */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* the last unbind wakes via rpcrdma_unbind_func */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						   r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
							 r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}
885