/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include <rdma/rw.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * a single segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	int			rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[0];
};

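/* Return the first R/W context on @list without removing it, or
 * NULL if @list is empty. Callers serialize access to @list with
 * sc_rw_ctxt_lock or by owning the list exclusively.
 */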
static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

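/* Allocate an R/W context, reusing one from the transport's free
 * list when possible, and size its scatter/gather table to handle
 * @sges segments. Returns NULL on allocation failure.
 */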
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(sizeof(*ctxt) +
			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}

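/* Tear down an R/W context's scatter/gather table and return the
 * context to the transport's free list for reuse.
 */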
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, true);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

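/* Prepare a chunk context for a new chunk. The reference on the
 * transport taken here is released by svc_rdma_cc_release() once
 * all of the chunk's I/O has completed.
 */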
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

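/* DMA-unmap and release every R/W context accumulated for this
 * chunk, then drop the transport reference taken in
 * svc_rdma_cc_init().
 */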
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

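/* Allocate state for sending one Write or Reply chunk. @chunk points
 * into the transport header just before the chunk's segment count;
 * the array of segments immediately follows the count.
 */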
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rdma_op_ctxt	*ri_readctxt;
	unsigned int		ri_position;
	unsigned int		ri_pageno;
	unsigned int		ri_pageoff;
	unsigned int		ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

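/* Allocate state for pulling one Read chunk from the client.
 */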
static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
		svc_rdma_put_context(info->ri_readctxt, 1);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->list,
			      &rdma->sc_read_complete_q);
		spin_unlock(&rdma->sc_rq_dto_lock);

		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr, *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		atomic_inc(&rdma_stat_sq_starve);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
	} while (1);

	pr_err("svcrdma: ib_post_send failed (%d)\n", ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

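/* Map one segment of a Read chunk: @len bytes of client memory at
 * @offset/@rkey land in pages taken from rqstp->rq_pages. The
 * resulting rdma_rw context is queued on the chunk's context list;
 * the Reads themselves are posted later by svc_rdma_post_chunk_ctxt().
 */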
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		goto out_noctx;
	ctxt->rw_nents = sge_no;

	dprintk("svcrdma: reading segment %u@0x%016llx:0x%08x (%u sges)\n",
		len, offset, rkey, sge_no);

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
			       cc->cc_rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		goto out_initerr;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_overrun:
	dprintk("svcrdma: request overruns rq_pages\n");
	return -EINVAL;

out_initerr:
	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		info->ri_chunklen += rs_length;
	}

	return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->arg.pages.
 *
 * Currently NFSD does not look at the head->arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Read chunk at position %u\n",
		info->ri_position);

	info->ri_pageno = head->hdr_count;
	info->ri_pageoff = 0;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	/* Split the Receive buffer between the head and tail
	 * buffers at Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	head->arg.tail[0].iov_base =
		head->arg.head[0].iov_base + info->ri_position;
	head->arg.tail[0].iov_len =
		head->arg.head[0].iov_len - info->ri_position;
	head->arg.head[0].iov_len = info->ri_position;

	/* Read chunk may need XDR round-up (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->arg.page_len = info->ri_chunklen;
	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

out:
	return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content or write list.
 *	  NFS does not need the former, and the spec forbids the latter.
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Position Zero Read chunk\n");

	info->ri_pageno = head->hdr_count - 1;
	info->ri_pageoff = offset_in_page(head->byte_len);

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

	if (head->arg.buflen <= head->sge[0].length) {
		/* Transport header and RPC message fit entirely
		 * in page where head iovec resides.
		 */
		head->arg.head[0].iov_len = info->ri_chunklen;
	} else {
		/* Transport header and part of RPC message reside
		 * in the head iovec's page.
		 */
		head->arg.head[0].iov_len =
			head->sge[0].length - head->byte_len;
		head->arg.page_len =
			info->ri_chunklen - head->arg.head[0].iov_len;
	}

out:
	return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_op_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	struct page **page;
	int ret;

	/* The request (with page list) is constructed in
	 * head->arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->hdr_count = head->count;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = head->pages;
	head->arg.page_base = 0;
	head->arg.page_len = 0;
	head->arg.len = rqstp->rq_arg.len;
	head->arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);

	/* Mark the start of the pages that can be used for the reply */
	if (info->ri_pageoff > 0)
		info->ri_pageno++;
	rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (ret < 0)
		goto out;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);

out:
	/* Read sink pages have been moved from rqstp->rq_pages to
	 * head->arg.pages. Force svc_recv to refill those slots
	 * in rq_pages.
	 */
	for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
		*page = NULL;

	if (ret < 0)
		svc_rdma_read_info_free(info);
	return ret;
}