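/*
 * Server-side support for RPC-over-RDMA Read and Write chunks,
 * built on the core rdma_rw API.
 */
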
#include <rdma/rw.h>

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

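/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer and
 * at most one R_key, since every segment of a chunk may carry its
 * own R_key.
 */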
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	int			rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[0];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

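/* Fetch an R/W context from the transport's cache, or allocate a
 * fresh one. The scatterlist is sized to hold @sges entries.
 */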
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(sizeof(*ctxt) +
			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, true);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

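/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 */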
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

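/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 */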
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

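/* Unmap and release the R/W contexts attached to @cc, then drop
 * the transport reference taken in svc_rdma_cc_init().
 */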
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}

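/* State for sending a Write or Reply chunk. The first group of
 * fields tracks progress through the chunk's segment list; the
 * second carries arguments for the SGL constructor functions.
 */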
struct svc_rdma_write_info {
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

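/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Replenish Send Queue accounting, then release the chunk's
 * resources. Pages under I/O are freed by a subsequent Send
 * completion.
 */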
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	trace_svcrdma_wc_write(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_rdma_write_info_free(info);
}

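/* State for pulling a Read chunk.
 */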
struct svc_rdma_read_info {
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

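/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * On success, the pulled-in RPC message is queued for processing;
 * on failure, the transport is marked for closure.
 */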
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	trace_svcrdma_wc_read(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);
		spin_unlock(&rdma->sc_rq_dto_lock);

		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}

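/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumption: if ib_post_send() succeeds, only one completion is
 * expected, even if one or more WRs are flushed. This is true when
 * posting an rdma_rw_ctx or when posting a single signaled WR.
 */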
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			trace_svcrdma_post_rw(&cc->cc_cqe,
					      cc->cc_sqecount, ret);
			if (ret)
				break;
			return 0;
		}

		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	set_bit(XPT_CLOSE, &xprt->xpt_flags);

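	/* If even one was posted, there will be a completion. */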
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

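/* Build an SGL that covers one kvec in an xdr_buf.
 */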
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

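/* Build an SGL that covers part of an xdr_buf's page list.
 */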
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

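/* Construct RDMA Write WRs to send a portion of an xdr_buf
 * containing an RPC Reply.
 */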
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		trace_svcrdma_encode_wseg(seg_handle, write_len, seg_offset);
		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	trace_svcrdma_dma_map_rwctx(rdma, ret);
	return -EIO;
}

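/* Send one of an xdr_buf's kvecs by itself. This is how a Reply
 * chunk's head and tail are written to the client.
 */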
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

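/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list; a Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */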
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}

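/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */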
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_encode_write(xdr->page_len);
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

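/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */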
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

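	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */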
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_encode_reply(consumed);
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

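/* Build an rdma_rw context for one segment of a Read chunk,
 * mapping the segment into pages donated by @rqstp.
 */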
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		goto out_noctx;
	ctxt->rw_nents = sge_no;

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
			       cc->cc_rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		goto out_initerr;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_overrun:
	dprintk("svcrdma: request overruns rq_pages\n");
	return -EINVAL;

out_initerr:
	trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
	return -EIO;
}

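/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */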
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	unsigned int i;
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		trace_svcrdma_encode_rseg(rs_handle, rs_length, rs_offset);
		info->ri_chunklen += rs_length;
	}

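	/* Pages under I/O have been copied to head->rc_pages.
	 * Prevent their premature release by svc_xprt_release().
	 */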
	for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
		rqstp->rq_pages[i] = NULL;

	return ret;
}

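/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */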
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_encode_read(info->ri_chunklen, info->ri_position);

	head->rc_hdr_count = 0;

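	/* Split the Receive buffer between the head and tail
	 * buffers at the Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */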
	head->rc_arg.tail[0].iov_base =
		head->rc_arg.head[0].iov_base + info->ri_position;
	head->rc_arg.tail[0].iov_len =
		head->rc_arg.head[0].iov_len - info->ri_position;
	head->rc_arg.head[0].iov_len = info->ri_position;

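	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 */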
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->rc_arg.page_len = info->ri_chunklen;
	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

out:
	return ret;
}

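/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after the
 * Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumption: a Position Zero Read chunk has an XDR-aligned length
 * (no implicit round-up).
 */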
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_encode_pzr(info->ri_chunklen);

	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

	head->rc_hdr_count = 1;
	head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
	head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
					     info->ri_chunklen);

	head->rc_arg.page_len = info->ri_chunklen -
				head->rc_arg.head[0].iov_len;

out:
	return ret;
}

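/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to the Read chunk in the RPC-over-RDMA header
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */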
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_recv_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	int ret;

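	/* The request (with page list) is constructed in
	 * head->rc_arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */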
	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
	head->rc_arg.pages = head->rc_pages;
	head->rc_arg.page_base = 0;
	head->rc_arg.page_len = 0;
	head->rc_arg.len = rqstp->rq_arg.len;
	head->rc_arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
	if (ret < 0)
		goto out_err;
	return 0;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}