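// SPDX-License-Identifier: GPL-2.0
/*
 * svc_rdma_rw.c: server-side support for RPC-over-RDMA Read and
 * Write chunks, built on the core rdma_rw API (rdma/rw.h) rather
 * than hand-constructed Work Request chains.
 *
 * (Descriptive header restored by the editor; the original
 * copyright banner was elided from this copy.)
 */
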
#include <rdma/rw.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
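
/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * a maximum of one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, these contexts are created on demand,
 * but cached and reused until the controlling svcxprt_rdma is
 * destroyed.
 */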
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	unsigned int		rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
			       GFP_KERNEL);
		if (!ctxt)
			goto out_noctx;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl,
				   SG_CHUNK_SIZE))
		goto out_free;
	return ctxt;

out_free:
	kfree(ctxt);
out_noctx:
	trace_svcrdma_no_rwctx_err(rdma, sges);
	return NULL;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}
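
/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */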
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}
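
/**
 * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * Returns, on success, the number of WQEs that will be needed
 * on the SQ for this I/O, or a negative errno.
 */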
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
				struct svc_rdma_rw_ctxt *ctxt,
				u64 offset, u32 handle,
				enum dma_data_direction direction)
{
	int ret;

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (unlikely(ret < 0)) {
		svc_rdma_put_rw_ctxt(rdma, ctxt);
		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
	}
	return ret;
}
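
/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */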
struct svc_rdma_chunk_ctxt {
	struct rpc_rdma_cid	cc_cid;
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
				 struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
	cc->cc_rdma = rdma;

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
}
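
/* State for sending a Write or Reply chunk.
 */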
struct svc_rdma_write_info {
	const struct svc_rdma_chunk	*wi_chunk;

	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;

	/* SGL constructor arguments */
	const struct xdr_buf	*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
			  const struct svc_rdma_chunk *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_chunk = chunk;
	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}
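
/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */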
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	trace_svcrdma_wc_write(wc, &cc->cc_cid);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

	svc_rdma_write_info_free(info);
}
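
/* State for pulling a Read chunk.
 */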
struct svc_rdma_read_info {
	struct svc_rqst			*ri_rqst;
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_totalbytes;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}
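
/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 */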
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	trace_svcrdma_wc_read(wc, &cc->cc_cid);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);
		/* Set XPT_DATA while still holding sc_rq_dto_lock so
		 * that the enqueued work is visible to svc_xprt_ready.
		 */
		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);

		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}
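
/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */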
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}
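
/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */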
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}
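
/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */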
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	const struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}
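
/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */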
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	const struct svc_rdma_segment *seg;
	struct svc_rdma_rw_ctxt *ctxt;
	int ret;

	do {
		unsigned int write_len;
		u64 offset;

		/* Taking the address of an array element can never yield
		 * NULL, so guard against running past the last provided
		 * segment with an explicit bounds check instead.
		 */
		if (info->wi_seg_no >= info->wi_chunk->ch_segcount)
			goto out_overflow;
		seg = &info->wi_chunk->ch_segments[info->wi_seg_no];

		write_len = min(remaining, seg->rs_length - info->wi_seg_off);
		if (!write_len)
			goto out_overflow;
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			return -ENOMEM;

		constructor(info, write_len, ctxt);
		offset = seg->rs_offset + info->wi_seg_off;
		ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle,
					   DMA_TO_DEVICE);
		if (ret < 0)
			return -EIO;

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg->rs_length - info->wi_seg_off) {
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
				     info->wi_chunk->ch_segcount);
	return -E2BIG;
}
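
/**
 * svc_rdma_iov_write - Construct RDMA Writes from an iov
 * @info: pointer to write arguments
 * @iov: kvec to write
 *
 * Returns:
 *   On success, returns zero
 *   %-E2BIG if the client-provided chunk is too small
 *   %-ENOMEM if a resource has been exhausted
 *   %-EIO if an rdma-rw error occurred
 */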
static int svc_rdma_iov_write(struct svc_rdma_write_info *info,
			      const struct kvec *iov)
{
	info->wi_base = iov->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     iov->iov_len);
}
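
/**
 * svc_rdma_pages_write - Construct RDMA Writes from pages
 * @info: pointer to write arguments
 * @xdr: xdr_buf with pages to write
 * @offset: offset into the content of @xdr
 * @length: number of bytes to write
 *
 * Returns:
 *   On success, returns zero
 *   %-E2BIG if the client-provided chunk is too small
 *   %-ENOMEM if a resource has been exhausted
 *   %-EIO if an rdma-rw error occurred
 */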
static int svc_rdma_pages_write(struct svc_rdma_write_info *info,
				const struct xdr_buf *xdr,
				unsigned int offset,
				unsigned long length)
{
	info->wi_xdr = xdr;
	info->wi_next_off = offset - xdr->head[0].iov_len;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     length);
}
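
/**
 * svc_rdma_xb_write - Construct RDMA Writes to write an xdr_buf
 * @xdr: xdr_buf to write
 * @data: pointer to write arguments
 *
 * Returns:
 *   On success, returns the number of bytes in @xdr consumed
 *   %-E2BIG if the client-provided chunk is too small
 *   %-ENOMEM if a resource has been exhausted
 *   %-EIO if an rdma-rw error occurred
 */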
static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
{
	struct svc_rdma_write_info *info = data;
	int ret;

	if (xdr->head[0].iov_len) {
		ret = svc_rdma_iov_write(info, &xdr->head[0]);
		if (ret < 0)
			return ret;
	}

	if (xdr->page_len) {
		ret = svc_rdma_pages_write(info, xdr, xdr->head[0].iov_len,
					   xdr->page_len);
		if (ret < 0)
			return ret;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_iov_write(info, &xdr->tail[0]);
		if (ret < 0)
			return ret;
	}

	return xdr->len;
}
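
/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @chunk: Write chunk provided by the client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */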
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_chunk *chunk,
			      const struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	struct svc_rdma_chunk_ctxt *cc;
	int ret;

	info = svc_rdma_write_info_alloc(rdma, chunk);
	if (!info)
		return -ENOMEM;
	cc = &info->wi_cc;

	ret = svc_rdma_xb_write(xdr, info);
	if (ret != xdr->len)
		goto out_err;

	trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
	ret = svc_rdma_post_chunk_ctxt(cc);
	if (ret < 0)
		goto out_err;
	return xdr->len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}
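
/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks provisioned by the client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */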
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_recv_ctxt *rctxt,
			      const struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	struct svc_rdma_chunk_ctxt *cc;
	struct svc_rdma_chunk *chunk;
	int ret;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	info = svc_rdma_write_info_alloc(rdma, chunk);
	if (!info)
		return -ENOMEM;
	cc = &info->wi_cc;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_write, info);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
	ret = svc_rdma_post_chunk_ctxt(cc);
	if (ret < 0)
		goto out_err;

	return xdr->len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}
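
/**
 * svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
 * @info: context for ongoing I/O
 * @segment: co-ordinates of remote memory to be read
 *
 * Returns:
 *   %0: the Read WR chain was constructed successfully
 *   %-EINVAL: there were not enough rq_pages to finish
 *   %-ENOMEM: allocating a local resource failed
 *   %-EIO: a DMA mapping error occurred
 */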
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       const struct svc_rdma_segment *segment)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rqst *rqstp = info->ri_rqst;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len, len;
	struct scatterlist *sg;
	int ret;

	len = segment->rs_length;
	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		return -ENOMEM;
	ctxt->rw_nents = sge_no;

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check: do not run off the end of rq_pages */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, segment->rs_offset,
				   segment->rs_handle, DMA_FROM_DEVICE);
	if (ret < 0)
		return -EIO;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_overrun:
	trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
	return -EINVAL;
}
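
/**
 * svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
 * @info: context for ongoing I/O
 * @chunk: Read chunk to pull
 *
 * Return values:
 *   %0: the Read WR chain was constructed successfully
 *   %-EINVAL: there were not enough resources to finish
 *   %-ENOMEM: allocating a local resource failed
 *   %-EIO: a DMA mapping error occurred
 */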
static int svc_rdma_build_read_chunk(struct svc_rdma_read_info *info,
				     const struct svc_rdma_chunk *chunk)
{
	const struct svc_rdma_segment *segment;
	int ret;

	ret = -EINVAL;
	pcl_for_each_segment(segment, chunk) {
		ret = svc_rdma_build_read_segment(info, segment);
		if (ret < 0)
			break;
		info->ri_totalbytes += segment->rs_length;
	}
	return ret;
}
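
/**
 * svc_rdma_copy_inline_range - Copy part of the inline content into pages
 * @info: context for RDMA Reads
 * @offset: offset into the Receive buffer of region to copy
 * @remaining: length of region to copy
 *
 * Take a page at a time from rqstp->rq_pages and copy the inline
 * content from the Receive buffer into that page. Update
 * info->ri_pageno and info->ri_pageoff so that the next RDMA Read
 * result will land contiguously with the copied content.
 *
 * Return values:
 *   %0: Inline content was successfully copied
 */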
static int svc_rdma_copy_inline_range(struct svc_rdma_read_info *info,
				      unsigned int offset,
				      unsigned int remaining)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	unsigned char *dst, *src = head->rc_recv_buf;
	struct svc_rqst *rqstp = info->ri_rqst;
	unsigned int page_no, numpages;

	numpages = PAGE_ALIGN(info->ri_pageoff + remaining) >> PAGE_SHIFT;
	for (page_no = 0; page_no < numpages; page_no++) {
		unsigned int page_len;

		page_len = min_t(unsigned int, remaining,
				 PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		/* Copy at the current page offset so the next RDMA Read
		 * result lands contiguously after the inline content.
		 */
		dst = page_address(head->rc_arg.pages[info->ri_pageno]);
		memcpy(dst + info->ri_pageoff, src + offset, page_len);

		info->ri_totalbytes += page_len;
		info->ri_pageoff += page_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		remaining -= page_len;
		offset += page_len;
	}

	return 0;
}
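
/**
 * svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull Read chunks
 * @info: context for RDMA Reads
 *
 * The chunk data lands in head->rc_arg as a series of contiguous
 * pages, like an incoming TCP call. Inline content between and
 * around the chunks is copied from the Receive buffer.
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */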
static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *info)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
	struct svc_rdma_chunk *chunk, *next;
	struct xdr_buf *buf = &head->rc_arg;
	unsigned int start, length;
	int ret;

	start = 0;
	chunk = pcl_first_chunk(pcl);
	length = chunk->ch_position;
	ret = svc_rdma_copy_inline_range(info, start, length);
	if (ret < 0)
		return ret;

	pcl_for_each_chunk(chunk, pcl) {
		ret = svc_rdma_build_read_chunk(info, chunk);
		if (ret < 0)
			return ret;

		next = pcl_next_chunk(pcl, chunk);
		if (!next)
			break;

		start += length;
		length = next->ch_position - info->ri_totalbytes;
		ret = svc_rdma_copy_inline_range(info, start, length);
		if (ret < 0)
			return ret;
	}

	start += length;
	length = head->rc_byte_len - start;
	ret = svc_rdma_copy_inline_range(info, start, length);
	if (ret < 0)
		return ret;

	buf->len += info->ri_totalbytes;
	buf->buflen += info->ri_totalbytes;

	head->rc_hdr_count = 1;
	buf->head[0].iov_base = page_address(head->rc_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
	buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
	return 0;
}
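
/**
 * svc_rdma_read_data_item - Construct RDMA Reads to pull one Read chunk
 * @info: context for RDMA Reads
 *
 * The chunk data lands in the page list of head->rc_arg.pages.
 *
 * The upper layer does not currently examine the head->rc_arg.tail[0]
 * iovec here, so XDR round-up of the Read chunk is accounted for at
 * the end of the pagelist instead.
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */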
static int svc_rdma_read_data_item(struct svc_rdma_read_info *info)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct xdr_buf *buf = &head->rc_arg;
	struct svc_rdma_chunk *chunk;
	unsigned int length;
	int ret;

	chunk = pcl_first_chunk(&head->rc_read_pcl);
	ret = svc_rdma_build_read_chunk(info, chunk);
	if (ret < 0)
		goto out;

	head->rc_hdr_count = 0;

	/* Split the Receive buffer between the head and tail
	 * buffers at the Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
	buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
	buf->head[0].iov_len = chunk->ch_position;

	/* Read chunk may need XDR roundup (see RFC 8166).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	length = XDR_QUADLEN(info->ri_totalbytes) << 2;
	buf->page_len = length;
	buf->len += length;
	buf->buflen += length;

out:
	return ret;
}
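
/**
 * svc_rdma_read_chunk_range - Build RDMA Read WRs for a portion of a chunk
 * @info: context for RDMA Reads
 * @chunk: parsed Call chunk to pull
 * @offset: offset of region to pull
 * @length: length of region to pull
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: there were not enough resources to finish
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */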
static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
				     const struct svc_rdma_chunk *chunk,
				     unsigned int offset, unsigned int length)
{
	const struct svc_rdma_segment *segment;
	int ret;

	ret = -EINVAL;
	pcl_for_each_segment(segment, chunk) {
		struct svc_rdma_segment dummy;

		if (offset > segment->rs_length) {
			offset -= segment->rs_length;
			continue;
		}

		dummy.rs_handle = segment->rs_handle;
		dummy.rs_length = min_t(u32, length, segment->rs_length) - offset;
		dummy.rs_offset = segment->rs_offset + offset;

		ret = svc_rdma_build_read_segment(info, &dummy);
		if (ret < 0)
			break;

		info->ri_totalbytes += dummy.rs_length;
		length -= dummy.rs_length;
		offset = 0;
	}
	return ret;
}
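
/**
 * svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
 * @info: context for RDMA Reads
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: there were not enough resources to finish
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */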
static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	const struct svc_rdma_chunk *call_chunk =
			pcl_first_chunk(&head->rc_call_pcl);
	const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
	struct svc_rdma_chunk *chunk, *next;
	unsigned int start, length;
	int ret;

	if (pcl_is_empty(pcl))
		return svc_rdma_build_read_chunk(info, call_chunk);

	start = 0;
	chunk = pcl_first_chunk(pcl);
	length = chunk->ch_position;
	ret = svc_rdma_read_chunk_range(info, call_chunk, start, length);
	if (ret < 0)
		return ret;

	pcl_for_each_chunk(chunk, pcl) {
		ret = svc_rdma_build_read_chunk(info, chunk);
		if (ret < 0)
			return ret;

		next = pcl_next_chunk(pcl, chunk);
		if (!next)
			break;

		start += length;
		length = next->ch_position - info->ri_totalbytes;
		ret = svc_rdma_read_chunk_range(info, call_chunk,
						start, length);
		if (ret < 0)
			return ret;
	}

	start += length;
	length = call_chunk->ch_length - start;
	return svc_rdma_read_chunk_range(info, call_chunk, start, length);
}
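
/**
 * svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
 * @info: context for RDMA Reads
 *
 * The start of the data lands in the first page just after the
 * Transport header, and the rest lands in head->rc_arg.pages.
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */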
static noinline int svc_rdma_read_special(struct svc_rdma_read_info *info)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct xdr_buf *buf = &head->rc_arg;
	int ret;

	ret = svc_rdma_read_call_chunk(info);
	if (ret < 0)
		goto out;

	buf->len += info->ri_totalbytes;
	buf->buflen += info->ri_totalbytes;

	head->rc_hdr_count = 1;
	buf->head[0].iov_base = page_address(head->rc_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
	buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;

out:
	return ret;
}
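
/* Pages under I/O have been copied to head->rc_pages. Ensure that
 * svc_xprt_release() does not release them until the I/O completes.
 *
 * This is done only after all Read WRs have been constructed and
 * posted, so that a page shared by two RDMA segments is handled
 * correctly.
 */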
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   const unsigned int start,
				   const unsigned int num_pages)
{
	unsigned int i;

	for (i = start; i < num_pages + start; i++)
		rqstp->rq_pages[i] = NULL;
}
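
/**
 * svc_rdma_process_read_list - Pull list of Read chunks from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 *
 * The RPC/RDMA protocol assumes that the upper layer's XDR decoders
 * pull each Read chunk as they decode an incoming RPC message. On
 * Linux, however, the server needs a fully-constructed RPC message
 * in rqstp->rq_arg when there is a positive return code from
 * ->xpo_recvfrom. So the whole Read list is pulled here, all at
 * once, before ->xpo_recvfrom returns.
 *
 * Return values:
 *   %1: all needed RDMA Reads were posted successfully,
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-ENOTCONN: posting failed (connection is lost),
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */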
int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
			       struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	struct svc_rdma_read_info *info;
	struct svc_rdma_chunk_ctxt *cc;
	int ret;

	/* The request (with page list) is constructed in
	 * head->rc_arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
	head->rc_arg.pages = head->rc_pages;
	head->rc_arg.page_base = 0;
	head->rc_arg.page_len = 0;
	head->rc_arg.len = rqstp->rq_arg.len;
	head->rc_arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	cc = &info->ri_cc;
	info->ri_rqst = rqstp;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;
	info->ri_totalbytes = 0;

	if (pcl_is_empty(&head->rc_call_pcl)) {
		if (head->rc_read_pcl.cl_count == 1)
			ret = svc_rdma_read_data_item(info);
		else
			ret = svc_rdma_read_multiple_chunks(info);
	} else
		ret = svc_rdma_read_special(info);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
	ret = svc_rdma_post_chunk_ctxt(cc);
	if (ret < 0)
		goto out_err;
	svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
	return 1;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}