/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_get_mirror)
		return desc->pg_ops->pg_get_mirror(desc, idx);
	return &desc->pg_mirrors[0];
}

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

static u32
nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_set_mirror)
		return desc->pg_ops->pg_set_mirror(desc, idx);
	return desc->pg_mirror_idx;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = nfs_req_openctx(hdr->req)->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	trace_nfs_pgio_error(hdr, error, pos);
	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_count to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}

/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_count to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_group_lock_head - lock the head of the page group
 * @req: any member of the page group
 *
 * On return the head request is locked, and holds an extra reference
 * if it is not the same request as @req.
 */
struct nfs_page *
nfs_page_group_lock_head(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	while (!nfs_lock_request(head)) {
		int ret = nfs_wait_on_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	if (head != req)
		kref_get(&head->wb_kref);
	return head;
}

/*
 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
 * @head: head request of page group, must be holding head lock
 * @req: request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests
 */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}

/*
 * nfs_page_group_lock_subreq - try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
	int ret;

	if (!kref_get_unless_zero(&subreq->wb_kref))
		return 0;
	while (!nfs_lock_request(subreq)) {
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			ret = nfs_page_group_lock(head);
		if (ret < 0) {
			nfs_unroll_locks(head, subreq);
			nfs_release_request(subreq);
			return ret;
		}
	}
	return 0;
}

/*
 * nfs_page_group_lock_subrequests - try to lock the subrequests
 * @head: head request of page group
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request locked.
 */
int nfs_page_group_lock_subrequests(struct nfs_page *head)
{
	struct nfs_page *subreq;
	int ret;

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		return ret;

	/* lock each request in the page group */
	for (subreq = head->wb_this_page; subreq != head;
	     subreq = subreq->wb_this_page) {
		ret = nfs_page_group_lock_subreq(head, subreq);
		if (ret < 0)
			return ret;
	}
	nfs_page_group_unlock(head);
	return 0;
}

/*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
 *
 * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_set_headlock(struct nfs_page *req)
{
	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 * @req: request that is to be unlocked
 */
void
nfs_page_clear_headlock(struct nfs_page *req)
{
	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req: request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	int ret;

	ret = nfs_page_set_headlock(req);
	if (ret || req->wb_head == req)
		return ret;
	return nfs_page_set_headlock(req->wb_head);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req: request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	if (req != req->wb_head)
		nfs_page_clear_headlock(req->wb_head);
	nfs_page_clear_headlock(req);
}

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
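
/*
 * Example (illustrative sketch, not part of this file): callers use
 * nfs_page_group_sync_on_bit() to perform an action exactly once per
 * page group, after every member has reached the same point. The bit
 * name PG_EXAMPLE below is hypothetical; the write path uses real
 * bits such as PG_WB_END in this fashion:
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_EXAMPLE)) {
 *		// Only the last request in the group to set the bit
 *		// gets here; the bit is then cleared on all members.
 *		end_page_writeback(req->wb_page);
 *	}
 */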

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @kref: the kref of the request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

static struct nfs_page *
__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
		     unsigned int pgbase, unsigned int offset,
		     unsigned int count)
{
	struct nfs_page *req;
	struct nfs_open_context *ctx = l_ctx->open_context;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->wb_lock_context = l_ctx;
	refcount_inc(&l_ctx->count);
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset = offset;
	req->wb_pgbase = pgbase;
	req->wb_bytes = count;
	kref_init(&req->wb_kref);
	req->wb_nio = 0;
	return req;
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
	if (!IS_ERR(ret))
		nfs_page_group_init(ret, NULL);
	nfs_put_lock_context(l_ctx);
	return ret;
}
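
/*
 * Example (illustrative sketch, not part of this file): the shape of a
 * typical caller. Error handling is elided; "ctx" and the locked page
 * "page" are assumed to come from the surrounding read or write path,
 * and the request is handed to a pageio descriptor before release:
 *
 *	req = nfs_create_request(ctx, page, 0, PAGE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	// ... queue req on an nfs_pageio_descriptor for I/O ...
 *	nfs_unlock_and_release_request(req);
 */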

static struct nfs_page *
nfs_create_subreq(struct nfs_page *req,
		  unsigned int pgbase,
		  unsigned int offset,
		  unsigned int count)
{
	struct nfs_page *last;
	struct nfs_page *ret;

	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
				   pgbase, offset, count);
	if (!IS_ERR(ret)) {
		/* find the last request */
		for (last = req->wb_head;
		     last->wb_this_page != req->wb_head;
		     last = last->wb_this_page)
			;

		nfs_lock_request(ret);
		ret->wb_index = req->wb_index;
		nfs_page_group_init(ret, last);
		ret->wb_nio = req->wb_nio;
	}
	return ret;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to release resources for
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	struct nfs_open_context *ctx;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			ctx = l_ctx->open_context;
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
}

/**
 * nfs_free_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
	    sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
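
/*
 * Worked example for the size check above (assuming PAGE_SIZE == 4096
 * and 8-byte page pointers): the array of page pointers for one RPC
 * must itself fit in a single page, so at most 4096 / 8 == 512 pages
 * can be coalesced, capping a request at 2 MiB no matter how large
 * the server's advertised block size is.
 */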

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		fallthrough;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = 0;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters of the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	desc->pg_maxretrans = 0;
}
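
/*
 * Example (illustrative sketch, not part of this file): the usual
 * descriptor lifecycle as seen from a generic caller. The names
 * example_pg_ops, example_compl_ops, example_rw_ops and next_request()
 * are placeholders for whatever the read or write path supplies:
 *
 *	struct nfs_pageio_descriptor pgio;
 *	struct nfs_page *req;
 *
 *	nfs_pageio_init(&pgio, inode, example_pg_ops, example_compl_ops,
 *			example_rw_ops, NFS_SERVER(inode)->rsize, 0);
 *	while ((req = next_request()) != NULL)
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;		// pgio.pg_error holds the error
 *	nfs_pageio_complete(&pgio);	// flush whatever was coalesced
 */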

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *req;
	struct page **pages,
		     *last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = GFP_KERNEL;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags,
					RPC_TASK_CRED_NOREF);
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
			 unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		mirror_count = 1;
	}
	pgio->pg_mirror_count = mirror_count;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

/**
 * nfs_coalesce_size - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Returns: Returns the size of the request if it can be coalesced, else zero.
 */
static unsigned int nfs_coalesce_size(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
			return 0;
		flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return 0;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return 0;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return 0;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return 0;
		}
	}
	return pgio->pg_ops->pg_test(pgio, prev, req);
}
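
/*
 * Worked example for the contiguity rules above: two 512-byte requests
 * covering bytes 0-511 and 512-1023 of the same page coalesce, because
 * req->wb_pgbase (512) equals prev->wb_pgbase + prev->wb_bytes. Requests
 * on different pages coalesce only when prev ends exactly at PAGE_SIZE
 * and req starts at pgbase 0, i.e. the combined byte range has no holes.
 */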

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request 'req' was successfully coalesced into the existing list
 * of pages 'desc', the size of the coalesced request is returned.
 * Otherwise, returns zero.
 */
static unsigned int
nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
			  struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;
	unsigned int size;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}

	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
			desc->pg_error = -ETIMEDOUT;
		else
			desc->pg_error = -EIO;
		return 0;
	}

	size = nfs_coalesce_size(prev, req, desc);
	if (size < req->wb_bytes)
		return size;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return req->wb_bytes;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group. If so, it will submit @req as the last one, to ensure
 * the pointer to @req is still valid in case of failure.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int size, subreq_size;

	nfs_page_group_lock(req);

	subreq = req;
	subreq_size = subreq->wb_bytes;
	for (;;) {
		size = nfs_pageio_do_add_request(desc, subreq);
		if (size == subreq_size) {
			/* We successfully submitted a request */
			if (subreq == req)
				break;
			req->wb_pgbase += size;
			req->wb_bytes -= size;
			req->wb_offset += size;
			subreq_size = req->wb_bytes;
			subreq = req;
			continue;
		}
		if (WARN_ON_ONCE(subreq != req)) {
			nfs_page_group_unlock(req);
			nfs_pageio_cleanup_request(desc, subreq);
			subreq = req;
			subreq_size = req->wb_bytes;
			nfs_page_group_lock(req);
		}
		if (!size) {
			/* Can't coalesce any more, so do I/O */
			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}
		subreq = nfs_create_subreq(req, req->wb_pgbase,
					   req->wb_offset, size);
		if (IS_ERR(subreq))
			goto err_ptr;
		subreq_size = size;
	}

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
					 struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
						       desc->pg_error);
	}
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	/* Create the mirror instances first, and fire them off */
	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
		nfs_page_group_lock(req);

		dupreq = nfs_create_subreq(req,
					   pgbase, offset, bytes);

		nfs_page_group_unlock(req);
		if (IS_ERR(dupreq)) {
			desc->pg_error = PTR_ERR(dupreq);
			goto out_failed;
		}

		nfs_pgio_set_current_mirror(desc, midx);
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	nfs_pgio_set_current_mirror(desc, 0);
	if (!nfs_pageio_add_request_mirror(desc, req))
		goto out_failed;

	return 1;

out_cleanup_subreq:
	nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: mirror index to complete I/O on
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror;
	u32 restore_idx;

	restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
	mirror = nfs_pgio_current_mirror(desc);

	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	nfs_pgio_set_current_mirror(desc, restore_idx);
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @desc - the pageio descriptor to add requests to
 * @hdr - the pgio header to move requests from
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_complete(pgio);
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};