// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/mm.h>
12#include <linux/pagemap.h>
13#include <linux/file.h>
14#include <linux/writeback.h>
15#include <linux/swap.h>
16#include <linux/migrate.h>
17
18#include <linux/sunrpc/clnt.h>
19#include <linux/nfs_fs.h>
20#include <linux/nfs_mount.h>
21#include <linux/nfs_page.h>
22#include <linux/backing-dev.h>
23#include <linux/export.h>
24#include <linux/freezer.h>
25#include <linux/wait.h>
26#include <linux/iversion.h>
27
28#include <linux/uaccess.h>
29
30#include "delegation.h"
31#include "internal.h"
32#include "iostat.h"
33#include "nfs4_fs.h"
34#include "fscache.h"
35#include "pnfs.h"
36
37#include "nfstrace.h"
38
39#define NFSDBG_FACILITY NFSDBG_PAGECACHE
40
41#define MIN_POOL_WRITE (32)
42#define MIN_POOL_COMMIT (4)
43
44struct nfs_io_completion {
45 void (*complete)(void *data);
46 void *data;
47 struct kref refcount;
48};
49
/*
 * Local function declarations
 */
53static void nfs_redirty_request(struct nfs_page *req);
54static const struct rpc_call_ops nfs_commit_ops;
55static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
56static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
57static const struct nfs_rw_ops nfs_rw_write_ops;
58static void nfs_clear_request_commit(struct nfs_page *req);
59static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
60 struct inode *inode);
61static struct nfs_page *
62nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
63 struct page *page);
64
65static struct kmem_cache *nfs_wdata_cachep;
66static mempool_t *nfs_wdata_mempool;
67static struct kmem_cache *nfs_cdata_cachep;
68static mempool_t *nfs_commit_mempool;
69
70struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
71{
72 struct nfs_commit_data *p;
73
74 if (never_fail)
75 p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
76 else {
		/* It is OK to do some reclaim, but it is not safe to wait
		 * for anything to be returned to the pool.
		 * mempool_alloc() cannot handle that particular combination,
		 * so we need two separate attempts.
		 */
82 p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
83 if (!p)
84 p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
85 __GFP_NOWARN | __GFP_NORETRY);
86 if (!p)
87 return NULL;
88 }
89
90 memset(p, 0, sizeof(*p));
91 INIT_LIST_HEAD(&p->pages);
92 return p;
93}
94EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
95
96void nfs_commit_free(struct nfs_commit_data *p)
97{
98 mempool_free(p, nfs_commit_mempool);
99}
100EXPORT_SYMBOL_GPL(nfs_commit_free);
101
102static struct nfs_pgio_header *nfs_writehdr_alloc(void)
103{
104 struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
105
106 memset(p, 0, sizeof(*p));
107 p->rw_mode = FMODE_WRITE;
108 return p;
109}
110
111static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
112{
113 mempool_free(hdr, nfs_wdata_mempool);
114}
115
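/*
 * I/O completion tracking for a set of writeback requests: the ->complete()
 * callback runs once the final reference to the nfs_io_completion is dropped.
 */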
116static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
117{
118 return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
119}
120
121static void nfs_io_completion_init(struct nfs_io_completion *ioc,
122 void (*complete)(void *), void *data)
123{
124 ioc->complete = complete;
125 ioc->data = data;
126 kref_init(&ioc->refcount);
127}
128
129static void nfs_io_completion_release(struct kref *kref)
130{
131 struct nfs_io_completion *ioc = container_of(kref,
132 struct nfs_io_completion, refcount);
133 ioc->complete(ioc->data);
134 kfree(ioc);
135}
136
137static void nfs_io_completion_get(struct nfs_io_completion *ioc)
138{
139 if (ioc != NULL)
140 kref_get(&ioc->refcount);
141}
142
143static void nfs_io_completion_put(struct nfs_io_completion *ioc)
144{
145 if (ioc != NULL)
146 kref_put(&ioc->refcount, nfs_io_completion_release);
147}
148
149static struct nfs_page *
150nfs_page_private_request(struct page *page)
151{
152 if (!PagePrivate(page))
153 return NULL;
154 return (struct nfs_page *)page_private(page);
155}
156
/*
 * nfs_page_find_private_request - find the head request attached to @page
 *
 * Looks up the request stored in page_private() under the mapping's
 * private_lock, and takes a reference on it.
 *
 * Returns the matching head request with a reference held, or NULL.
 */
164static struct nfs_page *
165nfs_page_find_private_request(struct page *page)
166{
167 struct address_space *mapping = page_file_mapping(page);
168 struct nfs_page *req;
169
170 if (!PagePrivate(page))
171 return NULL;
172 spin_lock(&mapping->private_lock);
173 req = nfs_page_private_request(page);
174 if (req) {
175 WARN_ON_ONCE(req->wb_head != req);
176 kref_get(&req->wb_kref);
177 }
178 spin_unlock(&mapping->private_lock);
179 return req;
180}
181
182static struct nfs_page *
183nfs_page_find_swap_request(struct page *page)
184{
185 struct inode *inode = page_file_mapping(page)->host;
186 struct nfs_inode *nfsi = NFS_I(inode);
187 struct nfs_page *req = NULL;
188 if (!PageSwapCache(page))
189 return NULL;
190 mutex_lock(&nfsi->commit_mutex);
191 if (PageSwapCache(page)) {
192 req = nfs_page_search_commits_for_head_request_locked(nfsi,
193 page);
194 if (req) {
195 WARN_ON_ONCE(req->wb_head != req);
196 kref_get(&req->wb_kref);
197 }
198 }
199 mutex_unlock(&nfsi->commit_mutex);
200 return req;
201}
202
/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
208static struct nfs_page *nfs_page_find_head_request(struct page *page)
209{
210 struct nfs_page *req;
211
212 req = nfs_page_find_private_request(page);
213 if (!req)
214 req = nfs_page_find_swap_request(page);
215 return req;
216}
217
/* Adjust the file length if we're writing beyond the end */
219static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
220{
221 struct inode *inode = page_file_mapping(page)->host;
222 loff_t end, i_size;
223 pgoff_t end_index;
224
225 spin_lock(&inode->i_lock);
226 i_size = i_size_read(inode);
227 end_index = (i_size - 1) >> PAGE_SHIFT;
228 if (i_size > 0 && page_index(page) < end_index)
229 goto out;
230 end = page_file_offset(page) + ((loff_t)offset+count);
231 if (i_size >= end)
232 goto out;
233 i_size_write(inode, end);
234 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
235 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
236out:
237 spin_unlock(&inode->i_lock);
238}
239
/* A writeback failed: mark the page as bad, and invalidate the page cache */
241static void nfs_set_pageerror(struct page *page)
242{
243 nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
244}
245
/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
259static struct nfs_page *
260nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
261{
262 struct nfs_page *req;
263
264 req = head;
265 do {
266 if (page_offset >= req->wb_pgbase &&
267 page_offset < (req->wb_pgbase + req->wb_bytes))
268 return req;
269
270 req = req->wb_this_page;
271 } while (req != head);
272
273 return NULL;
274}
275
/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
283static bool nfs_page_group_covers_page(struct nfs_page *req)
284{
285 struct nfs_page *tmp;
286 unsigned int pos = 0;
287 unsigned int len = nfs_page_length(req->wb_page);
288
289 nfs_page_group_lock(req);
290
291 for (;;) {
292 tmp = nfs_page_group_search_locked(req->wb_head, pos);
293 if (!tmp)
294 break;
295 pos = tmp->wb_pgbase + tmp->wb_bytes;
296 }
297
298 nfs_page_group_unlock(req);
299 return pos >= len;
300}
301
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
305static void nfs_mark_uptodate(struct nfs_page *req)
306{
307 if (PageUptodate(req->wb_page))
308 return;
309 if (!nfs_page_group_covers_page(req))
310 return;
311 SetPageUptodate(req->wb_page);
312}
313
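/* For data integrity (WB_SYNC_ALL) writeback, request a conditionally stable flush */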
314static int wb_priority(struct writeback_control *wbc)
315{
316 int ret = 0;
317
318 if (wbc->sync_mode == WB_SYNC_ALL)
319 ret = FLUSH_COND_STABLE;
320 return ret;
321}

/*
 * NFS congestion control: bound the amount of writeback outstanding
 * against a given NFS server before marking its bdi congested.
 */
327int nfs_congestion_kb;
328
329#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
330#define NFS_CONGESTION_OFF_THRESH \
331 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
332
333static void nfs_set_page_writeback(struct page *page)
334{
335 struct inode *inode = page_file_mapping(page)->host;
336 struct nfs_server *nfss = NFS_SERVER(inode);
337 int ret = test_set_page_writeback(page);
338
339 WARN_ON_ONCE(ret != 0);
340
341 if (atomic_long_inc_return(&nfss->writeback) >
342 NFS_CONGESTION_ON_THRESH)
343 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
344}
345
346static void nfs_end_page_writeback(struct nfs_page *req)
347{
348 struct inode *inode = page_file_mapping(req->wb_page)->host;
349 struct nfs_server *nfss = NFS_SERVER(inode);
350 bool is_done;
351
352 is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
353 nfs_unlock_request(req);
354 if (!is_done)
355 return;
356
357 end_page_writeback(req->wb_page);
358 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
359 clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
360}
361
/*
 * nfs_unroll_locks - unlock all newly locked subrequests in the page group
 *
 * @inode - inode associated with the page group
 * @head  - head request of the page group
 * @req   - the subrequest that could not be locked
 *
 * Helper for nfs_lock_and_join_requests(): releases the locks taken on
 * the subrequests between @head->wb_this_page and @req.
 */
376static void
377nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
378 struct nfs_page *req)
379{
380 struct nfs_page *tmp;
381
	/* relinquish all the locks successfully grabbed this run */
383 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
384 if (!kref_read(&tmp->wb_kref))
385 continue;
386 nfs_unlock_and_release_request(tmp);
387 }
388}
389
390
391
392
393
394
395
396
397
398
399
400static void
401nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
402 struct nfs_page *old_head,
403 struct inode *inode)
404{
405 while (destroy_list) {
406 struct nfs_page *subreq = destroy_list;
407
408 destroy_list = (subreq->wb_this_page == old_head) ?
409 NULL : subreq->wb_this_page;
410
411 WARN_ON_ONCE(old_head != subreq->wb_head);
412
		/* make sure old group is not used */
414 subreq->wb_this_page = subreq;
415
416 clear_bit(PG_REMOVE, &subreq->wb_flags);
417
		/* Note: races with nfs_page_group_destroy() */
419 if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
421 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
422 nfs_free_request(subreq);
423 continue;
424 }
425
426 subreq->wb_head = subreq;
427
428 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
429 nfs_release_request(subreq);
430 atomic_long_dec(&NFS_I(inode)->nrequests);
431 }
432
		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
435 nfs_unlock_and_release_request(subreq);
436 }
437}
438
/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request (which after
 * this call is guaranteed to be the only request associated with the page),
 * or NULL if the page does not need to be locked.
 */
457static struct nfs_page *
458nfs_lock_and_join_requests(struct page *page)
459{
460 struct inode *inode = page_file_mapping(page)->host;
461 struct nfs_page *head, *subreq;
462 struct nfs_page *destroy_list = NULL;
463 unsigned int total_bytes;
464 int ret;
465
466try_again:
	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
472 head = nfs_page_find_head_request(page);
473 if (!head)
474 return NULL;
475
	/* lock the page head first in order to avoid an ABBA inefficiency */
477 if (!nfs_lock_request(head)) {
478 ret = nfs_wait_on_request(head);
479 nfs_release_request(head);
480 if (ret < 0)
481 return ERR_PTR(ret);
482 goto try_again;
483 }
484
	/* Ensure that nobody removed the request before we locked it */
486 if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
487 nfs_unlock_and_release_request(head);
488 goto try_again;
489 }
490
491 ret = nfs_page_group_lock(head);
492 if (ret < 0)
493 goto release_request;
494
	/* lock each request in the page group */
496 total_bytes = head->wb_bytes;
497 for (subreq = head->wb_this_page; subreq != head;
498 subreq = subreq->wb_this_page) {
499
500 if (!kref_get_unless_zero(&subreq->wb_kref)) {
501 if (subreq->wb_offset == head->wb_offset + total_bytes)
502 total_bytes += subreq->wb_bytes;
503 continue;
504 }
505
506 while (!nfs_lock_request(subreq)) {
			/*
			 * Unlock page to allow nfs_page_group_sync_on_bit()
			 * to succeed
			 */
511 nfs_page_group_unlock(head);
512 ret = nfs_wait_on_request(subreq);
513 if (!ret)
514 ret = nfs_page_group_lock(head);
515 if (ret < 0) {
516 nfs_unroll_locks(inode, head, subreq);
517 nfs_release_request(subreq);
518 goto release_request;
519 }
520 }

		/* Subrequests are always contiguous, non overlapping
		 * and in order of increasing offset within the group.
		 */
525 if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
527 total_bytes += subreq->wb_bytes;
528 } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
529 ((subreq->wb_offset + subreq->wb_bytes) >
530 (head->wb_offset + total_bytes)))) {
531 nfs_page_group_unlock(head);
532 nfs_unroll_locks(inode, head, subreq);
533 nfs_unlock_and_release_request(subreq);
534 ret = -EIO;
535 goto release_request;
536 }
537 }
538
	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
541 subreq = head;
542 do {
543 nfs_clear_request_commit(subreq);
544 subreq = subreq->wb_this_page;
545 } while (subreq != head);
546
	/* unlink subrequests from head, destroy them later */
548 if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
550 destroy_list = head->wb_this_page;
551 head->wb_this_page = head;
552
		/* change head request to cover whole range that
		 * the former page group covered */
555 head->wb_bytes = total_bytes;
556 }
557
	/* Postpone destruction of this request */
559 if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
560 set_bit(PG_INODE_REF, &head->wb_flags);
561 kref_get(&head->wb_kref);
562 atomic_long_inc(&NFS_I(inode)->nrequests);
563 }
564
565 nfs_page_group_unlock(head);
566
567 nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
568
	/* Did we lose a race with nfs_inode_remove_request()? */
570 if (!(PagePrivate(page) || PageSwapCache(page))) {
571 nfs_unlock_and_release_request(head);
572 return NULL;
573 }
574
	/* still holds ref on head from nfs_page_find_head_request
	 * and still has lock on head from lock loop */
577 return head;
578
579release_request:
580 nfs_unlock_and_release_request(head);
581 return ERR_PTR(ret);
582}
583
584static void nfs_write_error_remove_page(struct nfs_page *req)
585{
586 nfs_end_page_writeback(req);
587 generic_error_remove_page(page_file_mapping(req->wb_page),
588 req->wb_page);
589 nfs_release_request(req);
590}
591
592static bool
593nfs_error_is_fatal_on_server(int err)
594{
595 switch (err) {
596 case 0:
597 case -ERESTARTSYS:
598 case -EINTR:
599 return false;
600 }
601 return nfs_error_is_fatal(err);
602}
603
/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
608static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
609 struct page *page)
610{
611 struct nfs_page *req;
612 int ret = 0;
613
614 req = nfs_lock_and_join_requests(page);
615 if (!req)
616 goto out;
617 ret = PTR_ERR(req);
618 if (IS_ERR(req))
619 goto out;
620
621 nfs_set_page_writeback(page);
622 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
623
624 ret = 0;
	/* If there is a fatal error that covers this write, just exit */
626 if (nfs_error_is_fatal_on_server(req->wb_context->error))
627 goto out_launder;
628
629 if (!nfs_pageio_add_request(pgio, req)) {
630 ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
634 if (nfs_error_is_fatal(ret)) {
635 nfs_context_set_write_error(req->wb_context, ret);
636 if (nfs_error_is_fatal_on_server(ret))
637 goto out_launder;
638 }
639 nfs_redirty_request(req);
640 ret = -EAGAIN;
641 } else
642 nfs_add_stats(page_file_mapping(page)->host,
643 NFSIOS_WRITEPAGES, 1);
644out:
645 return ret;
646out_launder:
647 nfs_write_error_remove_page(req);
648 return ret;
649}
650
651static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
652 struct nfs_pageio_descriptor *pgio)
653{
654 int ret;
655
656 nfs_pageio_cond_complete(pgio, page_index(page));
657 ret = nfs_page_async_flush(pgio, page);
658 if (ret == -EAGAIN) {
659 redirty_page_for_writepage(wbc, page);
660 ret = 0;
661 }
662 return ret;
663}
664
/*
 * Write an mmapped page to the server.
 */
668static int nfs_writepage_locked(struct page *page,
669 struct writeback_control *wbc)
670{
671 struct nfs_pageio_descriptor pgio;
672 struct inode *inode = page_file_mapping(page)->host;
673 int err;
674
675 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
676 nfs_pageio_init_write(&pgio, inode, 0,
677 false, &nfs_async_write_completion_ops);
678 err = nfs_do_writepage(page, wbc, &pgio);
679 nfs_pageio_complete(&pgio);
680 if (err < 0)
681 return err;
682 if (pgio.pg_error < 0)
683 return pgio.pg_error;
684 return 0;
685}
686
687int nfs_writepage(struct page *page, struct writeback_control *wbc)
688{
689 int ret;
690
691 ret = nfs_writepage_locked(page, wbc);
692 unlock_page(page);
693 return ret;
694}
695
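/* write_cache_pages() callback: flush a single page through the pageio descriptor */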
696static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
697{
698 int ret;
699
700 ret = nfs_do_writepage(page, wbc, data);
701 unlock_page(page);
702 return ret;
703}
704
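/* Completion callback for nfs_writepages(): start an asynchronous COMMIT of the inode */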
705static void nfs_io_completion_commit(void *inode)
706{
707 nfs_commit_inode(inode, 0);
708}
709
710int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
711{
712 struct inode *inode = mapping->host;
713 struct nfs_pageio_descriptor pgio;
714 struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS);
715 int err;
716
717 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
718
719 if (ioc)
720 nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
721
722 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
723 &nfs_async_write_completion_ops);
724 pgio.pg_io_completion = ioc;
725 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
726 nfs_pageio_complete(&pgio);
727 nfs_io_completion_put(ioc);
728
729 if (err < 0)
730 goto out_err;
731 err = pgio.pg_error;
732 if (err < 0)
733 goto out_err;
734 return 0;
735out_err:
736 return err;
737}
738
/*
 * Insert a write request into an inode
 */
742static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
743{
744 struct address_space *mapping = page_file_mapping(req->wb_page);
745 struct nfs_inode *nfsi = NFS_I(inode);
746
747 WARN_ON_ONCE(req->wb_this_page != req);
748
	/* Lock the request! */
750 nfs_lock_request(req);
751
	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
756 spin_lock(&mapping->private_lock);
757 if (!nfs_have_writebacks(inode) &&
758 NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
759 inode_inc_iversion_raw(inode);
760 if (likely(!PageSwapCache(req->wb_page))) {
761 set_bit(PG_MAPPED, &req->wb_flags);
762 SetPagePrivate(req->wb_page);
763 set_page_private(req->wb_page, (unsigned long)req);
764 }
765 spin_unlock(&mapping->private_lock);
766 atomic_long_inc(&nfsi->nrequests);
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
771 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
772 kref_get(&req->wb_kref);
773}
774
/*
 * Remove a write request from an inode
 */
778static void nfs_inode_remove_request(struct nfs_page *req)
779{
780 struct address_space *mapping = page_file_mapping(req->wb_page);
781 struct inode *inode = mapping->host;
782 struct nfs_inode *nfsi = NFS_I(inode);
783 struct nfs_page *head;
784
785 atomic_long_dec(&nfsi->nrequests);
786 if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
787 head = req->wb_head;
788
789 spin_lock(&mapping->private_lock);
790 if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
791 set_page_private(head->wb_page, 0);
792 ClearPagePrivate(head->wb_page);
793 clear_bit(PG_MAPPED, &head->wb_flags);
794 }
795 spin_unlock(&mapping->private_lock);
796 }
797
798 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
799 nfs_release_request(req);
800}
801
802static void
803nfs_mark_request_dirty(struct nfs_page *req)
804{
805 if (req->wb_page)
806 __set_page_dirty_nobuffers(req->wb_page);
807}
808
/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through the commit lists on @nfsi for the head request for @page.
 * Must be called while holding NFS_I(inode)->commit_mutex.
 *
 * Returns the head request if found, or NULL if not found.
 */
817static struct nfs_page *
818nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
819 struct page *page)
820{
821 struct nfs_page *freq, *t;
822 struct nfs_commit_info cinfo;
823 struct inode *inode = &nfsi->vfs_inode;
824
825 nfs_init_cinfo_from_inode(&cinfo, inode);
826
	/* search through pnfs commit lists */
828 freq = pnfs_search_commit_reqs(inode, &cinfo, page);
829 if (freq)
830 return freq->wb_head;
831
	/* Linearly search the commit list for the correct request */
833 list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
834 if (freq->wb_page == page)
835 return freq->wb_head;
836 }
837
838 return NULL;
839}
840
/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
854void
855nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
856 struct nfs_commit_info *cinfo)
857{
858 set_bit(PG_CLEAN, &req->wb_flags);
859 nfs_list_add_request(req, dst);
860 atomic_long_inc(&cinfo->mds->ncommit);
861}
862EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
863
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the commit_mutex, but must be
 * holding the nfs_page lock.
 */
877void
878nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
879{
880 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
881 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
882 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
883 if (req->wb_page)
884 nfs_mark_page_unstable(req->wb_page, cinfo);
885}
886EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
887
/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit.
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the commit_mutex and the nfs_page lock.
 */
899void
900nfs_request_remove_commit_list(struct nfs_page *req,
901 struct nfs_commit_info *cinfo)
902{
903 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
904 return;
905 nfs_list_remove_request(req);
906 atomic_long_dec(&cinfo->mds->ncommit);
907}
908EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
909
910static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
911 struct inode *inode)
912{
913 cinfo->inode = inode;
914 cinfo->mds = &NFS_I(inode)->commit_info;
915 cinfo->ds = pnfs_get_ds_info(inode);
916 cinfo->dreq = NULL;
917 cinfo->completion_ops = &nfs_commit_completion_ops;
918}
919
920void nfs_init_cinfo(struct nfs_commit_info *cinfo,
921 struct inode *inode,
922 struct nfs_direct_req *dreq)
923{
924 if (dreq)
925 nfs_init_cinfo_from_dreq(cinfo, dreq);
926 else
927 nfs_init_cinfo_from_inode(cinfo, inode);
928}
929EXPORT_SYMBOL_GPL(nfs_init_cinfo);
930
/*
 * Add a request to the inode's commit list.
 */
934void
935nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
936 struct nfs_commit_info *cinfo, u32 ds_commit_idx)
937{
938 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
939 return;
940 nfs_request_add_commit_list(req, cinfo);
941}
942
943static void
944nfs_clear_page_commit(struct page *page)
945{
946 dec_node_page_state(page, NR_UNSTABLE_NFS);
947 dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
948 WB_RECLAIMABLE);
949}
950
/* Called holding the request lock on @req */
952static void
953nfs_clear_request_commit(struct nfs_page *req)
954{
955 if (test_bit(PG_CLEAN, &req->wb_flags)) {
956 struct inode *inode = d_inode(req->wb_context->dentry);
957 struct nfs_commit_info cinfo;
958
959 nfs_init_cinfo_from_inode(&cinfo, inode);
960 mutex_lock(&NFS_I(inode)->commit_mutex);
961 if (!pnfs_clear_request_commit(req, &cinfo)) {
962 nfs_request_remove_commit_list(req, &cinfo);
963 }
964 mutex_unlock(&NFS_I(inode)->commit_mutex);
965 nfs_clear_page_commit(req->wb_page);
966 }
967}
968
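/* Does this write reply require a follow-up COMMIT to the server? */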
969int nfs_write_need_commit(struct nfs_pgio_header *hdr)
970{
971 if (hdr->verf.committed == NFS_DATA_SYNC)
972 return hdr->lseg == NULL;
973 return hdr->verf.committed != NFS_FILE_SYNC;
974}
975
976static void nfs_async_write_init(struct nfs_pgio_header *hdr)
977{
978 nfs_io_completion_get(hdr->io_completion);
979}
980
981static void nfs_write_completion(struct nfs_pgio_header *hdr)
982{
983 struct nfs_commit_info cinfo;
984 unsigned long bytes = 0;
985
986 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
987 goto out;
988 nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
989 while (!list_empty(&hdr->pages)) {
990 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
991
992 bytes += req->wb_bytes;
993 nfs_list_remove_request(req);
994 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
995 (hdr->good_bytes < bytes)) {
996 nfs_set_pageerror(req->wb_page);
997 nfs_context_set_write_error(req->wb_context, hdr->error);
998 goto remove_req;
999 }
1000 if (nfs_write_need_commit(hdr)) {
1001 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
1002 nfs_mark_request_commit(req, hdr->lseg, &cinfo,
1003 hdr->pgio_mirror_idx);
1004 goto next;
1005 }
1006remove_req:
1007 nfs_inode_remove_request(req);
1008next:
1009 nfs_end_page_writeback(req);
1010 nfs_release_request(req);
1011 }
1012out:
1013 nfs_io_completion_put(hdr->io_completion);
1014 hdr->release(hdr);
1015}
1016
1017unsigned long
1018nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
1019{
1020 return atomic_long_read(&cinfo->mds->ncommit);
1021}
1022
/* NFS_I(cinfo->inode)->commit_mutex held by caller */
1024int
1025nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
1026 struct nfs_commit_info *cinfo, int max)
1027{
1028 struct nfs_page *req, *tmp;
1029 int ret = 0;
1030
1031restart:
1032 list_for_each_entry_safe(req, tmp, src, wb_list) {
1033 kref_get(&req->wb_kref);
1034 if (!nfs_lock_request(req)) {
1035 int status;
1036
			/* Prevent deadlock with nfs_page_group_lock */
1038 if (!list_empty(dst)) {
1039 nfs_release_request(req);
1040 continue;
1041 }
			/* Ensure we make progress to prevent livelock */
1043 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1044 status = nfs_wait_on_request(req);
1045 nfs_release_request(req);
1046 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1047 if (status < 0)
1048 break;
1049 goto restart;
1050 }
1051 nfs_request_remove_commit_list(req, cinfo);
1052 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1053 nfs_list_add_request(req, dst);
1054 ret++;
1055 if ((ret == max) && !cinfo->dreq)
1056 break;
1057 cond_resched();
1058 }
1059 return ret;
1060}
1061EXPORT_SYMBOL_GPL(nfs_scan_commit_list);
1062
/**
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
1072int
1073nfs_scan_commit(struct inode *inode, struct list_head *dst,
1074 struct nfs_commit_info *cinfo)
1075{
1076 int ret = 0;
1077
1078 if (!atomic_long_read(&cinfo->mds->ncommit))
1079 return 0;
1080 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1081 if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
1082 const int max = INT_MAX;
1083
1084 ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
1085 cinfo, max);
1086 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
1087 }
1088 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1089 return ret;
1090}
1091
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
1099static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
1100 struct page *page,
1101 unsigned int offset,
1102 unsigned int bytes)
1103{
1104 struct nfs_page *req;
1105 unsigned int rqend;
1106 unsigned int end;
1107 int error;
1108
1109 end = offset + bytes;
1110
1111 req = nfs_lock_and_join_requests(page);
1112 if (IS_ERR_OR_NULL(req))
1113 return req;
1114
1115 rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
1122 if (offset > rqend || end < req->wb_offset)
1123 goto out_flushme;
1124
	/* Okay, the request matches. Update the region */
1126 if (offset < req->wb_offset) {
1127 req->wb_offset = offset;
1128 req->wb_pgbase = offset;
1129 }
1130 if (end > rqend)
1131 req->wb_bytes = end - req->wb_offset;
1132 else
1133 req->wb_bytes = rqend - req->wb_offset;
1134 return req;
1135out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
1141 nfs_mark_request_dirty(req);
1142 nfs_unlock_and_release_request(req);
1143 error = nfs_wb_page(inode, page);
1144 return (error < 0) ? ERR_PTR(error) : NULL;
1145}
1146
/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
1154static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
1155 struct page *page, unsigned int offset, unsigned int bytes)
1156{
1157 struct inode *inode = page_file_mapping(page)->host;
1158 struct nfs_page *req;
1159
1160 req = nfs_try_to_update_request(inode, page, offset, bytes);
1161 if (req != NULL)
1162 goto out;
1163 req = nfs_create_request(ctx, page, NULL, offset, bytes);
1164 if (IS_ERR(req))
1165 goto out;
1166 nfs_inode_add_request(inode, req);
1167out:
1168 return req;
1169}
1170
1171static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
1172 unsigned int offset, unsigned int count)
1173{
1174 struct nfs_page *req;
1175
1176 req = nfs_setup_write_request(ctx, page, offset, count);
1177 if (IS_ERR(req))
1178 return PTR_ERR(req);
1179
1180 nfs_grow_file(page, offset, count);
1181 nfs_mark_uptodate(req);
1182 nfs_mark_request_dirty(req);
1183 nfs_unlock_and_release_request(req);
1184 return 0;
1185}
1186
1187int nfs_flush_incompatible(struct file *file, struct page *page)
1188{
1189 struct nfs_open_context *ctx = nfs_file_open_context(file);
1190 struct nfs_lock_context *l_ctx;
1191 struct file_lock_context *flctx = file_inode(file)->i_flctx;
1192 struct nfs_page *req;
1193 int do_flush, status;
1194
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Also do
	 * the same if we find a request from an existing
	 * dropped page.
	 */
1202 do {
1203 req = nfs_page_find_head_request(page);
1204 if (req == NULL)
1205 return 0;
1206 l_ctx = req->wb_lock_context;
1207 do_flush = req->wb_page != page ||
1208 !nfs_match_open_context(req->wb_context, ctx);
1209 if (l_ctx && flctx &&
1210 !(list_empty_careful(&flctx->flc_posix) &&
1211 list_empty_careful(&flctx->flc_flock))) {
1212 do_flush |= l_ctx->lockowner != current->files;
1213 }
1214 nfs_release_request(req);
1215 if (!do_flush)
1216 return 0;
1217 status = nfs_wb_page(page_file_mapping(page)->host, page);
1218 } while (status == 0);
1219 return status;
1220}
1221
/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
1232int
1233nfs_key_timeout_notify(struct file *filp, struct inode *inode)
1234{
1235 struct nfs_open_context *ctx = nfs_file_open_context(filp);
1236 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
1237
1238 return rpcauth_key_timeout_notify(auth, ctx->cred);
1239}
1240
/*
 * Test if the open context credential key is marked to expire soon.
 */
1244bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
1245{
1246 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
1247
1248 return rpcauth_cred_key_to_expire(auth, ctx->cred);
1249}
1250
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
1256static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
1257{
1258 struct nfs_inode *nfsi = NFS_I(inode);
1259
1260 if (nfs_have_delegated_attributes(inode))
1261 goto out;
1262 if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
1263 return false;
1264 smp_rmb();
1265 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
1266 return false;
1267out:
1268 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1269 return false;
1270 return PageUptodate(page) != 0;
1271}
1272
1273static bool
1274is_whole_file_wrlock(struct file_lock *fl)
1275{
1276 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
1277 fl->fl_type == F_WRLCK;
1278}
1279
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
1288static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
1289{
1290 int ret;
1291 struct file_lock_context *flctx = inode->i_flctx;
1292 struct file_lock *fl;
1293
1294 if (file->f_flags & O_DSYNC)
1295 return 0;
1296 if (!nfs_write_pageuptodate(page, inode))
1297 return 0;
1298 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
1299 return 1;
1300 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1301 list_empty_careful(&flctx->flc_posix)))
1302 return 1;
1303
	/* Check to see if there are whole file write locks */
1305 ret = 0;
1306 spin_lock(&flctx->flc_lock);
1307 if (!list_empty(&flctx->flc_posix)) {
1308 fl = list_first_entry(&flctx->flc_posix, struct file_lock,
1309 fl_list);
1310 if (is_whole_file_wrlock(fl))
1311 ret = 1;
1312 } else if (!list_empty(&flctx->flc_flock)) {
1313 fl = list_first_entry(&flctx->flc_flock, struct file_lock,
1314 fl_list);
1315 if (fl->fl_type == F_WRLCK)
1316 ret = 1;
1317 }
1318 spin_unlock(&flctx->flc_lock);
1319 return ret;
1320}
1321
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
1328int nfs_updatepage(struct file *file, struct page *page,
1329 unsigned int offset, unsigned int count)
1330{
1331 struct nfs_open_context *ctx = nfs_file_open_context(file);
1332 struct inode *inode = page_file_mapping(page)->host;
1333 int status = 0;
1334
1335 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
1336
1337 dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
1338 file, count, (long long)(page_file_offset(page) + offset));
1339
1340 if (!count)
1341 goto out;
1342
1343 if (nfs_can_extend_write(file, page, inode)) {
1344 count = max(count + offset, nfs_page_length(page));
1345 offset = 0;
1346 }
1347
1348 status = nfs_writepage_setup(ctx, page, offset, count);
1349 if (status < 0)
1350 nfs_set_pageerror(page);
1351 else
1352 __set_page_dirty_nobuffers(page);
1353out:
1354 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
1355 status, (long long)i_size_read(inode));
1356 return status;
1357}
1358
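/* Map the FLUSH_* "how" flags onto an RPC scheduling priority */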
1359static int flush_task_priority(int how)
1360{
1361 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
1362 case FLUSH_HIGHPRI:
1363 return RPC_PRIORITY_HIGH;
1364 case FLUSH_LOWPRI:
1365 return RPC_PRIORITY_LOW;
1366 }
1367 return RPC_PRIORITY_NORMAL;
1368}
1369
1370static void nfs_initiate_write(struct nfs_pgio_header *hdr,
1371 struct rpc_message *msg,
1372 const struct nfs_rpc_ops *rpc_ops,
1373 struct rpc_task_setup *task_setup_data, int how)
1374{
1375 int priority = flush_task_priority(how);
1376
1377 task_setup_data->priority = priority;
1378 rpc_ops->write_setup(hdr, msg);
1379 trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes,
1380 hdr->args.stable);
1381
1382 nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
1383 &task_setup_data->rpc_client, msg, hdr);
1384}
1385
/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
1390static void nfs_redirty_request(struct nfs_page *req)
1391{
1392 nfs_mark_request_dirty(req);
1393 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1394 nfs_end_page_writeback(req);
1395 nfs_release_request(req);
1396}
1397
1398static void nfs_async_write_error(struct list_head *head)
1399{
1400 struct nfs_page *req;
1401
1402 while (!list_empty(head)) {
1403 req = nfs_list_entry(head->next);
1404 nfs_list_remove_request(req);
1405 nfs_redirty_request(req);
1406 }
1407}
1408
1409static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
1410{
1411 nfs_async_write_error(&hdr->pages);
1412}
1413
1414static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1415 .init_hdr = nfs_async_write_init,
1416 .error_cleanup = nfs_async_write_error,
1417 .completion = nfs_write_completion,
1418 .reschedule_io = nfs_async_write_reschedule_io,
1419};
1420
1421void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1422 struct inode *inode, int ioflags, bool force_mds,
1423 const struct nfs_pgio_completion_ops *compl_ops)
1424{
1425 struct nfs_server *server = NFS_SERVER(inode);
1426 const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
1427
1428#ifdef CONFIG_NFS_V4_1
1429 if (server->pnfs_curr_ld && !force_mds)
1430 pg_ops = server->pnfs_curr_ld->pg_write_ops;
1431#endif
1432 nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
1433 server->wsize, ioflags);
1434}
1435EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
1436
1437void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1438{
1439 struct nfs_pgio_mirror *mirror;
1440
1441 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
1442 pgio->pg_ops->pg_cleanup(pgio);
1443
1444 pgio->pg_ops = &nfs_pgio_rw_ops;
1445
1446 nfs_pageio_stop_mirroring(pgio);
1447
1448 mirror = &pgio->pg_mirrors[0];
1449 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1450}
1451EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1452
1453
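/* Prepare a COMMIT RPC: defer to the NFS version-specific setup */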
1454void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1455{
1456 struct nfs_commit_data *data = calldata;
1457
1458 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1459}
1460
/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
1464static int nfs_should_remove_suid(const struct inode *inode)
1465{
1466 umode_t mode = inode->i_mode;
1467 int kill = 0;
1468
	/* suid always must be killed */
1470 if (unlikely(mode & S_ISUID))
1471 kill = ATTR_KILL_SUID;
1472
	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
1477 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1478 kill |= ATTR_KILL_SGID;
1479
1480 if (unlikely(kill && S_ISREG(mode)))
1481 return kill;
1482
1483 return 0;
1484}
1485
1486static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1487 struct nfs_fattr *fattr)
1488{
1489 struct nfs_pgio_args *argp = &hdr->args;
1490 struct nfs_pgio_res *resp = &hdr->res;
1491 u64 size = argp->offset + resp->count;
1492
1493 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1494 fattr->size = size;
1495 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1496 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1497 return;
1498 }
1499 if (size != fattr->size)
1500 return;
	/* Set attribute barrier */
1502 nfs_fattr_set_barrier(fattr);
	/* ...and update size */
1504 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1505}
1506
1507void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1508{
1509 struct nfs_fattr *fattr = &hdr->fattr;
1510 struct inode *inode = hdr->inode;
1511
1512 spin_lock(&inode->i_lock);
1513 nfs_writeback_check_extend(hdr, fattr);
1514 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1515 spin_unlock(&inode->i_lock);
1516}
1517EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
1518
/*
 * This function is called when the WRITE call is complete.
 */
1522static int nfs_writeback_done(struct rpc_task *task,
1523 struct nfs_pgio_header *hdr,
1524 struct inode *inode)
1525{
1526 int status;
1527
	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
1535 status = NFS_PROTO(inode)->write_done(task, hdr);
1536 if (status != 0)
1537 return status;
1538
1539 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
1540 trace_nfs_writeback_done(inode, task->tk_status,
1541 hdr->args.offset, hdr->res.verf);
1542
1543 if (hdr->res.verf->committed < hdr->args.stable &&
1544 task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *       the server reports NFS_DATA_SYNC, but performs
		 *       NFS_FILE_SYNC. We therefore implement this checking
		 *       as a dprintk() in order to avoid filling syslog.
		 */
1553 static unsigned long complain;
1554
1555
1556 if (time_before(complain, jiffies)) {
1557 dprintk("NFS: faulty NFS server %s:"
1558 " (committed = %d) != (stable = %d)\n",
1559 NFS_SERVER(inode)->nfs_client->cl_hostname,
1560 hdr->res.verf->committed, hdr->args.stable);
1561 complain = jiffies + 300 * HZ;
1562 }
1563 }
1564
	/* Deal with the suid/sgid bit corner case */
1566 if (nfs_should_remove_suid(inode)) {
1567 spin_lock(&inode->i_lock);
1568 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
1569 spin_unlock(&inode->i_lock);
1570 }
1571 return 0;
1572}
1573
/*
 * This function is called when the WRITE call is complete.
 */
1577static void nfs_writeback_result(struct rpc_task *task,
1578 struct nfs_pgio_header *hdr)
1579{
1580 struct nfs_pgio_args *argp = &hdr->args;
1581 struct nfs_pgio_res *resp = &hdr->res;
1582
1583 if (resp->count < argp->count) {
1584 static unsigned long complain;
1585
		/* This is a short write! */
1587 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
1588
		/* Has the server at least made some progress? */
1590 if (resp->count == 0) {
1591 if (time_before(complain, jiffies)) {
1592 printk(KERN_WARNING
1593 "NFS: Server wrote zero bytes, expected %u.\n",
1594 argp->count);
1595 complain = jiffies + 300 * HZ;
1596 }
1597 nfs_set_pgio_error(hdr, -EIO, argp->offset);
1598 task->tk_status = -EIO;
1599 return;
1600 }
1601
		/* For non rpc-based layout drivers, retry-through-MDS */
1603 if (!task->tk_ops) {
1604 hdr->pnfs_error = -EAGAIN;
1605 return;
1606 }
1607
		/* Was this an NFSv2 write or an NFSv3 stable write? */
1609 if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
1611 hdr->mds_offset += resp->count;
1612 argp->offset += resp->count;
1613 argp->pgbase += resp->count;
1614 argp->count -= resp->count;
1615 } else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
1619 argp->stable = NFS_FILE_SYNC;
1620 }
1621 rpc_restart_call_prepare(task);
1622 }
1623}
1624
1625static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
1626{
1627 return wait_var_event_killable(&cinfo->rpcs_out,
1628 !atomic_read(&cinfo->rpcs_out));
1629}
1630
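/*
 * Track the number of outstanding COMMIT RPCs; wait_on_commit() sleeps
 * until nfs_commit_end() drops rpcs_out to zero.
 */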
1631static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
1632{
1633 atomic_inc(&cinfo->rpcs_out);
1634}
1635
1636static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
1637{
1638 if (atomic_dec_and_test(&cinfo->rpcs_out))
1639 wake_up_var(&cinfo->rpcs_out);
1640}
1641
1642void nfs_commitdata_release(struct nfs_commit_data *data)
1643{
1644 put_nfs_open_context(data->context);
1645 nfs_commit_free(data);
1646}
1647EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1648
1649int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1650 const struct nfs_rpc_ops *nfs_ops,
1651 const struct rpc_call_ops *call_ops,
1652 int how, int flags)
1653{
1654 struct rpc_task *task;
1655 int priority = flush_task_priority(how);
1656 struct rpc_message msg = {
1657 .rpc_argp = &data->args,
1658 .rpc_resp = &data->res,
1659 .rpc_cred = data->cred,
1660 };
1661 struct rpc_task_setup task_setup_data = {
1662 .task = &data->task,
1663 .rpc_client = clnt,
1664 .rpc_message = &msg,
1665 .callback_ops = call_ops,
1666 .callback_data = data,
1667 .workqueue = nfsiod_workqueue,
1668 .flags = RPC_TASK_ASYNC | flags,
1669 .priority = priority,
1670 };
1671
1672 nfs_ops->commit_setup(data, &msg);
1673 trace_nfs_initiate_commit(data);
1674
1675 dprintk("NFS: initiated commit call\n");
1676
1677 nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
1678 NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
1679
1680 task = rpc_run_task(&task_setup_data);
1681 if (IS_ERR(task))
1682 return PTR_ERR(task);
1683 if (how & FLUSH_SYNC)
1684 rpc_wait_for_completion_task(task);
1685 rpc_put_task(task);
1686 return 0;
1687}
1688EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1689
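/* Find the last written byte covered by the requests on @head (used for the layout commit range) */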
1690static loff_t nfs_get_lwb(struct list_head *head)
1691{
1692 loff_t lwb = 0;
1693 struct nfs_page *req;
1694
1695 list_for_each_entry(req, head, wb_list)
1696 if (lwb < (req_offset(req) + req->wb_bytes))
1697 lwb = req_offset(req) + req->wb_bytes;
1698
1699 return lwb;
1700}
1701
/*
 * Set up the argument/result storage required for the RPC call.
 */
1705void nfs_init_commit(struct nfs_commit_data *data,
1706 struct list_head *head,
1707 struct pnfs_layout_segment *lseg,
1708 struct nfs_commit_info *cinfo)
1709{
1710 struct nfs_page *first = nfs_list_entry(head->next);
1711 struct inode *inode = d_inode(first->wb_context->dentry);
1712
	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */
1715
1716 list_splice_init(head, &data->pages);
1717
1718 data->inode = inode;
1719 data->cred = first->wb_context->cred;
1720 data->lseg = lseg;
1721
1722 if (lseg)
1723 data->lwb = nfs_get_lwb(&data->pages);
1724 data->mds_ops = &nfs_commit_ops;
1725 data->completion_ops = cinfo->completion_ops;
1726 data->dreq = cinfo->dreq;
1727
1728 data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
1730 data->args.offset = 0;
1731 data->args.count = 0;
1732 data->context = get_nfs_open_context(first->wb_context);
1733 data->res.fattr = &data->fattr;
1734 data->res.verf = &data->verf;
1735 nfs_fattr_init(&data->fattr);
1736}
1737EXPORT_SYMBOL_GPL(nfs_init_commit);
1738
1739void nfs_retry_commit(struct list_head *page_list,
1740 struct pnfs_layout_segment *lseg,
1741 struct nfs_commit_info *cinfo,
1742 u32 ds_commit_idx)
1743{
1744 struct nfs_page *req;
1745
1746 while (!list_empty(page_list)) {
1747 req = nfs_list_entry(page_list->next);
1748 nfs_list_remove_request(req);
1749 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
1750 if (!cinfo->dreq)
1751 nfs_clear_page_commit(req->wb_page);
1752 nfs_unlock_and_release_request(req);
1753 }
1754}
1755EXPORT_SYMBOL_GPL(nfs_retry_commit);
1756
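/* A commit for this request must be resent: just redirty its page */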
1757static void
1758nfs_commit_resched_write(struct nfs_commit_info *cinfo,
1759 struct nfs_page *req)
1760{
1761 __set_page_dirty_nobuffers(req->wb_page);
1762}
1763
/*
 * Commit dirty pages
 */
1767static int
1768nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1769 struct nfs_commit_info *cinfo)
1770{
1771 struct nfs_commit_data *data;
1772
1773
1774 if (list_empty(head))
1775 return 0;
1776
1777 data = nfs_commitdata_alloc(true);
1778
	/* Set up the argument struct */
1780 nfs_init_commit(data, head, NULL, cinfo);
1781 atomic_inc(&cinfo->mds->rpcs_out);
1782 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
1783 data->mds_ops, how, 0);
1784}
1785
/*
 * COMMIT call returned
 */
1789static void nfs_commit_done(struct rpc_task *task, void *calldata)
1790{
1791 struct nfs_commit_data *data = calldata;
1792
1793 dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1794 task->tk_pid, task->tk_status);
1795
	/* Call the NFS version-specific code */
1797 NFS_PROTO(data->inode)->commit_done(task, data);
1798 trace_nfs_commit_done(data);
1799}
1800
1801static void nfs_commit_release_pages(struct nfs_commit_data *data)
1802{
1803 struct nfs_page *req;
1804 int status = data->task.tk_status;
1805 struct nfs_commit_info cinfo;
1806 struct nfs_server *nfss;
1807
1808 while (!list_empty(&data->pages)) {
1809 req = nfs_list_entry(data->pages.next);
1810 nfs_list_remove_request(req);
1811 if (req->wb_page)
1812 nfs_clear_page_commit(req->wb_page);
1813
1814 dprintk("NFS: commit (%s/%llu %d@%lld)",
1815 req->wb_context->dentry->d_sb->s_id,
1816 (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
1817 req->wb_bytes,
1818 (long long)req_offset(req));
1819 if (status < 0) {
1820 nfs_context_set_write_error(req->wb_context, status);
1821 if (req->wb_page)
1822 nfs_inode_remove_request(req);
1823 dprintk_cont(", error = %d\n", status);
1824 goto next;
1825 }
1826
		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
1829 if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
			/* We have a match */
1831 if (req->wb_page)
1832 nfs_inode_remove_request(req);
1833 dprintk_cont(" OK\n");
1834 goto next;
1835 }
1836
1837 dprintk_cont(" mismatch\n");
1838 nfs_mark_request_dirty(req);
1839 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1840 next:
1841 nfs_unlock_and_release_request(req);
		/* Latency breaker */
1843 cond_resched();
1844 }
1845 nfss = NFS_SERVER(data->inode);
1846 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
1847 clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);
1848
1849 nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1850 nfs_commit_end(cinfo.mds);
1851}
1852
1853static void nfs_commit_release(void *calldata)
1854{
1855 struct nfs_commit_data *data = calldata;
1856
1857 data->completion_ops->completion(data);
1858 nfs_commitdata_release(calldata);
1859}
1860
1861static const struct rpc_call_ops nfs_commit_ops = {
1862 .rpc_call_prepare = nfs_commit_prepare,
1863 .rpc_call_done = nfs_commit_done,
1864 .rpc_release = nfs_commit_release,
1865};
1866
1867static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1868 .completion = nfs_commit_release_pages,
1869 .resched_write = nfs_commit_resched_write,
1870};
1871
1872int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1873 int how, struct nfs_commit_info *cinfo)
1874{
1875 int status;
1876
1877 status = pnfs_commit_list(inode, head, how, cinfo);
1878 if (status == PNFS_NOT_ATTEMPTED)
1879 status = nfs_commit_list(inode, head, how, cinfo);
1880 return status;
1881}
1882
1883static int __nfs_commit_inode(struct inode *inode, int how,
1884 struct writeback_control *wbc)
1885{
1886 LIST_HEAD(head);
1887 struct nfs_commit_info cinfo;
1888 int may_wait = how & FLUSH_SYNC;
1889 int ret, nscan;
1890
1891 nfs_init_cinfo_from_inode(&cinfo, inode);
1892 nfs_commit_begin(cinfo.mds);
1893 for (;;) {
1894 ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
1895 if (ret <= 0)
1896 break;
1897 ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
1898 if (ret < 0)
1899 break;
1900 ret = 0;
1901 if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
1902 if (nscan < wbc->nr_to_write)
1903 wbc->nr_to_write -= nscan;
1904 else
1905 wbc->nr_to_write = 0;
1906 }
1907 if (nscan < INT_MAX)
1908 break;
1909 cond_resched();
1910 }
1911 nfs_commit_end(cinfo.mds);
1912 if (ret || !may_wait)
1913 return ret;
1914 return wait_on_commit(cinfo.mds);
1915}
1916
1917int nfs_commit_inode(struct inode *inode, int how)
1918{
1919 return __nfs_commit_inode(inode, how, NULL);
1920}
1921EXPORT_SYMBOL_GPL(nfs_commit_inode);
1922
1923int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1924{
1925 struct nfs_inode *nfsi = NFS_I(inode);
1926 int flags = FLUSH_SYNC;
1927 int ret = 0;
1928
1929 if (wbc->sync_mode == WB_SYNC_NONE) {
		/* no commits means nothing needs to be done */
1931 if (!atomic_long_read(&nfsi->commit_info.ncommit))
1932 goto check_requests_outstanding;
1933
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
1937 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1938 goto out_mark_dirty;
1939
		/* don't wait for the COMMIT response */
1941 flags = 0;
1942 }
1943
1944 ret = __nfs_commit_inode(inode, flags, wbc);
1945 if (!ret) {
1946 if (flags & FLUSH_SYNC)
1947 return 0;
1948 } else if (atomic_long_read(&nfsi->commit_info.ncommit))
1949 goto out_mark_dirty;
1950
1951check_requests_outstanding:
1952 if (!atomic_read(&nfsi->commit_info.rpcs_out))
1953 return ret;
1954out_mark_dirty:
1955 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1956 return ret;
1957}
1958EXPORT_SYMBOL_GPL(nfs_write_inode);
1959
/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
1966int nfs_filemap_write_and_wait_range(struct address_space *mapping,
1967 loff_t lstart, loff_t lend)
1968{
1969 int ret;
1970
1971 ret = filemap_write_and_wait_range(mapping, lstart, lend);
1972 if (ret == 0)
1973 ret = pnfs_sync_inode(mapping->host, true);
1974 return ret;
1975}
1976EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
1977
/*
 * flush the inode to disk.
 */
1981int nfs_wb_all(struct inode *inode)
1982{
1983 int ret;
1984
1985 trace_nfs_writeback_inode_enter(inode);
1986
1987 ret = filemap_write_and_wait(inode->i_mapping);
1988 if (ret)
1989 goto out;
1990 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1991 if (ret < 0)
1992 goto out;
1993 pnfs_sync_inode(inode, true);
1994 ret = 0;
1995
1996out:
1997 trace_nfs_writeback_inode_exit(inode, ret);
1998 return ret;
1999}
2000EXPORT_SYMBOL_GPL(nfs_wb_all);
2001
2002int nfs_wb_page_cancel(struct inode *inode, struct page *page)
2003{
2004 struct nfs_page *req;
2005 int ret = 0;
2006
2007 wait_on_page_writeback(page);
2008
	/* blocking call to cancel all requests and join to a single (head)
	 * request */
2011 req = nfs_lock_and_join_requests(page);
2012
2013 if (IS_ERR(req)) {
2014 ret = PTR_ERR(req);
2015 } else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_unlock_and_release_request(), so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
2020 nfs_inode_remove_request(req);
2021 nfs_unlock_and_release_request(req);
2022 }
2023
2024 return ret;
2025}
2026
/*
 * Write back all requests on one page - we do this before reading it.
 */
2030int nfs_wb_page(struct inode *inode, struct page *page)
2031{
2032 loff_t range_start = page_file_offset(page);
2033 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
2034 struct writeback_control wbc = {
2035 .sync_mode = WB_SYNC_ALL,
2036 .nr_to_write = 0,
2037 .range_start = range_start,
2038 .range_end = range_end,
2039 };
2040 int ret;
2041
2042 trace_nfs_writeback_page_enter(inode);
2043
2044 for (;;) {
2045 wait_on_page_writeback(page);
2046 if (clear_page_dirty_for_io(page)) {
2047 ret = nfs_writepage_locked(page, &wbc);
2048 if (ret < 0)
2049 goto out_error;
2050 continue;
2051 }
2052 ret = 0;
2053 if (!PagePrivate(page))
2054 break;
2055 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2056 if (ret < 0)
2057 goto out_error;
2058 }
2059out_error:
2060 trace_nfs_writeback_page_exit(inode, ret);
2061 return ret;
2062}
2063
2064#ifdef CONFIG_MIGRATION
2065int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
2066 struct page *page, enum migrate_mode mode)
2067{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
2076 if (PagePrivate(page))
2077 return -EBUSY;
2078
2079 if (!nfs_fscache_release_page(page, GFP_KERNEL))
2080 return -EBUSY;
2081
2082 return migrate_page(mapping, newpage, page, mode);
2083}
2084#endif
2085
2086int __init nfs_init_writepagecache(void)
2087{
2088 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
2089 sizeof(struct nfs_pgio_header),
2090 0, SLAB_HWCACHE_ALIGN,
2091 NULL);
2092 if (nfs_wdata_cachep == NULL)
2093 return -ENOMEM;
2094
2095 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
2096 nfs_wdata_cachep);
2097 if (nfs_wdata_mempool == NULL)
2098 goto out_destroy_write_cache;
2099
2100 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
2101 sizeof(struct nfs_commit_data),
2102 0, SLAB_HWCACHE_ALIGN,
2103 NULL);
2104 if (nfs_cdata_cachep == NULL)
2105 goto out_destroy_write_mempool;
2106
2107 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
2108 nfs_cdata_cachep);
2109 if (nfs_commit_mempool == NULL)
2110 goto out_destroy_commit_cache;
2111
	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
2128 nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
2129 if (nfs_congestion_kb > 256*1024)
2130 nfs_congestion_kb = 256*1024;
2131
2132 return 0;
2133
2134out_destroy_commit_cache:
2135 kmem_cache_destroy(nfs_cdata_cachep);
2136out_destroy_write_mempool:
2137 mempool_destroy(nfs_wdata_mempool);
2138out_destroy_write_cache:
2139 kmem_cache_destroy(nfs_wdata_cachep);
2140 return -ENOMEM;
2141}
2142
2143void nfs_destroy_writepagecache(void)
2144{
2145 mempool_destroy(nfs_commit_mempool);
2146 kmem_cache_destroy(nfs_cdata_cachep);
2147 mempool_destroy(nfs_wdata_mempool);
2148 kmem_cache_destroy(nfs_wdata_cachep);
2149}
2150
2151static const struct nfs_rw_ops nfs_rw_write_ops = {
2152 .rw_alloc_header = nfs_writehdr_alloc,
2153 .rw_free_header = nfs_writehdr_free,
2154 .rw_done = nfs_writeback_done,
2155 .rw_result = nfs_writeback_result,
2156 .rw_initiate = nfs_initiate_write,
2157};
2158