/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/mm.h>
12#include <linux/pagemap.h>
13#include <linux/file.h>
14#include <linux/writeback.h>
15#include <linux/swap.h>
16#include <linux/migrate.h>
17
18#include <linux/sunrpc/clnt.h>
19#include <linux/nfs_fs.h>
20#include <linux/nfs_mount.h>
21#include <linux/nfs_page.h>
22#include <linux/backing-dev.h>
23#include <linux/export.h>
24#include <linux/freezer.h>
25#include <linux/wait.h>
26#include <linux/iversion.h>
27
28#include <linux/uaccess.h>
29#include <linux/sched/mm.h>
30
31#include "delegation.h"
32#include "internal.h"
33#include "iostat.h"
34#include "nfs4_fs.h"
35#include "fscache.h"
36#include "pnfs.h"
37
38#include "nfstrace.h"
39
40#define NFSDBG_FACILITY NFSDBG_PAGECACHE
41
42#define MIN_POOL_WRITE (32)
43#define MIN_POOL_COMMIT (4)
44
45struct nfs_io_completion {
46 void (*complete)(void *data);
47 void *data;
48 struct kref refcount;
49};
50
/*
 * Local function declarations
 */
54static void nfs_redirty_request(struct nfs_page *req);
55static const struct rpc_call_ops nfs_commit_ops;
56static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
57static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
58static const struct nfs_rw_ops nfs_rw_write_ops;
59static void nfs_inode_remove_request(struct nfs_page *req);
60static void nfs_clear_request_commit(struct nfs_page *req);
61static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
62 struct inode *inode);
63static struct nfs_page *
64nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
65 struct page *page);
66
67static struct kmem_cache *nfs_wdata_cachep;
68static mempool_t *nfs_wdata_mempool;
69static struct kmem_cache *nfs_cdata_cachep;
70static mempool_t *nfs_commit_mempool;
71
72struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
73{
74 struct nfs_commit_data *p;
75
76 if (never_fail)
77 p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
78 else {
		/* It is OK to do some reclaim here, but it is not safe to
		 * wait for anything to be returned to the pool.
		 * mempool_alloc() cannot handle that particular combination,
		 * so we need two separate attempts.
		 */
84 p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
85 if (!p)
86 p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
87 __GFP_NOWARN | __GFP_NORETRY);
88 if (!p)
89 return NULL;
90 }
91
92 memset(p, 0, sizeof(*p));
93 INIT_LIST_HEAD(&p->pages);
94 return p;
95}
96EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
97
98void nfs_commit_free(struct nfs_commit_data *p)
99{
100 mempool_free(p, nfs_commit_mempool);
101}
102EXPORT_SYMBOL_GPL(nfs_commit_free);
103
104static struct nfs_pgio_header *nfs_writehdr_alloc(void)
105{
106 struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);
107
108 memset(p, 0, sizeof(*p));
109 p->rw_mode = FMODE_WRITE;
110 return p;
111}
112
113static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
114{
115 mempool_free(hdr, nfs_wdata_mempool);
116}
117
118static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
119{
120 return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
121}
122
123static void nfs_io_completion_init(struct nfs_io_completion *ioc,
124 void (*complete)(void *), void *data)
125{
126 ioc->complete = complete;
127 ioc->data = data;
128 kref_init(&ioc->refcount);
129}
130
131static void nfs_io_completion_release(struct kref *kref)
132{
133 struct nfs_io_completion *ioc = container_of(kref,
134 struct nfs_io_completion, refcount);
135 ioc->complete(ioc->data);
136 kfree(ioc);
137}
138
139static void nfs_io_completion_get(struct nfs_io_completion *ioc)
140{
141 if (ioc != NULL)
142 kref_get(&ioc->refcount);
143}
144
145static void nfs_io_completion_put(struct nfs_io_completion *ioc)
146{
147 if (ioc != NULL)
148 kref_put(&ioc->refcount, nfs_io_completion_release);
149}
150
151static void
152nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
153{
154 if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
155 kref_get(&req->wb_kref);
156 atomic_long_inc(&NFS_I(inode)->nrequests);
157 }
158}
159
160static int
161nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
162{
163 int ret;
164
165 if (!test_bit(PG_REMOVE, &req->wb_flags))
166 return 0;
167 ret = nfs_page_group_lock(req);
168 if (ret)
169 return ret;
170 if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
171 nfs_page_set_inode_ref(req, inode);
172 nfs_page_group_unlock(req);
173 return 0;
174}
175
176static struct nfs_page *
177nfs_page_private_request(struct page *page)
178{
179 if (!PagePrivate(page))
180 return NULL;
181 return (struct nfs_page *)page_private(page);
182}
183
/*
 * nfs_page_find_private_request - find the head request associated with a page
 *
 * Only used for pages that are not in the swap cache; takes the mapping's
 * private_lock and looks up the request stored in page_private().
 *
 * Returns the matching head request with a reference held, or NULL.
 */
191static struct nfs_page *
192nfs_page_find_private_request(struct page *page)
193{
194 struct address_space *mapping = page_file_mapping(page);
195 struct nfs_page *req;
196
197 if (!PagePrivate(page))
198 return NULL;
199 spin_lock(&mapping->private_lock);
200 req = nfs_page_private_request(page);
201 if (req) {
202 WARN_ON_ONCE(req->wb_head != req);
203 kref_get(&req->wb_kref);
204 }
205 spin_unlock(&mapping->private_lock);
206 return req;
207}
208
209static struct nfs_page *
210nfs_page_find_swap_request(struct page *page)
211{
212 struct inode *inode = page_file_mapping(page)->host;
213 struct nfs_inode *nfsi = NFS_I(inode);
214 struct nfs_page *req = NULL;
215 if (!PageSwapCache(page))
216 return NULL;
217 mutex_lock(&nfsi->commit_mutex);
218 if (PageSwapCache(page)) {
219 req = nfs_page_search_commits_for_head_request_locked(nfsi,
220 page);
221 if (req) {
222 WARN_ON_ONCE(req->wb_head != req);
223 kref_get(&req->wb_kref);
224 }
225 }
226 mutex_unlock(&nfsi->commit_mutex);
227 return req;
228}
229
/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * Returns the matching head request with a reference held, or NULL if
 * not found.
 */
235static struct nfs_page *nfs_page_find_head_request(struct page *page)
236{
237 struct nfs_page *req;
238
239 req = nfs_page_find_private_request(page);
240 if (!req)
241 req = nfs_page_find_swap_request(page);
242 return req;
243}
244
245static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
246{
247 struct inode *inode = page_file_mapping(page)->host;
248 struct nfs_page *req, *head;
249 int ret;
250
251 for (;;) {
252 req = nfs_page_find_head_request(page);
253 if (!req)
254 return req;
255 head = nfs_page_group_lock_head(req);
256 if (head != req)
257 nfs_release_request(req);
258 if (IS_ERR(head))
259 return head;
260 ret = nfs_cancel_remove_inode(head, inode);
261 if (ret < 0) {
262 nfs_unlock_and_release_request(head);
263 return ERR_PTR(ret);
264 }
265
266 if (head == nfs_page_private_request(page))
267 break;
268 if (PageSwapCache(page))
269 break;
270 nfs_unlock_and_release_request(head);
271 }
272 return head;
273}
274
/* Adjust the file length if we're writing beyond the end */
276static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
277{
278 struct inode *inode = page_file_mapping(page)->host;
279 loff_t end, i_size;
280 pgoff_t end_index;
281
282 spin_lock(&inode->i_lock);
283 i_size = i_size_read(inode);
284 end_index = (i_size - 1) >> PAGE_SHIFT;
285 if (i_size > 0 && page_index(page) < end_index)
286 goto out;
287 end = page_file_offset(page) + ((loff_t)offset+count);
288 if (i_size >= end)
289 goto out;
290 i_size_write(inode, end);
291 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
292 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
293out:
294 spin_unlock(&inode->i_lock);
295}
296
297
298static void nfs_set_pageerror(struct address_space *mapping)
299{
300 struct inode *inode = mapping->host;
301
302 nfs_zap_mapping(mapping->host, mapping);
303
304 spin_lock(&inode->i_lock);
305 nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
306 NFS_INO_REVAL_PAGECACHE |
307 NFS_INO_INVALID_SIZE);
308 spin_unlock(&inode->i_lock);
309}
310
311static void nfs_mapping_set_error(struct page *page, int error)
312{
313 struct address_space *mapping = page_file_mapping(page);
314
315 SetPageError(page);
316 mapping_set_error(mapping, error);
317 nfs_set_pageerror(mapping);
318}
319
/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held.
 */
333static struct nfs_page *
334nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
335{
336 struct nfs_page *req;
337
338 req = head;
339 do {
340 if (page_offset >= req->wb_pgbase &&
341 page_offset < (req->wb_pgbase + req->wb_bytes))
342 return req;
343
344 req = req->wb_this_page;
345 } while (req != head);
346
347 return NULL;
348}
349
/*
 * nfs_page_group_covers_page
 * @req - request in page group
 *
 * Return true if the page group with head @req->wb_head covers the whole
 * page, false otherwise.
 */
357static bool nfs_page_group_covers_page(struct nfs_page *req)
358{
359 struct nfs_page *tmp;
360 unsigned int pos = 0;
361 unsigned int len = nfs_page_length(req->wb_page);
362
363 nfs_page_group_lock(req);
364
365 for (;;) {
366 tmp = nfs_page_group_search_locked(req->wb_head, pos);
367 if (!tmp)
368 break;
369 pos = tmp->wb_pgbase + tmp->wb_bytes;
370 }
371
372 nfs_page_group_unlock(req);
373 return pos >= len;
374}
375
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
379static void nfs_mark_uptodate(struct nfs_page *req)
380{
381 if (PageUptodate(req->wb_page))
382 return;
383 if (!nfs_page_group_covers_page(req))
384 return;
385 SetPageUptodate(req->wb_page);
386}
387
388static int wb_priority(struct writeback_control *wbc)
389{
390 int ret = 0;
391
392 if (wbc->sync_mode == WB_SYNC_ALL)
393 ret = FLUSH_COND_STABLE;
394 return ret;
395}
396
/*
 * NFS congestion control
 */
401int nfs_congestion_kb;
402
403#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
404#define NFS_CONGESTION_OFF_THRESH \
405 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
406
407static void nfs_set_page_writeback(struct page *page)
408{
409 struct inode *inode = page_file_mapping(page)->host;
410 struct nfs_server *nfss = NFS_SERVER(inode);
411 int ret = test_set_page_writeback(page);
412
413 WARN_ON_ONCE(ret != 0);
414
415 if (atomic_long_inc_return(&nfss->writeback) >
416 NFS_CONGESTION_ON_THRESH)
417 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
418}
419
420static void nfs_end_page_writeback(struct nfs_page *req)
421{
422 struct inode *inode = page_file_mapping(req->wb_page)->host;
423 struct nfs_server *nfss = NFS_SERVER(inode);
424 bool is_done;
425
426 is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
427 nfs_unlock_request(req);
428 if (!is_done)
429 return;
430
431 end_page_writeback(req->wb_page);
432 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
433 clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
434}
435
/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 * @inode - inode the requests belonged to
 *
 * All subrequests must be locked and removed from all lists, so at this
 * point they are only "active" in this function, and possibly in
 * nfs_wait_on_request() with a reference held.
 */
446static void
447nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
448 struct nfs_page *old_head,
449 struct inode *inode)
450{
451 while (destroy_list) {
452 struct nfs_page *subreq = destroy_list;
453
454 destroy_list = (subreq->wb_this_page == old_head) ?
455 NULL : subreq->wb_this_page;
456
457
458 nfs_page_set_headlock(subreq);
459 WARN_ON_ONCE(old_head != subreq->wb_head);
460
461
462 subreq->wb_this_page = subreq;
463 subreq->wb_head = subreq;
464
465 clear_bit(PG_REMOVE, &subreq->wb_flags);
466
467
468 if (!kref_read(&subreq->wb_kref)) {
469
470 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
471 nfs_page_clear_headlock(subreq);
472 nfs_free_request(subreq);
473 } else
474 nfs_page_clear_headlock(subreq);
475 continue;
476 }
477 nfs_page_clear_headlock(subreq);
478
479 nfs_release_request(old_head);
480
481 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
482 nfs_release_request(subreq);
483 atomic_long_dec(&NFS_I(inode)->nrequests);
484 }
485
486
487
488 nfs_unlock_and_release_request(subreq);
489 }
490}
491
/*
 * nfs_join_page_group - destroy subrequests of the head req
 * @head: the head request of the page group
 * @inode: Inode to which the request belongs.
 *
 * This function joins all subrequests to the head request by updating the
 * head request to cover the whole range covered by the (former) group,
 * removing every request from any write or commit list, and unlinking the
 * subrequests from the group so they can be destroyed.
 */
503void
504nfs_join_page_group(struct nfs_page *head, struct inode *inode)
505{
506 struct nfs_page *subreq;
507 struct nfs_page *destroy_list = NULL;
508 unsigned int pgbase, off, bytes;
509
510 pgbase = head->wb_pgbase;
511 bytes = head->wb_bytes;
512 off = head->wb_offset;
513 for (subreq = head->wb_this_page; subreq != head;
514 subreq = subreq->wb_this_page) {
515
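		/* extend the head request's range to cover this subrequest */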
516 if (pgbase > subreq->wb_pgbase) {
517 off -= pgbase - subreq->wb_pgbase;
518 bytes += pgbase - subreq->wb_pgbase;
519 pgbase = subreq->wb_pgbase;
520 }
521 bytes = max(subreq->wb_pgbase + subreq->wb_bytes
522 - pgbase, bytes);
523 }
524
525
526 head->wb_pgbase = pgbase;
527 head->wb_bytes = bytes;
528 head->wb_offset = off;
529
530
531
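	/* Now that the head covers the whole range, make sure no request
	 * in the group remains on a commit list. */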
532 subreq = head;
533 do {
534 nfs_clear_request_commit(subreq);
535 subreq = subreq->wb_this_page;
536 } while (subreq != head);
537
538
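	/* unlink the subrequests from the head; they are destroyed below */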
539 if (head->wb_this_page != head) {
540
541 destroy_list = head->wb_this_page;
542 head->wb_this_page = head;
543 }
544
545 nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
546}
547
/*
 * nfs_lock_and_join_requests - join all subreqs to the head req
 * @page: the page used to look up the "page group" of nfs_page structures
 *
 * This function joins all subrequests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered
 * by the (former) group.  All subrequests are removed from any write or
 * commit lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns an ERR_PTR if something went wrong.
 */
563static struct nfs_page *
564nfs_lock_and_join_requests(struct page *page)
565{
566 struct inode *inode = page_file_mapping(page)->host;
567 struct nfs_page *head;
568 int ret;
569
570
571
572
573
574
575 head = nfs_find_and_lock_page_request(page);
576 if (IS_ERR_OR_NULL(head))
577 return head;
578
579
580 ret = nfs_page_group_lock_subrequests(head);
581 if (ret < 0) {
582 nfs_unlock_and_release_request(head);
583 return ERR_PTR(ret);
584 }
585
586 nfs_join_page_group(head, inode);
587
588 return head;
589}
590
591static void nfs_write_error(struct nfs_page *req, int error)
592{
593 trace_nfs_write_error(req, error);
594 nfs_mapping_set_error(req->wb_page, error);
595 nfs_inode_remove_request(req);
596 nfs_end_page_writeback(req);
597 nfs_release_request(req);
598}
599
/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error if the user signalled nfs_wait_on_request().
 */
604static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
605 struct page *page)
606{
607 struct nfs_page *req;
608 int ret = 0;
609
610 req = nfs_lock_and_join_requests(page);
611 if (!req)
612 goto out;
613 ret = PTR_ERR(req);
614 if (IS_ERR(req))
615 goto out;
616
617 nfs_set_page_writeback(page);
618 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
619
620
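	/* If the pageio descriptor already saw a fatal server error,
	 * give up on this page as well. */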
621 ret = pgio->pg_error;
622 if (nfs_error_is_fatal_on_server(ret))
623 goto out_launder;
624
625 ret = 0;
626 if (!nfs_pageio_add_request(pgio, req)) {
627 ret = pgio->pg_error;
628
629
630
631 if (nfs_error_is_fatal(ret)) {
632 if (nfs_error_is_fatal_on_server(ret))
633 goto out_launder;
634 } else
635 ret = -EAGAIN;
636 nfs_redirty_request(req);
637 pgio->pg_error = 0;
638 } else
639 nfs_add_stats(page_file_mapping(page)->host,
640 NFSIOS_WRITEPAGES, 1);
641out:
642 return ret;
643out_launder:
644 nfs_write_error(req, ret);
645 return 0;
646}
647
648static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
649 struct nfs_pageio_descriptor *pgio)
650{
651 int ret;
652
653 nfs_pageio_cond_complete(pgio, page_index(page));
654 ret = nfs_page_async_flush(pgio, page);
655 if (ret == -EAGAIN) {
656 redirty_page_for_writepage(wbc, page);
657 ret = AOP_WRITEPAGE_ACTIVATE;
658 }
659 return ret;
660}
661
/*
 * Write an mmapped page to the server.
 */
665static int nfs_writepage_locked(struct page *page,
666 struct writeback_control *wbc)
667{
668 struct nfs_pageio_descriptor pgio;
669 struct inode *inode = page_file_mapping(page)->host;
670 int err;
671
672 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
673 nfs_pageio_init_write(&pgio, inode, 0,
674 false, &nfs_async_write_completion_ops);
675 err = nfs_do_writepage(page, wbc, &pgio);
676 pgio.pg_error = 0;
677 nfs_pageio_complete(&pgio);
678 if (err < 0)
679 return err;
680 if (nfs_error_is_fatal(pgio.pg_error))
681 return pgio.pg_error;
682 return 0;
683}
684
685int nfs_writepage(struct page *page, struct writeback_control *wbc)
686{
687 int ret;
688
689 ret = nfs_writepage_locked(page, wbc);
690 if (ret != AOP_WRITEPAGE_ACTIVATE)
691 unlock_page(page);
692 return ret;
693}
694
695static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
696{
697 int ret;
698
699 ret = nfs_do_writepage(page, wbc, data);
700 if (ret != AOP_WRITEPAGE_ACTIVATE)
701 unlock_page(page);
702 return ret;
703}
704
705static void nfs_io_completion_commit(void *inode)
706{
707 nfs_commit_inode(inode, 0);
708}
709
710int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
711{
712 struct inode *inode = mapping->host;
713 struct nfs_pageio_descriptor pgio;
714 struct nfs_io_completion *ioc;
715 int err;
716
717 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
718
719 ioc = nfs_io_completion_alloc(GFP_KERNEL);
720 if (ioc)
721 nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
722
723 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
724 &nfs_async_write_completion_ops);
725 pgio.pg_io_completion = ioc;
726 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
727 pgio.pg_error = 0;
728 nfs_pageio_complete(&pgio);
729 nfs_io_completion_put(ioc);
730
731 if (err < 0)
732 goto out_err;
733 err = pgio.pg_error;
734 if (nfs_error_is_fatal(err))
735 goto out_err;
736 return 0;
737out_err:
738 return err;
739}
740
/*
 * Insert a write request into an inode
 */
744static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
745{
746 struct address_space *mapping = page_file_mapping(req->wb_page);
747 struct nfs_inode *nfsi = NFS_I(inode);
748
749 WARN_ON_ONCE(req->wb_this_page != req);
750
751
752 nfs_lock_request(req);
753
754
755
756
757
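	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */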
758 spin_lock(&mapping->private_lock);
759 if (likely(!PageSwapCache(req->wb_page))) {
760 set_bit(PG_MAPPED, &req->wb_flags);
761 SetPagePrivate(req->wb_page);
762 set_page_private(req->wb_page, (unsigned long)req);
763 }
764 spin_unlock(&mapping->private_lock);
765 atomic_long_inc(&nfsi->nrequests);
766
767
768
769
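	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */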
770 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
771 kref_get(&req->wb_kref);
772}
773
/*
 * Remove a write request from an inode
 */
777static void nfs_inode_remove_request(struct nfs_page *req)
778{
779 struct address_space *mapping = page_file_mapping(req->wb_page);
780 struct inode *inode = mapping->host;
781 struct nfs_inode *nfsi = NFS_I(inode);
782 struct nfs_page *head;
783
784 if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
785 head = req->wb_head;
786
787 spin_lock(&mapping->private_lock);
788 if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
789 set_page_private(head->wb_page, 0);
790 ClearPagePrivate(head->wb_page);
791 clear_bit(PG_MAPPED, &head->wb_flags);
792 }
793 spin_unlock(&mapping->private_lock);
794 }
795
796 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
797 nfs_release_request(req);
798 atomic_long_dec(&nfsi->nrequests);
799 }
800}
801
802static void
803nfs_mark_request_dirty(struct nfs_page *req)
804{
805 if (req->wb_page)
806 __set_page_dirty_nobuffers(req->wb_page);
807}
808
/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @nfsi for the head request for @page.
 * Must be called while holding the inode (which is cinfo) lock.
 *
 * Returns the head request if found, or NULL if not found.
 */
817static struct nfs_page *
818nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
819 struct page *page)
820{
821 struct nfs_page *freq, *t;
822 struct nfs_commit_info cinfo;
823 struct inode *inode = &nfsi->vfs_inode;
824
825 nfs_init_cinfo_from_inode(&cinfo, inode);
826
827
828 freq = pnfs_search_commit_reqs(inode, &cinfo, page);
829 if (freq)
830 return freq->wb_head;
831
832
833 list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
834 if (freq->wb_page == page)
835 return freq->wb_head;
836 }
837
838 return NULL;
839}
840
/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit and bumps the cinfo count of outstanding
 * requests that require a commit.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
854void
855nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
856 struct nfs_commit_info *cinfo)
857{
858 set_bit(PG_CLEAN, &req->wb_flags);
859 nfs_list_add_request(req, dst);
860 atomic_long_inc(&cinfo->mds->ncommit);
861}
862EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
863
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of outstanding
 * requests that require a commit, and marks the page as unstable in the
 * MM page stats.
 *
 * The caller must _not_ hold the commit_mutex, but must be holding the
 * nfs_page lock.
 */
876void
877nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
878{
879 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
880 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
881 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
882 if (req->wb_page)
883 nfs_mark_page_unstable(req->wb_page, cinfo);
884}
885EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
886
/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit and updates the cinfo count of outstanding
 * requests that require a commit.  It does not update the MM page stats.
 *
 * The caller _must_ hold the commit_mutex and the nfs_page lock.
 */
898void
899nfs_request_remove_commit_list(struct nfs_page *req,
900 struct nfs_commit_info *cinfo)
901{
902 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
903 return;
904 nfs_list_remove_request(req);
905 atomic_long_dec(&cinfo->mds->ncommit);
906}
907EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
908
909static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
910 struct inode *inode)
911{
912 cinfo->inode = inode;
913 cinfo->mds = &NFS_I(inode)->commit_info;
914 cinfo->ds = pnfs_get_ds_info(inode);
915 cinfo->dreq = NULL;
916 cinfo->completion_ops = &nfs_commit_completion_ops;
917}
918
919void nfs_init_cinfo(struct nfs_commit_info *cinfo,
920 struct inode *inode,
921 struct nfs_direct_req *dreq)
922{
923 if (dreq)
924 nfs_init_cinfo_from_dreq(cinfo, dreq);
925 else
926 nfs_init_cinfo_from_inode(cinfo, inode);
927}
928EXPORT_SYMBOL_GPL(nfs_init_cinfo);
929
/*
 * Add a request to the inode's commit list.
 */
933void
934nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
935 struct nfs_commit_info *cinfo, u32 ds_commit_idx)
936{
937 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
938 return;
939 nfs_request_add_commit_list(req, cinfo);
940}
941
942static void
943nfs_clear_page_commit(struct page *page)
944{
945 dec_node_page_state(page, NR_WRITEBACK);
946 dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
947 WB_WRITEBACK);
948}
949
950
951static void
952nfs_clear_request_commit(struct nfs_page *req)
953{
954 if (test_bit(PG_CLEAN, &req->wb_flags)) {
955 struct nfs_open_context *ctx = nfs_req_openctx(req);
956 struct inode *inode = d_inode(ctx->dentry);
957 struct nfs_commit_info cinfo;
958
959 nfs_init_cinfo_from_inode(&cinfo, inode);
960 mutex_lock(&NFS_I(inode)->commit_mutex);
961 if (!pnfs_clear_request_commit(req, &cinfo)) {
962 nfs_request_remove_commit_list(req, &cinfo);
963 }
964 mutex_unlock(&NFS_I(inode)->commit_mutex);
965 nfs_clear_page_commit(req->wb_page);
966 }
967}
968
969int nfs_write_need_commit(struct nfs_pgio_header *hdr)
970{
971 if (hdr->verf.committed == NFS_DATA_SYNC)
972 return hdr->lseg == NULL;
973 return hdr->verf.committed != NFS_FILE_SYNC;
974}
975
976static void nfs_async_write_init(struct nfs_pgio_header *hdr)
977{
978 nfs_io_completion_get(hdr->io_completion);
979}
980
981static void nfs_write_completion(struct nfs_pgio_header *hdr)
982{
983 struct nfs_commit_info cinfo;
984 unsigned long bytes = 0;
985
986 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
987 goto out;
988 nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
989 while (!list_empty(&hdr->pages)) {
990 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
991
992 bytes += req->wb_bytes;
993 nfs_list_remove_request(req);
994 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
995 (hdr->good_bytes < bytes)) {
996 trace_nfs_comp_error(req, hdr->error);
997 nfs_mapping_set_error(req->wb_page, hdr->error);
998 goto remove_req;
999 }
1000 if (nfs_write_need_commit(hdr)) {
1001
1002 req->wb_nio = 0;
1003 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
1004 nfs_mark_request_commit(req, hdr->lseg, &cinfo,
1005 hdr->pgio_mirror_idx);
1006 goto next;
1007 }
1008remove_req:
1009 nfs_inode_remove_request(req);
1010next:
1011 nfs_end_page_writeback(req);
1012 nfs_release_request(req);
1013 }
1014out:
1015 nfs_io_completion_put(hdr->io_completion);
1016 hdr->release(hdr);
1017}
1018
1019unsigned long
1020nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
1021{
1022 return atomic_long_read(&cinfo->mds->ncommit);
1023}
1024
1025
1026int
1027nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
1028 struct nfs_commit_info *cinfo, int max)
1029{
1030 struct nfs_page *req, *tmp;
1031 int ret = 0;
1032
1033restart:
1034 list_for_each_entry_safe(req, tmp, src, wb_list) {
1035 kref_get(&req->wb_kref);
1036 if (!nfs_lock_request(req)) {
1037 int status;
1038
1039
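			/* Prevent deadlock with nfs_lock_and_join_requests */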
1040 if (!list_empty(dst)) {
1041 nfs_release_request(req);
1042 continue;
1043 }
1044
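			/* Ensure we make progress to prevent livelock */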
1045 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1046 status = nfs_wait_on_request(req);
1047 nfs_release_request(req);
1048 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1049 if (status < 0)
1050 break;
1051 goto restart;
1052 }
1053 nfs_request_remove_commit_list(req, cinfo);
1054 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1055 nfs_list_add_request(req, dst);
1056 ret++;
1057 if ((ret == max) && !cinfo->dreq)
1058 break;
1059 cond_resched();
1060 }
1061 return ret;
1062}
1063EXPORT_SYMBOL_GPL(nfs_scan_commit_list);
1064
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
1074int
1075nfs_scan_commit(struct inode *inode, struct list_head *dst,
1076 struct nfs_commit_info *cinfo)
1077{
1078 int ret = 0;
1079
1080 if (!atomic_long_read(&cinfo->mds->ncommit))
1081 return 0;
1082 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1083 if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
1084 const int max = INT_MAX;
1085
1086 ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
1087 cinfo, max);
1088 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
1089 }
1090 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1091 return ret;
1092}
1093
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
1101static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
1102 struct page *page,
1103 unsigned int offset,
1104 unsigned int bytes)
1105{
1106 struct nfs_page *req;
1107 unsigned int rqend;
1108 unsigned int end;
1109 int error;
1110
1111 end = offset + bytes;
1112
1113 req = nfs_lock_and_join_requests(page);
1114 if (IS_ERR_OR_NULL(req))
1115 return req;
1116
1117 rqend = req->wb_offset + req->wb_bytes;
1118
1119
1120
1121
1122
1123
1124 if (offset > rqend || end < req->wb_offset)
1125 goto out_flushme;
1126
1127
1128 if (offset < req->wb_offset) {
1129 req->wb_offset = offset;
1130 req->wb_pgbase = offset;
1131 }
1132 if (end > rqend)
1133 req->wb_bytes = end - req->wb_offset;
1134 else
1135 req->wb_bytes = rqend - req->wb_offset;
1136 req->wb_nio = 0;
1137 return req;
1138out_flushme:
1139
1140
1141
1142
1143
1144 nfs_mark_request_dirty(req);
1145 nfs_unlock_and_release_request(req);
1146 error = nfs_wb_page(inode, page);
1147 return (error < 0) ? ERR_PTR(error) : NULL;
1148}
1149
/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
1157static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
1158 struct page *page, unsigned int offset, unsigned int bytes)
1159{
1160 struct inode *inode = page_file_mapping(page)->host;
1161 struct nfs_page *req;
1162
1163 req = nfs_try_to_update_request(inode, page, offset, bytes);
1164 if (req != NULL)
1165 goto out;
1166 req = nfs_create_request(ctx, page, offset, bytes);
1167 if (IS_ERR(req))
1168 goto out;
1169 nfs_inode_add_request(inode, req);
1170out:
1171 return req;
1172}
1173
1174static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
1175 unsigned int offset, unsigned int count)
1176{
1177 struct nfs_page *req;
1178
1179 req = nfs_setup_write_request(ctx, page, offset, count);
1180 if (IS_ERR(req))
1181 return PTR_ERR(req);
1182
1183 nfs_grow_file(page, offset, count);
1184 nfs_mark_uptodate(req);
1185 nfs_mark_request_dirty(req);
1186 nfs_unlock_and_release_request(req);
1187 return 0;
1188}
1189
1190int nfs_flush_incompatible(struct file *file, struct page *page)
1191{
1192 struct nfs_open_context *ctx = nfs_file_open_context(file);
1193 struct nfs_lock_context *l_ctx;
1194 struct file_lock_context *flctx = file_inode(file)->i_flctx;
1195 struct nfs_page *req;
1196 int do_flush, status;
1197
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Also do
	 * the same if we find a request from an existing
	 * dropped page.
	 */
1205 do {
1206 req = nfs_page_find_head_request(page);
1207 if (req == NULL)
1208 return 0;
1209 l_ctx = req->wb_lock_context;
1210 do_flush = req->wb_page != page ||
1211 !nfs_match_open_context(nfs_req_openctx(req), ctx);
1212 if (l_ctx && flctx &&
1213 !(list_empty_careful(&flctx->flc_posix) &&
1214 list_empty_careful(&flctx->flc_flock))) {
1215 do_flush |= l_ctx->lockowner != current->files;
1216 }
1217 nfs_release_request(req);
1218 if (!do_flush)
1219 return 0;
1220 status = nfs_wb_page(page_file_mapping(page)->host, page);
1221 } while (status == 0);
1222 return status;
1223}
1224
/*
 * Avoid buffered writes when the open context credential's key is about
 * to expire and no usable credential has been cached yet.
 *
 * Returns -EACCES in that case, 0 otherwise.
 */
1235int
1236nfs_key_timeout_notify(struct file *filp, struct inode *inode)
1237{
1238 struct nfs_open_context *ctx = nfs_file_open_context(filp);
1239
1240 if (nfs_ctx_key_to_expire(ctx, inode) &&
1241 !ctx->ll_cred)
1242
1243 return -EACCES;
1244 return 0;
1245}
/*
 * Test if the open context credential key is marked to expire soon.
 */
1250bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
1251{
1252 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
1253 struct rpc_cred *cred = ctx->ll_cred;
1254 struct auth_cred acred = {
1255 .cred = ctx->cred,
1256 };
1257
1258 if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
1259 put_rpccred(cred);
1260 ctx->ll_cred = NULL;
1261 cred = NULL;
1262 }
1263 if (!cred)
1264 cred = auth->au_ops->lookup_cred(auth, &acred, 0);
1265 if (!cred || IS_ERR(cred))
1266 return true;
1267 ctx->ll_cred = cred;
1268 return !!(cred->cr_ops->crkey_timeout &&
1269 cred->cr_ops->crkey_timeout(cred));
1270}
1271
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
1277static bool nfs_write_pageuptodate(struct page *page, struct inode *inode,
1278 unsigned int pagelen)
1279{
1280 struct nfs_inode *nfsi = NFS_I(inode);
1281
1282 if (nfs_have_delegated_attributes(inode))
1283 goto out;
1284 if (nfsi->cache_validity &
1285 (NFS_INO_REVAL_PAGECACHE | NFS_INO_INVALID_SIZE))
1286 return false;
1287 smp_rmb();
1288 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0)
1289 return false;
1290out:
1291 if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
1292 return false;
1293 return PageUptodate(page) != 0;
1294}
1295
1296static bool
1297is_whole_file_wrlock(struct file_lock *fl)
1298{
1299 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
1300 fl->fl_type == F_WRLCK;
1301}
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
1311static int nfs_can_extend_write(struct file *file, struct page *page,
1312 struct inode *inode, unsigned int pagelen)
1313{
1314 int ret;
1315 struct file_lock_context *flctx = inode->i_flctx;
1316 struct file_lock *fl;
1317
1318 if (file->f_flags & O_DSYNC)
1319 return 0;
1320 if (!nfs_write_pageuptodate(page, inode, pagelen))
1321 return 0;
1322 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
1323 return 1;
1324 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1325 list_empty_careful(&flctx->flc_posix)))
1326 return 1;
1327
1328
1329 ret = 0;
1330 spin_lock(&flctx->flc_lock);
1331 if (!list_empty(&flctx->flc_posix)) {
1332 fl = list_first_entry(&flctx->flc_posix, struct file_lock,
1333 fl_list);
1334 if (is_whole_file_wrlock(fl))
1335 ret = 1;
1336 } else if (!list_empty(&flctx->flc_flock)) {
1337 fl = list_first_entry(&flctx->flc_flock, struct file_lock,
1338 fl_list);
1339 if (fl->fl_type == F_WRLCK)
1340 ret = 1;
1341 }
1342 spin_unlock(&flctx->flc_lock);
1343 return ret;
1344}
1345
/*
 * Update and possibly write a cached page of an NFS file.
 */
1352int nfs_updatepage(struct file *file, struct page *page,
1353 unsigned int offset, unsigned int count)
1354{
1355 struct nfs_open_context *ctx = nfs_file_open_context(file);
1356 struct address_space *mapping = page_file_mapping(page);
1357 struct inode *inode = mapping->host;
1358 unsigned int pagelen = nfs_page_length(page);
1359 int status = 0;
1360
1361 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
1362
1363 dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
1364 file, count, (long long)(page_file_offset(page) + offset));
1365
1366 if (!count)
1367 goto out;
1368
1369 if (nfs_can_extend_write(file, page, inode, pagelen)) {
1370 count = max(count + offset, pagelen);
1371 offset = 0;
1372 }
1373
1374 status = nfs_writepage_setup(ctx, page, offset, count);
1375 if (status < 0)
1376 nfs_set_pageerror(mapping);
1377 else
1378 __set_page_dirty_nobuffers(page);
1379out:
1380 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
1381 status, (long long)i_size_read(inode));
1382 return status;
1383}
1384
1385static int flush_task_priority(int how)
1386{
1387 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
1388 case FLUSH_HIGHPRI:
1389 return RPC_PRIORITY_HIGH;
1390 case FLUSH_LOWPRI:
1391 return RPC_PRIORITY_LOW;
1392 }
1393 return RPC_PRIORITY_NORMAL;
1394}
1395
1396static void nfs_initiate_write(struct nfs_pgio_header *hdr,
1397 struct rpc_message *msg,
1398 const struct nfs_rpc_ops *rpc_ops,
1399 struct rpc_task_setup *task_setup_data, int how)
1400{
1401 int priority = flush_task_priority(how);
1402
1403 task_setup_data->priority = priority;
1404 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
1405 trace_nfs_initiate_write(hdr);
1406}
1407
/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
1412static void nfs_redirty_request(struct nfs_page *req)
1413{
1414
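	/* Bump the transmission count */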
1415 req->wb_nio++;
1416 nfs_mark_request_dirty(req);
1417 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
1418 nfs_end_page_writeback(req);
1419 nfs_release_request(req);
1420}
1421
1422static void nfs_async_write_error(struct list_head *head, int error)
1423{
1424 struct nfs_page *req;
1425
1426 while (!list_empty(head)) {
1427 req = nfs_list_entry(head->next);
1428 nfs_list_remove_request(req);
1429 if (nfs_error_is_fatal(error))
1430 nfs_write_error(req, error);
1431 else
1432 nfs_redirty_request(req);
1433 }
1434}
1435
1436static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
1437{
1438 nfs_async_write_error(&hdr->pages, 0);
1439 filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
1440 hdr->args.offset + hdr->args.count - 1);
1441}
1442
1443static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1444 .init_hdr = nfs_async_write_init,
1445 .error_cleanup = nfs_async_write_error,
1446 .completion = nfs_write_completion,
1447 .reschedule_io = nfs_async_write_reschedule_io,
1448};
1449
1450void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1451 struct inode *inode, int ioflags, bool force_mds,
1452 const struct nfs_pgio_completion_ops *compl_ops)
1453{
1454 struct nfs_server *server = NFS_SERVER(inode);
1455 const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
1456
1457#ifdef CONFIG_NFS_V4_1
1458 if (server->pnfs_curr_ld && !force_mds)
1459 pg_ops = server->pnfs_curr_ld->pg_write_ops;
1460#endif
1461 nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
1462 server->wsize, ioflags);
1463}
1464EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
1465
1466void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1467{
1468 struct nfs_pgio_mirror *mirror;
1469
1470 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
1471 pgio->pg_ops->pg_cleanup(pgio);
1472
1473 pgio->pg_ops = &nfs_pgio_rw_ops;
1474
1475 nfs_pageio_stop_mirroring(pgio);
1476
1477 mirror = &pgio->pg_mirrors[0];
1478 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1479}
1480EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1481
1482
1483void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1484{
1485 struct nfs_commit_data *data = calldata;
1486
1487 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1488}
1489
/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
1493static int nfs_should_remove_suid(const struct inode *inode)
1494{
1495 umode_t mode = inode->i_mode;
1496 int kill = 0;
1497
1498
1499 if (unlikely(mode & S_ISUID))
1500 kill = ATTR_KILL_SUID;
1501
1502
1503
1504
1505
1506 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1507 kill |= ATTR_KILL_SGID;
1508
1509 if (unlikely(kill && S_ISREG(mode)))
1510 return kill;
1511
1512 return 0;
1513}
1514
1515static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1516 struct nfs_fattr *fattr)
1517{
1518 struct nfs_pgio_args *argp = &hdr->args;
1519 struct nfs_pgio_res *resp = &hdr->res;
1520 u64 size = argp->offset + resp->count;
1521
1522 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1523 fattr->size = size;
1524 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1525 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1526 return;
1527 }
1528 if (size != fattr->size)
1529 return;
1530
1531 nfs_fattr_set_barrier(fattr);
1532
1533 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1534}
1535
1536void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1537{
1538 struct nfs_fattr *fattr = &hdr->fattr;
1539 struct inode *inode = hdr->inode;
1540
1541 spin_lock(&inode->i_lock);
1542 nfs_writeback_check_extend(hdr, fattr);
1543 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1544 spin_unlock(&inode->i_lock);
1545}
1546EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
1547
/*
 * This function is called when the WRITE call is complete.
 */
1551static int nfs_writeback_done(struct rpc_task *task,
1552 struct nfs_pgio_header *hdr,
1553 struct inode *inode)
1554{
1555 int status;
1556
	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
1564 status = NFS_PROTO(inode)->write_done(task, hdr);
1565 if (status != 0)
1566 return status;
1567
1568 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
1569 trace_nfs_writeback_done(task, hdr);
1570
1571 if (hdr->res.verf->committed < hdr->args.stable &&
1572 task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.  Complain via dprintk(), rate-limited
		 * to once every few minutes, rather than filling syslog.
		 */
1581 static unsigned long complain;
1582
1583
1584 if (time_before(complain, jiffies)) {
1585 dprintk("NFS: faulty NFS server %s:"
1586 " (committed = %d) != (stable = %d)\n",
1587 NFS_SERVER(inode)->nfs_client->cl_hostname,
1588 hdr->res.verf->committed, hdr->args.stable);
1589 complain = jiffies + 300 * HZ;
1590 }
1591 }
1592
1593
1594 if (nfs_should_remove_suid(inode)) {
1595 spin_lock(&inode->i_lock);
1596 nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
1597 spin_unlock(&inode->i_lock);
1598 }
1599 return 0;
1600}
1601
/*
 * Handle a short write: either fail the request outright or resend the
 * remaining data.
 */
1605static void nfs_writeback_result(struct rpc_task *task,
1606 struct nfs_pgio_header *hdr)
1607{
1608 struct nfs_pgio_args *argp = &hdr->args;
1609 struct nfs_pgio_res *resp = &hdr->res;
1610
1611 if (resp->count < argp->count) {
1612 static unsigned long complain;
1613
1614
1615 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
1616
1617
1618 if (resp->count == 0) {
1619 if (time_before(complain, jiffies)) {
1620 printk(KERN_WARNING
1621 "NFS: Server wrote zero bytes, expected %u.\n",
1622 argp->count);
1623 complain = jiffies + 300 * HZ;
1624 }
1625 nfs_set_pgio_error(hdr, -EIO, argp->offset);
1626 task->tk_status = -EIO;
1627 return;
1628 }
1629
1630
1631 if (!task->tk_ops) {
1632 hdr->pnfs_error = -EAGAIN;
1633 return;
1634 }
1635
1636
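		/* Was this an NFSv2 write or an NFSv3 stable write? */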
1637 if (resp->verf->committed != NFS_UNSTABLE) {
1638
1639 hdr->mds_offset += resp->count;
1640 argp->offset += resp->count;
1641 argp->pgbase += resp->count;
1642 argp->count -= resp->count;
1643 } else {
1644
1645
1646
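			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */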
1647 argp->stable = NFS_FILE_SYNC;
1648 }
1649 resp->count = 0;
1650 resp->verf->committed = 0;
1651 rpc_restart_call_prepare(task);
1652 }
1653}
1654
1655static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
1656{
1657 return wait_var_event_killable(&cinfo->rpcs_out,
1658 !atomic_read(&cinfo->rpcs_out));
1659}
1660
1661static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
1662{
1663 atomic_inc(&cinfo->rpcs_out);
1664}
1665
1666static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
1667{
1668 if (atomic_dec_and_test(&cinfo->rpcs_out))
1669 wake_up_var(&cinfo->rpcs_out);
1670}
1671
1672void nfs_commitdata_release(struct nfs_commit_data *data)
1673{
1674 put_nfs_open_context(data->context);
1675 nfs_commit_free(data);
1676}
1677EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1678
1679int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1680 const struct nfs_rpc_ops *nfs_ops,
1681 const struct rpc_call_ops *call_ops,
1682 int how, int flags)
1683{
1684 struct rpc_task *task;
1685 int priority = flush_task_priority(how);
1686 struct rpc_message msg = {
1687 .rpc_argp = &data->args,
1688 .rpc_resp = &data->res,
1689 .rpc_cred = data->cred,
1690 };
1691 struct rpc_task_setup task_setup_data = {
1692 .task = &data->task,
1693 .rpc_client = clnt,
1694 .rpc_message = &msg,
1695 .callback_ops = call_ops,
1696 .callback_data = data,
1697 .workqueue = nfsiod_workqueue,
1698 .flags = RPC_TASK_ASYNC | flags,
1699 .priority = priority,
1700 };
1701
1702 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
1703 trace_nfs_initiate_commit(data);
1704
1705 dprintk("NFS: initiated commit call\n");
1706
1707 task = rpc_run_task(&task_setup_data);
1708 if (IS_ERR(task))
1709 return PTR_ERR(task);
1710 if (how & FLUSH_SYNC)
1711 rpc_wait_for_completion_task(task);
1712 rpc_put_task(task);
1713 return 0;
1714}
1715EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1716
1717static loff_t nfs_get_lwb(struct list_head *head)
1718{
1719 loff_t lwb = 0;
1720 struct nfs_page *req;
1721
1722 list_for_each_entry(req, head, wb_list)
1723 if (lwb < (req_offset(req) + req->wb_bytes))
1724 lwb = req_offset(req) + req->wb_bytes;
1725
1726 return lwb;
1727}
1728
/*
 * Set up the argument/result storage required for the RPC call.
 */
1732void nfs_init_commit(struct nfs_commit_data *data,
1733 struct list_head *head,
1734 struct pnfs_layout_segment *lseg,
1735 struct nfs_commit_info *cinfo)
1736{
1737 struct nfs_page *first;
1738 struct nfs_open_context *ctx;
1739 struct inode *inode;
1740
1741
1742
1743
1744 if (head)
1745 list_splice_init(head, &data->pages);
1746
1747 first = nfs_list_entry(data->pages.next);
1748 ctx = nfs_req_openctx(first);
1749 inode = d_inode(ctx->dentry);
1750
1751 data->inode = inode;
1752 data->cred = ctx->cred;
1753 data->lseg = lseg;
1754
1755 if (lseg)
1756 data->lwb = nfs_get_lwb(&data->pages);
1757 data->mds_ops = &nfs_commit_ops;
1758 data->completion_ops = cinfo->completion_ops;
1759 data->dreq = cinfo->dreq;
1760
1761 data->args.fh = NFS_FH(data->inode);
1762
1763 data->args.offset = 0;
1764 data->args.count = 0;
1765 data->context = get_nfs_open_context(ctx);
1766 data->res.fattr = &data->fattr;
1767 data->res.verf = &data->verf;
1768 nfs_fattr_init(&data->fattr);
1769}
1770EXPORT_SYMBOL_GPL(nfs_init_commit);
1771
1772void nfs_retry_commit(struct list_head *page_list,
1773 struct pnfs_layout_segment *lseg,
1774 struct nfs_commit_info *cinfo,
1775 u32 ds_commit_idx)
1776{
1777 struct nfs_page *req;
1778
1779 while (!list_empty(page_list)) {
1780 req = nfs_list_entry(page_list->next);
1781 nfs_list_remove_request(req);
1782 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
1783 if (!cinfo->dreq)
1784 nfs_clear_page_commit(req->wb_page);
1785 nfs_unlock_and_release_request(req);
1786 }
1787}
1788EXPORT_SYMBOL_GPL(nfs_retry_commit);
1789
1790static void
1791nfs_commit_resched_write(struct nfs_commit_info *cinfo,
1792 struct nfs_page *req)
1793{
1794 __set_page_dirty_nobuffers(req->wb_page);
1795}
1796
/*
 * Commit dirty pages
 */
1800static int
1801nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1802 struct nfs_commit_info *cinfo)
1803{
1804 struct nfs_commit_data *data;
1805
1806
1807 if (list_empty(head))
1808 return 0;
1809
1810 data = nfs_commitdata_alloc(true);
1811
1812
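	/* Set up the argument struct */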
1813 nfs_init_commit(data, head, NULL, cinfo);
1814 atomic_inc(&cinfo->mds->rpcs_out);
1815 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
1816 data->mds_ops, how, RPC_TASK_CRED_NOREF);
1817}
1818
/*
 * COMMIT call returned
 */
1822static void nfs_commit_done(struct rpc_task *task, void *calldata)
1823{
1824 struct nfs_commit_data *data = calldata;
1825
1826 dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1827 task->tk_pid, task->tk_status);
1828
1829
1830 NFS_PROTO(data->inode)->commit_done(task, data);
1831 trace_nfs_commit_done(task, data);
1832}
1833
1834static void nfs_commit_release_pages(struct nfs_commit_data *data)
1835{
1836 const struct nfs_writeverf *verf = data->res.verf;
1837 struct nfs_page *req;
1838 int status = data->task.tk_status;
1839 struct nfs_commit_info cinfo;
1840 struct nfs_server *nfss;
1841
1842 while (!list_empty(&data->pages)) {
1843 req = nfs_list_entry(data->pages.next);
1844 nfs_list_remove_request(req);
1845 if (req->wb_page)
1846 nfs_clear_page_commit(req->wb_page);
1847
1848 dprintk("NFS: commit (%s/%llu %d@%lld)",
1849 nfs_req_openctx(req)->dentry->d_sb->s_id,
1850 (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
1851 req->wb_bytes,
1852 (long long)req_offset(req));
1853 if (status < 0) {
1854 if (req->wb_page) {
1855 trace_nfs_commit_error(req, status);
1856 nfs_mapping_set_error(req->wb_page, status);
1857 nfs_inode_remove_request(req);
1858 }
1859 dprintk_cont(", error = %d\n", status);
1860 goto next;
1861 }
1862
1863
1864
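		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */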
1865 if (nfs_write_match_verf(verf, req)) {
1866
1867 if (req->wb_page)
1868 nfs_inode_remove_request(req);
1869 dprintk_cont(" OK\n");
1870 goto next;
1871 }
1872
1873 dprintk_cont(" mismatch\n");
1874 nfs_mark_request_dirty(req);
1875 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
1876 next:
1877 nfs_unlock_and_release_request(req);
1878
1879 cond_resched();
1880 }
1881 nfss = NFS_SERVER(data->inode);
1882 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
1883 clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);
1884
1885 nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1886 nfs_commit_end(cinfo.mds);
1887}
1888
1889static void nfs_commit_release(void *calldata)
1890{
1891 struct nfs_commit_data *data = calldata;
1892
1893 data->completion_ops->completion(data);
1894 nfs_commitdata_release(calldata);
1895}
1896
1897static const struct rpc_call_ops nfs_commit_ops = {
1898 .rpc_call_prepare = nfs_commit_prepare,
1899 .rpc_call_done = nfs_commit_done,
1900 .rpc_release = nfs_commit_release,
1901};
1902
1903static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1904 .completion = nfs_commit_release_pages,
1905 .resched_write = nfs_commit_resched_write,
1906};
1907
1908int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1909 int how, struct nfs_commit_info *cinfo)
1910{
1911 int status;
1912
1913 status = pnfs_commit_list(inode, head, how, cinfo);
1914 if (status == PNFS_NOT_ATTEMPTED)
1915 status = nfs_commit_list(inode, head, how, cinfo);
1916 return status;
1917}
1918
1919static int __nfs_commit_inode(struct inode *inode, int how,
1920 struct writeback_control *wbc)
1921{
1922 LIST_HEAD(head);
1923 struct nfs_commit_info cinfo;
1924 int may_wait = how & FLUSH_SYNC;
1925 int ret, nscan;
1926
1927 nfs_init_cinfo_from_inode(&cinfo, inode);
1928 nfs_commit_begin(cinfo.mds);
1929 for (;;) {
1930 ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
1931 if (ret <= 0)
1932 break;
1933 ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
1934 if (ret < 0)
1935 break;
1936 ret = 0;
1937 if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
1938 if (nscan < wbc->nr_to_write)
1939 wbc->nr_to_write -= nscan;
1940 else
1941 wbc->nr_to_write = 0;
1942 }
1943 if (nscan < INT_MAX)
1944 break;
1945 cond_resched();
1946 }
1947 nfs_commit_end(cinfo.mds);
1948 if (ret || !may_wait)
1949 return ret;
1950 return wait_on_commit(cinfo.mds);
1951}
1952
1953int nfs_commit_inode(struct inode *inode, int how)
1954{
1955 return __nfs_commit_inode(inode, how, NULL);
1956}
1957EXPORT_SYMBOL_GPL(nfs_commit_inode);
1958
1959int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1960{
1961 struct nfs_inode *nfsi = NFS_I(inode);
1962 int flags = FLUSH_SYNC;
1963 int ret = 0;
1964
1965 if (wbc->sync_mode == WB_SYNC_NONE) {
1966
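		/* no commits means nothing needs to be done */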
1967 if (!atomic_long_read(&nfsi->commit_info.ncommit))
1968 goto check_requests_outstanding;
1969
1970
1971
1972
1973 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1974 goto out_mark_dirty;
1975
1976
1977 flags = 0;
1978 }
1979
1980 ret = __nfs_commit_inode(inode, flags, wbc);
1981 if (!ret) {
1982 if (flags & FLUSH_SYNC)
1983 return 0;
1984 } else if (atomic_long_read(&nfsi->commit_info.ncommit))
1985 goto out_mark_dirty;
1986
1987check_requests_outstanding:
1988 if (!atomic_read(&nfsi->commit_info.rpcs_out))
1989 return ret;
1990out_mark_dirty:
1991 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1992 return ret;
1993}
1994EXPORT_SYMBOL_GPL(nfs_write_inode);
1995
/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
2002int nfs_filemap_write_and_wait_range(struct address_space *mapping,
2003 loff_t lstart, loff_t lend)
2004{
2005 int ret;
2006
2007 ret = filemap_write_and_wait_range(mapping, lstart, lend);
2008 if (ret == 0)
2009 ret = pnfs_sync_inode(mapping->host, true);
2010 return ret;
2011}
2012EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
2013
/*
 * Flush all dirty pages, and commit any outstanding writes, for this inode.
 */
2017int nfs_wb_all(struct inode *inode)
2018{
2019 int ret;
2020
2021 trace_nfs_writeback_inode_enter(inode);
2022
2023 ret = filemap_write_and_wait(inode->i_mapping);
2024 if (ret)
2025 goto out;
2026 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2027 if (ret < 0)
2028 goto out;
2029 pnfs_sync_inode(inode, true);
2030 ret = 0;
2031
2032out:
2033 trace_nfs_writeback_inode_exit(inode, ret);
2034 return ret;
2035}
2036EXPORT_SYMBOL_GPL(nfs_wb_all);
2037
2038int nfs_wb_page_cancel(struct inode *inode, struct page *page)
2039{
2040 struct nfs_page *req;
2041 int ret = 0;
2042
2043 wait_on_page_writeback(page);
2044
2045
2046
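	/* blocking call to cancel all requests and join to a single (head)
	 * request */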
2047 req = nfs_lock_and_join_requests(page);
2048
2049 if (IS_ERR(req)) {
2050 ret = PTR_ERR(req);
2051 } else if (req) {
2052
2053
2054
2055
2056 nfs_inode_remove_request(req);
2057 nfs_unlock_and_release_request(req);
2058 }
2059
2060 return ret;
2061}
2062
/*
 * Write back all requests on one page - we do this before reading it.
 */
2066int nfs_wb_page(struct inode *inode, struct page *page)
2067{
2068 loff_t range_start = page_file_offset(page);
2069 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
2070 struct writeback_control wbc = {
2071 .sync_mode = WB_SYNC_ALL,
2072 .nr_to_write = 0,
2073 .range_start = range_start,
2074 .range_end = range_end,
2075 };
2076 int ret;
2077
2078 trace_nfs_writeback_page_enter(inode);
2079
2080 for (;;) {
2081 wait_on_page_writeback(page);
2082 if (clear_page_dirty_for_io(page)) {
2083 ret = nfs_writepage_locked(page, &wbc);
2084 if (ret < 0)
2085 goto out_error;
2086 continue;
2087 }
2088 ret = 0;
2089 if (!PagePrivate(page))
2090 break;
2091 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2092 if (ret < 0)
2093 goto out_error;
2094 }
2095out_error:
2096 trace_nfs_writeback_page_exit(inode, ret);
2097 return ret;
2098}
2099
2100#ifdef CONFIG_MIGRATION
2101int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
2102 struct page *page, enum migrate_mode mode)
2103{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
2112 if (PagePrivate(page))
2113 return -EBUSY;
2114
2115 if (!nfs_fscache_release_page(page, GFP_KERNEL))
2116 return -EBUSY;
2117
2118 return migrate_page(mapping, newpage, page, mode);
2119}
2120#endif
2121
2122int __init nfs_init_writepagecache(void)
2123{
2124 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
2125 sizeof(struct nfs_pgio_header),
2126 0, SLAB_HWCACHE_ALIGN,
2127 NULL);
2128 if (nfs_wdata_cachep == NULL)
2129 return -ENOMEM;
2130
2131 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
2132 nfs_wdata_cachep);
2133 if (nfs_wdata_mempool == NULL)
2134 goto out_destroy_write_cache;
2135
2136 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
2137 sizeof(struct nfs_commit_data),
2138 0, SLAB_HWCACHE_ALIGN,
2139 NULL);
2140 if (nfs_cdata_cachep == NULL)
2141 goto out_destroy_write_mempool;
2142
2143 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
2144 nfs_cdata_cachep);
2145 if (nfs_commit_mempool == NULL)
2146 goto out_destroy_commit_cache;
2147
	/*
	 * NFS congestion threshold: scale with available memory so that
	 * larger machines can have larger/more transfers, but limit the
	 * default to 256 MB.
	 */
2164 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
2165 if (nfs_congestion_kb > 256*1024)
2166 nfs_congestion_kb = 256*1024;
2167
2168 return 0;
2169
2170out_destroy_commit_cache:
2171 kmem_cache_destroy(nfs_cdata_cachep);
2172out_destroy_write_mempool:
2173 mempool_destroy(nfs_wdata_mempool);
2174out_destroy_write_cache:
2175 kmem_cache_destroy(nfs_wdata_cachep);
2176 return -ENOMEM;
2177}
2178
2179void nfs_destroy_writepagecache(void)
2180{
2181 mempool_destroy(nfs_commit_mempool);
2182 kmem_cache_destroy(nfs_cdata_cachep);
2183 mempool_destroy(nfs_wdata_mempool);
2184 kmem_cache_destroy(nfs_wdata_cachep);
2185}
2186
2187static const struct nfs_rw_ops nfs_rw_write_ops = {
2188 .rw_alloc_header = nfs_writehdr_alloc,
2189 .rw_free_header = nfs_writehdr_free,
2190 .rw_done = nfs_writeback_done,
2191 .rw_result = nfs_writeback_result,
2192 .rw_initiate = nfs_initiate_write,
2193};
2194