// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
{
	struct nfs_commit_data *p;

	if (never_fail)
		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
	else {
		/* It is OK to do some reclaim, but it is not safe to wait
		 * for anything to be returned to the pool.
		 * mempool_alloc() cannot handle that particular combination,
		 * so we need two separate attempts.
		 */
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
					     __GFP_NOWARN | __GFP_NORETRY);
		if (!p)
			return NULL;
	}

	memset(p, 0, sizeof(*p));
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);
103
104static struct nfs_pgio_header *nfs_writehdr_alloc(void)
105{
106 struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);
107
108 memset(p, 0, sizeof(*p));
109 p->rw_mode = FMODE_WRITE;
110 return p;
111}
112
113static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
114{
115 mempool_free(hdr, nfs_wdata_mempool);
116}
117
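/*
 * nfs_io_completion: a refcounted callback that runs once every write
 * header sharing it has completed; used by nfs_writepages() to kick off
 * a commit after all the writes it issued have finished.
 */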
118static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
119{
120 return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
121}
122
123static void nfs_io_completion_init(struct nfs_io_completion *ioc,
124 void (*complete)(void *), void *data)
125{
126 ioc->complete = complete;
127 ioc->data = data;
128 kref_init(&ioc->refcount);
129}
130
131static void nfs_io_completion_release(struct kref *kref)
132{
133 struct nfs_io_completion *ioc = container_of(kref,
134 struct nfs_io_completion, refcount);
135 ioc->complete(ioc->data);
136 kfree(ioc);
137}
138
139static void nfs_io_completion_get(struct nfs_io_completion *ioc)
140{
141 if (ioc != NULL)
142 kref_get(&ioc->refcount);
143}
144
145static void nfs_io_completion_put(struct nfs_io_completion *ioc)
146{
147 if (ioc != NULL)
148 kref_put(&ioc->refcount, nfs_io_completion_release);
149}
150
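/*
 * Take an extra reference on @req on behalf of @inode and bump the
 * inode's count of outstanding requests, unless that has already been
 * done (PG_INODE_REF set).
 */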
151static void
152nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
153{
154 if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
155 kref_get(&req->wb_kref);
156 atomic_long_inc(&NFS_I(inode)->nrequests);
157 }
158}
159
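/*
 * Undo a pending removal of @req from @inode: if PG_REMOVE is set,
 * clear it and re-take the inode reference under the page group lock.
 */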
160static int
161nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
162{
163 int ret;
164
165 if (!test_bit(PG_REMOVE, &req->wb_flags))
166 return 0;
167 ret = nfs_page_group_lock(req);
168 if (ret)
169 return ret;
170 if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
171 nfs_page_set_inode_ref(req, inode);
172 nfs_page_group_unlock(req);
173 return 0;
174}
175
176static struct nfs_page *
177nfs_page_private_request(struct page *page)
178{
179 if (!PagePrivate(page))
180 return NULL;
181 return (struct nfs_page *)page_private(page);
182}
183
/*
 * nfs_page_find_private_request - find the head request associated
 * with @page via the page's private pointer.
 *
 * Returns the matching head request with a reference held, or NULL
 * if not found.
 */
191static struct nfs_page *
192nfs_page_find_private_request(struct page *page)
193{
194 struct address_space *mapping = page_file_mapping(page);
195 struct nfs_page *req;
196
197 if (!PagePrivate(page))
198 return NULL;
199 spin_lock(&mapping->private_lock);
200 req = nfs_page_private_request(page);
201 if (req) {
202 WARN_ON_ONCE(req->wb_head != req);
203 kref_get(&req->wb_kref);
204 }
205 spin_unlock(&mapping->private_lock);
206 return req;
207}
208
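/*
 * Look up the head request for a page in the swap cache by searching
 * the inode's commit lists. Returns a referenced head request, or NULL.
 */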
209static struct nfs_page *
210nfs_page_find_swap_request(struct page *page)
211{
212 struct inode *inode = page_file_mapping(page)->host;
213 struct nfs_inode *nfsi = NFS_I(inode);
214 struct nfs_page *req = NULL;
215 if (!PageSwapCache(page))
216 return NULL;
217 mutex_lock(&nfsi->commit_mutex);
218 if (PageSwapCache(page)) {
219 req = nfs_page_search_commits_for_head_request_locked(nfsi,
220 page);
221 if (req) {
222 WARN_ON_ONCE(req->wb_head != req);
223 kref_get(&req->wb_kref);
224 }
225 }
226 mutex_unlock(&nfsi->commit_mutex);
227 return req;
228}
229
/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
235static struct nfs_page *nfs_page_find_head_request(struct page *page)
236{
237 struct nfs_page *req;
238
239 req = nfs_page_find_private_request(page);
240 if (!req)
241 req = nfs_page_find_swap_request(page);
242 return req;
243}
244
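/*
 * Find the head request associated with @page, lock the page group and
 * cancel any pending removal from the inode. Returns the locked,
 * referenced head request, NULL, or an ERR_PTR.
 */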
245static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
246{
247 struct inode *inode = page_file_mapping(page)->host;
248 struct nfs_page *req, *head;
249 int ret;
250
251 for (;;) {
252 req = nfs_page_find_head_request(page);
253 if (!req)
254 return req;
255 head = nfs_page_group_lock_head(req);
256 if (head != req)
257 nfs_release_request(req);
258 if (IS_ERR(head))
259 return head;
260 ret = nfs_cancel_remove_inode(head, inode);
261 if (ret < 0) {
262 nfs_unlock_and_release_request(head);
263 return ERR_PTR(ret);
264 }
265
266 if (head == nfs_page_private_request(page))
267 break;
268 if (PageSwapCache(page))
269 break;
270 nfs_unlock_and_release_request(head);
271 }
272 return head;
273}
274
/* Adjust the file length if we're writing beyond the end */
276static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
277{
278 struct inode *inode = page_file_mapping(page)->host;
279 loff_t end, i_size;
280 pgoff_t end_index;
281
282 spin_lock(&inode->i_lock);
283 i_size = i_size_read(inode);
284 end_index = (i_size - 1) >> PAGE_SHIFT;
285 if (i_size > 0 && page_index(page) < end_index)
286 goto out;
287 end = page_file_offset(page) + ((loff_t)offset+count);
288 if (i_size >= end)
289 goto out;
290 i_size_write(inode, end);
291 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
292 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
293out:
294 spin_unlock(&inode->i_lock);
295}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
298static void nfs_set_pageerror(struct address_space *mapping)
299{
300 struct inode *inode = mapping->host;
301
302 nfs_zap_mapping(mapping->host, mapping);
303
304 spin_lock(&inode->i_lock);
305 nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
306 NFS_INO_REVAL_PAGECACHE |
307 NFS_INO_INVALID_SIZE);
308 spin_unlock(&inode->i_lock);
309}
310
311static void nfs_mapping_set_error(struct page *page, int error)
312{
313 struct address_space *mapping = page_file_mapping(page);
314
315 SetPageError(page);
316 mapping_set_error(mapping, error);
317 nfs_set_pageerror(mapping);
318}
319
/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
333static struct nfs_page *
334nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
335{
336 struct nfs_page *req;
337
338 req = head;
339 do {
340 if (page_offset >= req->wb_pgbase &&
341 page_offset < (req->wb_pgbase + req->wb_bytes))
342 return req;
343
344 req = req->wb_this_page;
345 } while (req != head);
346
347 return NULL;
348}
349
/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
357static bool nfs_page_group_covers_page(struct nfs_page *req)
358{
359 struct nfs_page *tmp;
360 unsigned int pos = 0;
361 unsigned int len = nfs_page_length(req->wb_page);
362
363 nfs_page_group_lock(req);
364
365 for (;;) {
366 tmp = nfs_page_group_search_locked(req->wb_head, pos);
367 if (!tmp)
368 break;
369 pos = tmp->wb_pgbase + tmp->wb_bytes;
370 }
371
372 nfs_page_group_unlock(req);
373 return pos >= len;
374}
375
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
379static void nfs_mark_uptodate(struct nfs_page *req)
380{
381 if (PageUptodate(req->wb_page))
382 return;
383 if (!nfs_page_group_covers_page(req))
384 return;
385 SetPageUptodate(req->wb_page);
386}
387
388static int wb_priority(struct writeback_control *wbc)
389{
390 int ret = 0;
391
392 if (wbc->sync_mode == WB_SYNC_ALL)
393 ret = FLUSH_COND_STABLE;
394 return ret;
395}
396
/*
 * NFS congestion control
 */

401int nfs_congestion_kb;
402
403#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
404#define NFS_CONGESTION_OFF_THRESH \
405 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
406
407static void nfs_set_page_writeback(struct page *page)
408{
409 struct inode *inode = page_file_mapping(page)->host;
410 struct nfs_server *nfss = NFS_SERVER(inode);
411 int ret = test_set_page_writeback(page);
412
413 WARN_ON_ONCE(ret != 0);
414
415 if (atomic_long_inc_return(&nfss->writeback) >
416 NFS_CONGESTION_ON_THRESH)
417 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
418}
419
420static void nfs_end_page_writeback(struct nfs_page *req)
421{
422 struct inode *inode = page_file_mapping(req->wb_page)->host;
423 struct nfs_server *nfss = NFS_SERVER(inode);
424 bool is_done;
425
426 is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
427 nfs_unlock_request(req);
428 if (!is_done)
429 return;
430
431 end_page_writeback(req->wb_page);
432 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
433 clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
434}
435
/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held.
 */
446static void
447nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
448 struct nfs_page *old_head,
449 struct inode *inode)
450{
451 while (destroy_list) {
452 struct nfs_page *subreq = destroy_list;
453
454 destroy_list = (subreq->wb_this_page == old_head) ?
455 NULL : subreq->wb_this_page;

		/* Note: lock subreq in order to change subreq->wb_head */
458 nfs_page_set_headlock(subreq);
459 WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
462 subreq->wb_this_page = subreq;
463 subreq->wb_head = subreq;
464
465 clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
468 if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
470 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
471 nfs_page_clear_headlock(subreq);
472 nfs_free_request(subreq);
473 } else
474 nfs_page_clear_headlock(subreq);
475 continue;
476 }
477 nfs_page_clear_headlock(subreq);
478
479 nfs_release_request(old_head);
480
481 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
482 nfs_release_request(subreq);
483 atomic_long_dec(&NFS_I(inode)->nrequests);
484 }

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
488 nfs_unlock_and_release_request(subreq);
489 }
490}
491
/*
 * nfs_join_page_group - destroy subrequests of the head req
 * @head: the page used to lookup the "page group" of nfs_page structures
 * @inode: Inode to which the request belongs.
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 */
503void
504nfs_join_page_group(struct nfs_page *head, struct inode *inode)
505{
506 struct nfs_page *subreq;
507 struct nfs_page *destroy_list = NULL;
508 unsigned int pgbase, off, bytes;
509
510 pgbase = head->wb_pgbase;
511 bytes = head->wb_bytes;
512 off = head->wb_offset;
513 for (subreq = head->wb_this_page; subreq != head;
514 subreq = subreq->wb_this_page) {
		/* Subrequests should always form a contiguous range */
516 if (pgbase > subreq->wb_pgbase) {
517 off -= pgbase - subreq->wb_pgbase;
518 bytes += pgbase - subreq->wb_pgbase;
519 pgbase = subreq->wb_pgbase;
520 }
521 bytes = max(subreq->wb_pgbase + subreq->wb_bytes
522 - pgbase, bytes);
523 }

	/* Set the head request's range to cover the former page group */
526 head->wb_pgbase = pgbase;
527 head->wb_bytes = bytes;
528 head->wb_offset = off;

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
532 subreq = head;
533 do {
534 nfs_clear_request_commit(subreq);
535 subreq = subreq->wb_this_page;
536 } while (subreq != head);

	/* unlink subrequests from head, destroy them later */
539 if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
541 destroy_list = head->wb_this_page;
542 head->wb_this_page = head;
543 }
544
545 nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
546}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req
 * @page: the page used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request (which after
 * this call is guaranteed to be the only request associated with the page),
 * NULL if no requests are found for @page, or an ERR_PTR if an error
 * occurred while locking or cancelling the requests.
 */
563static struct nfs_page *
564nfs_lock_and_join_requests(struct page *page)
565{
566 struct inode *inode = page_file_mapping(page)->host;
567 struct nfs_page *head;
568 int ret;
569
	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
575 head = nfs_find_and_lock_page_request(page);
576 if (IS_ERR_OR_NULL(head))
577 return head;

	/* lock each request in the page group */
580 ret = nfs_page_group_lock_subrequests(head);
581 if (ret < 0) {
582 nfs_unlock_and_release_request(head);
583 return ERR_PTR(ret);
584 }
585
586 nfs_join_page_group(head, inode);
587
588 return head;
589}
590
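/*
 * A write failed with a fatal error: record the error against the page
 * and its mapping, then drop the request.
 */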
591static void nfs_write_error(struct nfs_page *req, int error)
592{
593 trace_nfs_write_error(req, error);
594 nfs_mapping_set_error(req->wb_page, error);
595 nfs_inode_remove_request(req);
596 nfs_end_page_writeback(req);
597 nfs_release_request(req);
598}
599
/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
604static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
605 struct page *page)
606{
607 struct nfs_page *req;
608 int ret = 0;
609
610 req = nfs_lock_and_join_requests(page);
611 if (!req)
612 goto out;
613 ret = PTR_ERR(req);
614 if (IS_ERR(req))
615 goto out;
616
617 nfs_set_page_writeback(page);
618 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	/* If there is a fatal error that covers this write, just exit */
621 ret = pgio->pg_error;
622 if (nfs_error_is_fatal_on_server(ret))
623 goto out_launder;
624
625 ret = 0;
626 if (!nfs_pageio_add_request(pgio, req)) {
627 ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
631 if (nfs_error_is_fatal(ret)) {
632 if (nfs_error_is_fatal_on_server(ret))
633 goto out_launder;
634 } else
635 ret = -EAGAIN;
636 nfs_redirty_request(req);
637 pgio->pg_error = 0;
638 } else
639 nfs_add_stats(page_file_mapping(page)->host,
640 NFSIOS_WRITEPAGES, 1);
641out:
642 return ret;
643out_launder:
644 nfs_write_error(req, ret);
645 return 0;
646}
647
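/*
 * Flush out a single dirty page. Returns AOP_WRITEPAGE_ACTIVATE if the
 * page was redirtied and should be retried later by the caller.
 */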
648static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
649 struct nfs_pageio_descriptor *pgio)
650{
651 int ret;
652
653 nfs_pageio_cond_complete(pgio, page_index(page));
654 ret = nfs_page_async_flush(pgio, page);
655 if (ret == -EAGAIN) {
656 redirty_page_for_writepage(wbc, page);
657 ret = AOP_WRITEPAGE_ACTIVATE;
658 }
659 return ret;
660}
661
/*
 * Write an mmapped page to the server.
 */
665static int nfs_writepage_locked(struct page *page,
666 struct writeback_control *wbc)
667{
668 struct nfs_pageio_descriptor pgio;
669 struct inode *inode = page_file_mapping(page)->host;
670 int err;
671
672 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
673 nfs_pageio_init_write(&pgio, inode, 0,
674 false, &nfs_async_write_completion_ops);
675 err = nfs_do_writepage(page, wbc, &pgio);
676 pgio.pg_error = 0;
677 nfs_pageio_complete(&pgio);
678 if (err < 0)
679 return err;
680 if (nfs_error_is_fatal(pgio.pg_error))
681 return pgio.pg_error;
682 return 0;
683}
684
685int nfs_writepage(struct page *page, struct writeback_control *wbc)
686{
687 int ret;
688
689 ret = nfs_writepage_locked(page, wbc);
690 if (ret != AOP_WRITEPAGE_ACTIVATE)
691 unlock_page(page);
692 return ret;
693}
694
695static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
696{
697 int ret;
698
699 ret = nfs_do_writepage(page, wbc, data);
700 if (ret != AOP_WRITEPAGE_ACTIVATE)
701 unlock_page(page);
702 return ret;
703}
704
705static void nfs_io_completion_commit(void *inode)
706{
707 nfs_commit_inode(inode, 0);
708}
709
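/*
 * Write out the dirty pages of @mapping, then kick off a commit of
 * anything left on the inode's commit lists once all the writes finish.
 */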
710int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
711{
712 struct inode *inode = mapping->host;
713 struct nfs_pageio_descriptor pgio;
714 struct nfs_io_completion *ioc;
715 int err;
716
717 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
718
719 ioc = nfs_io_completion_alloc(GFP_KERNEL);
720 if (ioc)
721 nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
722
723 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
724 &nfs_async_write_completion_ops);
725 pgio.pg_io_completion = ioc;
726 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
727 pgio.pg_error = 0;
728 nfs_pageio_complete(&pgio);
729 nfs_io_completion_put(ioc);
730
731 if (err < 0)
732 goto out_err;
733 err = pgio.pg_error;
734 if (nfs_error_is_fatal(err))
735 goto out_err;
736 return 0;
737out_err:
738 return err;
739}

/*
 * Insert a write request into an inode
 */
744static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
745{
746 struct address_space *mapping = page_file_mapping(req->wb_page);
747 struct nfs_inode *nfsi = NFS_I(inode);
748
749 WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
752 nfs_lock_request(req);

	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
758 spin_lock(&mapping->private_lock);
759 if (!nfs_have_writebacks(inode) &&
760 NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
761 inode_inc_iversion_raw(inode);
762 if (likely(!PageSwapCache(req->wb_page))) {
763 set_bit(PG_MAPPED, &req->wb_flags);
764 SetPagePrivate(req->wb_page);
765 set_page_private(req->wb_page, (unsigned long)req);
766 }
767 spin_unlock(&mapping->private_lock);
768 atomic_long_inc(&nfsi->nrequests);
	/* this a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
773 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
774 kref_get(&req->wb_kref);
775}

/*
 * Remove a write request from an inode
 */
780static void nfs_inode_remove_request(struct nfs_page *req)
781{
782 struct address_space *mapping = page_file_mapping(req->wb_page);
783 struct inode *inode = mapping->host;
784 struct nfs_inode *nfsi = NFS_I(inode);
785 struct nfs_page *head;
786
787 if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
788 head = req->wb_head;
789
790 spin_lock(&mapping->private_lock);
791 if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
792 set_page_private(head->wb_page, 0);
793 ClearPagePrivate(head->wb_page);
794 clear_bit(PG_MAPPED, &head->wb_flags);
795 }
796 spin_unlock(&mapping->private_lock);
797 }
798
799 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
800 nfs_release_request(req);
801 atomic_long_dec(&nfsi->nrequests);
802 }
803}
804
805static void
806nfs_mark_request_dirty(struct nfs_page *req)
807{
808 if (req->wb_page)
809 __set_page_dirty_nobuffers(req->wb_page);
810}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @nfsi for the head request for @page.
 * Must be called while holding the inode (which is cinfo) lock.
 *
 * Returns the head request if found, or NULL if not found.
 */
820static struct nfs_page *
821nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
822 struct page *page)
823{
824 struct nfs_page *freq, *t;
825 struct nfs_commit_info cinfo;
826 struct inode *inode = &nfsi->vfs_inode;
827
828 nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
831 freq = pnfs_search_commit_reqs(inode, &cinfo, page);
832 if (freq)
833 return freq->wb_head;

	/* Linearly search the commit list for the correct request */
836 list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
837 if (freq->wb_page == page)
838 return freq->wb_head;
839 }
840
841 return NULL;
842}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
857void
858nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
859 struct nfs_commit_info *cinfo)
860{
861 set_bit(PG_CLEAN, &req->wb_flags);
862 nfs_list_add_request(req, dst);
863 atomic_long_inc(&cinfo->mds->ncommit);
864}
865EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
879void
880nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
881{
882 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
883 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
884 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
885 if (req->wb_page)
886 nfs_mark_page_unstable(req->wb_page, cinfo);
887}
888EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit.
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
901void
902nfs_request_remove_commit_list(struct nfs_page *req,
903 struct nfs_commit_info *cinfo)
904{
905 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
906 return;
907 nfs_list_remove_request(req);
908 atomic_long_dec(&cinfo->mds->ncommit);
909}
910EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
911
912static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
913 struct inode *inode)
914{
915 cinfo->inode = inode;
916 cinfo->mds = &NFS_I(inode)->commit_info;
917 cinfo->ds = pnfs_get_ds_info(inode);
918 cinfo->dreq = NULL;
919 cinfo->completion_ops = &nfs_commit_completion_ops;
920}
921
922void nfs_init_cinfo(struct nfs_commit_info *cinfo,
923 struct inode *inode,
924 struct nfs_direct_req *dreq)
925{
926 if (dreq)
927 nfs_init_cinfo_from_dreq(cinfo, dreq);
928 else
929 nfs_init_cinfo_from_inode(cinfo, inode);
930}
931EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
936void
937nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
938 struct nfs_commit_info *cinfo, u32 ds_commit_idx)
939{
940 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
941 return;
942 nfs_request_add_commit_list(req, cinfo);
943}
944
945static void
946nfs_clear_page_commit(struct page *page)
947{
948 dec_node_page_state(page, NR_WRITEBACK);
949 dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
950 WB_WRITEBACK);
951}

/* Called holding the request lock on @req */
954static void
955nfs_clear_request_commit(struct nfs_page *req)
956{
957 if (test_bit(PG_CLEAN, &req->wb_flags)) {
958 struct nfs_open_context *ctx = nfs_req_openctx(req);
959 struct inode *inode = d_inode(ctx->dentry);
960 struct nfs_commit_info cinfo;
961
962 nfs_init_cinfo_from_inode(&cinfo, inode);
963 mutex_lock(&NFS_I(inode)->commit_mutex);
964 if (!pnfs_clear_request_commit(req, &cinfo)) {
965 nfs_request_remove_commit_list(req, &cinfo);
966 }
967 mutex_unlock(&NFS_I(inode)->commit_mutex);
968 nfs_clear_page_commit(req->wb_page);
969 }
970}
971
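/*
 * A write needs a commit if the server only gave NFS_UNSTABLE semantics,
 * or NFS_DATA_SYNC semantics for I/O that went through the MDS.
 */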
972int nfs_write_need_commit(struct nfs_pgio_header *hdr)
973{
974 if (hdr->verf.committed == NFS_DATA_SYNC)
975 return hdr->lseg == NULL;
976 return hdr->verf.committed != NFS_FILE_SYNC;
977}
978
979static void nfs_async_write_init(struct nfs_pgio_header *hdr)
980{
981 nfs_io_completion_get(hdr->io_completion);
982}
983
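/*
 * Per-header write completion: move each request onto the commit lists,
 * or tear it down on error, then end page writeback.
 */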
984static void nfs_write_completion(struct nfs_pgio_header *hdr)
985{
986 struct nfs_commit_info cinfo;
987 unsigned long bytes = 0;
988
989 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
990 goto out;
991 nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
992 while (!list_empty(&hdr->pages)) {
993 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
994
995 bytes += req->wb_bytes;
996 nfs_list_remove_request(req);
997 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
998 (hdr->good_bytes < bytes)) {
999 trace_nfs_comp_error(req, hdr->error);
1000 nfs_mapping_set_error(req->wb_page, hdr->error);
1001 goto remove_req;
1002 }
1003 if (nfs_write_need_commit(hdr)) {
			/* Reset wb_nio, since the write was successful. */
1005 req->wb_nio = 0;
1006 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
1007 nfs_mark_request_commit(req, hdr->lseg, &cinfo,
1008 hdr->pgio_mirror_idx);
1009 goto next;
1010 }
1011remove_req:
1012 nfs_inode_remove_request(req);
1013next:
1014 nfs_end_page_writeback(req);
1015 nfs_release_request(req);
1016 }
1017out:
1018 nfs_io_completion_put(hdr->io_completion);
1019 hdr->release(hdr);
1020}
1021
1022unsigned long
1023nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
1024{
1025 return atomic_long_read(&cinfo->mds->ncommit);
1026}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
1029int
1030nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
1031 struct nfs_commit_info *cinfo, int max)
1032{
1033 struct nfs_page *req, *tmp;
1034 int ret = 0;
1035
1036restart:
1037 list_for_each_entry_safe(req, tmp, src, wb_list) {
1038 kref_get(&req->wb_kref);
1039 if (!nfs_lock_request(req)) {
1040 int status;

			/* Prevent deadlock with nfs_lock_and_join_requests */
1043 if (!list_empty(dst)) {
1044 nfs_release_request(req);
1045 continue;
1046 }
			/* Ensure we make progress to prevent livelock */
1048 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1049 status = nfs_wait_on_request(req);
1050 nfs_release_request(req);
1051 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1052 if (status < 0)
1053 break;
1054 goto restart;
1055 }
1056 nfs_request_remove_commit_list(req, cinfo);
1057 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1058 nfs_list_add_request(req, dst);
1059 ret++;
1060 if ((ret == max) && !cinfo->dreq)
1061 break;
1062 cond_resched();
1063 }
1064 return ret;
1065}
1066EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
1077int
1078nfs_scan_commit(struct inode *inode, struct list_head *dst,
1079 struct nfs_commit_info *cinfo)
1080{
1081 int ret = 0;
1082
1083 if (!atomic_long_read(&cinfo->mds->ncommit))
1084 return 0;
1085 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1086 if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
1087 const int max = INT_MAX;
1088
1089 ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
1090 cinfo, max);
1091 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
1092 }
1093 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1094 return ret;
1095}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
1104static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
1105 struct page *page,
1106 unsigned int offset,
1107 unsigned int bytes)
1108{
1109 struct nfs_page *req;
1110 unsigned int rqend;
1111 unsigned int end;
1112 int error;
1113
1114 end = offset + bytes;
1115
1116 req = nfs_lock_and_join_requests(page);
1117 if (IS_ERR_OR_NULL(req))
1118 return req;
1119
1120 rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
1127 if (offset > rqend || end < req->wb_offset)
1128 goto out_flushme;

	/* Okay, the request matches. Update the region */
1131 if (offset < req->wb_offset) {
1132 req->wb_offset = offset;
1133 req->wb_pgbase = offset;
1134 }
1135 if (end > rqend)
1136 req->wb_bytes = end - req->wb_offset;
1137 else
1138 req->wb_bytes = rqend - req->wb_offset;
1139 req->wb_nio = 0;
1140 return req;
1141out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
1147 nfs_mark_request_dirty(req);
1148 nfs_unlock_and_release_request(req);
1149 error = nfs_wb_page(inode, page);
1150 return (error < 0) ? ERR_PTR(error) : NULL;
1151}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
1160static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
1161 struct page *page, unsigned int offset, unsigned int bytes)
1162{
1163 struct inode *inode = page_file_mapping(page)->host;
1164 struct nfs_page *req;
1165
1166 req = nfs_try_to_update_request(inode, page, offset, bytes);
1167 if (req != NULL)
1168 goto out;
1169 req = nfs_create_request(ctx, page, offset, bytes);
1170 if (IS_ERR(req))
1171 goto out;
1172 nfs_inode_add_request(inode, req);
1173out:
1174 return req;
1175}
1176
1177static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
1178 unsigned int offset, unsigned int count)
1179{
1180 struct nfs_page *req;
1181
1182 req = nfs_setup_write_request(ctx, page, offset, count);
1183 if (IS_ERR(req))
1184 return PTR_ERR(req);
1185
1186 nfs_grow_file(page, offset, count);
1187 nfs_mark_uptodate(req);
1188 nfs_mark_request_dirty(req);
1189 nfs_unlock_and_release_request(req);
1190 return 0;
1191}
1192
1193int nfs_flush_incompatible(struct file *file, struct page *page)
1194{
1195 struct nfs_open_context *ctx = nfs_file_open_context(file);
1196 struct nfs_lock_context *l_ctx;
1197 struct file_lock_context *flctx = file_inode(file)->i_flctx;
1198 struct nfs_page *req;
1199 int do_flush, status;

	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Also do
	 * the same if we find a request from an existing
	 * dropped page.
	 */
1208 do {
1209 req = nfs_page_find_head_request(page);
1210 if (req == NULL)
1211 return 0;
1212 l_ctx = req->wb_lock_context;
1213 do_flush = req->wb_page != page ||
1214 !nfs_match_open_context(nfs_req_openctx(req), ctx);
1215 if (l_ctx && flctx &&
1216 !(list_empty_careful(&flctx->flc_posix) &&
1217 list_empty_careful(&flctx->flc_flock))) {
1218 do_flush |= l_ctx->lockowner != current->files;
1219 }
1220 nfs_release_request(req);
1221 if (!do_flush)
1222 return 0;
1223 status = nfs_wb_page(page_file_mapping(page)->host, page);
1224 } while (status == 0);
1225 return status;
1226}
1227

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and perform NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
1238int
1239nfs_key_timeout_notify(struct file *filp, struct inode *inode)
1240{
1241 struct nfs_open_context *ctx = nfs_file_open_context(filp);
1242
1243 if (nfs_ctx_key_to_expire(ctx, inode) &&
1244 !ctx->ll_cred)
		/* Already expired! */
1246 return -EACCES;
1247 return 0;
1248}
1249

/*
 * Test if the open context credential key is marked to expire soon.
 */
1253bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
1254{
1255 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
1256 struct rpc_cred *cred = ctx->ll_cred;
1257 struct auth_cred acred = {
1258 .cred = ctx->cred,
1259 };
1260
1261 if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
1262 put_rpccred(cred);
1263 ctx->ll_cred = NULL;
1264 cred = NULL;
1265 }
1266 if (!cred)
1267 cred = auth->au_ops->lookup_cred(auth, &acred, 0);
1268 if (!cred || IS_ERR(cred))
1269 return true;
1270 ctx->ll_cred = cred;
1271 return !!(cred->cr_ops->crkey_timeout &&
1272 cred->cr_ops->crkey_timeout(cred));
1273}
1274
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
1280static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
1281{
1282 struct nfs_inode *nfsi = NFS_I(inode);
1283
1284 if (nfs_have_delegated_attributes(inode))
1285 goto out;
1286 if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
1287 return false;
1288 smp_rmb();
1289 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
1290 return false;
1291out:
1292 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1293 return false;
1294 return PageUptodate(page) != 0;
1295}
1296
1297static bool
1298is_whole_file_wrlock(struct file_lock *fl)
1299{
1300 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
1301 fl->fl_type == F_WRLCK;
1302}
1303
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
1312static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
1313{
1314 int ret;
1315 struct file_lock_context *flctx = inode->i_flctx;
1316 struct file_lock *fl;
1317
1318 if (file->f_flags & O_DSYNC)
1319 return 0;
1320 if (!nfs_write_pageuptodate(page, inode))
1321 return 0;
1322 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
1323 return 1;
1324 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1325 list_empty_careful(&flctx->flc_posix)))
1326 return 1;

	/* Check to see if there are whole file write locks */
1329 ret = 0;
1330 spin_lock(&flctx->flc_lock);
1331 if (!list_empty(&flctx->flc_posix)) {
1332 fl = list_first_entry(&flctx->flc_posix, struct file_lock,
1333 fl_list);
1334 if (is_whole_file_wrlock(fl))
1335 ret = 1;
1336 } else if (!list_empty(&flctx->flc_flock)) {
1337 fl = list_first_entry(&flctx->flc_flock, struct file_lock,
1338 fl_list);
1339 if (fl->fl_type == F_WRLCK)
1340 ret = 1;
1341 }
1342 spin_unlock(&flctx->flc_lock);
1343 return ret;
1344}
1345
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
1352int nfs_updatepage(struct file *file, struct page *page,
1353 unsigned int offset, unsigned int count)
1354{
1355 struct nfs_open_context *ctx = nfs_file_open_context(file);
1356 struct address_space *mapping = page_file_mapping(page);
1357 struct inode *inode = mapping->host;
1358 int status = 0;
1359
1360 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
1361
1362 dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
1363 file, count, (long long)(page_file_offset(page) + offset));
1364
1365 if (!count)
1366 goto out;
1367
1368 if (nfs_can_extend_write(file, page, inode)) {
1369 count = max(count + offset, nfs_page_length(page));
1370 offset = 0;
1371 }
1372
1373 status = nfs_writepage_setup(ctx, page, offset, count);
1374 if (status < 0)
1375 nfs_set_pageerror(mapping);
1376 else
1377 __set_page_dirty_nobuffers(page);
1378out:
1379 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
1380 status, (long long)i_size_read(inode));
1381 return status;
1382}
1383
1384static int flush_task_priority(int how)
1385{
1386 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
1387 case FLUSH_HIGHPRI:
1388 return RPC_PRIORITY_HIGH;
1389 case FLUSH_LOWPRI:
1390 return RPC_PRIORITY_LOW;
1391 }
1392 return RPC_PRIORITY_NORMAL;
1393}
1394
1395static void nfs_initiate_write(struct nfs_pgio_header *hdr,
1396 struct rpc_message *msg,
1397 const struct nfs_rpc_ops *rpc_ops,
1398 struct rpc_task_setup *task_setup_data, int how)
1399{
1400 int priority = flush_task_priority(how);
1401
1402 task_setup_data->priority = priority;
1403 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
1404 trace_nfs_initiate_write(hdr);
1405}
1406
/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
1411static void nfs_redirty_request(struct nfs_page *req)
1412{
	/* Bump the transmission count */
1414 req->wb_nio++;
1415 nfs_mark_request_dirty(req);
1416 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
1417 nfs_end_page_writeback(req);
1418 nfs_release_request(req);
1419}
1420
1421static void nfs_async_write_error(struct list_head *head, int error)
1422{
1423 struct nfs_page *req;
1424
1425 while (!list_empty(head)) {
1426 req = nfs_list_entry(head->next);
1427 nfs_list_remove_request(req);
1428 if (nfs_error_is_fatal(error))
1429 nfs_write_error(req, error);
1430 else
1431 nfs_redirty_request(req);
1432 }
1433}
1434
1435static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
1436{
1437 nfs_async_write_error(&hdr->pages, 0);
1438 filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
1439 hdr->args.offset + hdr->args.count - 1);
1440}
1441
1442static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1443 .init_hdr = nfs_async_write_init,
1444 .error_cleanup = nfs_async_write_error,
1445 .completion = nfs_write_completion,
1446 .reschedule_io = nfs_async_write_reschedule_io,
1447};
1448
1449void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1450 struct inode *inode, int ioflags, bool force_mds,
1451 const struct nfs_pgio_completion_ops *compl_ops)
1452{
1453 struct nfs_server *server = NFS_SERVER(inode);
1454 const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
1455
1456#ifdef CONFIG_NFS_V4_1
1457 if (server->pnfs_curr_ld && !force_mds)
1458 pg_ops = server->pnfs_curr_ld->pg_write_ops;
1459#endif
1460 nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
1461 server->wsize, ioflags);
1462}
1463EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
1464
1465void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1466{
1467 struct nfs_pgio_mirror *mirror;
1468
1469 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
1470 pgio->pg_ops->pg_cleanup(pgio);
1471
1472 pgio->pg_ops = &nfs_pgio_rw_ops;
1473
1474 nfs_pageio_stop_mirroring(pgio);
1475
1476 mirror = &pgio->pg_mirrors[0];
1477 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1478}
1479EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1480
1481
1482void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1483{
1484 struct nfs_commit_data *data = calldata;
1485
1486 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1487}
1488
/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
1492static int nfs_should_remove_suid(const struct inode *inode)
1493{
1494 umode_t mode = inode->i_mode;
1495 int kill = 0;

	/* suid always must be killed */
1498 if (unlikely(mode & S_ISUID))
1499 kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
1505 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1506 kill |= ATTR_KILL_SGID;
1507
1508 if (unlikely(kill && S_ISREG(mode)))
1509 return kill;
1510
1511 return 0;
1512}
1513
1514static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1515 struct nfs_fattr *fattr)
1516{
1517 struct nfs_pgio_args *argp = &hdr->args;
1518 struct nfs_pgio_res *resp = &hdr->res;
1519 u64 size = argp->offset + resp->count;
1520
1521 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1522 fattr->size = size;
1523 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1524 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1525 return;
1526 }
1527 if (size != fattr->size)
1528 return;
	/* Set attribute barrier */
1530 nfs_fattr_set_barrier(fattr);
	/* ...and update size */
1532 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1533}
1534
1535void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1536{
1537 struct nfs_fattr *fattr = &hdr->fattr;
1538 struct inode *inode = hdr->inode;
1539
1540 spin_lock(&inode->i_lock);
1541 nfs_writeback_check_extend(hdr, fattr);
1542 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1543 spin_unlock(&inode->i_lock);
1544}
1545EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
1546
/*
 * This function is called when the WRITE call is complete.
 */
1550static int nfs_writeback_done(struct rpc_task *task,
1551 struct nfs_pgio_header *hdr,
1552 struct inode *inode)
1553{
1554 int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
1563 status = NFS_PROTO(inode)->write_done(task, hdr);
1564 if (status != 0)
1565 return status;
1566
1567 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
1568 trace_nfs_writeback_done(task, hdr);
1569
1570 if (hdr->res.verf->committed < hdr->args.stable &&
1571 task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.  The data has still been written,
		 * so just log a rate-limited complaint about the
		 * faulty server rather than failing the write.
		 */
1580 static unsigned long complain;
1581
1582
1583 if (time_before(complain, jiffies)) {
1584 dprintk("NFS: faulty NFS server %s:"
1585 " (committed = %d) != (stable = %d)\n",
1586 NFS_SERVER(inode)->nfs_client->cl_hostname,
1587 hdr->res.verf->committed, hdr->args.stable);
1588 complain = jiffies + 300 * HZ;
1589 }
1590 }

	/* Deal with the suid/sgid bit corner case */
1593 if (nfs_should_remove_suid(inode)) {
1594 spin_lock(&inode->i_lock);
1595 nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
1596 spin_unlock(&inode->i_lock);
1597 }
1598 return 0;
1599}
1600
/*
 * Handle a short write: restart the RPC to write out the remaining
 * data, or fail with EIO if the server made no progress at all.
 */
1604static void nfs_writeback_result(struct rpc_task *task,
1605 struct nfs_pgio_header *hdr)
1606{
1607 struct nfs_pgio_args *argp = &hdr->args;
1608 struct nfs_pgio_res *resp = &hdr->res;
1609
1610 if (resp->count < argp->count) {
1611 static unsigned long complain;

		/* This is a short write! */
1614 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
1617 if (resp->count == 0) {
1618 if (time_before(complain, jiffies)) {
1619 printk(KERN_WARNING
1620 "NFS: Server wrote zero bytes, expected %u.\n",
1621 argp->count);
1622 complain = jiffies + 300 * HZ;
1623 }
1624 nfs_set_pgio_error(hdr, -EIO, argp->offset);
1625 task->tk_status = -EIO;
1626 return;
1627 }

		/* For non rpc-based layout drivers, retry-through-MDS */
1630 if (!task->tk_ops) {
1631 hdr->pnfs_error = -EAGAIN;
1632 return;
1633 }

		/* Was this an NFSv2 write or an NFSv3 stable write? */
1636 if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
1638 hdr->mds_offset += resp->count;
1639 argp->offset += resp->count;
1640 argp->pgbase += resp->count;
1641 argp->count -= resp->count;
1642 } else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
1646 argp->stable = NFS_FILE_SYNC;
1647 }
1648 resp->count = 0;
1649 resp->verf->committed = 0;
1650 rpc_restart_call_prepare(task);
1651 }
1652}
1653
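/*
 * Commit RPC accounting: rpcs_out counts in-flight COMMIT calls, and
 * waiters block until it drops back to zero.
 */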
1654static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
1655{
1656 return wait_var_event_killable(&cinfo->rpcs_out,
1657 !atomic_read(&cinfo->rpcs_out));
1658}
1659
1660static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
1661{
1662 atomic_inc(&cinfo->rpcs_out);
1663}
1664
1665static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
1666{
1667 if (atomic_dec_and_test(&cinfo->rpcs_out))
1668 wake_up_var(&cinfo->rpcs_out);
1669}
1670
1671void nfs_commitdata_release(struct nfs_commit_data *data)
1672{
1673 put_nfs_open_context(data->context);
1674 nfs_commit_free(data);
1675}
1676EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1677
1678int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1679 const struct nfs_rpc_ops *nfs_ops,
1680 const struct rpc_call_ops *call_ops,
1681 int how, int flags)
1682{
1683 struct rpc_task *task;
1684 int priority = flush_task_priority(how);
1685 struct rpc_message msg = {
1686 .rpc_argp = &data->args,
1687 .rpc_resp = &data->res,
1688 .rpc_cred = data->cred,
1689 };
1690 struct rpc_task_setup task_setup_data = {
1691 .task = &data->task,
1692 .rpc_client = clnt,
1693 .rpc_message = &msg,
1694 .callback_ops = call_ops,
1695 .callback_data = data,
1696 .workqueue = nfsiod_workqueue,
1697 .flags = RPC_TASK_ASYNC | flags,
1698 .priority = priority,
1699 };
1700
1701 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
1702 trace_nfs_initiate_commit(data);
1703
1704 dprintk("NFS: initiated commit call\n");
1705
1706 task = rpc_run_task(&task_setup_data);
1707 if (IS_ERR(task))
1708 return PTR_ERR(task);
1709 if (how & FLUSH_SYNC)
1710 rpc_wait_for_completion_task(task);
1711 rpc_put_task(task);
1712 return 0;
1713}
1714EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1715
1716static loff_t nfs_get_lwb(struct list_head *head)
1717{
1718 loff_t lwb = 0;
1719 struct nfs_page *req;
1720
1721 list_for_each_entry(req, head, wb_list)
1722 if (lwb < (req_offset(req) + req->wb_bytes))
1723 lwb = req_offset(req) + req->wb_bytes;
1724
1725 return lwb;
1726}
1727
/*
 * Set up the argument/result storage required for the RPC call.
 */
1731void nfs_init_commit(struct nfs_commit_data *data,
1732 struct list_head *head,
1733 struct pnfs_layout_segment *lseg,
1734 struct nfs_commit_info *cinfo)
1735{
1736 struct nfs_page *first;
1737 struct nfs_open_context *ctx;
1738 struct inode *inode;
1739
	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */
1743 if (head)
1744 list_splice_init(head, &data->pages);
1745
1746 first = nfs_list_entry(data->pages.next);
1747 ctx = nfs_req_openctx(first);
1748 inode = d_inode(ctx->dentry);
1749
1750 data->inode = inode;
1751 data->cred = ctx->cred;
1752 data->lseg = lseg;
	/* only set lwb for pnfs commit */
1754 if (lseg)
1755 data->lwb = nfs_get_lwb(&data->pages);
1756 data->mds_ops = &nfs_commit_ops;
1757 data->completion_ops = cinfo->completion_ops;
1758 data->dreq = cinfo->dreq;
1759
1760 data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
1762 data->args.offset = 0;
1763 data->args.count = 0;
1764 data->context = get_nfs_open_context(ctx);
1765 data->res.fattr = &data->fattr;
1766 data->res.verf = &data->verf;
1767 nfs_fattr_init(&data->fattr);
1768}
1769EXPORT_SYMBOL_GPL(nfs_init_commit);
1770
1771void nfs_retry_commit(struct list_head *page_list,
1772 struct pnfs_layout_segment *lseg,
1773 struct nfs_commit_info *cinfo,
1774 u32 ds_commit_idx)
1775{
1776 struct nfs_page *req;
1777
1778 while (!list_empty(page_list)) {
1779 req = nfs_list_entry(page_list->next);
1780 nfs_list_remove_request(req);
1781 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
1782 if (!cinfo->dreq)
1783 nfs_clear_page_commit(req->wb_page);
1784 nfs_unlock_and_release_request(req);
1785 }
1786}
1787EXPORT_SYMBOL_GPL(nfs_retry_commit);
1788
1789static void
1790nfs_commit_resched_write(struct nfs_commit_info *cinfo,
1791 struct nfs_page *req)
1792{
1793 __set_page_dirty_nobuffers(req->wb_page);
1794}
1795
/*
 * Commit dirty pages
 */
1799static int
1800nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1801 struct nfs_commit_info *cinfo)
1802{
1803 struct nfs_commit_data *data;

	/* another commit raced with us */
1806 if (list_empty(head))
1807 return 0;
1808
1809 data = nfs_commitdata_alloc(true);

	/* Set up the argument struct */
1812 nfs_init_commit(data, head, NULL, cinfo);
1813 atomic_inc(&cinfo->mds->rpcs_out);
1814 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
1815 data->mds_ops, how, RPC_TASK_CRED_NOREF);
1816}
1817
/*
 * COMMIT call returned
 */
1821static void nfs_commit_done(struct rpc_task *task, void *calldata)
1822{
1823 struct nfs_commit_data *data = calldata;
1824
1825 dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1826 task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
1829 NFS_PROTO(data->inode)->commit_done(task, data);
1830 trace_nfs_commit_done(task, data);
1831}
1832
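/*
 * Release the requests covered by a completed COMMIT: drop requests whose
 * write verifier matches, and redirty those that must be written again.
 */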
1833static void nfs_commit_release_pages(struct nfs_commit_data *data)
1834{
1835 const struct nfs_writeverf *verf = data->res.verf;
1836 struct nfs_page *req;
1837 int status = data->task.tk_status;
1838 struct nfs_commit_info cinfo;
1839 struct nfs_server *nfss;
1840
1841 while (!list_empty(&data->pages)) {
1842 req = nfs_list_entry(data->pages.next);
1843 nfs_list_remove_request(req);
1844 if (req->wb_page)
1845 nfs_clear_page_commit(req->wb_page);
1846
1847 dprintk("NFS: commit (%s/%llu %d@%lld)",
1848 nfs_req_openctx(req)->dentry->d_sb->s_id,
1849 (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
1850 req->wb_bytes,
1851 (long long)req_offset(req));
1852 if (status < 0) {
1853 if (req->wb_page) {
1854 trace_nfs_commit_error(req, status);
1855 nfs_mapping_set_error(req->wb_page, status);
1856 nfs_inode_remove_request(req);
1857 }
1858 dprintk_cont(", error = %d\n", status);
1859 goto next;
1860 }

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
1864 if (nfs_write_match_verf(verf, req)) {
			/* We have a match */
1866 if (req->wb_page)
1867 nfs_inode_remove_request(req);
1868 dprintk_cont(" OK\n");
1869 goto next;
1870 }
		/* We have a mismatch. Write the page again */
1872 dprintk_cont(" mismatch\n");
1873 nfs_mark_request_dirty(req);
1874 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
1875 next:
1876 nfs_unlock_and_release_request(req);
1877
1878 cond_resched();
1879 }
1880 nfss = NFS_SERVER(data->inode);
1881 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
1882 clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);
1883
1884 nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1885 nfs_commit_end(cinfo.mds);
1886}
1887
1888static void nfs_commit_release(void *calldata)
1889{
1890 struct nfs_commit_data *data = calldata;
1891
1892 data->completion_ops->completion(data);
1893 nfs_commitdata_release(calldata);
1894}
1895
1896static const struct rpc_call_ops nfs_commit_ops = {
1897 .rpc_call_prepare = nfs_commit_prepare,
1898 .rpc_call_done = nfs_commit_done,
1899 .rpc_release = nfs_commit_release,
1900};
1901
1902static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1903 .completion = nfs_commit_release_pages,
1904 .resched_write = nfs_commit_resched_write,
1905};
1906
1907int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1908 int how, struct nfs_commit_info *cinfo)
1909{
1910 int status;
1911
1912 status = pnfs_commit_list(inode, head, how, cinfo);
1913 if (status == PNFS_NOT_ATTEMPTED)
1914 status = nfs_commit_list(inode, head, how, cinfo);
1915 return status;
1916}
1917
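/*
 * Scan the inode for requests needing a commit and send COMMIT calls,
 * optionally waiting (FLUSH_SYNC) for them all to complete.
 */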
1918static int __nfs_commit_inode(struct inode *inode, int how,
1919 struct writeback_control *wbc)
1920{
1921 LIST_HEAD(head);
1922 struct nfs_commit_info cinfo;
1923 int may_wait = how & FLUSH_SYNC;
1924 int ret, nscan;
1925
1926 nfs_init_cinfo_from_inode(&cinfo, inode);
1927 nfs_commit_begin(cinfo.mds);
1928 for (;;) {
1929 ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
1930 if (ret <= 0)
1931 break;
1932 ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
1933 if (ret < 0)
1934 break;
1935 ret = 0;
1936 if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
1937 if (nscan < wbc->nr_to_write)
1938 wbc->nr_to_write -= nscan;
1939 else
1940 wbc->nr_to_write = 0;
1941 }
1942 if (nscan < INT_MAX)
1943 break;
1944 cond_resched();
1945 }
1946 nfs_commit_end(cinfo.mds);
1947 if (ret || !may_wait)
1948 return ret;
1949 return wait_on_commit(cinfo.mds);
1950}
1951
1952int nfs_commit_inode(struct inode *inode, int how)
1953{
1954 return __nfs_commit_inode(inode, how, NULL);
1955}
1956EXPORT_SYMBOL_GPL(nfs_commit_inode);
1957
1958int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1959{
1960 struct nfs_inode *nfsi = NFS_I(inode);
1961 int flags = FLUSH_SYNC;
1962 int ret = 0;
1963
1964 if (wbc->sync_mode == WB_SYNC_NONE) {
		/* no commits means nothing needs to be done */
1966 if (!atomic_long_read(&nfsi->commit_info.ncommit))
1967 goto check_requests_outstanding;

		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
1972 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1973 goto out_mark_dirty;

		/* don't wait for the COMMIT response */
1976 flags = 0;
1977 }
1978
1979 ret = __nfs_commit_inode(inode, flags, wbc);
1980 if (!ret) {
1981 if (flags & FLUSH_SYNC)
1982 return 0;
1983 } else if (atomic_long_read(&nfsi->commit_info.ncommit))
1984 goto out_mark_dirty;
1985
1986check_requests_outstanding:
1987 if (!atomic_read(&nfsi->commit_info.rpcs_out))
1988 return ret;
1989out_mark_dirty:
1990 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1991 return ret;
1992}
1993EXPORT_SYMBOL_GPL(nfs_write_inode);
1994
/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
2001int nfs_filemap_write_and_wait_range(struct address_space *mapping,
2002 loff_t lstart, loff_t lend)
2003{
2004 int ret;
2005
2006 ret = filemap_write_and_wait_range(mapping, lstart, lend);
2007 if (ret == 0)
2008 ret = pnfs_sync_inode(mapping->host, true);
2009 return ret;
2010}
2011EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
2012
/*
 * flush the inode to disk.
 */
2016int nfs_wb_all(struct inode *inode)
2017{
2018 int ret;
2019
2020 trace_nfs_writeback_inode_enter(inode);
2021
2022 ret = filemap_write_and_wait(inode->i_mapping);
2023 if (ret)
2024 goto out;
2025 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2026 if (ret < 0)
2027 goto out;
2028 pnfs_sync_inode(inode, true);
2029 ret = 0;
2030
2031out:
2032 trace_nfs_writeback_inode_exit(inode, ret);
2033 return ret;
2034}
2035EXPORT_SYMBOL_GPL(nfs_wb_all);
2036
2037int nfs_wb_page_cancel(struct inode *inode, struct page *page)
2038{
2039 struct nfs_page *req;
2040 int ret = 0;
2041
2042 wait_on_page_writeback(page);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
2046 req = nfs_lock_and_join_requests(page);
2047
2048 if (IS_ERR(req)) {
2049 ret = PTR_ERR(req);
2050 } else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
2055 nfs_inode_remove_request(req);
2056 nfs_unlock_and_release_request(req);
2057 }
2058
2059 return ret;
2060}
2061
/*
 * Write back all requests on one page - we do this before reading it.
 */
2065int nfs_wb_page(struct inode *inode, struct page *page)
2066{
2067 loff_t range_start = page_file_offset(page);
2068 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
2069 struct writeback_control wbc = {
2070 .sync_mode = WB_SYNC_ALL,
2071 .nr_to_write = 0,
2072 .range_start = range_start,
2073 .range_end = range_end,
2074 };
2075 int ret;
2076
2077 trace_nfs_writeback_page_enter(inode);
2078
2079 for (;;) {
2080 wait_on_page_writeback(page);
2081 if (clear_page_dirty_for_io(page)) {
2082 ret = nfs_writepage_locked(page, &wbc);
2083 if (ret < 0)
2084 goto out_error;
2085 continue;
2086 }
2087 ret = 0;
2088 if (!PagePrivate(page))
2089 break;
2090 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2091 if (ret < 0)
2092 goto out_error;
2093 }
2094out_error:
2095 trace_nfs_writeback_page_exit(inode, ret);
2096 return ret;
2097}
2098
2099#ifdef CONFIG_MIGRATION
2100int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
2101 struct page *page, enum migrate_mode mode)
2102{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
2111 if (PagePrivate(page))
2112 return -EBUSY;
2113
2114 if (!nfs_fscache_release_page(page, GFP_KERNEL))
2115 return -EBUSY;
2116
2117 return migrate_page(mapping, newpage, page, mode);
2118}
2119#endif
2120
2121int __init nfs_init_writepagecache(void)
2122{
2123 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
2124 sizeof(struct nfs_pgio_header),
2125 0, SLAB_HWCACHE_ALIGN,
2126 NULL);
2127 if (nfs_wdata_cachep == NULL)
2128 return -ENOMEM;
2129
2130 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
2131 nfs_wdata_cachep);
2132 if (nfs_wdata_mempool == NULL)
2133 goto out_destroy_write_cache;
2134
2135 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
2136 sizeof(struct nfs_commit_data),
2137 0, SLAB_HWCACHE_ALIGN,
2138 NULL);
2139 if (nfs_cdata_cachep == NULL)
2140 goto out_destroy_write_mempool;
2141
2142 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
2143 nfs_cdata_cachep);
2144 if (nfs_commit_mempool == NULL)
2145 goto out_destroy_commit_cache;
2146
	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11584k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
2163 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
2164 if (nfs_congestion_kb > 256*1024)
2165 nfs_congestion_kb = 256*1024;
2166
2167 return 0;
2168
2169out_destroy_commit_cache:
2170 kmem_cache_destroy(nfs_cdata_cachep);
2171out_destroy_write_mempool:
2172 mempool_destroy(nfs_wdata_mempool);
2173out_destroy_write_cache:
2174 kmem_cache_destroy(nfs_wdata_cachep);
2175 return -ENOMEM;
2176}
2177
2178void nfs_destroy_writepagecache(void)
2179{
2180 mempool_destroy(nfs_commit_mempool);
2181 kmem_cache_destroy(nfs_cdata_cachep);
2182 mempool_destroy(nfs_wdata_mempool);
2183 kmem_cache_destroy(nfs_wdata_cachep);
2184}
2185
2186static const struct nfs_rw_ops nfs_rw_write_ops = {
2187 .rw_alloc_header = nfs_writehdr_alloc,
2188 .rw_free_header = nfs_writehdr_free,
2189 .rw_done = nfs_writeback_done,
2190 .rw_result = nfs_writeback_result,
2191 .rw_initiate = nfs_initiate_write,
2192};
2193