// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
{
	struct nfs_commit_data *p;

	if (never_fail)
		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
	else {
		/* It is OK to do some reclaim here, but it is not safe to
		 * wait for anything to be returned to the pool.
		 * mempool_alloc() cannot handle that particular combination,
		 * so we need two separate attempts.
		 */
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
					     __GFP_NOWARN | __GFP_NORETRY);
		if (!p)
			return NULL;
	}

	memset(p, 0, sizeof(*p));
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);

	memset(p, 0, sizeof(*p));
	p->rw_mode = FMODE_WRITE;
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
	return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
				   void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	kref_init(&ioc->refcount);
}

static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc = container_of(kref,
			struct nfs_io_completion, refcount);
	ioc->complete(ioc->data);
	kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_put(&ioc->refcount, nfs_io_completion_release);
}

static struct nfs_page *
nfs_page_private_request(struct page *page)
{
	if (!PagePrivate(page))
		return NULL;
	return (struct nfs_page *)page_private(page);
}
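
/*
 * nfs_page_find_private_request - find the request attached to @page
 *
 * Returns the head request carried in the page's private pointer, with
 * an extra reference taken on it, or NULL if the page has none.
 */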
static struct nfs_page *
nfs_page_find_private_request(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);
	struct nfs_page *req;

	if (!PagePrivate(page))
		return NULL;
	spin_lock(&mapping->private_lock);
	req = nfs_page_private_request(page);
	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}
	spin_unlock(&mapping->private_lock);
	return req;
}

static struct nfs_page *
nfs_page_find_swap_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req = NULL;

	if (!PageSwapCache(page))
		return NULL;
	mutex_lock(&nfsi->commit_mutex);
	if (PageSwapCache(page)) {
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
				page);
		if (req) {
			WARN_ON_ONCE(req->wb_head != req);
			kref_get(&req->wb_kref);
		}
	}
	mutex_unlock(&nfsi->commit_mutex);
	return req;
}
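
/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * Returns a referenced head request, whether it is tracked via the page's
 * private pointer or via the swapcache commit lists, or NULL if none exists.
 */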
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_private_request(page);
	if (!req)
		req = nfs_page_find_swap_request(page);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (i_size > 0 && page_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset + count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	nfs_zap_mapping(mapping->host, mapping);
	/* Force file size revalidation */
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
					NFS_INO_REVAL_PAGECACHE |
					NFS_INO_INVALID_SIZE;
	spin_unlock(&inode->i_lock);
}

static void nfs_mapping_set_error(struct page *page, int error)
{
	struct address_space *mapping = page_file_mapping(page);

	SetPageError(page);
	mapping_set_error(mapping, error);
	nfs_set_pageerror(mapping);
}
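
/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held.
 */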
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}
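
/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise.
 */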
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	struct nfs_page *tmp;
	unsigned int pos = 0;
	unsigned int len = nfs_page_length(req->wb_page);

	nfs_page_group_lock(req);

	for (;;) {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (!tmp)
			break;
		pos = tmp->wb_pgbase + tmp->wb_bytes;
	}

	nfs_page_group_unlock(req);
	return pos >= len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	if (PageUptodate(req->wb_page))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	SetPageUptodate(req->wb_page);
}

static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */
int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

static void nfs_set_page_writeback(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
	    NFS_CONGESTION_ON_THRESH)
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

static void nfs_end_page_writeback(struct nfs_page *req)
{
	struct inode *inode = page_file_mapping(req->wb_page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	bool is_done;

	is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
	nfs_unlock_request(req);
	if (!is_done)
		return;

	end_page_writeback(req->wb_page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}
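
/*
 * nfs_unroll_locks - unlock newly locked subrequests on an error path
 *
 * Walks the page group from just after @head up to (but not including)
 * @req, dropping the lock and the reference taken on each subrequest.
 */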
static void
nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
		 struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}
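
/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 * @inode - inode the requests belong to
 *
 * Each subrequest has already been unlinked from its page group; here we
 * sever its back pointers and free it once the last reference goes away.
 */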
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				NULL : subreq->wb_this_page;

		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_this_page = subreq;

		clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
		if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
				nfs_free_request(subreq);
			continue;
		}

		subreq->wb_head = subreq;

		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
			nfs_release_request(subreq);
			atomic_long_dec(&NFS_I(inode)->nrequests);
		}

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_and_release_request(subreq);
	}
}
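
/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request (which after
 * this call is guaranteed to be the only request associated with the page),
 * or NULL if the page doesn't have any requests yet, or an error if
 * something goes wrong.
 */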
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int total_bytes;
	int ret;

try_again:
	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_page_find_head_request(page);
	if (!head)
		return NULL;

	/* lock the page head first in order to avoid an ABBA inefficiency */
	if (!nfs_lock_request(head)) {
		ret = nfs_wait_on_request(head);
		nfs_release_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
		goto try_again;
	}

	/* Ensure that nobody removed the request before we locked it */
	if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
		nfs_unlock_and_release_request(head);
		goto try_again;
	}

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		goto release_request;

	/* lock each request in the page group */
	total_bytes = head->wb_bytes;
	for (subreq = head->wb_this_page; subreq != head;
	     subreq = subreq->wb_this_page) {

		if (!kref_get_unless_zero(&subreq->wb_kref)) {
			if (subreq->wb_offset == head->wb_offset + total_bytes)
				total_bytes += subreq->wb_bytes;
			continue;
		}

		while (!nfs_lock_request(subreq)) {
			/*
			 * Unlock page to allow nfs_page_group_sync_on_bit()
			 * to succeed
			 */
			nfs_page_group_unlock(head);
			ret = nfs_wait_on_request(subreq);
			if (!ret)
				ret = nfs_page_group_lock(head);
			if (ret < 0) {
				nfs_unroll_locks(inode, head, subreq);
				nfs_release_request(subreq);
				goto release_request;
			}
		}
		/*
		 * Subrequests are always contiguous, non overlapping
		 * and in order of increasing offset. Don't trust anything
		 * that looks different.
		 */
		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
			total_bytes += subreq->wb_bytes;
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_page_group_unlock(head);
			nfs_unroll_locks(inode, head, subreq);
			nfs_unlock_and_release_request(subreq);
			ret = -EIO;
			goto release_request;
		}
	}

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;

		/* change head request to cover whole range that
		 * the former page group covered */
		head->wb_bytes = total_bytes;
	}

	/* Postpone destruction of this request */
	if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
		set_bit(PG_INODE_REF, &head->wb_flags);
		kref_get(&head->wb_kref);
		atomic_long_inc(&NFS_I(inode)->nrequests);
	}

	nfs_page_group_unlock(head);

	nfs_destroy_unlinked_subrequests(destroy_list, head, inode);

	/* Did we lose a race with nfs_inode_remove_request()? */
	if (!(PagePrivate(page) || PageSwapCache(page))) {
		nfs_unlock_and_release_request(head);
		return NULL;
	}

	/* still holds ref on head from nfs_page_find_head_request
	 * and still has the page locked */
	return head;

release_request:
	nfs_unlock_and_release_request(head);
	return ERR_PTR(ret);
}

static void nfs_write_error(struct nfs_page *req, int error)
{
	trace_nfs_write_error(req, error);
	nfs_mapping_set_error(req->wb_page, error);
	nfs_inode_remove_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}
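
/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */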
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	/* If there is a fatal error that covers this write, just exit */
	ret = pgio->pg_error;
	if (nfs_error_is_fatal_on_server(ret))
		goto out_launder;

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
		if (nfs_error_is_fatal(ret)) {
			if (nfs_error_is_fatal_on_server(ret))
				goto out_launder;
		} else
			ret = -EAGAIN;
		nfs_redirty_request(req);
		pgio->pg_error = 0;
	} else
		nfs_add_stats(page_file_mapping(page)->host,
			      NFSIOS_WRITEPAGES, 1);
out:
	return ret;
out_launder:
	nfs_write_error(req, ret);
	return 0;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{
	int ret;

	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = AOP_WRITEPAGE_ACTIVATE;
	}
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
				struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = page_file_mapping(page)->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0,
			      false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (nfs_error_is_fatal(pgio.pg_error))
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static void nfs_io_completion_commit(void *inode)
{
	nfs_commit_inode(inode, 0);
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	struct nfs_io_completion *ioc;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	ioc = nfs_io_completion_alloc(GFP_KERNEL);
	if (ioc)
		nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
			      &nfs_async_write_completion_ops);
	pgio.pg_io_completion = ioc;
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	nfs_io_completion_put(ioc);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (nfs_error_is_fatal(err))
		goto out_err;
	return 0;
out_err:
	return err;
}
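
/*
 * Insert a write request into an inode
 */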
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct nfs_inode *nfsi = NFS_I(inode);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	spin_lock(&mapping->private_lock);
	if (!nfs_have_writebacks(inode) &&
	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		inode_inc_iversion_raw(inode);
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	spin_unlock(&mapping->private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this is a head request, so hold an extra reference on the
	 * request on behalf of the inode */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *head;

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		head = req->wb_head;

		spin_lock(&mapping->private_lock);
		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
			set_page_private(head->wb_page, 0);
			ClearPagePrivate(head->wb_page);
			clear_bit(PG_MAPPED, &head->wb_flags);
		}
		spin_unlock(&mapping->private_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
		nfs_release_request(req);
		atomic_long_dec(&nfsi->nrequests);
	}
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	if (req->wb_page)
		__set_page_dirty_nobuffers(req->wb_page);
}
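
/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @nfsi for the head request for @page.
 * Must be called while holding the inode (which is cinfo) lock.
 *
 * Returns the head request if found, or NULL if not found.
 */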
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (freq->wb_page == page)
			return freq->wb_head;
	}

	return NULL;
}
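
/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */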
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
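
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */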
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	if (req->wb_page)
		nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
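
/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit.
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */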
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
	dec_node_page_state(page, NR_UNSTABLE_NFS);
	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
		    WB_RECLAIMABLE);
}

/* Called holding the request lock on @req */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct nfs_open_context *ctx = nfs_req_openctx(req);
		struct inode *inode = d_inode(ctx->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		mutex_lock(&NFS_I(inode)->commit_mutex);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		mutex_unlock(&NFS_I(inode)->commit_mutex);
		nfs_clear_page_commit(req->wb_page);
	}
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
	nfs_io_completion_get(hdr->io_completion);
}

static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			trace_nfs_comp_error(req, hdr->error);
			nfs_mapping_set_error(req->wb_page, hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			/* Reset wb_nio, since the write was successful. */
			req->wb_nio = 0;
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_end_page_writeback(req);
		nfs_release_request(req);
	}
out:
	nfs_io_completion_put(hdr->io_completion);
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

restart:
	list_for_each_entry_safe(req, tmp, src, wb_list) {
		kref_get(&req->wb_kref);
		if (!nfs_lock_request(req)) {
			int status;

			/* Prevent deadlock with nfs_lock_and_join_requests */
			if (!list_empty(dst)) {
				nfs_release_request(req);
				continue;
			}
			/* Ensure we make progress to prevent livelock */
			mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
			status = nfs_wait_on_request(req);
			nfs_release_request(req);
			mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
			if (status < 0)
				break;
			goto restart;
		}
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);
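
/**
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */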
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}
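
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */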
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(page);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	req->wb_nio = 0;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_page(inode, page);
	return (error < 0) ? ERR_PTR(error) : NULL;
}
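
/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * examined the page contents already (is it uptodate?).
 */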
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, page, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
			       unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = file_inode(file)->i_flctx;
	struct nfs_page *req;
	int do_flush, status;

	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we attempt to copy anything into the page. Also
	 * do this if we find a request associated with our open
	 * context, but with a lock context that differs from the
	 * current one.
	 */
	do {
		req = nfs_page_find_head_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page ||
			   !nfs_match_open_context(nfs_req_openctx(req), ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire and no replacement credential
 * has been cached yet.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);

	if (nfs_ctx_key_to_expire(ctx, inode) &&
	    !ctx->ll_cred)
		/* Already expired! */
		return -EACCES;
	return 0;
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
	struct rpc_cred *cred = ctx->ll_cred;
	struct auth_cred acred = {
		.cred = ctx->cred,
	};

	if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
		put_rpccred(cred);
		ctx->ll_cred = NULL;
		cred = NULL;
	}
	if (!cred)
		cred = auth->au_ops->lookup_cred(auth, &acred, 0);
	if (!cred || IS_ERR(cred))
		return true;
	ctx->ll_cred = cred;
	return !!(cred->cr_ops->crkey_timeout &&
		  cred->cr_ops->crkey_timeout(cred));
}
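
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */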
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return false;
	return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			fl->fl_type == F_WRLCK;
}
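
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */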
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	int ret;
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
				      fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
				      fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}
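
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */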
int nfs_updatepage(struct file *file, struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct address_space *mapping = page_file_mapping(page);
	struct inode *inode = mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(mapping);
	else
		__set_page_dirty_nobuffers(page);
out:
	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
		status, (long long)i_size_read(inode));
	return status;
}

static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
	case FLUSH_HIGHPRI:
		return RPC_PRIORITY_HIGH;
	case FLUSH_LOWPRI:
		return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
	trace_nfs_initiate_write(hdr);
}

/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call nfs_redirty_request.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	/* Bump the transmission count */
	req->wb_nio++;
	nfs_mark_request_dirty(req);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		if (nfs_error_is_fatal(error))
			nfs_write_error(req, error);
		else
			nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages, 0);
	filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
				 hdr->args.offset + hdr->args.count - 1);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.init_hdr = nfs_async_write_init,
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);

void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}
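
/*
 * Special version of should_remove_suid() that ignores capabilities.
 */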
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}

static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
	trace_nfs_writeback_done(task, hdr);

	if (hdr->res.verf->committed < hdr->args.stable &&
	    task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS:       faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				hdr->res.verf->committed, hdr->args.stable);
			complain = jiffies + 300 * HZ;
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode)) {
		spin_lock(&inode->i_lock);
		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
		spin_unlock(&inode->i_lock);
	}
	return 0;
}
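
/*
 * This function is called when the WRITE call is complete.
 */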
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		resp->count = 0;
		resp->verf->committed = 0;
		rpc_restart_call_prepare(task);
	}
}

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_var_event_killable(&cinfo->rpcs_out,
				       !atomic_read(&cinfo->rpcs_out));
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out))
		wake_up_var(&cinfo->rpcs_out);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
	trace_nfs_initiate_commit(data);

	dprintk("NFS: initiated commit call\n");

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct nfs_open_context *ctx = nfs_req_openctx(first);
	struct inode *inode = d_inode(ctx->dentry);

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = ctx->cred;
	data->lseg	  = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops     = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq	  = cinfo->dreq;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context     = get_nfs_open_context(ctx);
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		if (!cinfo->dreq)
			nfs_clear_page_commit(req->wb_page);
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
			 struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;

	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc(true);

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how, 0);
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
	trace_nfs_commit_done(task, data);
}

static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct nfs_server *nfss;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (req->wb_page)
			nfs_clear_page_commit(req->wb_page);

		dprintk("NFS:       commit (%s/%llu %d@%lld)",
			nfs_req_openctx(req)->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			if (req->wb_page) {
				trace_nfs_commit_error(req, status);
				nfs_mapping_set_error(req->wb_page, status);
				nfs_inode_remove_request(req);
			}
			dprintk_cont(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (verf->committed > NFS_UNSTABLE &&
		    !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
			/* We have a match */
			if (req->wb_page)
				nfs_inode_remove_request(req);
			dprintk_cont(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk_cont(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
next:
		nfs_unlock_and_release_request(req);
		/* Latency breaker */
		cond_resched();
	}
	nfss = NFS_SERVER(data->inode);
	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

static int __nfs_commit_inode(struct inode *inode, int how,
			      struct writeback_control *wbc)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int ret, nscan;

	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	for (;;) {
		ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
		if (ret <= 0)
			break;
		ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (ret < 0)
			break;
		ret = 0;
		if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
			if (nscan < wbc->nr_to_write)
				wbc->nr_to_write -= nscan;
			else
				wbc->nr_to_write = 0;
		}
		if (nscan < INT_MAX)
			break;
		cond_resched();
	}
	nfs_commit_end(cinfo.mds);
	if (ret || !may_wait)
		return ret;
	return wait_on_commit(cinfo.mds);
}

int nfs_commit_inode(struct inode *inode, int how)
{
	return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);

int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* no commits means nothing needs to be done */
		if (!atomic_long_read(&nfsi->commit_info.ncommit))
			goto check_requests_outstanding;

		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = __nfs_commit_inode(inode, flags, wbc);
	if (!ret) {
		if (flags & FLUSH_SYNC)
			return 0;
	} else if (atomic_long_read(&nfsi->commit_info.ncommit))
		goto out_mark_dirty;

check_requests_outstanding:
	if (!atomic_read(&nfsi->commit_info.rpcs_out))
		return ret;
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
				     loff_t lstart, loff_t lend)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, lstart, lend);
	if (ret == 0)
		ret = pnfs_sync_inode(mapping->host, true);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	wait_on_page_writeback(page);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(page);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_unlock_and_release_request(), so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		     struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};