// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The NFS client uses
 * this engine both for files opened with O_DIRECT and for swap I/O.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */

	loff_t			io_start;	/* Start offset for I/O */
	ssize_t			count,		/* bytes actually processed */
				max_count,	/* max expected count */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Count of commit reqs */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
	/* for write */
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	/* for read */
#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
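
/*
 * Truncation accounting: when an error or EOF shortens the I/O, clamp
 * dreq->max_count (and dreq->count) to the verified length, and record
 * the error, or clear it if the request ended at a clean EOF.
 */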
static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
			    const struct nfs_pgio_header *hdr,
			    ssize_t dreq_len)
{
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;

		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
			dreq->error = hdr->error;
		else /* Clear outstanding error if this is EOF */
			dreq->error = 0;
	}
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
		       const struct nfs_pgio_header *hdr)
{
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
}

/*
 * nfs_direct_select_verf - select the right verifier
 * @dreq - direct request possibly spanning multiple servers
 * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
 * @commit_idx - commit bucket index for the DS
 *
 * returns the correct verifier to use given the role of the server
 */
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
		       struct nfs_client *ds_clp,
		       int commit_idx)
{
	struct nfs_writeverf *verfp = &dreq->verf;

#ifdef CONFIG_NFS_V4_1
	/*
	 * pNFS is in use, use the DS verf except commit_through_mds is set
	 * for layout segment where nbuckets is zero.
	 */
	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
		else
			WARN_ON_ONCE(1);
	}
#endif
	return verfp;
}

/*
 * nfs_direct_set_hdr_verf - set the write/commit verifier
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verfs
 *
 * Set the server's (MDS or DS) "seen" verifier
 */
static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
				    struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	WARN_ON_ONCE(verfp->committed >= 0);
	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
	WARN_ON_ONCE(verfp->committed < 0);
}

static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
			       const struct nfs_writeverf *v2)
{
	return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
}

/*
 * nfs_direct_set_or_cmp_hdr_verf - set or compare the write/commit verifier
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verf
 *
 * set the server's "seen" verf if not initialized.
 * returns result of comparison between @hdr->verf and the "seen"
 * verf of the server used by @hdr (DS or MDS)
 */
static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
					  struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	if (verfp->committed < 0) {
		nfs_direct_set_hdr_verf(dreq, hdr);
		return 0;
	}
	return nfs_direct_cmp_verf(verfp, &hdr->verf);
}

/*
 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 * @dreq - direct request possibly spanning multiple servers
 * @data - commit data to validate against previously seen verf
 *
 * returns result of comparison between @data->verf and the verf of
 * the server used by @data (DS or MDS)
 */
static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
					   struct nfs_commit_data *data)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
					 data->ds_commit_index);

	/* verifier not set so always fail */
	if (verfp->committed < 0)
		return 1;

	return nfs_direct_cmp_verf(verfp, &data->verf);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* we only support swap file calling nfs_direct_IO */
	if (!IS_SWAPFILE(inode))
		return 0;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		return nfs_file_direct_read(iocb, iter);
	return nfs_file_direct_write(iocb, iter);
}
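
/* Drop the page references taken by iov_iter_get_pages_alloc() */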
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}
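
/*
 * Note: a freshly allocated dreq carries two kref references; one is
 * dropped by the completion path in nfs_direct_complete(), the other
 * by the scheduling caller via nfs_direct_req_release().
 */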
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Finish a direct request: report the byte count (or error) to the
 * controlling iocb for async callers, wake up synchronous waiters,
 * and drop the completion path's dreq reference.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res, 0);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}
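
/*
 * Per-header completion for direct reads: account the bytes received,
 * dirty the user pages that now hold file data (iovec-backed reads
 * only), and release the page requests.
 */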
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If get_user_pages() fails, just bail out and move on to
 * the next chunk in the buffer.  The value of the first error return
 * code is preserved.
 */
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS client sets the S_NOATIME flag when the
 * "noatime" mount option is specified.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result = -EINVAL, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (iter_is_iovec(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}
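
/*
 * Move all outstanding commit requests (the MDS list and, for pNFS,
 * any data-server buckets) onto @list under the inode's commit mutex.
 */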
static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
#ifdef CONFIG_NFS_V4_1
	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
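
/*
 * The server's write verifier changed (or a commit could not be sent):
 * pull the unstable requests back off the commit lists and resend them
 * as stable (FLUSH_STABLE) writes.
 */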
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	dreq->count = 0;
	dreq->max_count = 0;
	list_for_each_entry(req, &reqs, wb_list)
		dreq->max_count += req->wb_bytes;
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		/* Bump the transmission count */
		req->wb_nio++;
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_move_request(req, &failed);
			spin_lock(&cinfo.inode->i_lock);
			dreq->flags = 0;
			if (desc.pg_error < 0)
				dreq->error = desc.pg_error;
			else
				dreq->error = -EIO;
			spin_unlock(&cinfo.inode->i_lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/*
			 * Despite the reboot, the write was successful,
			 * so reset wb_nio.
			 */
			req->wb_nio = 0;
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq);
}
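
/*
 * Called when a commit cannot be sent for @req: flag the dreq so its
 * writes get redriven, and put the request back on the commit list.
 */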
static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	spin_lock(&dreq->lock);
	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}
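
/*
 * Deferred (workqueue) completion for direct writes: issue a COMMIT
 * for unstable data, reschedule failed writes, or finish up and zap
 * the now-stale page cache for the written range.
 */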
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	queue_work(nfsiod_workqueue, &dreq->work);
}
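
/*
 * Per-header completion for direct writes: account the bytes written,
 * track the verifier state to decide whether a COMMIT is needed, and
 * either queue the requests for commit or release them.
 */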
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	bool request_commit = false;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (hdr->good_bytes != 0) {
		if (nfs_write_need_commit(hdr)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				request_commit = true;
			else if (dreq->flags == 0) {
				nfs_direct_set_hdr_verf(dreq, hdr);
				request_commit = true;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				request_commit = true;
				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
					dreq->flags =
						NFS_ODIRECT_RESCHED_WRITES;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {

		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (request_commit) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->ds_commit_idx);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.count;
	}
	spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If get_user_pages() fails, just bail out and move on to
 * the next chunk in the buffer.  The value of the first error return
 * code is preserved.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility out there per se.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result = -EINVAL, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, iov_iter_count(iter), (long long) iocb->ki_pos);

	result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_start_io_direct(inode);

	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}