/*
 * linux/fs/nfs/direct.c
 *
 * Direct (O_DIRECT) I/O for the NFS client: user buffers are pinned
 * and NFS READ/WRITE/COMMIT requests are issued against them directly,
 * bypassing the page cache.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Commit info */
	struct pnfs_ds_commit_info ds_cinfo;	/* Commit info for ds */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

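/*
 * Each in-flight RPC holds a reference on the dreq via io_count, and
 * the submitter holds one as well.  When put_dreq() drops the count to
 * zero, all outstanding I/O has completed and the caller must finish
 * off the request.
 */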
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

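/* Drop the page references taken by get_user_pages() */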
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

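/*
 * Set up a commit_info so that the generic commit paths operate on the
 * dreq's private commit lists and lock instead of the inode's.
 */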
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

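/*
 * Per-RPC read completion.  Accumulate good_bytes into the dreq, zero
 * out the tail of any page that lies beyond a short read at EOF, mark
 * the user pages dirty (we wrote the read data into them), and release
 * each request.  The final put_dreq() finishes off the whole dreq.
 */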
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			if (bytes > hdr->good_bytes)
				zero_user(page, 0, PAGE_SIZE);
			else if (hdr->good_bytes - bytes < PAGE_SIZE)
				zero_user_segment(page,
					hdr->good_bytes & ~PAGE_MASK,
					PAGE_SIZE);
		}
		if (!PageCompound(page)) {
			if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
				if (bytes < hdr->good_bytes)
					set_page_dirty(page);
			} else
				set_page_dirty(page);
		}
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

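/* Each pgio header that is generated pins the dreq until its completion runs */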
static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, pin the user pages and
 * dispatch an NFS READ operation.  If get_user_pages() fails partway,
 * stop sending reads and return the number of bytes already scheduled;
 * the completion side accounts the bytes that actually complete.  If
 * nothing was scheduled at all, return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;
		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_pages now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

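/*
 * Feed each iovec segment through a single pageio descriptor so that
 * adjacent requests can be coalesced.  A short segment means
 * get_user_pages() faulted, so stop scheduling early.
 */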
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_read(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
	NFS_I(inode)->read_io += result;
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

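/*
 * A direct write bypassed the page cache, so any cached data covering
 * the written range is now stale; zap the mapping before signalling
 * that the direct I/O is done.
 */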
static void nfs_inode_dio_write_done(struct inode *inode)
{
	nfs_zap_mapping(inode, inode->i_mapping);
	inode_dio_done(inode);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
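/*
 * An unstable write failed commit verification, so pull every request
 * off the dreq's commit lists and resend it as a FLUSH_STABLE write.
 */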
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

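/*
 * COMMIT RPC completion.  If the commit failed, or the server's write
 * verifier changed since the unstable writes were sent, the data may
 * not be on stable storage: flag the dreq so the writes are
 * rescheduled.  Otherwise the committed requests can be released.
 */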
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* nothing to do: direct I/O keeps no page cache state to clean up */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

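/*
 * Gather the dreq's outstanding unstable requests and send the COMMIT
 * RPC(s); if scheduling the commit fails, fall back to resending the
 * writes as stable.
 */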
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

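/*
 * Workqueue handler that finishes a direct write: issue a COMMIT for
 * unstable data, reschedule the writes if verification failed, or
 * declare the whole direct write done.
 */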
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
	case NFS_ODIRECT_DO_COMMIT:
		nfs_direct_commit_schedule(dreq);
		break;
	case NFS_ODIRECT_RESCHED_WRITES:
		nfs_direct_write_reschedule(dreq);
		break;
	default:
		nfs_inode_dio_write_done(dreq->inode);
		nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work);
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_inode_dio_write_done(inode);
	nfs_direct_complete(dreq);
}
#endif

/*
 * For each wsize'd chunk of the user's buffer, pin the user pages and
 * dispatch an NFS WRITE operation.  Unlike the read side, the pages
 * are not dirtied afterwards, since data flows out of them; each
 * request stays locked until its completion or commit releases it.
 * As on the read side, a get_user_pages() failure stops scheduling,
 * and the return value is the number of bytes scheduled so far, or an
 * error if nothing was scheduled.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 0, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_pages now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

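/*
 * Per-RPC write completion.  Record errors and good bytes, then decide
 * each request's fate from the server's reply: stable replies release
 * the request immediately, unstable replies put it on the commit list
 * (remembering the write verifier), and a verifier mismatch between
 * unstable replies forces the writes to be rescheduled.
 */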
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, hdr->verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

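/*
 * Schedule all segments of the write through one pageio descriptor,
 * holding the inode's i_dio_count elevated until the I/O completes.
 */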
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);
	NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

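/*
 * Allocate a dreq for this write, schedule the I/O, and for
 * synchronous requests wait for everything (including any COMMIT)
 * to finish.
 */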
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}