/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
9#include "fuse_i.h"
10
11#include <linux/pagemap.h>
12#include <linux/slab.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/module.h>
16#include <linux/compat.h>
17#include <linux/swap.h>
18#include <linux/aio.h>
19#include <linux/falloc.h>
20
21static const struct file_operations fuse_direct_io_file_operations;
22
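/*
 * Send FUSE_OPEN or FUSE_OPENDIR to the userspace server and collect
 * the reply (file handle and open flags) in *outargp.
 */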
23static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
24 int opcode, struct fuse_open_out *outargp)
25{
26 struct fuse_open_in inarg;
27 struct fuse_req *req;
28 int err;
29
30 req = fuse_get_req_nopages(fc);
31 if (IS_ERR(req))
32 return PTR_ERR(req);
33
34 memset(&inarg, 0, sizeof(inarg));
35 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
36 if (!fc->atomic_o_trunc)
37 inarg.flags &= ~O_TRUNC;
38 req->in.h.opcode = opcode;
39 req->in.h.nodeid = nodeid;
40 req->in.numargs = 1;
41 req->in.args[0].size = sizeof(inarg);
42 req->in.args[0].value = &inarg;
43 req->out.numargs = 1;
44 req->out.args[0].size = sizeof(*outargp);
45 req->out.args[0].value = outargp;
46 fuse_request_send(fc, req);
47 err = req->out.h.error;
48 fuse_put_request(fc, req);
49
50 return err;
51}
52
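/*
 * Allocate a fuse_file together with its reserved request (used later
 * for RELEASE) and assign it a unique kernel handle (kh).
 */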
53struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
54{
55 struct fuse_file *ff;
56
57 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
58 if (unlikely(!ff))
59 return NULL;
60
61 ff->fc = fc;
62 ff->reserved_req = fuse_request_alloc(0);
63 if (unlikely(!ff->reserved_req)) {
64 kfree(ff);
65 return NULL;
66 }
67
68 INIT_LIST_HEAD(&ff->write_entry);
69 atomic_set(&ff->count, 0);
70 RB_CLEAR_NODE(&ff->polled_node);
71 init_waitqueue_head(&ff->poll_wait);
72
73 spin_lock(&fc->lock);
74 ff->kh = ++fc->khctr;
75 spin_unlock(&fc->lock);
76
77 return ff;
78}
79
80void fuse_file_free(struct fuse_file *ff)
81{
82 fuse_request_free(ff->reserved_req);
83 kfree(ff);
84}
85
86struct fuse_file *fuse_file_get(struct fuse_file *ff)
87{
88 atomic_inc(&ff->count);
89 return ff;
90}
91
92static void fuse_release_async(struct work_struct *work)
93{
94 struct fuse_req *req;
95 struct fuse_conn *fc;
96 struct path path;
97
98 req = container_of(work, struct fuse_req, misc.release.work);
99 path = req->misc.release.path;
100 fc = get_fuse_conn(path.dentry->d_inode);
101
102 fuse_put_request(fc, req);
103 path_put(&path);
104}
105
106static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
107{
108 if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, dropping the path here could
		 * release the super block and send the DESTROY request.
		 * With a single threaded server that would deadlock, so
		 * do the path_put() from a workqueue instead.
		 */
117 atomic_inc(&req->count);
118 INIT_WORK(&req->misc.release.work, fuse_release_async);
119 schedule_work(&req->misc.release.work);
120 } else {
121 path_put(&req->misc.release.path);
122 }
123}
124
125static void fuse_file_put(struct fuse_file *ff, bool sync)
126{
127 if (atomic_dec_and_test(&ff->count)) {
128 struct fuse_req *req = ff->reserved_req;
129
130 if (sync) {
131 req->background = 0;
132 fuse_request_send(ff->fc, req);
133 path_put(&req->misc.release.path);
134 fuse_put_request(ff->fc, req);
135 } else {
136 req->end = fuse_release_end;
137 req->background = 1;
138 fuse_request_send_background(ff->fc, req);
139 }
140 kfree(ff);
141 }
142}
143
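/*
 * Open a file or directory: allocate the fuse_file, send the OPEN
 * request and stash the resulting handle in file->private_data.
 */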
144int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
145 bool isdir)
146{
147 struct fuse_open_out outarg;
148 struct fuse_file *ff;
149 int err;
150 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
151
152 ff = fuse_file_alloc(fc);
153 if (!ff)
154 return -ENOMEM;
155
156 err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
157 if (err) {
158 fuse_file_free(ff);
159 return err;
160 }
161
162 if (isdir)
163 outarg.open_flags &= ~FOPEN_DIRECT_IO;
164
165 ff->fh = outarg.fh;
166 ff->nodeid = nodeid;
167 ff->open_flags = outarg.open_flags;
168 file->private_data = fuse_file_get(ff);
169
170 return 0;
171}
172EXPORT_SYMBOL_GPL(fuse_do_open);
173
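/*
 * Apply the open flags returned by the server: switch to direct I/O
 * file operations, drop cached pages unless FOPEN_KEEP_CACHE is set,
 * mark the file nonseekable, and handle atomic O_TRUNC.
 */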
174void fuse_finish_open(struct inode *inode, struct file *file)
175{
176 struct fuse_file *ff = file->private_data;
177 struct fuse_conn *fc = get_fuse_conn(inode);
178
179 if (ff->open_flags & FOPEN_DIRECT_IO)
180 file->f_op = &fuse_direct_io_file_operations;
181 if (!(ff->open_flags & FOPEN_KEEP_CACHE))
182 invalidate_inode_pages2(inode->i_mapping);
183 if (ff->open_flags & FOPEN_NONSEEKABLE)
184 nonseekable_open(inode, file);
185 if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
186 struct fuse_inode *fi = get_fuse_inode(inode);
187
188 spin_lock(&fc->lock);
189 fi->attr_version = ++fc->attr_version;
190 i_size_write(inode, 0);
191 spin_unlock(&fc->lock);
192 fuse_invalidate_attr(inode);
193 }
194}
195
196int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
197{
198 struct fuse_conn *fc = get_fuse_conn(inode);
199 int err;
200
201 err = generic_file_open(inode, file);
202 if (err)
203 return err;
204
205 err = fuse_do_open(fc, get_node_id(inode), file, isdir);
206 if (err)
207 return err;
208
209 fuse_finish_open(inode, file);
210
211 return 0;
212}
213
214static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
215{
216 struct fuse_conn *fc = ff->fc;
217 struct fuse_req *req = ff->reserved_req;
218 struct fuse_release_in *inarg = &req->misc.release.in;
219
220 spin_lock(&fc->lock);
221 list_del(&ff->write_entry);
222 if (!RB_EMPTY_NODE(&ff->polled_node))
223 rb_erase(&ff->polled_node, &fc->polled_files);
224 spin_unlock(&fc->lock);
225
226 wake_up_interruptible_all(&ff->poll_wait);
227
228 inarg->fh = ff->fh;
229 inarg->flags = flags;
230 req->in.h.opcode = opcode;
231 req->in.h.nodeid = ff->nodeid;
232 req->in.numargs = 1;
233 req->in.args[0].size = sizeof(struct fuse_release_in);
234 req->in.args[0].value = inarg;
235}
236
237void fuse_release_common(struct file *file, int opcode)
238{
239 struct fuse_file *ff;
240 struct fuse_req *req;
241
242 ff = file->private_data;
243 if (unlikely(!ff))
244 return;
245
246 req = ff->reserved_req;
247 fuse_prepare_release(ff, file->f_flags, opcode);
248
249 if (ff->flock) {
250 struct fuse_release_in *inarg = &req->misc.release.in;
251 inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
252 inarg->lock_owner = fuse_lock_owner_id(ff->fc,
253 (fl_owner_t) file);
254 }
255
256 path_get(&file->f_path);
257 req->misc.release.path = file->f_path;
258
	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
268 fuse_file_put(ff, ff->fc->destroy_req != NULL);
269}
270
271static int fuse_open(struct inode *inode, struct file *file)
272{
273 return fuse_open_common(inode, file, false);
274}
275
276static int fuse_release(struct inode *inode, struct file *file)
277{
278 fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
281 return 0;
282}
283
284void fuse_sync_release(struct fuse_file *ff, int flags)
285{
286 WARN_ON(atomic_read(&ff->count) > 1);
287 fuse_prepare_release(ff, flags, FUSE_RELEASE);
288 ff->reserved_req->force = 1;
289 ff->reserved_req->background = 0;
290 fuse_request_send(ff->fc, ff->reserved_req);
291 fuse_put_request(ff->fc, ff->reserved_req);
292 kfree(ff);
293}
294EXPORT_SYMBOL_GPL(fuse_sync_release);
295
/*
 * Scramble the lock owner ID with the connection's scramble key (a
 * TEA-like hash) so that kernel pointer values are not exposed to the
 * userspace filesystem.
 */
300u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
301{
302 u32 *k = fc->scramble_key;
303 u64 v = (unsigned long) id;
304 u32 v0 = v;
305 u32 v1 = v >> 32;
306 u32 sum = 0;
307 int i;
308
309 for (i = 0; i < 32; i++) {
310 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
311 sum += 0x9E3779B9;
312 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
313 }
314
315 return (u64) v0 + ((u64) v1 << 32);
316}
317
/*
 * Check if a page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
324static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
325{
326 struct fuse_conn *fc = get_fuse_conn(inode);
327 struct fuse_inode *fi = get_fuse_inode(inode);
328 struct fuse_req *req;
329 bool found = false;
330
331 spin_lock(&fc->lock);
332 list_for_each_entry(req, &fi->writepages, writepages_entry) {
333 pgoff_t curr_index;
334
335 BUG_ON(req->inode != inode);
336 curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
337 if (curr_index == index) {
338 found = true;
339 break;
340 }
341 }
342 spin_unlock(&fc->lock);
343
344 return found;
345}
346
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means of tracking writebacks.
 */
353static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
354{
355 struct fuse_inode *fi = get_fuse_inode(inode);
356
357 wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
358 return 0;
359}
360
361static int fuse_flush(struct file *file, fl_owner_t id)
362{
363 struct inode *inode = file_inode(file);
364 struct fuse_conn *fc = get_fuse_conn(inode);
365 struct fuse_file *ff = file->private_data;
366 struct fuse_req *req;
367 struct fuse_flush_in inarg;
368 int err;
369
370 if (is_bad_inode(inode))
371 return -EIO;
372
373 if (fc->no_flush)
374 return 0;
375
376 req = fuse_get_req_nofail_nopages(fc, file);
377 memset(&inarg, 0, sizeof(inarg));
378 inarg.fh = ff->fh;
379 inarg.lock_owner = fuse_lock_owner_id(fc, id);
380 req->in.h.opcode = FUSE_FLUSH;
381 req->in.h.nodeid = get_node_id(inode);
382 req->in.numargs = 1;
383 req->in.args[0].size = sizeof(inarg);
384 req->in.args[0].value = &inarg;
385 req->force = 1;
386 fuse_request_send(fc, req);
387 err = req->out.h.error;
388 fuse_put_request(fc, req);
389 if (err == -ENOSYS) {
390 fc->no_flush = 1;
391 err = 0;
392 }
393 return err;
394}
395
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
405static void fuse_sync_writes(struct inode *inode)
406{
407 fuse_set_nowrite(inode);
408 fuse_release_nowrite(inode);
409}
410
411int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
412 int datasync, int isdir)
413{
414 struct inode *inode = file->f_mapping->host;
415 struct fuse_conn *fc = get_fuse_conn(inode);
416 struct fuse_file *ff = file->private_data;
417 struct fuse_req *req;
418 struct fuse_fsync_in inarg;
419 int err;
420
421 if (is_bad_inode(inode))
422 return -EIO;
423
424 err = filemap_write_and_wait_range(inode->i_mapping, start, end);
425 if (err)
426 return err;
427
428 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
429 return 0;
430
431 mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
438 err = write_inode_now(inode, 0);
439 if (err)
440 goto out;
441
442 fuse_sync_writes(inode);
443
444 req = fuse_get_req_nopages(fc);
445 if (IS_ERR(req)) {
446 err = PTR_ERR(req);
447 goto out;
448 }
449
450 memset(&inarg, 0, sizeof(inarg));
451 inarg.fh = ff->fh;
452 inarg.fsync_flags = datasync ? 1 : 0;
453 req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
454 req->in.h.nodeid = get_node_id(inode);
455 req->in.numargs = 1;
456 req->in.args[0].size = sizeof(inarg);
457 req->in.args[0].value = &inarg;
458 fuse_request_send(fc, req);
459 err = req->out.h.error;
460 fuse_put_request(fc, req);
461 if (err == -ENOSYS) {
462 if (isdir)
463 fc->no_fsyncdir = 1;
464 else
465 fc->no_fsync = 1;
466 err = 0;
467 }
468out:
469 mutex_unlock(&inode->i_mutex);
470 return err;
471}
472
473static int fuse_fsync(struct file *file, loff_t start, loff_t end,
474 int datasync)
475{
476 return fuse_fsync_common(file, start, end, datasync, 0);
477}
478
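/*
 * Fill in a READ (or READDIR) request: a struct fuse_read_in input
 * argument and a variable-sized output argument of at most @count bytes.
 */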
479void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
480 size_t count, int opcode)
481{
482 struct fuse_read_in *inarg = &req->misc.read.in;
483 struct fuse_file *ff = file->private_data;
484
485 inarg->fh = ff->fh;
486 inarg->offset = pos;
487 inarg->size = count;
488 inarg->flags = file->f_flags;
489 req->in.h.opcode = opcode;
490 req->in.h.nodeid = ff->nodeid;
491 req->in.numargs = 1;
492 req->in.args[0].size = sizeof(struct fuse_read_in);
493 req->in.args[0].value = inarg;
494 req->out.argvar = 1;
495 req->out.numargs = 1;
496 req->out.args[0].size = count;
497}
498
499static void fuse_release_user_pages(struct fuse_req *req, int write)
500{
501 unsigned i;
502
503 for (i = 0; i < req->num_pages; i++) {
504 struct page *page = req->pages[i];
505 if (write)
506 set_page_dirty_lock(page);
507 put_page(page);
508 }
509}
510
/*
 * Bookkeeping for asynchronous (AIO) direct I/O.
 *
 * A single user request may be split into several fuse requests, all
 * submitted asynchronously.  Each completion calls fuse_aio_complete()
 * with 'pos' set to the end position of a short transfer, or -1 if the
 * request completed in full.  The smallest non-negative 'pos' seen is
 * remembered in io->bytes and marks the first byte that was not
 * transferred.
 *
 * When the last request completes (io->reqs drops to zero), the whole
 * iocb is completed with either the accumulated error, the short byte
 * count, or the full size of the I/O.
 */
527static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
528{
529 int left;
530
531 spin_lock(&io->lock);
532 if (err)
533 io->err = io->err ? : err;
534 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
535 io->bytes = pos;
536
537 left = --io->reqs;
538 spin_unlock(&io->lock);
539
540 if (!left) {
541 long res;
542
543 if (io->err)
544 res = io->err;
545 else if (io->bytes >= 0 && io->write)
546 res = -EIO;
547 else {
548 res = io->bytes < 0 ? io->size : io->bytes;
549
550 if (!is_sync_kiocb(io->iocb)) {
551 struct inode *inode = file_inode(io->iocb->ki_filp);
552 struct fuse_conn *fc = get_fuse_conn(inode);
553 struct fuse_inode *fi = get_fuse_inode(inode);
554
555 spin_lock(&fc->lock);
556 fi->attr_version = ++fc->attr_version;
557 spin_unlock(&fc->lock);
558 }
559 }
560
561 aio_complete(io->iocb, res, 0);
562 kfree(io);
563 }
564}
565
566static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
567{
568 struct fuse_io_priv *io = req->io;
569 ssize_t pos = -1;
570
571 fuse_release_user_pages(req, !io->write);
572
573 if (io->write) {
574 if (req->misc.write.in.size != req->misc.write.out.size)
575 pos = req->misc.write.in.offset - io->offset +
576 req->misc.write.out.size;
577 } else {
578 if (req->misc.read.in.size != req->out.args[0].size)
579 pos = req->misc.read.in.offset - io->offset +
580 req->out.args[0].size;
581 }
582
583 fuse_aio_complete(io, req->out.h.error, pos);
584}
585
586static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
587 size_t num_bytes, struct fuse_io_priv *io)
588{
589 spin_lock(&io->lock);
590 io->size += num_bytes;
591 io->reqs++;
592 spin_unlock(&io->lock);
593
594 req->io = io;
595 req->end = fuse_aio_complete_req;
596
597 __fuse_get_request(req);
598 fuse_request_send_background(fc, req);
599
600 return num_bytes;
601}
602
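/*
 * Send a READ request for @count bytes at @pos.  For async I/O the
 * request is queued in the background; otherwise the call waits for
 * the reply and returns the number of bytes the server produced.
 */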
603static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
604 loff_t pos, size_t count, fl_owner_t owner)
605{
606 struct file *file = io->file;
607 struct fuse_file *ff = file->private_data;
608 struct fuse_conn *fc = ff->fc;
609
610 fuse_read_fill(req, file, pos, count, FUSE_READ);
611 if (owner != NULL) {
612 struct fuse_read_in *inarg = &req->misc.read.in;
613
614 inarg->read_flags |= FUSE_READ_LOCKOWNER;
615 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
616 }
617
618 if (io->async)
619 return fuse_async_req_send(fc, req, count, io);
620
621 fuse_request_send(fc, req);
622 return req->out.args[0].size;
623}
624
625static void fuse_read_update_size(struct inode *inode, loff_t size,
626 u64 attr_ver)
627{
628 struct fuse_conn *fc = get_fuse_conn(inode);
629 struct fuse_inode *fi = get_fuse_inode(inode);
630
631 spin_lock(&fc->lock);
632 if (attr_ver == fi->attr_version && size < inode->i_size &&
633 !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
634 fi->attr_version = ++fc->attr_version;
635 i_size_write(inode, size);
636 }
637 spin_unlock(&fc->lock);
638}
639
640static int fuse_readpage(struct file *file, struct page *page)
641{
642 struct fuse_io_priv io = { .async = 0, .file = file };
643 struct inode *inode = page->mapping->host;
644 struct fuse_conn *fc = get_fuse_conn(inode);
645 struct fuse_req *req;
646 size_t num_read;
647 loff_t pos = page_offset(page);
648 size_t count = PAGE_CACHE_SIZE;
649 u64 attr_ver;
650 int err;
651
652 err = -EIO;
653 if (is_bad_inode(inode))
654 goto out;
655
	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
661 fuse_wait_on_page_writeback(inode, page->index);
662
663 req = fuse_get_req(fc, 1);
664 err = PTR_ERR(req);
665 if (IS_ERR(req))
666 goto out;
667
668 attr_ver = fuse_get_attr_version(fc);
669
670 req->out.page_zeroing = 1;
671 req->out.argpages = 1;
672 req->num_pages = 1;
673 req->pages[0] = page;
674 req->page_descs[0].length = count;
675 num_read = fuse_send_read(req, &io, pos, count, NULL);
676 err = req->out.h.error;
677 fuse_put_request(fc, req);
678
679 if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
683 if (num_read < count)
684 fuse_read_update_size(inode, pos + num_read, attr_ver);
685
686 SetPageUptodate(page);
687 }
688
689 fuse_invalidate_attr(inode);
690 out:
691 unlock_page(page);
692 return err;
693}
694
695static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
696{
697 int i;
698 size_t count = req->misc.read.in.size;
699 size_t num_read = req->out.args[0].size;
700 struct address_space *mapping = NULL;
701
702 for (i = 0; mapping == NULL && i < req->num_pages; i++)
703 mapping = req->pages[i]->mapping;
704
705 if (mapping) {
706 struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
711 if (!req->out.h.error && num_read < count) {
712 loff_t pos;
713
714 pos = page_offset(req->pages[0]) + num_read;
715 fuse_read_update_size(inode, pos,
716 req->misc.read.attr_ver);
717 }
718 fuse_invalidate_attr(inode);
719 }
720
721 for (i = 0; i < req->num_pages; i++) {
722 struct page *page = req->pages[i];
723 if (!req->out.h.error)
724 SetPageUptodate(page);
725 else
726 SetPageError(page);
727 unlock_page(page);
728 page_cache_release(page);
729 }
730 if (req->ff)
731 fuse_file_put(req->ff, false);
732}
733
734static void fuse_send_readpages(struct fuse_req *req, struct file *file)
735{
736 struct fuse_file *ff = file->private_data;
737 struct fuse_conn *fc = ff->fc;
738 loff_t pos = page_offset(req->pages[0]);
739 size_t count = req->num_pages << PAGE_CACHE_SHIFT;
740
741 req->out.argpages = 1;
742 req->out.page_zeroing = 1;
743 req->out.page_replace = 1;
744 fuse_read_fill(req, file, pos, count, FUSE_READ);
745 req->misc.read.attr_ver = fuse_get_attr_version(fc);
746 if (fc->async_read) {
747 req->ff = fuse_file_get(ff);
748 req->end = fuse_readpages_end;
749 fuse_request_send_background(fc, req);
750 } else {
751 fuse_request_send(fc, req);
752 fuse_readpages_end(fc, req);
753 fuse_put_request(fc, req);
754 }
755}
756
757struct fuse_fill_data {
758 struct fuse_req *req;
759 struct file *file;
760 struct inode *inode;
761 unsigned nr_pages;
762};
763
764static int fuse_readpages_fill(void *_data, struct page *page)
765{
766 struct fuse_fill_data *data = _data;
767 struct fuse_req *req = data->req;
768 struct inode *inode = data->inode;
769 struct fuse_conn *fc = get_fuse_conn(inode);
770
771 fuse_wait_on_page_writeback(inode, page->index);
772
773 if (req->num_pages &&
774 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
775 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
776 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
777 int nr_alloc = min_t(unsigned, data->nr_pages,
778 FUSE_MAX_PAGES_PER_REQ);
779 fuse_send_readpages(req, data->file);
780 if (fc->async_read)
781 req = fuse_get_req_for_background(fc, nr_alloc);
782 else
783 req = fuse_get_req(fc, nr_alloc);
784
785 data->req = req;
786 if (IS_ERR(req)) {
787 unlock_page(page);
788 return PTR_ERR(req);
789 }
790 }
791
792 if (WARN_ON(req->num_pages >= req->max_pages)) {
793 fuse_put_request(fc, req);
794 return -EIO;
795 }
796
797 page_cache_get(page);
798 req->pages[req->num_pages] = page;
799 req->page_descs[req->num_pages].length = PAGE_SIZE;
800 req->num_pages++;
801 data->nr_pages--;
802 return 0;
803}
804
805static int fuse_readpages(struct file *file, struct address_space *mapping,
806 struct list_head *pages, unsigned nr_pages)
807{
808 struct inode *inode = mapping->host;
809 struct fuse_conn *fc = get_fuse_conn(inode);
810 struct fuse_fill_data data;
811 int err;
812 int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);
813
814 err = -EIO;
815 if (is_bad_inode(inode))
816 goto out;
817
818 data.file = file;
819 data.inode = inode;
820 if (fc->async_read)
821 data.req = fuse_get_req_for_background(fc, nr_alloc);
822 else
823 data.req = fuse_get_req(fc, nr_alloc);
824 data.nr_pages = nr_pages;
825 err = PTR_ERR(data.req);
826 if (IS_ERR(data.req))
827 goto out;
828
829 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
830 if (!err) {
831 if (data.req->num_pages)
832 fuse_send_readpages(data.req, file);
833 else
834 fuse_put_request(fc, data.req);
835 }
836out:
837 return err;
838}
839
840static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
841 unsigned long nr_segs, loff_t pos)
842{
843 struct inode *inode = iocb->ki_filp->f_mapping->host;
844 struct fuse_conn *fc = get_fuse_conn(inode);
845
	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
851 if (fc->auto_inval_data ||
852 (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
853 int err;
854 err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
855 if (err)
856 return err;
857 }
858
859 return generic_file_aio_read(iocb, iov, nr_segs, pos);
860}
861
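/*
 * Fill in a WRITE request: struct fuse_write_in plus @count bytes of
 * payload going out, struct fuse_write_out coming back from the server.
 */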
862static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
863 loff_t pos, size_t count)
864{
865 struct fuse_write_in *inarg = &req->misc.write.in;
866 struct fuse_write_out *outarg = &req->misc.write.out;
867
868 inarg->fh = ff->fh;
869 inarg->offset = pos;
870 inarg->size = count;
871 req->in.h.opcode = FUSE_WRITE;
872 req->in.h.nodeid = ff->nodeid;
873 req->in.numargs = 2;
874 if (ff->fc->minor < 9)
875 req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
876 else
877 req->in.args[0].size = sizeof(struct fuse_write_in);
878 req->in.args[0].value = inarg;
879 req->in.args[1].size = count;
880 req->out.numargs = 1;
881 req->out.args[0].size = sizeof(struct fuse_write_out);
882 req->out.args[0].value = outarg;
883}
884
885static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
886 loff_t pos, size_t count, fl_owner_t owner)
887{
888 struct file *file = io->file;
889 struct fuse_file *ff = file->private_data;
890 struct fuse_conn *fc = ff->fc;
891 struct fuse_write_in *inarg = &req->misc.write.in;
892
893 fuse_write_fill(req, ff, pos, count);
894 inarg->flags = file->f_flags;
895 if (owner != NULL) {
896 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
897 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
898 }
899
900 if (io->async)
901 return fuse_async_req_send(fc, req, count, io);
902
903 fuse_request_send(fc, req);
904 return req->misc.write.out.size;
905}
906
907void fuse_write_update_size(struct inode *inode, loff_t pos)
908{
909 struct fuse_conn *fc = get_fuse_conn(inode);
910 struct fuse_inode *fi = get_fuse_inode(inode);
911
912 spin_lock(&fc->lock);
913 fi->attr_version = ++fc->attr_version;
914 if (pos > inode->i_size)
915 i_size_write(inode, pos);
916 spin_unlock(&fc->lock);
917}
918
919static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
920 struct inode *inode, loff_t pos,
921 size_t count)
922{
923 size_t res;
924 unsigned offset;
925 unsigned i;
926 struct fuse_io_priv io = { .async = 0, .file = file };
927
928 for (i = 0; i < req->num_pages; i++)
929 fuse_wait_on_page_writeback(inode, req->pages[i]->index);
930
931 res = fuse_send_write(req, &io, pos, count, NULL);
932
933 offset = req->page_descs[0].offset;
934 count = res;
935 for (i = 0; i < req->num_pages; i++) {
936 struct page *page = req->pages[i];
937
938 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
939 SetPageUptodate(page);
940
941 if (count > PAGE_CACHE_SIZE - offset)
942 count -= PAGE_CACHE_SIZE - offset;
943 else
944 count = 0;
945 offset = 0;
946
947 unlock_page(page);
948 page_cache_release(page);
949 }
950
951 return res;
952}
953
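/*
 * Copy as much data as possible from the iov_iter into freshly grabbed
 * page-cache pages of one request, stopping at the request/page limits
 * or at fc->max_write.  Returns the number of bytes copied or an error.
 */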
954static ssize_t fuse_fill_write_pages(struct fuse_req *req,
955 struct address_space *mapping,
956 struct iov_iter *ii, loff_t pos)
957{
958 struct fuse_conn *fc = get_fuse_conn(mapping->host);
959 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
960 size_t count = 0;
961 int err;
962
963 req->in.argpages = 1;
964 req->page_descs[0].offset = offset;
965
966 do {
967 size_t tmp;
968 struct page *page;
969 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
970 size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
971 iov_iter_count(ii));
972
973 bytes = min_t(size_t, bytes, fc->max_write - count);
974
975 again:
976 err = -EFAULT;
977 if (iov_iter_fault_in_readable(ii, bytes))
978 break;
979
980 err = -ENOMEM;
981 page = grab_cache_page_write_begin(mapping, index, 0);
982 if (!page)
983 break;
984
985 if (mapping_writably_mapped(mapping))
986 flush_dcache_page(page);
987
988 pagefault_disable();
989 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
990 pagefault_enable();
991 flush_dcache_page(page);
992
993 mark_page_accessed(page);
994
995 if (!tmp) {
996 unlock_page(page);
997 page_cache_release(page);
998 bytes = min(bytes, iov_iter_single_seg_count(ii));
999 goto again;
1000 }
1001
1002 err = 0;
1003 req->pages[req->num_pages] = page;
1004 req->page_descs[req->num_pages].length = tmp;
1005 req->num_pages++;
1006
1007 iov_iter_advance(ii, tmp);
1008 count += tmp;
1009 pos += tmp;
1010 offset += tmp;
1011 if (offset == PAGE_CACHE_SIZE)
1012 offset = 0;
1013
1014 if (!fc->big_writes)
1015 break;
1016 } while (iov_iter_count(ii) && count < fc->max_write &&
1017 req->num_pages < req->max_pages && offset == 0);
1018
1019 return count > 0 ? count : err;
1020}
1021
1022static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
1023{
1024 return min_t(unsigned,
1025 ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
1026 (pos >> PAGE_CACHE_SHIFT) + 1,
1027 FUSE_MAX_PAGES_PER_REQ);
1028}
1029
1030static ssize_t fuse_perform_write(struct file *file,
1031 struct address_space *mapping,
1032 struct iov_iter *ii, loff_t pos)
1033{
1034 struct inode *inode = mapping->host;
1035 struct fuse_conn *fc = get_fuse_conn(inode);
1036 struct fuse_inode *fi = get_fuse_inode(inode);
1037 int err = 0;
1038 ssize_t res = 0;
1039
1040 if (is_bad_inode(inode))
1041 return -EIO;
1042
1043 if (inode->i_size < pos + iov_iter_count(ii))
1044 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1045
1046 do {
1047 struct fuse_req *req;
1048 ssize_t count;
1049 unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));
1050
1051 req = fuse_get_req(fc, nr_pages);
1052 if (IS_ERR(req)) {
1053 err = PTR_ERR(req);
1054 break;
1055 }
1056
1057 count = fuse_fill_write_pages(req, mapping, ii, pos);
1058 if (count <= 0) {
1059 err = count;
1060 } else {
1061 size_t num_written;
1062
1063 num_written = fuse_send_write_pages(req, file, inode,
1064 pos, count);
1065 err = req->out.h.error;
1066 if (!err) {
1067 res += num_written;
1068 pos += num_written;

				/* break out of the loop on short write */
1071 if (num_written != count)
1072 err = -EIO;
1073 }
1074 }
1075 fuse_put_request(fc, req);
1076 } while (!err && iov_iter_count(ii));
1077
1078 if (res > 0)
1079 fuse_write_update_size(inode, pos);
1080
1081 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1082 fuse_invalidate_attr(inode);
1083
1084 return res > 0 ? res : err;
1085}
1086
1087static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
1088 unsigned long nr_segs, loff_t pos)
1089{
1090 struct file *file = iocb->ki_filp;
1091 struct address_space *mapping = file->f_mapping;
1092 size_t count = 0;
1093 size_t ocount = 0;
1094 ssize_t written = 0;
1095 ssize_t written_buffered = 0;
1096 struct inode *inode = mapping->host;
1097 ssize_t err;
1098 struct iov_iter i;
1099 loff_t endbyte = 0;
1100
1101 WARN_ON(iocb->ki_pos != pos);
1102
1103 ocount = 0;
1104 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1105 if (err)
1106 return err;
1107
1108 count = ocount;
1109 mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
1112 current->backing_dev_info = mapping->backing_dev_info;
1113
1114 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1115 if (err)
1116 goto out;
1117
1118 if (count == 0)
1119 goto out;
1120
1121 err = file_remove_suid(file);
1122 if (err)
1123 goto out;
1124
1125 err = file_update_time(file);
1126 if (err)
1127 goto out;
1128
1129 if (file->f_flags & O_DIRECT) {
1130 written = generic_file_direct_write(iocb, iov, &nr_segs,
1131 pos, &iocb->ki_pos,
1132 count, ocount);
1133 if (written < 0 || written == count)
1134 goto out;
1135
1136 pos += written;
1137 count -= written;
1138
1139 iov_iter_init(&i, iov, nr_segs, count, written);
1140 written_buffered = fuse_perform_write(file, mapping, &i, pos);
1141 if (written_buffered < 0) {
1142 err = written_buffered;
1143 goto out;
1144 }
1145 endbyte = pos + written_buffered - 1;
1146
1147 err = filemap_write_and_wait_range(file->f_mapping, pos,
1148 endbyte);
1149 if (err)
1150 goto out;
1151
1152 invalidate_mapping_pages(file->f_mapping,
1153 pos >> PAGE_CACHE_SHIFT,
1154 endbyte >> PAGE_CACHE_SHIFT);
1155
1156 written += written_buffered;
1157 iocb->ki_pos = pos + written_buffered;
1158 } else {
1159 iov_iter_init(&i, iov, nr_segs, count, 0);
1160 written = fuse_perform_write(file, mapping, &i, pos);
1161 if (written >= 0)
1162 iocb->ki_pos = pos + written;
1163 }
1164out:
1165 current->backing_dev_info = NULL;
1166 mutex_unlock(&inode->i_mutex);
1167
1168 return written ? written : err;
1169}
1170
1171static inline void fuse_page_descs_length_init(struct fuse_req *req,
1172 unsigned index, unsigned nr_pages)
1173{
1174 int i;
1175
1176 for (i = index; i < index + nr_pages; i++)
1177 req->page_descs[i].length = PAGE_SIZE -
1178 req->page_descs[i].offset;
1179}
1180
1181static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
1182{
1183 return (unsigned long)ii->iov->iov_base + ii->iov_offset;
1184}
1185
1186static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
1187 size_t max_size)
1188{
1189 return min(iov_iter_single_seg_count(ii), max_size);
1190}
1191
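/*
 * Pin the user pages backing the current iovec segments into the
 * request (or pass the kernel address straight through for KERNEL_DS),
 * updating *nbytesp to the number of bytes actually mapped.
 */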
1192static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
1193 size_t *nbytesp, int write)
1194{
1195 size_t nbytes = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
1198 if (segment_eq(get_fs(), KERNEL_DS)) {
1199 unsigned long user_addr = fuse_get_user_addr(ii);
1200 size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
1201
1202 if (write)
1203 req->in.args[1].value = (void *) user_addr;
1204 else
1205 req->out.args[0].value = (void *) user_addr;
1206
1207 iov_iter_advance(ii, frag_size);
1208 *nbytesp = frag_size;
1209 return 0;
1210 }
1211
1212 while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
1213 unsigned npages;
1214 unsigned long user_addr = fuse_get_user_addr(ii);
1215 unsigned offset = user_addr & ~PAGE_MASK;
1216 size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
1217 int ret;
1218
1219 unsigned n = req->max_pages - req->num_pages;
1220 frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);
1221
1222 npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1223 npages = clamp(npages, 1U, n);
1224
1225 ret = get_user_pages_fast(user_addr, npages, !write,
1226 &req->pages[req->num_pages]);
1227 if (ret < 0)
1228 return ret;
1229
1230 npages = ret;
1231 frag_size = min_t(size_t, frag_size,
1232 (npages << PAGE_SHIFT) - offset);
1233 iov_iter_advance(ii, frag_size);
1234
1235 req->page_descs[req->num_pages].offset = offset;
1236 fuse_page_descs_length_init(req, req->num_pages, npages);
1237
1238 req->num_pages += npages;
1239 req->page_descs[req->num_pages - 1].length -=
1240 (npages << PAGE_SHIFT) - offset - frag_size;
1241
1242 nbytes += frag_size;
1243 }
1244
1245 if (write)
1246 req->in.argpages = 1;
1247 else
1248 req->out.argpages = 1;
1249
1250 *nbytesp = nbytes;
1251
1252 return 0;
1253}
1254
1255static inline int fuse_iter_npages(const struct iov_iter *ii_p)
1256{
1257 struct iov_iter ii = *ii_p;
1258 int npages = 0;
1259
1260 while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
1261 unsigned long user_addr = fuse_get_user_addr(&ii);
1262 unsigned offset = user_addr & ~PAGE_MASK;
1263 size_t frag_size = iov_iter_single_seg_count(&ii);
1264
1265 npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1266 iov_iter_advance(&ii, frag_size);
1267 }
1268
1269 return min(npages, FUSE_MAX_PAGES_PER_REQ);
1270}
1271
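/*
 * Core of FUSE direct I/O: split the user buffer into requests of at
 * most max_read/max_write bytes, send them (synchronously, or queued in
 * the background for async I/O) and accumulate the result.
 */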
1272ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
1273 unsigned long nr_segs, size_t count, loff_t *ppos,
1274 int write)
1275{
1276 struct file *file = io->file;
1277 struct fuse_file *ff = file->private_data;
1278 struct fuse_conn *fc = ff->fc;
1279 size_t nmax = write ? fc->max_write : fc->max_read;
1280 loff_t pos = *ppos;
1281 ssize_t res = 0;
1282 struct fuse_req *req;
1283 struct iov_iter ii;
1284
1285 iov_iter_init(&ii, iov, nr_segs, count, 0);
1286
1287 if (io->async)
1288 req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
1289 else
1290 req = fuse_get_req(fc, fuse_iter_npages(&ii));
1291 if (IS_ERR(req))
1292 return PTR_ERR(req);
1293
1294 while (count) {
1295 size_t nres;
1296 fl_owner_t owner = current->files;
1297 size_t nbytes = min(count, nmax);
1298 int err = fuse_get_user_pages(req, &ii, &nbytes, write);
1299 if (err) {
1300 res = err;
1301 break;
1302 }
1303
1304 if (write)
1305 nres = fuse_send_write(req, io, pos, nbytes, owner);
1306 else
1307 nres = fuse_send_read(req, io, pos, nbytes, owner);
1308
1309 if (!io->async)
1310 fuse_release_user_pages(req, !write);
1311 if (req->out.h.error) {
1312 if (!res)
1313 res = req->out.h.error;
1314 break;
1315 } else if (nres > nbytes) {
1316 res = -EIO;
1317 break;
1318 }
1319 count -= nres;
1320 res += nres;
1321 pos += nres;
1322 if (nres != nbytes)
1323 break;
1324 if (count) {
1325 fuse_put_request(fc, req);
1326 if (io->async)
1327 req = fuse_get_req_for_background(fc,
1328 fuse_iter_npages(&ii));
1329 else
1330 req = fuse_get_req(fc, fuse_iter_npages(&ii));
1331 if (IS_ERR(req))
1332 break;
1333 }
1334 }
1335 if (!IS_ERR(req))
1336 fuse_put_request(fc, req);
1337 if (res > 0)
1338 *ppos = pos;
1339
1340 return res;
1341}
1342EXPORT_SYMBOL_GPL(fuse_direct_io);
1343
1344static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
1345 const struct iovec *iov,
1346 unsigned long nr_segs, loff_t *ppos,
1347 size_t count)
1348{
1349 ssize_t res;
1350 struct file *file = io->file;
1351 struct inode *inode = file_inode(file);
1352
1353 if (is_bad_inode(inode))
1354 return -EIO;
1355
1356 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
1357
1358 fuse_invalidate_attr(inode);
1359
1360 return res;
1361}
1362
1363static ssize_t fuse_direct_read(struct file *file, char __user *buf,
1364 size_t count, loff_t *ppos)
1365{
1366 struct fuse_io_priv io = { .async = 0, .file = file };
1367 struct iovec iov = { .iov_base = buf, .iov_len = count };
1368 return __fuse_direct_read(&io, &iov, 1, ppos, count);
1369}
1370
1371static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
1372 const struct iovec *iov,
1373 unsigned long nr_segs, loff_t *ppos)
1374{
1375 struct file *file = io->file;
1376 struct inode *inode = file_inode(file);
1377 size_t count = iov_length(iov, nr_segs);
1378 ssize_t res;
1379
1380 res = generic_write_checks(file, ppos, &count, 0);
1381 if (!res)
1382 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
1383
1384 fuse_invalidate_attr(inode);
1385
1386 return res;
1387}
1388
1389static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1390 size_t count, loff_t *ppos)
1391{
1392 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
1393 struct inode *inode = file_inode(file);
1394 ssize_t res;
1395 struct fuse_io_priv io = { .async = 0, .file = file };
1396
1397 if (is_bad_inode(inode))
1398 return -EIO;
1399
	/* Don't allow parallel writes to the same file */
1401 mutex_lock(&inode->i_mutex);
1402 res = __fuse_direct_write(&io, &iov, 1, ppos);
1403 if (res > 0)
1404 fuse_write_update_size(inode, *ppos);
1405 mutex_unlock(&inode->i_mutex);
1406
1407 return res;
1408}
1409
1410static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
1411{
1412 __free_page(req->pages[0]);
1413 fuse_file_put(req->ff, false);
1414}
1415
1416static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
1417{
1418 struct inode *inode = req->inode;
1419 struct fuse_inode *fi = get_fuse_inode(inode);
1420 struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
1421
1422 list_del(&req->writepages_entry);
1423 dec_bdi_stat(bdi, BDI_WRITEBACK);
1424 dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
1425 bdi_writeout_inc(bdi);
1426 wake_up(&fi->page_waitq);
1427}
1428
/* Called under fc->lock, may release and reacquire it */
1430static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
1431__releases(fc->lock)
1432__acquires(fc->lock)
1433{
1434 struct fuse_inode *fi = get_fuse_inode(req->inode);
1435 loff_t size = i_size_read(req->inode);
1436 struct fuse_write_in *inarg = &req->misc.write.in;
1437
1438 if (!fc->connected)
1439 goto out_free;
1440
1441 if (inarg->offset + PAGE_CACHE_SIZE <= size) {
1442 inarg->size = PAGE_CACHE_SIZE;
1443 } else if (inarg->offset < size) {
1444 inarg->size = size & (PAGE_CACHE_SIZE - 1);
1445 } else {
		/* Got truncated off completely */
1447 goto out_free;
1448 }
1449
1450 req->in.args[1].size = inarg->size;
1451 fi->writectr++;
1452 fuse_request_send_background_locked(fc, req);
1453 return;
1454
1455 out_free:
1456 fuse_writepage_finish(fc, req);
1457 spin_unlock(&fc->lock);
1458 fuse_writepage_free(fc, req);
1459 fuse_put_request(fc, req);
1460 spin_lock(&fc->lock);
1461}
1462
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
1469void fuse_flush_writepages(struct inode *inode)
1470__releases(fc->lock)
1471__acquires(fc->lock)
1472{
1473 struct fuse_conn *fc = get_fuse_conn(inode);
1474 struct fuse_inode *fi = get_fuse_inode(inode);
1475 struct fuse_req *req;
1476
1477 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
1478 req = list_entry(fi->queued_writes.next, struct fuse_req, list);
1479 list_del_init(&req->list);
1480 fuse_send_writepage(fc, req);
1481 }
1482}
1483
1484static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
1485{
1486 struct inode *inode = req->inode;
1487 struct fuse_inode *fi = get_fuse_inode(inode);
1488
1489 mapping_set_error(inode->i_mapping, req->out.h.error);
1490 spin_lock(&fc->lock);
1491 fi->writectr--;
1492 fuse_writepage_finish(fc, req);
1493 spin_unlock(&fc->lock);
1494 fuse_writepage_free(fc, req);
1495}
1496
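/*
 * Write back one locked page.  The page contents are copied to a
 * temporary page so the original can be released immediately; the
 * actual WRITE request is queued on fi->queued_writes and flushed by
 * fuse_flush_writepages().
 */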
1497static int fuse_writepage_locked(struct page *page)
1498{
1499 struct address_space *mapping = page->mapping;
1500 struct inode *inode = mapping->host;
1501 struct fuse_conn *fc = get_fuse_conn(inode);
1502 struct fuse_inode *fi = get_fuse_inode(inode);
1503 struct fuse_req *req;
1504 struct fuse_file *ff;
1505 struct page *tmp_page;
1506
1507 set_page_writeback(page);
1508
1509 req = fuse_request_alloc_nofs(1);
1510 if (!req)
1511 goto err;
1512
1513 req->background = 1;
1514 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1515 if (!tmp_page)
1516 goto err_free;
1517
1518 spin_lock(&fc->lock);
1519 BUG_ON(list_empty(&fi->write_files));
1520 ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
1521 req->ff = fuse_file_get(ff);
1522 spin_unlock(&fc->lock);
1523
1524 fuse_write_fill(req, ff, page_offset(page), 0);
1525
1526 copy_highpage(tmp_page, page);
1527 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
1528 req->in.argpages = 1;
1529 req->num_pages = 1;
1530 req->pages[0] = tmp_page;
1531 req->page_descs[0].offset = 0;
1532 req->page_descs[0].length = PAGE_SIZE;
1533 req->end = fuse_writepage_end;
1534 req->inode = inode;
1535
1536 inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
1537 inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
1538
1539 spin_lock(&fc->lock);
1540 list_add(&req->writepages_entry, &fi->writepages);
1541 list_add_tail(&req->list, &fi->queued_writes);
1542 fuse_flush_writepages(inode);
1543 spin_unlock(&fc->lock);
1544
1545 end_page_writeback(page);
1546
1547 return 0;
1548
1549err_free:
1550 fuse_request_free(req);
1551err:
1552 end_page_writeback(page);
1553 return -ENOMEM;
1554}
1555
1556static int fuse_writepage(struct page *page, struct writeback_control *wbc)
1557{
1558 int err;
1559
1560 err = fuse_writepage_locked(page);
1561 unlock_page(page);
1562
1563 return err;
1564}
1565
1566static int fuse_launder_page(struct page *page)
1567{
1568 int err = 0;
1569 if (clear_page_dirty_for_io(page)) {
1570 struct inode *inode = page->mapping->host;
1571 err = fuse_writepage_locked(page);
1572 if (!err)
1573 fuse_wait_on_page_writeback(inode, page->index);
1574 }
1575 return err;
1576}
1577
/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
1582static void fuse_vma_close(struct vm_area_struct *vma)
1583{
1584 filemap_write_and_wait(vma->vm_file->f_mapping);
1585}
1586
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 */
1602static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1603{
1604 struct page *page = vmf->page;
1605
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
1609 struct inode *inode = vma->vm_file->f_mapping->host;
1610
1611 fuse_wait_on_page_writeback(inode, page->index);
1612 return 0;
1613}
1614
1615static const struct vm_operations_struct fuse_file_vm_ops = {
1616 .close = fuse_vma_close,
1617 .fault = filemap_fault,
1618 .page_mkwrite = fuse_page_mkwrite,
1619 .remap_pages = generic_file_remap_pages,
1620};
1621
1622static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
1623{
1624 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
1625 struct inode *inode = file_inode(file);
1626 struct fuse_conn *fc = get_fuse_conn(inode);
1627 struct fuse_inode *fi = get_fuse_inode(inode);
1628 struct fuse_file *ff = file->private_data;
1629
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
1633 spin_lock(&fc->lock);
1634 if (list_empty(&ff->write_entry))
1635 list_add(&ff->write_entry, &fi->write_files);
1636 spin_unlock(&fc->lock);
1637 }
1638 file_accessed(file);
1639 vma->vm_ops = &fuse_file_vm_ops;
1640 return 0;
1641}
1642
1643static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
1644{
	/* Can't provide the coherency needed for MAP_SHARED */
1646 if (vma->vm_flags & VM_MAYSHARE)
1647 return -ENODEV;
1648
1649 invalidate_inode_pages2(file->f_mapping);
1650
1651 return generic_file_mmap(file, vma);
1652}
1653
1654static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
1655 struct file_lock *fl)
1656{
1657 switch (ffl->type) {
1658 case F_UNLCK:
1659 break;
1660
1661 case F_RDLCK:
1662 case F_WRLCK:
1663 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
1664 ffl->end < ffl->start)
1665 return -EIO;
1666
1667 fl->fl_start = ffl->start;
1668 fl->fl_end = ffl->end;
1669 fl->fl_pid = ffl->pid;
1670 break;
1671
1672 default:
1673 return -EIO;
1674 }
1675 fl->fl_type = ffl->type;
1676 return 0;
1677}
1678
1679static void fuse_lk_fill(struct fuse_req *req, struct file *file,
1680 const struct file_lock *fl, int opcode, pid_t pid,
1681 int flock)
1682{
1683 struct inode *inode = file_inode(file);
1684 struct fuse_conn *fc = get_fuse_conn(inode);
1685 struct fuse_file *ff = file->private_data;
1686 struct fuse_lk_in *arg = &req->misc.lk_in;
1687
1688 arg->fh = ff->fh;
1689 arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
1690 arg->lk.start = fl->fl_start;
1691 arg->lk.end = fl->fl_end;
1692 arg->lk.type = fl->fl_type;
1693 arg->lk.pid = pid;
1694 if (flock)
1695 arg->lk_flags |= FUSE_LK_FLOCK;
1696 req->in.h.opcode = opcode;
1697 req->in.h.nodeid = get_node_id(inode);
1698 req->in.numargs = 1;
1699 req->in.args[0].size = sizeof(*arg);
1700 req->in.args[0].value = arg;
1701}
1702
1703static int fuse_getlk(struct file *file, struct file_lock *fl)
1704{
1705 struct inode *inode = file_inode(file);
1706 struct fuse_conn *fc = get_fuse_conn(inode);
1707 struct fuse_req *req;
1708 struct fuse_lk_out outarg;
1709 int err;
1710
1711 req = fuse_get_req_nopages(fc);
1712 if (IS_ERR(req))
1713 return PTR_ERR(req);
1714
1715 fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
1716 req->out.numargs = 1;
1717 req->out.args[0].size = sizeof(outarg);
1718 req->out.args[0].value = &outarg;
1719 fuse_request_send(fc, req);
1720 err = req->out.h.error;
1721 fuse_put_request(fc, req);
1722 if (!err)
1723 err = convert_fuse_file_lock(&outarg.lk, fl);
1724
1725 return err;
1726}
1727
1728static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
1729{
1730 struct inode *inode = file_inode(file);
1731 struct fuse_conn *fc = get_fuse_conn(inode);
1732 struct fuse_req *req;
1733 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
1734 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
1735 int err;
1736
1737 if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
1739 return -ENOLCK;
1740 }
1741
	/* Unlock on close is handled by the flush method */
1743 if (fl->fl_flags & FL_CLOSE)
1744 return 0;
1745
1746 req = fuse_get_req_nopages(fc);
1747 if (IS_ERR(req))
1748 return PTR_ERR(req);
1749
1750 fuse_lk_fill(req, file, fl, opcode, pid, flock);
1751 fuse_request_send(fc, req);
1752 err = req->out.h.error;

	/* locking is restartable */
1754 if (err == -EINTR)
1755 err = -ERESTARTSYS;
1756 fuse_put_request(fc, req);
1757 return err;
1758}
1759
1760static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
1761{
1762 struct inode *inode = file_inode(file);
1763 struct fuse_conn *fc = get_fuse_conn(inode);
1764 int err;
1765
1766 if (cmd == F_CANCELLK) {
1767 err = 0;
1768 } else if (cmd == F_GETLK) {
1769 if (fc->no_lock) {
1770 posix_test_lock(file, fl);
1771 err = 0;
1772 } else
1773 err = fuse_getlk(file, fl);
1774 } else {
1775 if (fc->no_lock)
1776 err = posix_lock_file(file, fl, NULL);
1777 else
1778 err = fuse_setlk(file, fl, 0);
1779 }
1780 return err;
1781}
1782
1783static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
1784{
1785 struct inode *inode = file_inode(file);
1786 struct fuse_conn *fc = get_fuse_conn(inode);
1787 int err;
1788
1789 if (fc->no_flock) {
1790 err = flock_lock_file_wait(file, fl);
1791 } else {
1792 struct fuse_file *ff = file->private_data;
1793
		/* emulate flock with POSIX locks */
1795 fl->fl_owner = (fl_owner_t) file;
1796 ff->flock = true;
1797 err = fuse_setlk(file, fl, 1);
1798 }
1799
1800 return err;
1801}
1802
1803static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
1804{
1805 struct inode *inode = mapping->host;
1806 struct fuse_conn *fc = get_fuse_conn(inode);
1807 struct fuse_req *req;
1808 struct fuse_bmap_in inarg;
1809 struct fuse_bmap_out outarg;
1810 int err;
1811
1812 if (!inode->i_sb->s_bdev || fc->no_bmap)
1813 return 0;
1814
1815 req = fuse_get_req_nopages(fc);
1816 if (IS_ERR(req))
1817 return 0;
1818
1819 memset(&inarg, 0, sizeof(inarg));
1820 inarg.block = block;
1821 inarg.blocksize = inode->i_sb->s_blocksize;
1822 req->in.h.opcode = FUSE_BMAP;
1823 req->in.h.nodeid = get_node_id(inode);
1824 req->in.numargs = 1;
1825 req->in.args[0].size = sizeof(inarg);
1826 req->in.args[0].value = &inarg;
1827 req->out.numargs = 1;
1828 req->out.args[0].size = sizeof(outarg);
1829 req->out.args[0].value = &outarg;
1830 fuse_request_send(fc, req);
1831 err = req->out.h.error;
1832 fuse_put_request(fc, req);
1833 if (err == -ENOSYS)
1834 fc->no_bmap = 1;
1835
1836 return err ? 0 : outarg.block;
1837}
1838
1839static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
1840{
1841 loff_t retval;
1842 struct inode *inode = file_inode(file);
1843
	/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
1845 if (whence == SEEK_CUR || whence == SEEK_SET)
1846 return generic_file_llseek(file, offset, whence);
1847
1848 mutex_lock(&inode->i_mutex);
1849 retval = fuse_update_attributes(inode, NULL, file, NULL);
1850 if (!retval)
1851 retval = generic_file_llseek(file, offset, whence);
1852 mutex_unlock(&inode->i_mutex);
1853
1854 return retval;
1855}
1856
1857static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
1858 unsigned int nr_segs, size_t bytes, bool to_user)
1859{
1860 struct iov_iter ii;
1861 int page_idx = 0;
1862
1863 if (!bytes)
1864 return 0;
1865
1866 iov_iter_init(&ii, iov, nr_segs, bytes, 0);
1867
1868 while (iov_iter_count(&ii)) {
1869 struct page *page = pages[page_idx++];
1870 size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
1871 void *kaddr;
1872
1873 kaddr = kmap(page);
1874
1875 while (todo) {
1876 char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
1877 size_t iov_len = ii.iov->iov_len - ii.iov_offset;
1878 size_t copy = min(todo, iov_len);
1879 size_t left;
1880
1881 if (!to_user)
1882 left = copy_from_user(kaddr, uaddr, copy);
1883 else
1884 left = copy_to_user(uaddr, kaddr, copy);
1885
1886 if (unlikely(left))
1887 return -EFAULT;
1888
1889 iov_iter_advance(&ii, copy);
1890 todo -= copy;
1891 kaddr += copy;
1892 }
1893
1894 kunmap(page);
1895 }
1896
1897 return 0;
1898}
1899
/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
1906static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
1907 size_t transferred, unsigned count,
1908 bool is_compat)
1909{
1910#ifdef CONFIG_COMPAT
1911 if (count * sizeof(struct compat_iovec) == transferred) {
1912 struct compat_iovec *ciov = src;
1913 unsigned i;
1914
		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
1920 if (!is_compat)
1921 return -EINVAL;
1922
1923 for (i = 0; i < count; i++) {
1924 dst[i].iov_base = compat_ptr(ciov[i].iov_base);
1925 dst[i].iov_len = ciov[i].iov_len;
1926 }
1927 return 0;
1928 }
1929#endif
1930
1931 if (count * sizeof(struct iovec) != transferred)
1932 return -EIO;
1933
1934 memcpy(dst, src, transferred);
1935 return 0;
1936}
1937
/* Make sure iov_length() won't overflow */
1939static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
1940{
1941 size_t n;
1942 u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
1943
1944 for (n = 0; n < count; n++, iov++) {
1945 if (iov->iov_len > (size_t) max)
1946 return -ENOMEM;
1947 max -= iov->iov_len;
1948 }
1949 return 0;
1950}
1951
1952static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
1953 void *src, size_t transferred, unsigned count,
1954 bool is_compat)
1955{
1956 unsigned i;
1957 struct fuse_ioctl_iovec *fiov = src;
1958
1959 if (fc->minor < 16) {
1960 return fuse_copy_ioctl_iovec_old(dst, src, transferred,
1961 count, is_compat);
1962 }
1963
1964 if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
1965 return -EIO;
1966
1967 for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
1969 if (fiov[i].base != (unsigned long) fiov[i].base ||
1970 fiov[i].len != (unsigned long) fiov[i].len)
1971 return -EIO;
1972
1973 dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
1974 dst[i].iov_len = (size_t) fiov[i].len;
1975
1976#ifdef CONFIG_COMPAT
1977 if (is_compat &&
1978 (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
1979 (compat_size_t) dst[i].iov_len != fiov[i].len))
1980 return -EIO;
1981#endif
1982 }
1983
1984 return 0;
1985}
/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing the FUSE server to retry the ioctl with
 * the necessary in/out iovecs.  The server completes the ioctl with
 * FUSE_IOCTL_RETRY set in outarg->flags together with the iovecs it
 * wants copied in and out; the kernel then copies the described user
 * memory into the request pages and calls out to the server again.
 * This can be repeated until the server has everything it needs and
 * finishes the ioctl without FUSE_IOCTL_RETRY.
 *
 * Note: if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
2034long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
2035 unsigned int flags)
2036{
2037 struct fuse_file *ff = file->private_data;
2038 struct fuse_conn *fc = ff->fc;
2039 struct fuse_ioctl_in inarg = {
2040 .fh = ff->fh,
2041 .cmd = cmd,
2042 .arg = arg,
2043 .flags = flags
2044 };
2045 struct fuse_ioctl_out outarg;
2046 struct fuse_req *req = NULL;
2047 struct page **pages = NULL;
2048 struct iovec *iov_page = NULL;
2049 struct iovec *in_iov = NULL, *out_iov = NULL;
2050 unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
2051 size_t in_size, out_size, transferred;
2052 int err;
2053
2054#if BITS_PER_LONG == 32
2055 inarg.flags |= FUSE_IOCTL_32BIT;
2056#else
2057 if (flags & FUSE_IOCTL_COMPAT)
2058 inarg.flags |= FUSE_IOCTL_32BIT;
2059#endif
2060
	/* assume all the iovs returned by client always fits in a page */
2062 BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
2063
2064 err = -ENOMEM;
2065 pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
2066 iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
2067 if (!pages || !iov_page)
2068 goto out;
2069
	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
2074 if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
2075 struct iovec *iov = iov_page;
2076
2077 iov->iov_base = (void __user *)arg;
2078 iov->iov_len = _IOC_SIZE(cmd);
2079
2080 if (_IOC_DIR(cmd) & _IOC_WRITE) {
2081 in_iov = iov;
2082 in_iovs = 1;
2083 }
2084
2085 if (_IOC_DIR(cmd) & _IOC_READ) {
2086 out_iov = iov;
2087 out_iovs = 1;
2088 }
2089 }
2090
2091 retry:
2092 inarg.in_size = in_size = iov_length(in_iov, in_iovs);
2093 inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
2099 out_size = max_t(size_t, out_size, PAGE_SIZE);
2100 max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
2101
	/* make sure there are enough buffer pages and init request with them */
2103 err = -ENOMEM;
2104 if (max_pages > FUSE_MAX_PAGES_PER_REQ)
2105 goto out;
2106 while (num_pages < max_pages) {
2107 pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2108 if (!pages[num_pages])
2109 goto out;
2110 num_pages++;
2111 }
2112
2113 req = fuse_get_req(fc, num_pages);
2114 if (IS_ERR(req)) {
2115 err = PTR_ERR(req);
2116 req = NULL;
2117 goto out;
2118 }
2119 memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
2120 req->num_pages = num_pages;
2121 fuse_page_descs_length_init(req, 0, req->num_pages);
2122
	/* okay, let's send it to the client */
2124 req->in.h.opcode = FUSE_IOCTL;
2125 req->in.h.nodeid = ff->nodeid;
2126 req->in.numargs = 1;
2127 req->in.args[0].size = sizeof(inarg);
2128 req->in.args[0].value = &inarg;
2129 if (in_size) {
2130 req->in.numargs++;
2131 req->in.args[1].size = in_size;
2132 req->in.argpages = 1;
2133
2134 err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
2135 false);
2136 if (err)
2137 goto out;
2138 }
2139
2140 req->out.numargs = 2;
2141 req->out.args[0].size = sizeof(outarg);
2142 req->out.args[0].value = &outarg;
2143 req->out.args[1].size = out_size;
2144 req->out.argpages = 1;
2145 req->out.argvar = 1;
2146
2147 fuse_request_send(fc, req);
2148 err = req->out.h.error;
2149 transferred = req->out.args[1].size;
2150 fuse_put_request(fc, req);
2151 req = NULL;
2152 if (err)
2153 goto out;
2154
	/* did it ask for retry? */
2156 if (outarg.flags & FUSE_IOCTL_RETRY) {
2157 void *vaddr;
2158
		/* no retry if in restricted mode */
2160 err = -EIO;
2161 if (!(flags & FUSE_IOCTL_UNRESTRICTED))
2162 goto out;
2163
2164 in_iovs = outarg.in_iovs;
2165 out_iovs = outarg.out_iovs;
2166
		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
2171 err = -ENOMEM;
2172 if (in_iovs > FUSE_IOCTL_MAX_IOV ||
2173 out_iovs > FUSE_IOCTL_MAX_IOV ||
2174 in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
2175 goto out;
2176
2177 vaddr = kmap_atomic(pages[0]);
2178 err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
2179 transferred, in_iovs + out_iovs,
2180 (flags & FUSE_IOCTL_COMPAT) != 0);
2181 kunmap_atomic(vaddr);
2182 if (err)
2183 goto out;
2184
2185 in_iov = iov_page;
2186 out_iov = in_iov + in_iovs;
2187
2188 err = fuse_verify_ioctl_iov(in_iov, in_iovs);
2189 if (err)
2190 goto out;
2191
2192 err = fuse_verify_ioctl_iov(out_iov, out_iovs);
2193 if (err)
2194 goto out;
2195
2196 goto retry;
2197 }
2198
2199 err = -EIO;
2200 if (transferred > inarg.out_size)
2201 goto out;
2202
2203 err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
2204 out:
2205 if (req)
2206 fuse_put_request(fc, req);
2207 free_page((unsigned long) iov_page);
2208 while (num_pages)
2209 __free_page(pages[--num_pages]);
2210 kfree(pages);
2211
2212 return err ? err : outarg.result;
2213}
2214EXPORT_SYMBOL_GPL(fuse_do_ioctl);
2215
2216long fuse_ioctl_common(struct file *file, unsigned int cmd,
2217 unsigned long arg, unsigned int flags)
2218{
2219 struct inode *inode = file_inode(file);
2220 struct fuse_conn *fc = get_fuse_conn(inode);
2221
2222 if (!fuse_allow_current_process(fc))
2223 return -EACCES;
2224
2225 if (is_bad_inode(inode))
2226 return -EIO;
2227
2228 return fuse_do_ioctl(file, cmd, arg, flags);
2229}
2230
2231static long fuse_file_ioctl(struct file *file, unsigned int cmd,
2232 unsigned long arg)
2233{
2234 return fuse_ioctl_common(file, cmd, arg, 0);
2235}
2236
2237static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
2238 unsigned long arg)
2239{
2240 return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
2241}
2242
/*
 * Find the rb-tree link for the polled file with kernel handle @kh in
 * fc->polled_files.  If @parent_out is non-NULL it is set to the parent
 * node, ready for a subsequent rb_link_node().
 */
2248static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2249 struct rb_node **parent_out)
2250{
2251 struct rb_node **link = &fc->polled_files.rb_node;
2252 struct rb_node *last = NULL;
2253
2254 while (*link) {
2255 struct fuse_file *ff;
2256
2257 last = *link;
2258 ff = rb_entry(last, struct fuse_file, polled_node);
2259
2260 if (kh < ff->kh)
2261 link = &last->rb_left;
2262 else if (kh > ff->kh)
2263 link = &last->rb_right;
2264 else
2265 return link;
2266 }
2267
2268 if (parent_out)
2269 *parent_out = last;
2270 return link;
2271}
2272
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
2279static void fuse_register_polled_file(struct fuse_conn *fc,
2280 struct fuse_file *ff)
2281{
2282 spin_lock(&fc->lock);
2283 if (RB_EMPTY_NODE(&ff->polled_node)) {
2284 struct rb_node **link, *parent;
2285
2286 link = fuse_find_polled_node(fc, ff->kh, &parent);
2287 BUG_ON(*link);
2288 rb_link_node(&ff->polled_node, parent, link);
2289 rb_insert_color(&ff->polled_node, &fc->polled_files);
2290 }
2291 spin_unlock(&fc->lock);
2292}
2293
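/*
 * Poll handler: ask the server (FUSE_POLL) for the current event mask,
 * requesting a wakeup notification if anyone is actually waiting.
 * Falls back to DEFAULT_POLLMASK if the server doesn't implement poll.
 */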
2294unsigned fuse_file_poll(struct file *file, poll_table *wait)
2295{
2296 struct fuse_file *ff = file->private_data;
2297 struct fuse_conn *fc = ff->fc;
2298 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2299 struct fuse_poll_out outarg;
2300 struct fuse_req *req;
2301 int err;
2302
2303 if (fc->no_poll)
2304 return DEFAULT_POLLMASK;
2305
2306 poll_wait(file, &ff->poll_wait, wait);
2307 inarg.events = (__u32)poll_requested_events(wait);
2308
	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
2313 if (waitqueue_active(&ff->poll_wait)) {
2314 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
2315 fuse_register_polled_file(fc, ff);
2316 }
2317
2318 req = fuse_get_req_nopages(fc);
2319 if (IS_ERR(req))
2320 return POLLERR;
2321
2322 req->in.h.opcode = FUSE_POLL;
2323 req->in.h.nodeid = ff->nodeid;
2324 req->in.numargs = 1;
2325 req->in.args[0].size = sizeof(inarg);
2326 req->in.args[0].value = &inarg;
2327 req->out.numargs = 1;
2328 req->out.args[0].size = sizeof(outarg);
2329 req->out.args[0].value = &outarg;
2330 fuse_request_send(fc, req);
2331 err = req->out.h.error;
2332 fuse_put_request(fc, req);
2333
2334 if (!err)
2335 return outarg.revents;
2336 if (err == -ENOSYS) {
2337 fc->no_poll = 1;
2338 return DEFAULT_POLLMASK;
2339 }
2340 return POLLERR;
2341}
2342EXPORT_SYMBOL_GPL(fuse_file_poll);
2343
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
2348int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2349 struct fuse_notify_poll_wakeup_out *outarg)
2350{
2351 u64 kh = outarg->kh;
2352 struct rb_node **link;
2353
2354 spin_lock(&fc->lock);
2355
2356 link = fuse_find_polled_node(fc, kh, NULL);
2357 if (*link) {
2358 struct fuse_file *ff;
2359
2360 ff = rb_entry(*link, struct fuse_file, polled_node);
2361 wake_up_interruptible_sync(&ff->poll_wait);
2362 }
2363
2364 spin_unlock(&fc->lock);
2365 return 0;
2366}
2367
2368static void fuse_do_truncate(struct file *file)
2369{
2370 struct inode *inode = file->f_mapping->host;
2371 struct iattr attr;
2372
2373 attr.ia_valid = ATTR_SIZE;
2374 attr.ia_size = i_size_read(inode);
2375
2376 attr.ia_file = file;
2377 attr.ia_valid |= ATTR_FILE;
2378
2379 fuse_do_setattr(inode, &attr, file);
2380}
2381
2382static inline loff_t fuse_round_up(loff_t off)
2383{
2384 return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
2385}
2386
2387static ssize_t
2388fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2389 loff_t offset, unsigned long nr_segs)
2390{
2391 ssize_t ret = 0;
2392 struct file *file = iocb->ki_filp;
2393 struct fuse_file *ff = file->private_data;
2394 bool async_dio = ff->fc->async_dio;
2395 loff_t pos = 0;
2396 struct inode *inode;
2397 loff_t i_size;
2398 size_t count = iov_length(iov, nr_segs);
2399 struct fuse_io_priv *io;
2400
2401 pos = offset;
2402 inode = file->f_mapping->host;
2403 i_size = i_size_read(inode);
2404
	/* optimization for short read */
2406 if (async_dio && rw != WRITE && offset + count > i_size) {
2407 if (offset >= i_size)
2408 return 0;
2409 count = min_t(loff_t, count, fuse_round_up(i_size - offset));
2410 }
2411
2412 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
2413 if (!io)
2414 return -ENOMEM;
2415 spin_lock_init(&io->lock);
2416 io->reqs = 1;
2417 io->bytes = -1;
2418 io->size = 0;
2419 io->offset = offset;
2420 io->write = (rw == WRITE);
2421 io->err = 0;
2422 io->file = file;

	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
2427 io->async = async_dio;
2428 io->iocb = iocb;
2429
	/*
	 * We cannot asynchronously extend the size of a file.  We have no
	 * method to wait on real async I/O requests, so we must submit this
	 * request synchronously.
	 */
2435 if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
2436 io->async = false;
2437
2438 if (rw == WRITE)
2439 ret = __fuse_direct_write(io, iov, nr_segs, &pos);
2440 else
2441 ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
2442
2443 if (io->async) {
2444 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2445
		/* we have a non-extending, async request, so return */
2447 if (!is_sync_kiocb(iocb))
2448 return -EIOCBQUEUED;
2449
2450 ret = wait_on_sync_kiocb(iocb);
2451 } else {
2452 kfree(io);
2453 }
2454
2455 if (rw == WRITE) {
2456 if (ret > 0)
2457 fuse_write_update_size(inode, pos);
2458 else if (ret < 0 && offset + count > i_size)
2459 fuse_do_truncate(file);
2460 }
2461
2462 return ret;
2463}
2464
2465static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2466 loff_t length)
2467{
2468 struct fuse_file *ff = file->private_data;
2469 struct inode *inode = file->f_inode;
2470 struct fuse_inode *fi = get_fuse_inode(inode);
2471 struct fuse_conn *fc = ff->fc;
2472 struct fuse_req *req;
2473 struct fuse_fallocate_in inarg = {
2474 .fh = ff->fh,
2475 .offset = offset,
2476 .length = length,
2477 .mode = mode
2478 };
2479 int err;
2480 bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
2481 (mode & FALLOC_FL_PUNCH_HOLE);
2482
2483 if (fc->no_fallocate)
2484 return -EOPNOTSUPP;
2485
2486 if (lock_inode) {
2487 mutex_lock(&inode->i_mutex);
2488 if (mode & FALLOC_FL_PUNCH_HOLE) {
2489 loff_t endbyte = offset + length - 1;
2490 err = filemap_write_and_wait_range(inode->i_mapping,
2491 offset, endbyte);
2492 if (err)
2493 goto out;
2494
2495 fuse_sync_writes(inode);
2496 }
2497 }
2498
2499 if (!(mode & FALLOC_FL_KEEP_SIZE))
2500 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2501
2502 req = fuse_get_req_nopages(fc);
2503 if (IS_ERR(req)) {
2504 err = PTR_ERR(req);
2505 goto out;
2506 }
2507
2508 req->in.h.opcode = FUSE_FALLOCATE;
2509 req->in.h.nodeid = ff->nodeid;
2510 req->in.numargs = 1;
2511 req->in.args[0].size = sizeof(inarg);
2512 req->in.args[0].value = &inarg;
2513 fuse_request_send(fc, req);
2514 err = req->out.h.error;
2515 if (err == -ENOSYS) {
2516 fc->no_fallocate = 1;
2517 err = -EOPNOTSUPP;
2518 }
2519 fuse_put_request(fc, req);
2520
2521 if (err)
2522 goto out;
2523
	/* we could have extended the file */
2525 if (!(mode & FALLOC_FL_KEEP_SIZE))
2526 fuse_write_update_size(inode, offset + length);
2527
2528 if (mode & FALLOC_FL_PUNCH_HOLE)
2529 truncate_pagecache_range(inode, offset, offset + length - 1);
2530
2531 fuse_invalidate_attr(inode);
2532
2533out:
2534 if (!(mode & FALLOC_FL_KEEP_SIZE))
2535 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2536
2537 if (lock_inode)
2538 mutex_unlock(&inode->i_mutex);
2539
2540 return err;
2541}
2542
2543static const struct file_operations fuse_file_operations = {
2544 .llseek = fuse_file_llseek,
2545 .read = do_sync_read,
2546 .aio_read = fuse_file_aio_read,
2547 .write = do_sync_write,
2548 .aio_write = fuse_file_aio_write,
2549 .mmap = fuse_file_mmap,
2550 .open = fuse_open,
2551 .flush = fuse_flush,
2552 .release = fuse_release,
2553 .fsync = fuse_fsync,
2554 .lock = fuse_file_lock,
2555 .flock = fuse_file_flock,
2556 .splice_read = generic_file_splice_read,
2557 .unlocked_ioctl = fuse_file_ioctl,
2558 .compat_ioctl = fuse_file_compat_ioctl,
2559 .poll = fuse_file_poll,
2560 .fallocate = fuse_file_fallocate,
2561};
2562
2563static const struct file_operations fuse_direct_io_file_operations = {
2564 .llseek = fuse_file_llseek,
2565 .read = fuse_direct_read,
2566 .write = fuse_direct_write,
2567 .mmap = fuse_direct_mmap,
2568 .open = fuse_open,
2569 .flush = fuse_flush,
2570 .release = fuse_release,
2571 .fsync = fuse_fsync,
2572 .lock = fuse_file_lock,
2573 .flock = fuse_file_flock,
2574 .unlocked_ioctl = fuse_file_ioctl,
2575 .compat_ioctl = fuse_file_compat_ioctl,
2576 .poll = fuse_file_poll,
2577 .fallocate = fuse_file_fallocate,
	/* no splice_read */
2579};
2580
2581static const struct address_space_operations fuse_file_aops = {
2582 .readpage = fuse_readpage,
2583 .writepage = fuse_writepage,
2584 .launder_page = fuse_launder_page,
2585 .readpages = fuse_readpages,
2586 .set_page_dirty = __set_page_dirty_nobuffers,
2587 .bmap = fuse_bmap,
2588 .direct_IO = fuse_direct_IO,
2589};
2590
2591void fuse_init_file_inode(struct inode *inode)
2592{
2593 inode->i_fop = &fuse_file_operations;
2594 inode->i_data.a_ops = &fuse_file_aops;
2595}
2596