/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	path_put(&req->misc.release.path);
}

static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		req->end = fuse_release_end;
		fuse_request_send_background(ff->fc, req);
		kfree(ff);
	}
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_sync(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 */
	fuse_file_put(ff);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

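/*
 * Scramble the lock owner ID using TEA (32 rounds, keyed with the
 * connection's scramble key), so that lock owners are not disclosed
 * to the userspace filesystem in a predictable form.
 */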
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

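/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */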
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

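/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */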
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

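/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */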
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}

static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, !write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}

ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}
	mutex_unlock(&inode->i_mutex);

	fuse_invalidate_attr(inode);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

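/* Called under fc->lock, may release and reacquire it */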
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

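/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */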
void fuse_flush_writepages(struct inode *inode)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

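/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */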
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

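/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * overwriting data that the userspace filesystem is still processing
 * from the previous write request.
 */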
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;

		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes,
				bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left)) {
				/* don't leak the kernel mapping on error */
				kunmap(page);
				return -EFAULT;
			}

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}

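/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in and
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be NULL; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */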
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct page *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

	/* assume all the iovs returned by client always fits in a page */
	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = alloc_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = page_address(iov_page);

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		char *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		err = -EIO;
		if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
			goto out;

		/* okay, copy in iovs and retry */
		vaddr = kmap_atomic(pages[0], KM_USER0);
		memcpy(page_address(iov_page), vaddr, transferred);
		kunmap_atomic(vaddr, KM_USER0);

		in_iov = page_address(iov_page);
		out_iov = in_iov + in_iovs;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	if (iov_page)
		__free_page(iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);

static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
				   unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

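/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching node.
 */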
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

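/*
 * The file is about to be polled.  Make sure it's on the rb tree for
 * all polled files.  Do nothing if it's already there.
 */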
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

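/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */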
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}