/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 */
20#include <linux/fs.h>
21#include <linux/file.h>
22#include <linux/pagemap.h>
23#include <linux/splice.h>
24#include <linux/memcontrol.h>
25#include <linux/mm_inline.h>
26#include <linux/swap.h>
27#include <linux/writeback.h>
28#include <linux/export.h>
29#include <linux/syscalls.h>
30#include <linux/uio.h>
31#include <linux/security.h>
32#include <linux/gfp.h>
33#include <linux/socket.h>
34#include <linux/compat.h>
35#include "internal.h"

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, no idea what will happen there...
 */
43static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
44 struct pipe_buffer *buf)
45{
46 struct page *page = buf->page;
47 struct address_space *mapping;
48
49 lock_page(page);
50
51 mapping = page_mapping(page);
52 if (mapping) {
53 WARN_ON(!PageUptodate(page));

		/*
		 * Wait for any writeback on this page to complete before
		 * removing it from the page cache, so that truncate cannot
		 * let the disk blocks be reused while I/O is still pending.
		 */
63 wait_on_page_writeback(page);
64
65 if (page_has_private(page) &&
66 !try_to_release_page(page, GFP_KERNEL))
67 goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
73 if (remove_mapping(mapping, page)) {
74 buf->flags |= PIPE_BUF_FLAG_LRU;
75 return 0;
76 }
77 }

	/*
	 * Raced with truncate or failed to remove page from the
	 * mapping. Return failure.
	 */
83out_unlock:
84 unlock_page(page);
85 return 1;
86}
87
88static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
89 struct pipe_buffer *buf)
90{
91 page_cache_release(buf->page);
92 buf->flags &= ~PIPE_BUF_FLAG_LRU;
93}

/*
 * Check whether the contents of buf is OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
99static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
100 struct pipe_buffer *buf)
101{
102 struct page *page = buf->page;
103 int err;
104
105 if (!PageUptodate(page)) {
106 lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
112 if (!page->mapping) {
113 err = -ENODATA;
114 goto error;
115 }

		/*
		 * Uh oh, read-error from disk.
		 */
120 if (!PageUptodate(page)) {
121 err = -EIO;
122 goto error;
123 }
124

		/* Page is ok after all, we are done */
128 unlock_page(page);
129 }
130
131 return 0;
132error:
133 unlock_page(page);
134 return err;
135}
136
137const struct pipe_buf_operations page_cache_pipe_buf_ops = {
138 .can_merge = 0,
139 .map = generic_pipe_buf_map,
140 .unmap = generic_pipe_buf_unmap,
141 .confirm = page_cache_pipe_buf_confirm,
142 .release = page_cache_pipe_buf_release,
143 .steal = page_cache_pipe_buf_steal,
144 .get = generic_pipe_buf_get,
145};
146
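/*
 * Steal a page spliced in from user memory. Only pages that were gifted
 * to the pipe (SPLICE_F_GIFT) may be stolen.
 */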
147static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
148 struct pipe_buffer *buf)
149{
150 if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
151 return 1;
152
153 buf->flags |= PIPE_BUF_FLAG_LRU;
154 return generic_pipe_buf_steal(pipe, buf);
155}
156
157static const struct pipe_buf_operations user_page_pipe_buf_ops = {
158 .can_merge = 0,
159 .map = generic_pipe_buf_map,
160 .unmap = generic_pipe_buf_unmap,
161 .confirm = generic_pipe_buf_confirm,
162 .release = page_cache_pipe_buf_release,
163 .steal = user_page_pipe_buf_steal,
164 .get = generic_pipe_buf_get,
165};
166
167static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
168{
169 smp_mb();
170 if (waitqueue_active(&pipe->wait))
171 wake_up_interruptible(&pipe->wait);
172 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
173}

/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe: destination pipe
 * @spd: data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 */
186ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
187 struct splice_pipe_desc *spd)
188{
189 unsigned int spd_pages = spd->nr_pages;
190 int ret, do_wakeup, page_nr;
191
192 ret = 0;
193 do_wakeup = 0;
194 page_nr = 0;
195
196 pipe_lock(pipe);
197
198 for (;;) {
199 if (!pipe->readers) {
200 send_sig(SIGPIPE, current, 0);
201 if (!ret)
202 ret = -EPIPE;
203 break;
204 }
205
206 if (pipe->nrbufs < pipe->buffers) {
207 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
208 struct pipe_buffer *buf = pipe->bufs + newbuf;
209
210 buf->page = spd->pages[page_nr];
211 buf->offset = spd->partial[page_nr].offset;
212 buf->len = spd->partial[page_nr].len;
213 buf->private = spd->partial[page_nr].private;
214 buf->ops = spd->ops;
215 if (spd->flags & SPLICE_F_GIFT)
216 buf->flags |= PIPE_BUF_FLAG_GIFT;
217
218 pipe->nrbufs++;
219 page_nr++;
220 ret += buf->len;
221
222 if (pipe->files)
223 do_wakeup = 1;
224
225 if (!--spd->nr_pages)
226 break;
227 if (pipe->nrbufs < pipe->buffers)
228 continue;
229
230 break;
231 }
232
233 if (spd->flags & SPLICE_F_NONBLOCK) {
234 if (!ret)
235 ret = -EAGAIN;
236 break;
237 }
238
239 if (signal_pending(current)) {
240 if (!ret)
241 ret = -ERESTARTSYS;
242 break;
243 }
244
245 if (do_wakeup) {
246 smp_mb();
247 if (waitqueue_active(&pipe->wait))
248 wake_up_interruptible_sync(&pipe->wait);
249 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
250 do_wakeup = 0;
251 }
252
253 pipe->waiting_writers++;
254 pipe_wait(pipe);
255 pipe->waiting_writers--;
256 }
257
258 pipe_unlock(pipe);
259
260 if (do_wakeup)
261 wakeup_pipe_readers(pipe);
262
263 while (page_nr < spd_pages)
264 spd->spd_release(spd, page_nr++);
265
266 return ret;
267}
268EXPORT_SYMBOL_GPL(splice_to_pipe);
269
270void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
271{
272 page_cache_release(spd->pages[i]);
273}

/*
 * Check if we need to grow the arrays holding pages and partial page
 * descriptions.
 */
279int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
280{
281 unsigned int buffers = ACCESS_ONCE(pipe->buffers);
282
283 spd->nr_pages_max = buffers;
284 if (buffers <= PIPE_DEF_BUFFERS)
285 return 0;
286
287 spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
288 spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
289
290 if (spd->pages && spd->partial)
291 return 0;
292
293 kfree(spd->pages);
294 kfree(spd->partial);
295 return -ENOMEM;
296}
297
298void splice_shrink_spd(struct splice_pipe_desc *spd)
299{
300 if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
301 return;
302
303 kfree(spd->pages);
304 kfree(spd->partial);
305}
306
307static int
308__generic_file_splice_read(struct file *in, loff_t *ppos,
309 struct pipe_inode_info *pipe, size_t len,
310 unsigned int flags)
311{
312 struct address_space *mapping = in->f_mapping;
313 unsigned int loff, nr_pages, req_pages;
314 struct page *pages[PIPE_DEF_BUFFERS];
315 struct partial_page partial[PIPE_DEF_BUFFERS];
316 struct page *page;
317 pgoff_t index, end_index;
318 loff_t isize;
319 int error, page_nr;
320 struct splice_pipe_desc spd = {
321 .pages = pages,
322 .partial = partial,
323 .nr_pages_max = PIPE_DEF_BUFFERS,
324 .flags = flags,
325 .ops = &page_cache_pipe_buf_ops,
326 .spd_release = spd_release_page,
327 };
328
329 if (splice_grow_spd(pipe, &spd))
330 return -ENOMEM;
331
332 index = *ppos >> PAGE_CACHE_SHIFT;
333 loff = *ppos & ~PAGE_CACHE_MASK;
334 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
335 nr_pages = min(req_pages, spd.nr_pages_max);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
340 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
341 index += spd.nr_pages;

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	 */
347 if (spd.nr_pages < nr_pages)
348 page_cache_sync_readahead(mapping, &in->f_ra, in,
349 index, req_pages - spd.nr_pages);
350
351 error = 0;
352 while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
357 page = find_get_page(mapping, index);
358 if (!page) {
			/*
			 * page didn't exist, allocate one.
			 */
362 page = page_cache_alloc_cold(mapping);
363 if (!page)
364 break;
365
366 error = add_to_page_cache_lru(page, mapping, index,
367 GFP_KERNEL);
368 if (unlikely(error)) {
369 page_cache_release(page);
370 if (error == -EEXIST)
371 continue;
372 break;
373 }

			/*
			 * add_to_page_cache_lru() locks the page; unlock it,
			 * the code below expects an unlocked page.
			 */
378 unlock_page(page);
379 }
380
381 spd.pages[spd.nr_pages++] = page;
382 index++;
383 }

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
389 index = *ppos >> PAGE_CACHE_SHIFT;
390 nr_pages = spd.nr_pages;
391 spd.nr_pages = 0;
392 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
393 unsigned int this_len;
394
395 if (!len)
396 break;

		/*
		 * this_len is the max we'll use from this page
		 */
401 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
402 page = spd.pages[page_nr];
403
404 if (PageReadahead(page))
405 page_cache_async_readahead(mapping, &in->f_ra, in,
406 page, index, req_pages - page_nr);

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
411 if (!PageUptodate(page)) {
412 lock_page(page);

			/*
			 * Page was truncated, or invalidated by the
			 * filesystem. Redo the find/create, but this time
			 * the page is kept locked, so there's no chance of
			 * another race with truncate/invalidate.
			 */
420 if (!page->mapping) {
421 unlock_page(page);
422retry_lookup:
423 page = find_or_create_page(mapping, index,
424 mapping_gfp_mask(mapping));
425
426 if (!page) {
427 error = -ENOMEM;
428 break;
429 }
430 page_cache_release(spd.pages[page_nr]);
431 spd.pages[page_nr] = page;
432 }

			/* Page was already under IO and is now done, great */
436 if (PageUptodate(page)) {
437 unlock_page(page);
438 goto fill_it;
439 }

			/*
			 * need to read in the page
			 */
444 error = mapping->a_ops->readpage(in, page);
445 if (unlikely(error)) {
				/*
				 * Retry the lookup if the page was truncated
				 * while being read in; any other error stops
				 * the loop.
				 */
449 if (error == AOP_TRUNCATED_PAGE)
450 goto retry_lookup;
451
452 break;
453 }
454 }
455fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
459 isize = i_size_read(mapping->host);
460 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
461 if (unlikely(!isize || index > end_index))
462 break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
468 if (end_index == index) {
469 unsigned int plen;

			/*
			 * max good bytes in this page
			 */
474 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
475 if (plen <= loff)
476 break;

			/*
			 * force quit after adding this page
			 */
481 this_len = min(this_len, plen - loff);
482 len = this_len;
483 }
484
485 spd.partial[page_nr].offset = loff;
486 spd.partial[page_nr].len = this_len;
487 len -= this_len;
488 loff = 0;
489 spd.nr_pages++;
490 index++;
491 }

	/*
	 * Release any pages at the end, if we quit early. 'page_nr'
	 * is how far we got, 'nr_pages' is how many pages are in the map.
	 */
497 while (page_nr < nr_pages)
498 page_cache_release(spd.pages[page_nr++]);
499 in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
500
501 if (spd.nr_pages)
502 error = splice_to_pipe(pipe, &spd);
503
504 splice_shrink_spd(&spd);
505 return error;
506}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @ppos: position in @in
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implements
 *    a readpage() hook.
 */
522ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
523 struct pipe_inode_info *pipe, size_t len,
524 unsigned int flags)
525{
526 loff_t isize, left;
527 int ret;
528
529 if (IS_DAX(in->f_mapping->host))
530 return default_file_splice_read(in, ppos, pipe, len, flags);
531
532 isize = i_size_read(in->f_mapping->host);
533 if (unlikely(*ppos >= isize))
534 return 0;
535
536 left = isize - *ppos;
537 if (unlikely(left < len))
538 len = left;
539
540 ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
541 if (ret > 0) {
542 *ppos += ret;
543 file_accessed(in);
544 }
545
546 return ret;
547}
548EXPORT_SYMBOL(generic_file_splice_read);
549
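/*
 * Pipe buffer ops for plain allocated pages, used by the default
 * (read/write based) splice paths below.
 */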
550static const struct pipe_buf_operations default_pipe_buf_ops = {
551 .can_merge = 0,
552 .map = generic_pipe_buf_map,
553 .unmap = generic_pipe_buf_unmap,
554 .confirm = generic_pipe_buf_confirm,
555 .release = generic_pipe_buf_release,
556 .steal = generic_pipe_buf_steal,
557 .get = generic_pipe_buf_get,
558};
559
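/* Refuse to give up the page; used by nosteal_pipe_buf_ops below. */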
560static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
561 struct pipe_buffer *buf)
562{
563 return 1;
564}
565
566
567const struct pipe_buf_operations nosteal_pipe_buf_ops = {
568 .can_merge = 0,
569 .map = generic_pipe_buf_map,
570 .unmap = generic_pipe_buf_unmap,
571 .confirm = generic_pipe_buf_confirm,
572 .release = generic_pipe_buf_release,
573 .steal = generic_pipe_buf_nosteal,
574 .get = generic_pipe_buf_get,
575};
576EXPORT_SYMBOL(nosteal_pipe_buf_ops);
577
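/*
 * Read into kernel pages with vfs_readv(), temporarily widening the
 * address limit so the kernel iovec is accepted as a "user" iovec.
 */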
578static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
579 unsigned long vlen, loff_t offset)
580{
581 mm_segment_t old_fs;
582 loff_t pos = offset;
583 ssize_t res;
584
585 old_fs = get_fs();
586 set_fs(get_ds());
587
588 res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
589 set_fs(old_fs);
590
591 return res;
592}
593
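/*
 * Write a kernel buffer to a file, using the same set_fs() trick as
 * kernel_readv() above.
 */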
594ssize_t kernel_write(struct file *file, const char *buf, size_t count,
595 loff_t pos)
596{
597 mm_segment_t old_fs;
598 ssize_t res;
599
600 old_fs = get_fs();
601 set_fs(get_ds());
602
603 res = vfs_write(file, (__force const char __user *)buf, count, &pos);
604 set_fs(old_fs);
605
606 return res;
607}
608EXPORT_SYMBOL(kernel_write);
609
610ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
611 struct pipe_inode_info *pipe, size_t len,
612 unsigned int flags)
613{
614 unsigned int nr_pages;
615 unsigned int nr_freed;
616 size_t offset;
617 struct page *pages[PIPE_DEF_BUFFERS];
618 struct partial_page partial[PIPE_DEF_BUFFERS];
619 struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
620 ssize_t res;
621 size_t this_len;
622 int error;
623 int i;
624 struct splice_pipe_desc spd = {
625 .pages = pages,
626 .partial = partial,
627 .nr_pages_max = PIPE_DEF_BUFFERS,
628 .flags = flags,
629 .ops = &default_pipe_buf_ops,
630 .spd_release = spd_release_page,
631 };
632
633 if (splice_grow_spd(pipe, &spd))
634 return -ENOMEM;
635
636 res = -ENOMEM;
637 vec = __vec;
638 if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
639 vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
640 if (!vec)
641 goto shrink_ret;
642 }
643
644 offset = *ppos & ~PAGE_CACHE_MASK;
645 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
646
647 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
648 struct page *page;
649
650 page = alloc_page(GFP_USER);
651 error = -ENOMEM;
652 if (!page)
653 goto err;
654
655 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
656 vec[i].iov_base = (void __user *) page_address(page);
657 vec[i].iov_len = this_len;
658 spd.pages[i] = page;
659 spd.nr_pages++;
660 len -= this_len;
661 offset = 0;
662 }
663
664 res = kernel_readv(in, vec, spd.nr_pages, *ppos);
665 if (res < 0) {
666 error = res;
667 goto err;
668 }
669
670 error = 0;
671 if (!res)
672 goto err;
673
674 nr_freed = 0;
675 for (i = 0; i < spd.nr_pages; i++) {
676 this_len = min_t(size_t, vec[i].iov_len, res);
677 spd.partial[i].offset = 0;
678 spd.partial[i].len = this_len;
679 if (!this_len) {
680 __free_page(spd.pages[i]);
681 spd.pages[i] = NULL;
682 nr_freed++;
683 }
684 res -= this_len;
685 }
686 spd.nr_pages -= nr_freed;
687
688 res = splice_to_pipe(pipe, &spd);
689 if (res > 0)
690 *ppos += res;
691
692shrink_ret:
693 if (vec != __vec)
694 kfree(vec);
695 splice_shrink_spd(&spd);
696 return res;
697
698err:
699 for (i = 0; i < spd.nr_pages; i++)
700 __free_page(spd.pages[i]);
701
702 res = error;
703 goto shrink_ret;
704}
705EXPORT_SYMBOL(default_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
711static int pipe_to_sendpage(struct pipe_inode_info *pipe,
712 struct pipe_buffer *buf, struct splice_desc *sd)
713{
714 struct file *file = sd->u.file;
715 loff_t pos = sd->pos;
716 int more;
717
718 if (!likely(file->f_op && file->f_op->sendpage))
719 return -EINVAL;
720
721 more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
722
723 if (sd->len < sd->total_len && pipe->nrbufs > 1)
724 more |= MSG_SENDPAGE_NOTLAST;
725
726 return file->f_op->sendpage(file, buf->page, buf->offset,
727 sd->len, &pos, more);
728}

/*
 * Copy the contents of one pipe buffer into the page cache of 'sd->u.file'
 * at 'sd->pos', using the filesystem's write_begin/write_end hooks. Used
 * as the per-buffer actor when splicing from a pipe to a regular file.
 */
750int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
751 struct splice_desc *sd)
752{
753 struct file *file = sd->u.file;
754 struct address_space *mapping = file->f_mapping;
755 unsigned int offset, this_len;
756 struct page *page;
757 void *fsdata;
758 int ret;
759
760 offset = sd->pos & ~PAGE_CACHE_MASK;
761
762 this_len = sd->len;
763 if (this_len + offset > PAGE_CACHE_SIZE)
764 this_len = PAGE_CACHE_SIZE - offset;
765
766 ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
767 AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
768 if (unlikely(ret))
769 goto out;
770
771 if (buf->page != page) {
772 char *src = buf->ops->map(pipe, buf, 1);
773 char *dst = kmap_atomic(page);
774
775 memcpy(dst + offset, src + buf->offset, this_len);
776 flush_dcache_page(page);
777 kunmap_atomic(dst);
778 buf->ops->unmap(pipe, buf, src);
779 }
780 ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
781 page, fsdata);
782out:
783 return ret;
784}
785EXPORT_SYMBOL(pipe_to_file);
786
787static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
788{
789 smp_mb();
790 if (waitqueue_active(&pipe->wait))
791 wake_up_interruptible(&pipe->wait);
792 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
793}

/**
 * splice_from_pipe_feed - feed available data from a pipe to a file
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * Description:
 *    This function loops over the pipe and calls @actor to do the
 *    actual moving of a single struct pipe_buffer to the desired
 *    destination. It returns when there's no more buffers left in
 *    the pipe or if the requested number of bytes (@sd->total_len)
 *    have been copied. It returns a positive number (one) if the
 *    pipe needs to be filled with more data, zero if the required
 *    number of bytes have been copied and -errno on error.
 *
 *    This, together with splice_from_pipe_{begin,end,next}, may be
 *    used to implement the functionality of __splice_from_pipe() when
 *    locking is required around copying the pipe buffers to the
 *    destination.
 */
815int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
816 splice_actor *actor)
817{
818 int ret;
819
820 while (pipe->nrbufs) {
821 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
822 const struct pipe_buf_operations *ops = buf->ops;
823
824 sd->len = buf->len;
825 if (sd->len > sd->total_len)
826 sd->len = sd->total_len;
827
828 ret = buf->ops->confirm(pipe, buf);
829 if (unlikely(ret)) {
830 if (ret == -ENODATA)
831 ret = 0;
832 return ret;
833 }
834
835 ret = actor(pipe, buf, sd);
836 if (ret <= 0)
837 return ret;
838
839 buf->offset += ret;
840 buf->len -= ret;
841
842 sd->num_spliced += ret;
843 sd->len -= ret;
844 sd->pos += ret;
845 sd->total_len -= ret;
846
847 if (!buf->len) {
848 buf->ops = NULL;
849 ops->release(pipe, buf);
850 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
851 pipe->nrbufs--;
852 if (pipe->files)
853 sd->need_wakeup = true;
854 }
855
856 if (!sd->total_len)
857 return 0;
858 }
859
860 return 1;
861}
862EXPORT_SYMBOL(splice_from_pipe_feed);

/**
 * splice_from_pipe_next - wait for some data to splice from
 * @pipe: pipe to splice from
 * @sd: information about the splice operation
 *
 * Description:
 *    This function will wait for some data and return a positive
 *    value (one) if pipe buffers are available. It will return zero
 *    or -errno if no more data needs to be spliced.
 */
874int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
875{
876 while (!pipe->nrbufs) {
877 if (!pipe->writers)
878 return 0;
879
880 if (!pipe->waiting_writers && sd->num_spliced)
881 return 0;
882
883 if (sd->flags & SPLICE_F_NONBLOCK)
884 return -EAGAIN;
885
886 if (signal_pending(current))
887 return -ERESTARTSYS;
888
889 if (sd->need_wakeup) {
890 wakeup_pipe_writers(pipe);
891 sd->need_wakeup = false;
892 }
893
894 pipe_wait(pipe);
895 }
896
897 return 1;
898}
899EXPORT_SYMBOL(splice_from_pipe_next);

/**
 * splice_from_pipe_begin - start splicing from pipe
 * @sd: information about the splice operation
 *
 * Description:
 *    This function should be called before a loop containing
 *    splice_from_pipe_next() and splice_from_pipe_feed() to
 *    initialize the necessary fields of @sd.
 */
910void splice_from_pipe_begin(struct splice_desc *sd)
911{
912 sd->num_spliced = 0;
913 sd->need_wakeup = false;
914}
915EXPORT_SYMBOL(splice_from_pipe_begin);

/**
 * splice_from_pipe_end - finish splicing from pipe
 * @pipe: pipe to splice from
 * @sd: information about the splice operation
 *
 * Description:
 *    This function will wake up pipe writers if necessary. It should
 *    be called after a loop containing splice_from_pipe_next() and
 *    splice_from_pipe_feed().
 */
927void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
928{
929 if (sd->need_wakeup)
930 wakeup_pipe_writers(pipe);
931}
932EXPORT_SYMBOL(splice_from_pipe_end);

/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 */
947ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
948 splice_actor *actor)
949{
950 int ret;
951
952 splice_from_pipe_begin(sd);
953 do {
954 ret = splice_from_pipe_next(pipe, sd);
955 if (ret > 0)
956 ret = splice_from_pipe_feed(pipe, sd, actor);
957 } while (ret > 0);
958 splice_from_pipe_end(pipe, sd);
959
960 return sd->num_spliced ? sd->num_spliced : ret;
961}
962EXPORT_SYMBOL(__splice_from_pipe);

/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe: pipe to splice from
 * @out: file to splice to
 * @ppos: position in @out
 * @len: how many bytes to splice
 * @flags: splice modifier flags
 * @actor: handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the pipe inode,
 *    otherwise it's identical to __splice_from_pipe().
 */
978ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
979 loff_t *ppos, size_t len, unsigned int flags,
980 splice_actor *actor)
981{
982 ssize_t ret;
983 struct splice_desc sd = {
984 .total_len = len,
985 .flags = flags,
986 .pos = *ppos,
987 .u.file = out,
988 };
989
990 pipe_lock(pipe);
991 ret = __splice_from_pipe(pipe, &sd, actor);
992 pipe_unlock(pipe);
993
994 return ret;
995}

/**
 * splice_write_to_file - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 * @actor: handler that writes the spliced data
 *
 * Description:
 *    Waits for pipe data and calls @actor to write it to @out, then
 *    syncs the written range and advances @ppos on success.
 */
1011ssize_t splice_write_to_file(struct pipe_inode_info *pipe, struct file *out,
1012 loff_t *ppos, size_t len, unsigned int flags,
1013 splice_write_actor actor)
1014{
1015 struct address_space *mapping = out->f_mapping;
1016 struct splice_desc sd = {
1017 .total_len = len,
1018 .flags = flags,
1019 .pos = *ppos,
1020 .u.file = out,
1021 };
1022 ssize_t ret;
1023
1024 pipe_lock(pipe);
1025
1026 splice_from_pipe_begin(&sd);
1027 do {
1028 ret = splice_from_pipe_next(pipe, &sd);
1029 if (ret <= 0)
1030 break;
1031
1032 ret = actor(pipe, &sd);
1033
1034 } while (ret > 0);
1035 splice_from_pipe_end(pipe, &sd);
1036
1037 pipe_unlock(pipe);
1038
1039 if (sd.num_spliced)
1040 ret = sd.num_spliced;
1041
1042 if (ret > 0) {
1043 int err;
1044
1045 err = generic_write_sync(out, *ppos, ret);
1046 if (err)
1047 ret = err;
1048 else
1049 *ppos += ret;
1050 balance_dirty_pages_ratelimited(mapping);
1051 }
1052
1053 return ret;
1054}
1055EXPORT_SYMBOL(splice_write_to_file);
1056
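/*
 * Write actor for generic_file_splice_write(): takes i_mutex, strips
 * privileges, updates the file times and feeds the pipe buffers to
 * pipe_to_file().
 */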
1057static ssize_t generic_file_splice_write_actor(struct pipe_inode_info *pipe,
1058 struct splice_desc *sd)
1059{
1060 struct file *out = sd->u.file;
1061 struct inode *inode = out->f_mapping->host;
1062 ssize_t ret;
1063
1064 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
1065 ret = file_remove_privs(out);
1066 if (!ret) {
1067 file_update_time(out);
1068 ret = splice_from_pipe_feed(pipe, sd, pipe_to_file);
1069 }
1070 mutex_unlock(&inode->i_mutex);
1071
1072 return ret;
1073}
1074
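/*
 * Fallback per-buffer actor: map the pipe buffer and write it out with
 * __kernel_write(). Used when the target has no splice_write method.
 */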
1075static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1076 struct splice_desc *sd)
1077{
1078 int ret;
1079 void *data;
1080 loff_t tmp = sd->pos;
1081
1082 data = buf->ops->map(pipe, buf, 0);
1083 ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
1084 buf->ops->unmap(pipe, buf, data);
1085
1086 return ret;
1087}
1088
1089ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
1090 struct file *out, loff_t *ppos,
1091 size_t len, unsigned int flags)
1092{
1093 ssize_t ret;
1094
1095 ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
1096 if (ret > 0)
1097 *ppos += ret;
1098
1099 return ret;
1100}
1101EXPORT_SYMBOL(default_file_splice_write);

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 */
1116ssize_t
1117generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
1118 loff_t *ppos, size_t len, unsigned int flags)
1119{
1120 if (IS_DAX(out->f_mapping->host))
1121 return default_file_splice_write(pipe, out, ppos, len, flags);
1122
1123 return splice_write_to_file(pipe, out, ppos, len, flags,
1124 generic_file_splice_write_actor);
1125}
1126EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 */
1141ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
1142 loff_t *ppos, size_t len, unsigned int flags)
1143{
1144 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
1145}
1146
1147EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
1152static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
1153 loff_t *ppos, size_t len, unsigned int flags)
1154{
1155 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
1156 loff_t *, size_t, unsigned int);
1157 struct inode *inode = out->f_mapping->host;
1158 int ret;
1159
1160 if (unlikely(!(out->f_mode & FMODE_WRITE)))
1161 return -EBADF;
1162
1163 if (unlikely(out->f_flags & O_APPEND))
1164 return -EINVAL;
1165
1166 ret = rw_verify_area(WRITE, out, ppos, len);
1167 if (unlikely(ret < 0))
1168 return ret;
1169
1170 ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
1171 if (ret)
1172 return ret;
1173
1174 if (out->f_op && out->f_op->splice_write)
1175 splice_write = out->f_op->splice_write;
1176 else
1177 splice_write = default_file_splice_write;
1178
1179 return splice_write(pipe, out, ppos, len, flags);
1180}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
1185static long do_splice_to(struct file *in, loff_t *ppos,
1186 struct pipe_inode_info *pipe, size_t len,
1187 unsigned int flags)
1188{
1189 ssize_t (*splice_read)(struct file *, loff_t *,
1190 struct pipe_inode_info *, size_t, unsigned int);
1191 int ret;
1192
1193 if (unlikely(!(in->f_mode & FMODE_READ)))
1194 return -EBADF;
1195
1196 ret = rw_verify_area(READ, in, ppos, len);
1197 if (unlikely(ret < 0))
1198 return ret;
1199
1200 if (in->f_op && in->f_op->splice_read)
1201 splice_read = in->f_op->splice_read;
1202 else
1203 splice_read = default_file_splice_read;
1204
1205 return splice_read(in, ppos, pipe, len, flags);
1206}

/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in: file to splice from
 * @sd: actor information on where to splice to
 * @actor: handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 */
1221ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1222 splice_direct_actor *actor)
1223{
1224 struct pipe_inode_info *pipe;
1225 long ret, bytes;
1226 umode_t i_mode;
1227 size_t len;
1228 int i, flags;

	/*
	 * We require the input to be a regular file or a block device, as
	 * we don't want to randomly drop data for eg socket -> socket
	 * splicing. Use the piped splicing for that!
	 */
1235 i_mode = file_inode(in)->i_mode;
1236 if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
1237 return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
1243 pipe = current->splice_pipe;
1244 if (unlikely(!pipe)) {
1245 pipe = alloc_pipe_info();
1246 if (!pipe)
1247 return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
1254 pipe->readers = 1;
1255
1256 current->splice_pipe = pipe;
1257 }

	/*
	 * Do the splice.
	 */
1262 ret = 0;
1263 bytes = 0;
1264 len = sd->total_len;
1265 flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
1270 sd->flags &= ~SPLICE_F_NONBLOCK;
1271
1272 while (len) {
1273 size_t read_len;
1274 loff_t pos = sd->pos, prev_pos = pos;
1275
1276 ret = do_splice_to(in, &pos, pipe, len, flags);
1277 if (unlikely(ret <= 0))
1278 goto out_release;
1279
1280 read_len = ret;
1281 sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck in a long loop doing the copy to the
		 * output of the data we already read.
		 */
1288 ret = actor(pipe, sd);
1289 if (unlikely(ret <= 0)) {
1290 sd->pos = prev_pos;
1291 goto out_release;
1292 }
1293
1294 bytes += ret;
1295 len -= ret;
1296 sd->pos = pos;
1297
1298 if (ret < read_len) {
1299 sd->pos = prev_pos + ret;
1300 goto out_release;
1301 }
1302 }
1303
1304done:
1305 pipe->nrbufs = pipe->curbuf = 0;
1306 file_accessed(in);
1307 return bytes;
1308
1309out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
1314 for (i = 0; i < pipe->buffers; i++) {
1315 struct pipe_buffer *buf = pipe->bufs + i;
1316
1317 if (buf->ops) {
1318 buf->ops->release(pipe, buf);
1319 buf->ops = NULL;
1320 }
1321 }
1322
1323 if (!bytes)
1324 bytes = ret;
1325
1326 goto done;
1327}
1328EXPORT_SYMBOL(splice_direct_to_actor);
1329
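/*
 * Actor for splice_direct_to_actor(): splice the internal pipe to the
 * output file at *sd->opos.
 */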
1330static int direct_splice_actor(struct pipe_inode_info *pipe,
1331 struct splice_desc *sd)
1332{
1333 struct file *file = sd->u.file;
1334
1335 return do_splice_from(pipe, file, sd->opos, sd->total_len,
1336 sd->flags);
1337}

/**
 * do_splice_direct - splices data directly between two files
 * @in: file to splice from
 * @ppos: input file offset
 * @out: file to splice to
 * @opos: output file offset
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this
 *    helper can splice directly through a process-private pipe.
 */
1355long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1356 loff_t *opos, size_t len, unsigned int flags)
1357{
1358 struct splice_desc sd = {
1359 .len = len,
1360 .total_len = len,
1361 .flags = flags,
1362 .pos = *ppos,
1363 .u.file = out,
1364 .opos = opos,
1365 };
1366 long ret;
1367
1368 ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
1369 if (ret > 0)
1370 *ppos = sd.pos;
1371
1372 return ret;
1373}
1374EXPORT_SYMBOL(do_splice_direct);
1375
1376static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1377 struct pipe_inode_info *opipe,
1378 size_t len, unsigned int flags);

/*
 * Determine where to splice to/from.
 */
1383static long do_splice(struct file *in, loff_t __user *off_in,
1384 struct file *out, loff_t __user *off_out,
1385 size_t len, unsigned int flags)
1386{
1387 struct pipe_inode_info *ipipe;
1388 struct pipe_inode_info *opipe;
1389 loff_t offset;
1390 long ret;
1391
1392 ipipe = get_pipe_info(in);
1393 opipe = get_pipe_info(out);
1394
1395 if (ipipe && opipe) {
1396 if (off_in || off_out)
1397 return -ESPIPE;
1398
1399 if (!(in->f_mode & FMODE_READ))
1400 return -EBADF;
1401
1402 if (!(out->f_mode & FMODE_WRITE))
1403 return -EBADF;

		/* Splicing to self would be fun, but... */
1406 if (ipipe == opipe)
1407 return -EINVAL;
1408
1409 return splice_pipe_to_pipe(ipipe, opipe, len, flags);
1410 }
1411
1412 if (ipipe) {
1413 if (off_in)
1414 return -ESPIPE;
1415 if (off_out) {
1416 if (!(out->f_mode & FMODE_PWRITE))
1417 return -EINVAL;
1418 if (copy_from_user(&offset, off_out, sizeof(loff_t)))
1419 return -EFAULT;
1420 } else {
1421 offset = out->f_pos;
1422 }
1423
1424 file_start_write(out);
1425 ret = do_splice_from(ipipe, out, &offset, len, flags);
1426 file_end_write(out);
1427
1428 if (!off_out)
1429 out->f_pos = offset;
1430 else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
1431 ret = -EFAULT;
1432
1433 return ret;
1434 }
1435
1436 if (opipe) {
1437 if (off_out)
1438 return -ESPIPE;
1439 if (off_in) {
1440 if (!(in->f_mode & FMODE_PREAD))
1441 return -EINVAL;
1442 if (copy_from_user(&offset, off_in, sizeof(loff_t)))
1443 return -EFAULT;
1444 } else {
1445 offset = in->f_pos;
1446 }
1447
1448 ret = do_splice_to(in, &offset, opipe, len, flags);
1449
1450 if (!off_in)
1451 in->f_pos = offset;
1452 else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
1453 ret = -EFAULT;
1454
1455 return ret;
1456 }
1457
1458 return -EINVAL;
1459}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'cooperative' flag.
 */
1468static int get_iovec_page_array(const struct iovec __user *iov,
1469 unsigned int nr_vecs, struct page **pages,
1470 struct partial_page *partial, bool aligned,
1471 unsigned int pipe_buffers)
1472{
1473 int buffers = 0, error = 0;
1474
1475 while (nr_vecs) {
1476 unsigned long off, npages;
1477 struct iovec entry;
1478 void __user *base;
1479 size_t len;
1480 int i;
1481
1482 error = -EFAULT;
1483 if (copy_from_user(&entry, iov, sizeof(entry)))
1484 break;
1485
1486 base = entry.iov_base;
1487 len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
1492 error = 0;
1493 if (unlikely(!len))
1494 break;
1495 error = -EFAULT;
1496 if (!access_ok(VERIFY_READ, base, len))
1497 break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
1503 off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
1509 error = -EINVAL;
1510 if (aligned && (off || len & ~PAGE_MASK))
1511 break;
1512
1513 npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1514 if (npages > pipe_buffers - buffers)
1515 npages = pipe_buffers - buffers;
1516
1517 error = get_user_pages_fast((unsigned long)base, npages,
1518 0, &pages[buffers]);
1519
1520 if (unlikely(error <= 0))
1521 break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
1526 for (i = 0; i < error; i++) {
1527 const int plen = min_t(size_t, len, PAGE_SIZE - off);
1528
1529 partial[buffers].offset = off;
1530 partial[buffers].len = plen;
1531
1532 off = 0;
1533 len -= plen;
1534 buffers++;
1535 }

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
1542 if (len)
1543 break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
1550 if (error < npages || buffers == pipe_buffers)
1551 break;
1552
1553 nr_vecs--;
1554 iov++;
1555 }
1556
1557 if (buffers)
1558 return buffers;
1559
1560 return error;
1561}
1562
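/*
 * Copy one pipe buffer to the user buffer described by 'sd', trying an
 * atomic map and copy first and falling back to a sleeping copy_to_user().
 */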
1563static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1564 struct splice_desc *sd)
1565{
1566 char *src;
1567 int ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
1573 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(sd->u.userptr, sd->len)) {
1574 src = buf->ops->map(pipe, buf, 1);
1575 ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
1576 sd->len);
1577 buf->ops->unmap(pipe, buf, src);
1578 if (!ret) {
1579 ret = sd->len;
1580 goto out;
1581 }
1582 }

	/*
	 * No dice, use slow non-atomic map and copy
	 */
1587 src = buf->ops->map(pipe, buf, 0);
1588
1589 ret = sd->len;
1590 if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
1591 ret = -EFAULT;
1592
1593 buf->ops->unmap(pipe, buf, src);
1594out:
1595 if (ret > 0)
1596 sd->u.userptr += ret;
1597 return ret;
1598}

/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
1604static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1605 unsigned long nr_segs, unsigned int flags)
1606{
1607 struct pipe_inode_info *pipe;
1608 struct splice_desc sd;
1609 ssize_t size;
1610 int error;
1611 long ret;
1612
1613 pipe = get_pipe_info(file);
1614 if (!pipe)
1615 return -EBADF;
1616
1617 pipe_lock(pipe);
1618
1619 error = ret = 0;
1620 while (nr_segs) {
1621 void __user *base;
1622 size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
1627 error = get_user(base, &iov->iov_base);
1628 if (unlikely(error))
1629 break;
1630 error = get_user(len, &iov->iov_len);
1631 if (unlikely(error))
1632 break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
1637 if (unlikely(!len))
1638 break;
1639 if (unlikely(!base)) {
1640 error = -EFAULT;
1641 break;
1642 }
1643
1644 if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
1645 error = -EFAULT;
1646 break;
1647 }
1648
1649 sd.len = 0;
1650 sd.total_len = len;
1651 sd.flags = flags;
1652 sd.u.userptr = base;
1653 sd.pos = 0;
1654
1655 size = __splice_from_pipe(pipe, &sd, pipe_to_user);
1656 if (size < 0) {
1657 if (!ret)
1658 ret = size;
1659
1660 break;
1661 }
1662
1663 ret += size;
1664
1665 if (size < len)
1666 break;
1667
1668 nr_segs--;
1669 iov++;
1670 }
1671
1672 pipe_unlock(pipe);
1673
1674 if (!ret)
1675 ret = error;
1676
1677 return ret;
1678}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
1685static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
1686 unsigned long nr_segs, unsigned int flags)
1687{
1688 struct pipe_inode_info *pipe;
1689 struct page *pages[PIPE_DEF_BUFFERS];
1690 struct partial_page partial[PIPE_DEF_BUFFERS];
1691 struct splice_pipe_desc spd = {
1692 .pages = pages,
1693 .partial = partial,
1694 .nr_pages_max = PIPE_DEF_BUFFERS,
1695 .flags = flags,
1696 .ops = &user_page_pipe_buf_ops,
1697 .spd_release = spd_release_page,
1698 };
1699 long ret;
1700
1701 pipe = get_pipe_info(file);
1702 if (!pipe)
1703 return -EBADF;
1704
1705 if (splice_grow_spd(pipe, &spd))
1706 return -ENOMEM;
1707
1708 spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
1709 spd.partial, false,
1710 spd.nr_pages_max);
1711 if (spd.nr_pages <= 0)
1712 ret = spd.nr_pages;
1713 else
1714 ret = splice_to_pipe(pipe, &spd);
1715
1716 splice_shrink_spd(&spd);
1717 return ret;
1718}

/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restrictions on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 */
1736SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
1737 unsigned long, nr_segs, unsigned int, flags)
1738{
1739 struct fd f;
1740 long error;
1741
1742 if (unlikely(nr_segs > UIO_MAXIOV))
1743 return -EINVAL;
1744 else if (unlikely(!nr_segs))
1745 return 0;
1746
1747 error = -EBADF;
1748 f = fdget(fd);
1749 if (f.file) {
1750 if (f.file->f_mode & FMODE_WRITE)
1751 error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
1752 else if (f.file->f_mode & FMODE_READ)
1753 error = vmsplice_to_user(f.file, iov, nr_segs, flags);
1754
1755 fdput(f);
1756 }
1757
1758 return error;
1759}
1760
1761#ifdef CONFIG_COMPAT
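/*
 * Compat vmsplice: convert the 32-bit iovec array into native iovecs on
 * the compat user stack, then hand off to sys_vmsplice().
 */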
1762COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
1763 unsigned int, nr_segs, unsigned int, flags)
1764{
1765 unsigned i;
1766 struct iovec __user *iov;
1767 if (nr_segs > UIO_MAXIOV)
1768 return -EINVAL;
1769 iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
1770 for (i = 0; i < nr_segs; i++) {
1771 struct compat_iovec v;
1772 if (get_user(v.iov_base, &iov32[i].iov_base) ||
1773 get_user(v.iov_len, &iov32[i].iov_len) ||
1774 put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
1775 put_user(v.iov_len, &iov[i].iov_len))
1776 return -EFAULT;
1777 }
1778 return sys_vmsplice(fd, iov, nr_segs, flags);
1779}
1780#endif
1781
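/*
 * The splice() system call: validate the two fds and hand off to
 * do_splice(). At least one of them must refer to a pipe.
 */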
1782SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
1783 int, fd_out, loff_t __user *, off_out,
1784 size_t, len, unsigned int, flags)
1785{
1786 struct fd in, out;
1787 long error;
1788
1789 if (unlikely(!len))
1790 return 0;
1791
1792 error = -EBADF;
1793 in = fdget(fd_in);
1794 if (in.file) {
1795 if (in.file->f_mode & FMODE_READ) {
1796 out = fdget(fd_out);
1797 if (out.file) {
1798 if (out.file->f_mode & FMODE_WRITE)
1799 error = do_splice(in.file, off_in,
1800 out.file, off_out,
1801 len, flags);
1802 fdput(out);
1803 }
1804 }
1805 fdput(in);
1806 }
1807 return error;
1808}

/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
1814static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1815{
1816 int ret;

	/*
	 * Check ->nrbufs without the pipe lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
1822 if (pipe->nrbufs)
1823 return 0;
1824
1825 ret = 0;
1826 pipe_lock(pipe);
1827
1828 while (!pipe->nrbufs) {
1829 if (signal_pending(current)) {
1830 ret = -ERESTARTSYS;
1831 break;
1832 }
1833 if (!pipe->writers)
1834 break;
1835 if (!pipe->waiting_writers) {
1836 if (flags & SPLICE_F_NONBLOCK) {
1837 ret = -EAGAIN;
1838 break;
1839 }
1840 }
1841 pipe_wait(pipe);
1842 }
1843
1844 pipe_unlock(pipe);
1845 return ret;
1846}

/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
1852static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1853{
1854 int ret;

	/*
	 * Check ->nrbufs without the pipe lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
1860 if (pipe->nrbufs < pipe->buffers)
1861 return 0;
1862
1863 ret = 0;
1864 pipe_lock(pipe);
1865
1866 while (pipe->nrbufs >= pipe->buffers) {
1867 if (!pipe->readers) {
1868 send_sig(SIGPIPE, current, 0);
1869 ret = -EPIPE;
1870 break;
1871 }
1872 if (flags & SPLICE_F_NONBLOCK) {
1873 ret = -EAGAIN;
1874 break;
1875 }
1876 if (signal_pending(current)) {
1877 ret = -ERESTARTSYS;
1878 break;
1879 }
1880 pipe->waiting_writers++;
1881 pipe_wait(pipe);
1882 pipe->waiting_writers--;
1883 }
1884
1885 pipe_unlock(pipe);
1886 return ret;
1887}

/*
 * Splice contents of ipipe to opipe.
 */
1892static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1893 struct pipe_inode_info *opipe,
1894 size_t len, unsigned int flags)
1895{
1896 struct pipe_buffer *ibuf, *obuf;
1897 int ret = 0, nbuf;
1898 bool input_wakeup = false;
1899
1900
1901retry:
1902 ret = ipipe_prep(ipipe, flags);
1903 if (ret)
1904 return ret;
1905
1906 ret = opipe_prep(opipe, flags);
1907 if (ret)
1908 return ret;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
1915 pipe_double_lock(ipipe, opipe);
1916
1917 do {
1918 if (!opipe->readers) {
1919 send_sig(SIGPIPE, current, 0);
1920 if (!ret)
1921 ret = -EPIPE;
1922 break;
1923 }
1924
1925 if (!ipipe->nrbufs && !ipipe->writers)
1926 break;

		/*
		 * Cannot make any progress, because either the input
		 * pipe is empty or the output pipe is full.
		 */
1932 if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
			/* Already processed some buffers, break */
1934 if (ret)
1935 break;
1936
1937 if (flags & SPLICE_F_NONBLOCK) {
1938 ret = -EAGAIN;
1939 break;
1940 }

			/*
			 * We raced with another reader/writer and haven't
			 * managed to process any buffers. A zero return
			 * value means EOF, so retry instead.
			 */
1947 pipe_unlock(ipipe);
1948 pipe_unlock(opipe);
1949 goto retry;
1950 }
1951
1952 ibuf = ipipe->bufs + ipipe->curbuf;
1953 nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
1954 obuf = opipe->bufs + nbuf;
1955
1956 if (len >= ibuf->len) {
			/*
			 * Simply move the whole buffer from ipipe to opipe
			 */
1960 *obuf = *ibuf;
1961 ibuf->ops = NULL;
1962 opipe->nrbufs++;
1963 ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
1964 ipipe->nrbufs--;
1965 input_wakeup = true;
1966 } else {
			/*
			 * Get a reference to this pipe buffer,
			 * so we can copy the contents over.
			 */
1971 ibuf->ops->get(ipipe, ibuf);
1972 *obuf = *ibuf;

			/*
			 * Don't inherit the gift flag, we need to
			 * prevent multiple steals of this page.
			 */
1978 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1979
1980 obuf->len = len;
1981 opipe->nrbufs++;
1982 ibuf->offset += obuf->len;
1983 ibuf->len -= obuf->len;
1984 }
1985 ret += obuf->len;
1986 len -= obuf->len;
1987 } while (len);
1988
1989 pipe_unlock(ipipe);
1990 pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
1995 if (ret > 0)
1996 wakeup_pipe_readers(opipe);
1997
1998 if (input_wakeup)
1999 wakeup_pipe_writers(ipipe);
2000
2001 return ret;
2002}

/*
 * Link contents of ipipe to opipe.
 */
2007static int link_pipe(struct pipe_inode_info *ipipe,
2008 struct pipe_inode_info *opipe,
2009 size_t len, unsigned int flags)
2010{
2011 struct pipe_buffer *ibuf, *obuf;
2012 int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
2019 pipe_double_lock(ipipe, opipe);
2020
2021 do {
2022 if (!opipe->readers) {
2023 send_sig(SIGPIPE, current, 0);
2024 if (!ret)
2025 ret = -EPIPE;
2026 break;
2027 }

		/*
		 * If we have iterated all input buffers or run out of
		 * output room, break.
		 */
2033 if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
2034 break;
2035
2036 ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
2037 nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
2043 ibuf->ops->get(ipipe, ibuf);
2044
2045 obuf = opipe->bufs + nbuf;
2046 *obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
2052 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2053
2054 if (obuf->len > len)
2055 obuf->len = len;
2056
2057 opipe->nrbufs++;
2058 ret += obuf->len;
2059 len -= obuf->len;
2060 i++;
2061 } while (len);

	/*
	 * return EAGAIN if we have the potential of some data in the
	 * future, otherwise just return 0
	 */
2067 if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
2068 ret = -EAGAIN;
2069
2070 pipe_unlock(ipipe);
2071 pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
2076 if (ret > 0)
2077 wakeup_pipe_readers(opipe);
2078
2079 return ret;
2080}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
2088static long do_tee(struct file *in, struct file *out, size_t len,
2089 unsigned int flags)
2090{
2091 struct pipe_inode_info *ipipe = get_pipe_info(in);
2092 struct pipe_inode_info *opipe = get_pipe_info(out);
2093 int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe, without actually
	 * copying the data.
	 */
2099 if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
2104 ret = ipipe_prep(ipipe, flags);
2105 if (!ret) {
2106 ret = opipe_prep(opipe, flags);
2107 if (!ret)
2108 ret = link_pipe(ipipe, opipe, len, flags);
2109 }
2110 }
2111
2112 return ret;
2113}
2114
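/*
 * The tee() system call: validate both pipe fds and hand off to do_tee().
 */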
2115SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
2116{
2117 struct fd in;
2118 int error;
2119
2120 if (unlikely(!len))
2121 return 0;
2122
2123 error = -EBADF;
2124 in = fdget(fdin);
2125 if (in.file) {
2126 if (in.file->f_mode & FMODE_READ) {
2127 struct fd out = fdget(fdout);
2128 if (out.file) {
2129 if (out.file->f_mode & FMODE_WRITE)
2130 error = do_tee(in.file, out.file,
2131 len, flags);
2132 fdput(out);
2133 }
2134 }
2135 fdput(in);
2136 }
2137
2138 return error;
2139}
2140