// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */

42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <net/compat.h>
48#include <linux/refcount.h>
49#include <linux/uio.h>
50#include <linux/bits.h>
51
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
58#include <linux/percpu.h>
59#include <linux/slab.h>
60#include <linux/blkdev.h>
61#include <linux/bvec.h>
62#include <linux/net.h>
63#include <net/sock.h>
64#include <net/af_unix.h>
65#include <net/scm.h>
66#include <linux/anon_inodes.h>
67#include <linux/sched/mm.h>
68#include <linux/uaccess.h>
69#include <linux/nospec.h>
70#include <linux/sizes.h>
71#include <linux/hugetlb.h>
72#include <linux/highmem.h>
73#include <linux/namei.h>
74#include <linux/fsnotify.h>
75#include <linux/fadvise.h>
76#include <linux/eventpoll.h>
77#include <linux/splice.h>
78#include <linux/task_work.h>
79#include <linux/pagemap.h>
80#include <linux/io_uring.h>
81#include <linux/tracehook.h>
82
83#define CREATE_TRACE_POINTS
84#include <trace/events/io_uring.h>
85
86#include <uapi/linux/io_uring.h>
87
88#include "internal.h"
89#include "io-wq.h"
90
91#define IORING_MAX_ENTRIES 32768
92#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
93#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/*
 * Registered (fixed) files live in a two-level table: up to 64 table
 * pages of 512 entries each, i.e. IORING_MAX_FIXED_FILES (32768) files.
 */
98#define IORING_FILE_TABLE_SHIFT 9
99#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
100#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
101#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
102#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
103 IORING_REGISTER_LAST + IORING_OP_LAST)
104
105#define IO_RSRC_TAG_TABLE_SHIFT 9
106#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
107#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
108
109#define IORING_MAX_REG_BUFFERS (1U << 14)
110
111#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
112 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
113 IOSQE_BUFFER_SELECT)
114#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
115 REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)
116
117#define IO_TCTX_REFS_CACHE_NR (1U << 10)
118
119struct io_uring {
120 u32 head ____cacheline_aligned_in_smp;
121 u32 tail ____cacheline_aligned_in_smp;
122};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
131struct io_rings {
 /*
  * Head and tail offsets into the ring; the offsets need to be
  * masked to get valid indices.
  *
  * The kernel controls the head of the sq ring and the tail of the cq
  * ring, and the application controls the tail of the sq ring and the
  * head of the cq ring.
  */
140 struct io_uring sq, cq;

 /*
  * Bitmasks to apply to head and tail offsets (constant, equals
  * ring_entries - 1)
  */
145 u32 sq_ring_mask, cq_ring_mask;
 /* Ring sizes (constant, power of 2) */
147 u32 sq_ring_entries, cq_ring_entries;

 /*
  * Number of invalid entries dropped by the kernel due to an invalid
  * index stored in the SQ array.
  *
  * Written by the kernel, shouldn't be modified by the application
  * (i.e. get the number of "new events" by comparing to the cached
  * value).
  */
160 u32 sq_dropped;

 /*
  * Runtime SQ flags
  *
  * Written by the kernel, shouldn't be modified by the application.
  * The application needs a full memory barrier before checking
  * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
  */
170 u32 sq_flags;

 /*
  * Runtime CQ flags
  *
  * Written by the application, shouldn't be modified by the kernel.
  */
177 u32 cq_flags;

 /*
  * Number of completion events lost because the queue was full;
  * this should be avoided by the application by making sure
  * there are not more requests pending than there is space in
  * the completion queue.
  *
  * Written by the kernel, shouldn't be modified by the application
  * (i.e. get the number of "new events" by comparing to the cached
  * value).
  */
191 u32 cq_overflow;

 /*
  * Ring buffer of completion events.
  *
  * The kernel writes completion events fresh every time they are
  * produced, so the application is allowed to modify pending
  * entries.
  */
199 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
200};
201
202enum io_uring_cmd_flags {
203 IO_URING_F_NONBLOCK = 1,
204 IO_URING_F_COMPLETE_DEFER = 2,
205};
206
207struct io_mapped_ubuf {
208 u64 ubuf;
209 u64 ubuf_end;
210 unsigned int nr_bvecs;
211 unsigned long acct_pages;
212 struct bio_vec bvec[];
213};
214
215struct io_ring_ctx;
216
217struct io_overflow_cqe {
218 struct io_uring_cqe cqe;
219 struct list_head list;
220};
221
222struct io_fixed_file {
223
224 unsigned long file_ptr;
225};
226
227struct io_rsrc_put {
228 struct list_head list;
229 u64 tag;
230 union {
231 void *rsrc;
232 struct file *file;
233 struct io_mapped_ubuf *buf;
234 };
235};
236
237struct io_file_table {
238
239 struct io_fixed_file **files;
240};
241
242struct io_rsrc_node {
243 struct percpu_ref refs;
244 struct list_head node;
245 struct list_head rsrc_list;
246 struct io_rsrc_data *rsrc_data;
247 struct llist_node llist;
248 bool done;
249};
250
251typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
252
253struct io_rsrc_data {
254 struct io_ring_ctx *ctx;
255
256 u64 **tags;
257 unsigned int nr;
258 rsrc_put_fn *do_put;
259 atomic_t refs;
260 struct completion done;
261 bool quiesce;
262};
263
264struct io_buffer {
265 struct list_head list;
266 __u64 addr;
267 __u32 len;
268 __u16 bid;
269};
270
271struct io_restriction {
272 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
273 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
274 u8 sqe_flags_allowed;
275 u8 sqe_flags_required;
276 bool registered;
277};
278
279enum {
280 IO_SQ_THREAD_SHOULD_STOP = 0,
281 IO_SQ_THREAD_SHOULD_PARK,
282};
283
284struct io_sq_data {
285 refcount_t refs;
286 atomic_t park_pending;
287 struct mutex lock;
288
289
290 struct list_head ctx_list;
291
292 struct task_struct *thread;
293 struct wait_queue_head wait;
294
295 unsigned sq_thread_idle;
296 int sq_cpu;
297 pid_t task_pid;
298 pid_t task_tgid;
299
300 unsigned long state;
301 struct completion exited;
302};
303
304#define IO_IOPOLL_BATCH 8
305#define IO_COMPL_BATCH 32
306#define IO_REQ_CACHE_SIZE 32
307#define IO_REQ_ALLOC_BATCH 8
308
309struct io_comp_state {
310 struct io_kiocb *reqs[IO_COMPL_BATCH];
311 unsigned int nr;
312
313 struct list_head free_list;
314};
315
316struct io_submit_link {
317 struct io_kiocb *head;
318 struct io_kiocb *last;
319};
320
321struct io_submit_state {
322 struct blk_plug plug;
323 struct io_submit_link link;
324
325
326
327
328 void *reqs[IO_REQ_CACHE_SIZE];
329 unsigned int free_reqs;
330
331 bool plug_started;
332
333
334
335
336 struct io_comp_state comp;
337
338
339
340
341 struct file *file;
342 unsigned int fd;
343 unsigned int file_refs;
344 unsigned int ios_left;
345};
346
347struct io_ring_ctx {
348
349 struct {
350 struct percpu_ref refs;
351
352 struct io_rings *rings;
353 unsigned int flags;
354 unsigned int compat: 1;
355 unsigned int drain_next: 1;
356 unsigned int eventfd_async: 1;
357 unsigned int restricted: 1;
358 unsigned int off_timeout_used: 1;
359 unsigned int drain_active: 1;
360 } ____cacheline_aligned_in_smp;
361
362
363 struct {
364 struct mutex uring_lock;
365
366
367
368
369
370
371
372
373
374
375
376
377 u32 *sq_array;
378 struct io_uring_sqe *sq_sqes;
379 unsigned cached_sq_head;
380 unsigned sq_entries;
381 struct list_head defer_list;
382
383
384
385
386
387 struct io_rsrc_node *rsrc_node;
388 struct io_file_table file_table;
389 unsigned nr_user_files;
390 unsigned nr_user_bufs;
391 struct io_mapped_ubuf **user_bufs;
392
393 struct io_submit_state submit_state;
394 struct list_head timeout_list;
395 struct list_head cq_overflow_list;
396 struct xarray io_buffers;
397 struct xarray personalities;
398 u32 pers_next;
399 unsigned sq_thread_idle;
400 } ____cacheline_aligned_in_smp;
401
402
403 struct list_head locked_free_list;
404 unsigned int locked_free_nr;
405
406 const struct cred *sq_creds;
407 struct io_sq_data *sq_data;
408
409 struct wait_queue_head sqo_sq_wait;
410 struct list_head sqd_list;
411
412 unsigned long check_cq_overflow;
413
414 struct {
415 unsigned cached_cq_tail;
416 unsigned cq_entries;
417 struct eventfd_ctx *cq_ev_fd;
418 struct wait_queue_head poll_wait;
419 struct wait_queue_head cq_wait;
420 unsigned cq_extra;
421 atomic_t cq_timeouts;
422 struct fasync_struct *cq_fasync;
423 unsigned cq_last_tm_flush;
424 } ____cacheline_aligned_in_smp;
425
426 struct {
427 spinlock_t completion_lock;
428
429
430
431
432
433
434
435 struct list_head iopoll_list;
436 struct hlist_head *cancel_hash;
437 unsigned cancel_hash_bits;
438 bool poll_multi_queue;
439 } ____cacheline_aligned_in_smp;
440
441 struct io_restriction restrictions;
442
443
444 struct {
445 struct io_rsrc_node *rsrc_backup_node;
446 struct io_mapped_ubuf *dummy_ubuf;
447 struct io_rsrc_data *file_data;
448 struct io_rsrc_data *buf_data;
449
450 struct delayed_work rsrc_put_work;
451 struct llist_head rsrc_put_llist;
452 struct list_head rsrc_ref_list;
453 spinlock_t rsrc_ref_lock;
454 };
455
456
457 struct {
458 #if defined(CONFIG_UNIX)
459 struct socket *ring_sock;
460 #endif
461
462 struct io_wq_hash *hash_map;
463
464
465 struct user_struct *user;
466 struct mm_struct *mm_account;
467
468
469 struct llist_head fallback_llist;
470 struct delayed_work fallback_work;
471 struct work_struct exit_work;
472 struct list_head tctx_list;
473 struct completion ref_comp;
474 };
475};
476
477struct io_uring_task {
478
479 int cached_refs;
480 struct xarray xa;
481 struct wait_queue_head wait;
482 const struct io_ring_ctx *last;
483 struct io_wq *io_wq;
484 struct percpu_counter inflight;
485 atomic_t inflight_tracked;
486 atomic_t in_idle;
487
488 spinlock_t task_lock;
489 struct io_wq_work_list task_list;
490 unsigned long task_state;
491 struct callback_head task_work;
492};
493
494
495
496
497
498struct io_poll_iocb {
499 struct file *file;
500 struct wait_queue_head *head;
501 __poll_t events;
502 bool done;
503 bool canceled;
504 struct wait_queue_entry wait;
505};
506
507struct io_poll_update {
508 struct file *file;
509 u64 old_user_data;
510 u64 new_user_data;
511 __poll_t events;
512 bool update_events;
513 bool update_user_data;
514};
515
516struct io_close {
517 struct file *file;
518 int fd;
519};
520
521struct io_timeout_data {
522 struct io_kiocb *req;
523 struct hrtimer timer;
524 struct timespec64 ts;
525 enum hrtimer_mode mode;
526};
527
528struct io_accept {
529 struct file *file;
530 struct sockaddr __user *addr;
531 int __user *addr_len;
532 int flags;
533 unsigned long nofile;
534};
535
536struct io_sync {
537 struct file *file;
538 loff_t len;
539 loff_t off;
540 int flags;
541 int mode;
542};
543
544struct io_cancel {
545 struct file *file;
546 u64 addr;
547};
548
549struct io_timeout {
550 struct file *file;
551 u32 off;
552 u32 target_seq;
553 struct list_head list;
554
555 struct io_kiocb *head;
556};
557
558struct io_timeout_rem {
559 struct file *file;
560 u64 addr;
561
562
563 struct timespec64 ts;
564 u32 flags;
565};
566
567struct io_rw {
568
569 struct kiocb kiocb;
570 u64 addr;
571 u64 len;
572};
573
574struct io_connect {
575 struct file *file;
576 struct sockaddr __user *addr;
577 int addr_len;
578};
579
580struct io_sr_msg {
581 struct file *file;
582 union {
583 struct compat_msghdr __user *umsg_compat;
584 struct user_msghdr __user *umsg;
585 void __user *buf;
586 };
587 int msg_flags;
588 int bgid;
589 size_t len;
590 struct io_buffer *kbuf;
591};
592
593struct io_open {
594 struct file *file;
595 int dfd;
596 struct filename *filename;
597 struct open_how how;
598 unsigned long nofile;
599};
600
601struct io_rsrc_update {
602 struct file *file;
603 u64 arg;
604 u32 nr_args;
605 u32 offset;
606};
607
608struct io_fadvise {
609 struct file *file;
610 u64 offset;
611 u32 len;
612 u32 advice;
613};
614
615struct io_madvise {
616 struct file *file;
617 u64 addr;
618 u32 len;
619 u32 advice;
620};
621
622struct io_epoll {
623 struct file *file;
624 int epfd;
625 int op;
626 int fd;
627 struct epoll_event event;
628};
629
630struct io_splice {
631 struct file *file_out;
632 struct file *file_in;
633 loff_t off_out;
634 loff_t off_in;
635 u64 len;
636 unsigned int flags;
637};
638
639struct io_provide_buf {
640 struct file *file;
641 __u64 addr;
642 __u32 len;
643 __u32 bgid;
644 __u16 nbufs;
645 __u16 bid;
646};
647
648struct io_statx {
649 struct file *file;
650 int dfd;
651 unsigned int mask;
652 unsigned int flags;
653 const char __user *filename;
654 struct statx __user *buffer;
655};
656
657struct io_shutdown {
658 struct file *file;
659 int how;
660};
661
662struct io_rename {
663 struct file *file;
664 int old_dfd;
665 int new_dfd;
666 struct filename *oldpath;
667 struct filename *newpath;
668 int flags;
669};
670
671struct io_unlink {
672 struct file *file;
673 int dfd;
674 int flags;
675 struct filename *filename;
676};
677
678struct io_completion {
679 struct file *file;
680 struct list_head list;
681 u32 cflags;
682};
683
684struct io_async_connect {
685 struct sockaddr_storage address;
686};
687
688struct io_async_msghdr {
689 struct iovec fast_iov[UIO_FASTIOV];
690
691 struct iovec *free_iov;
692 struct sockaddr __user *uaddr;
693 struct msghdr msg;
694 struct sockaddr_storage addr;
695};
696
697struct io_async_rw {
698 struct iovec fast_iov[UIO_FASTIOV];
699 const struct iovec *free_iovec;
700 struct iov_iter iter;
701 size_t bytes_done;
702 struct wait_page_queue wpq;
703};
704
705enum {
706 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
707 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
708 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
709 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
710 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
711 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
712
713
714 REQ_F_FAIL_BIT = 8,
715 REQ_F_INFLIGHT_BIT,
716 REQ_F_CUR_POS_BIT,
717 REQ_F_NOWAIT_BIT,
718 REQ_F_LINK_TIMEOUT_BIT,
719 REQ_F_NEED_CLEANUP_BIT,
720 REQ_F_POLLED_BIT,
721 REQ_F_BUFFER_SELECTED_BIT,
722 REQ_F_LTIMEOUT_ACTIVE_BIT,
723 REQ_F_COMPLETE_INLINE_BIT,
724 REQ_F_REISSUE_BIT,
725 REQ_F_DONT_REISSUE_BIT,
726 REQ_F_CREDS_BIT,
727
728 REQ_F_ASYNC_READ_BIT,
729 REQ_F_ASYNC_WRITE_BIT,
730 REQ_F_ISREG_BIT,
731
732
733 __REQ_F_LAST_BIT,
734};
735
736enum {
737
738 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
739
740 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
741
742 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
743
744 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
745
746 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
747
748 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
749
750
751 REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
752
753 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
754
755 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
756
757 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
758
759 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
760
761 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
762
763 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
764
765 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
766
767 REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
768
769 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
770
771 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
772
773 REQ_F_DONT_REISSUE = BIT(REQ_F_DONT_REISSUE_BIT),
774
775 REQ_F_ASYNC_READ = BIT(REQ_F_ASYNC_READ_BIT),
776
777 REQ_F_ASYNC_WRITE = BIT(REQ_F_ASYNC_WRITE_BIT),
778
779 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
780
781 REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
782};
783
784struct async_poll {
785 struct io_poll_iocb poll;
786 struct io_poll_iocb *double_poll;
787};
788
789typedef void (*io_req_tw_func_t)(struct io_kiocb *req);
790
791struct io_task_work {
792 union {
793 struct io_wq_work_node node;
794 struct llist_node fallback_node;
795 };
796 io_req_tw_func_t func;
797};
798
799enum {
800 IORING_RSRC_FILE = 0,
801 IORING_RSRC_BUFFER = 1,
802};
803
804
805
806
807
808
809
810struct io_kiocb {
811 union {
812 struct file *file;
813 struct io_rw rw;
814 struct io_poll_iocb poll;
815 struct io_poll_update poll_update;
816 struct io_accept accept;
817 struct io_sync sync;
818 struct io_cancel cancel;
819 struct io_timeout timeout;
820 struct io_timeout_rem timeout_rem;
821 struct io_connect connect;
822 struct io_sr_msg sr_msg;
823 struct io_open open;
824 struct io_close close;
825 struct io_rsrc_update rsrc_update;
826 struct io_fadvise fadvise;
827 struct io_madvise madvise;
828 struct io_epoll epoll;
829 struct io_splice splice;
830 struct io_provide_buf pbuf;
831 struct io_statx statx;
832 struct io_shutdown shutdown;
833 struct io_rename rename;
834 struct io_unlink unlink;
835
836 struct io_completion compl;
837 };
838
839
840 void *async_data;
841 u8 opcode;
842
843 u8 iopoll_completed;
844
845 u16 buf_index;
846 u32 result;
847
848 struct io_ring_ctx *ctx;
849 unsigned int flags;
850 atomic_t refs;
851 struct task_struct *task;
852 u64 user_data;
853
854 struct io_kiocb *link;
855 struct percpu_ref *fixed_rsrc_refs;
856
857
858 struct list_head inflight_entry;
859 struct io_task_work io_task_work;
860
861 struct hlist_node hash_node;
862 struct async_poll *apoll;
863 struct io_wq_work work;
864 const struct cred *creds;
865
866
867 struct io_mapped_ubuf *imu;
868};
869
870struct io_tctx_node {
871 struct list_head ctx_node;
872 struct task_struct *task;
873 struct io_ring_ctx *ctx;
874};
875
876struct io_defer_entry {
877 struct list_head list;
878 struct io_kiocb *req;
879 u32 seq;
880};
881
882struct io_op_def {
883
884 unsigned needs_file : 1;
885
886 unsigned hash_reg_file : 1;
887
888 unsigned unbound_nonreg_file : 1;
889
890 unsigned not_supported : 1;
891
892 unsigned pollin : 1;
893 unsigned pollout : 1;
894
895 unsigned buffer_select : 1;
896
897 unsigned needs_async_setup : 1;
898
899 unsigned plug : 1;
900
901 unsigned short async_size;
902};
903
904static const struct io_op_def io_op_defs[] = {
905 [IORING_OP_NOP] = {},
906 [IORING_OP_READV] = {
907 .needs_file = 1,
908 .unbound_nonreg_file = 1,
909 .pollin = 1,
910 .buffer_select = 1,
911 .needs_async_setup = 1,
912 .plug = 1,
913 .async_size = sizeof(struct io_async_rw),
914 },
915 [IORING_OP_WRITEV] = {
916 .needs_file = 1,
917 .hash_reg_file = 1,
918 .unbound_nonreg_file = 1,
919 .pollout = 1,
920 .needs_async_setup = 1,
921 .plug = 1,
922 .async_size = sizeof(struct io_async_rw),
923 },
924 [IORING_OP_FSYNC] = {
925 .needs_file = 1,
926 },
927 [IORING_OP_READ_FIXED] = {
928 .needs_file = 1,
929 .unbound_nonreg_file = 1,
930 .pollin = 1,
931 .plug = 1,
932 .async_size = sizeof(struct io_async_rw),
933 },
934 [IORING_OP_WRITE_FIXED] = {
935 .needs_file = 1,
936 .hash_reg_file = 1,
937 .unbound_nonreg_file = 1,
938 .pollout = 1,
939 .plug = 1,
940 .async_size = sizeof(struct io_async_rw),
941 },
942 [IORING_OP_POLL_ADD] = {
943 .needs_file = 1,
944 .unbound_nonreg_file = 1,
945 },
946 [IORING_OP_POLL_REMOVE] = {},
947 [IORING_OP_SYNC_FILE_RANGE] = {
948 .needs_file = 1,
949 },
950 [IORING_OP_SENDMSG] = {
951 .needs_file = 1,
952 .unbound_nonreg_file = 1,
953 .pollout = 1,
954 .needs_async_setup = 1,
955 .async_size = sizeof(struct io_async_msghdr),
956 },
957 [IORING_OP_RECVMSG] = {
958 .needs_file = 1,
959 .unbound_nonreg_file = 1,
960 .pollin = 1,
961 .buffer_select = 1,
962 .needs_async_setup = 1,
963 .async_size = sizeof(struct io_async_msghdr),
964 },
965 [IORING_OP_TIMEOUT] = {
966 .async_size = sizeof(struct io_timeout_data),
967 },
968 [IORING_OP_TIMEOUT_REMOVE] = {
969
970 },
971 [IORING_OP_ACCEPT] = {
972 .needs_file = 1,
973 .unbound_nonreg_file = 1,
974 .pollin = 1,
975 },
976 [IORING_OP_ASYNC_CANCEL] = {},
977 [IORING_OP_LINK_TIMEOUT] = {
978 .async_size = sizeof(struct io_timeout_data),
979 },
980 [IORING_OP_CONNECT] = {
981 .needs_file = 1,
982 .unbound_nonreg_file = 1,
983 .pollout = 1,
984 .needs_async_setup = 1,
985 .async_size = sizeof(struct io_async_connect),
986 },
987 [IORING_OP_FALLOCATE] = {
988 .needs_file = 1,
989 },
990 [IORING_OP_OPENAT] = {},
991 [IORING_OP_CLOSE] = {},
992 [IORING_OP_FILES_UPDATE] = {},
993 [IORING_OP_STATX] = {},
994 [IORING_OP_READ] = {
995 .needs_file = 1,
996 .unbound_nonreg_file = 1,
997 .pollin = 1,
998 .buffer_select = 1,
999 .plug = 1,
1000 .async_size = sizeof(struct io_async_rw),
1001 },
1002 [IORING_OP_WRITE] = {
1003 .needs_file = 1,
1004 .unbound_nonreg_file = 1,
1005 .pollout = 1,
1006 .plug = 1,
1007 .async_size = sizeof(struct io_async_rw),
1008 },
1009 [IORING_OP_FADVISE] = {
1010 .needs_file = 1,
1011 },
1012 [IORING_OP_MADVISE] = {},
1013 [IORING_OP_SEND] = {
1014 .needs_file = 1,
1015 .unbound_nonreg_file = 1,
1016 .pollout = 1,
1017 },
1018 [IORING_OP_RECV] = {
1019 .needs_file = 1,
1020 .unbound_nonreg_file = 1,
1021 .pollin = 1,
1022 .buffer_select = 1,
1023 },
1024 [IORING_OP_OPENAT2] = {
1025 },
1026 [IORING_OP_EPOLL_CTL] = {
1027 .unbound_nonreg_file = 1,
1028 },
1029 [IORING_OP_SPLICE] = {
1030 .needs_file = 1,
1031 .hash_reg_file = 1,
1032 .unbound_nonreg_file = 1,
1033 },
1034 [IORING_OP_PROVIDE_BUFFERS] = {},
1035 [IORING_OP_REMOVE_BUFFERS] = {},
1036 [IORING_OP_TEE] = {
1037 .needs_file = 1,
1038 .hash_reg_file = 1,
1039 .unbound_nonreg_file = 1,
1040 },
1041 [IORING_OP_SHUTDOWN] = {
1042 .needs_file = 1,
1043 },
1044 [IORING_OP_RENAMEAT] = {},
1045 [IORING_OP_UNLINKAT] = {},
1046};
1047
1048static bool io_disarm_next(struct io_kiocb *req);
1049static void io_uring_del_tctx_node(unsigned long index);
1050static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1051 struct task_struct *task,
1052 bool cancel_all);
1053static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
1054static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
1055
1056static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1057 long res, unsigned int cflags);
1058static void io_put_req(struct io_kiocb *req);
1059static void io_put_req_deferred(struct io_kiocb *req, int nr);
1060static void io_dismantle_req(struct io_kiocb *req);
1061static void io_put_task(struct task_struct *task, int nr);
1062static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
1063static void io_queue_linked_timeout(struct io_kiocb *req);
1064static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
1065 struct io_uring_rsrc_update2 *up,
1066 unsigned nr_args);
1067static void io_clean_op(struct io_kiocb *req);
1068static struct file *io_file_get(struct io_submit_state *state,
1069 struct io_kiocb *req, int fd, bool fixed);
1070static void __io_queue_sqe(struct io_kiocb *req);
1071static void io_rsrc_put_work(struct work_struct *work);
1072
1073static void io_req_task_queue(struct io_kiocb *req);
1074static void io_submit_flush_completions(struct io_ring_ctx *ctx);
1075static bool io_poll_remove_waitqs(struct io_kiocb *req);
1076static int io_req_prep_async(struct io_kiocb *req);
1077
1078static void io_fallback_req_func(struct work_struct *unused);
1079
1080static struct kmem_cache *req_cachep;
1081
1082static const struct file_operations io_uring_fops;
1083
1084struct sock *io_uring_get_socket(struct file *file)
1085{
1086#if defined(CONFIG_UNIX)
1087 if (file->f_op == &io_uring_fops) {
1088 struct io_ring_ctx *ctx = file->private_data;
1089
1090 return ctx->ring_sock->sk;
1091 }
1092#endif
1093 return NULL;
1094}
1095EXPORT_SYMBOL(io_uring_get_socket);
1096
1097#define io_for_each_link(pos, head) \
1098 for (pos = (head); pos; pos = pos->link)
1099
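/*
 * Pin the rsrc node that is current for this ring so that registered
 * files/buffers referenced by this request cannot be torn down while
 * the request is in flight. The reference is dropped in io_dismantle_req().
 */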
1100static inline void io_req_set_rsrc_node(struct io_kiocb *req)
1101{
1102 struct io_ring_ctx *ctx = req->ctx;
1103
1104 if (!req->fixed_rsrc_refs) {
1105 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
1106 percpu_ref_get(req->fixed_rsrc_refs);
1107 }
1108}
1109
1110static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1111{
1112 bool got = percpu_ref_tryget(ref);
1113
1114
1115 if (!got)
1116 wait_for_completion(compl);
1117 percpu_ref_resurrect(ref);
1118 if (got)
1119 percpu_ref_put(ref);
1120}
1121
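/*
 * Return true if this request, or any request linked to it, should be
 * cancelled on behalf of @task. With cancel_all set every request of
 * the task matches; otherwise only tracked (inflight) requests do.
 */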
1122static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1123 bool cancel_all)
1124{
1125 struct io_kiocb *req;
1126
1127 if (task && head->task != task)
1128 return false;
1129 if (cancel_all)
1130 return true;
1131
1132 io_for_each_link(req, head) {
1133 if (req->flags & REQ_F_INFLIGHT)
1134 return true;
1135 }
1136 return false;
1137}
1138
1139static inline void req_set_fail(struct io_kiocb *req)
1140{
1141 req->flags |= REQ_F_FAIL;
1142}
1143
1144static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1145{
1146 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1147
1148 complete(&ctx->ref_comp);
1149}
1150
1151static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1152{
1153 return !req->timeout.off;
1154}
1155
1156static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1157{
1158 struct io_ring_ctx *ctx;
1159 int hash_bits;
1160
1161 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1162 if (!ctx)
1163 return NULL;

 /*
  * Use 5 bits less than the max cq entries, that should give us about
  * 32 entries per hash list if totally full and uniformly spread.
  */
1169 hash_bits = ilog2(p->cq_entries);
1170 hash_bits -= 5;
1171 if (hash_bits <= 0)
1172 hash_bits = 1;
1173 ctx->cancel_hash_bits = hash_bits;
1174 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1175 GFP_KERNEL);
1176 if (!ctx->cancel_hash)
1177 goto err;
1178 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1179
1180 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1181 if (!ctx->dummy_ubuf)
1182 goto err;
1183
1184 ctx->dummy_ubuf->ubuf = -1UL;
1185
1186 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
1187 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1188 goto err;
1189
1190 ctx->flags = p->flags;
1191 init_waitqueue_head(&ctx->sqo_sq_wait);
1192 INIT_LIST_HEAD(&ctx->sqd_list);
1193 init_waitqueue_head(&ctx->poll_wait);
1194 INIT_LIST_HEAD(&ctx->cq_overflow_list);
1195 init_completion(&ctx->ref_comp);
1196 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
1197 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
1198 mutex_init(&ctx->uring_lock);
1199 init_waitqueue_head(&ctx->cq_wait);
1200 spin_lock_init(&ctx->completion_lock);
1201 INIT_LIST_HEAD(&ctx->iopoll_list);
1202 INIT_LIST_HEAD(&ctx->defer_list);
1203 INIT_LIST_HEAD(&ctx->timeout_list);
1204 spin_lock_init(&ctx->rsrc_ref_lock);
1205 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
1206 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1207 init_llist_head(&ctx->rsrc_put_llist);
1208 INIT_LIST_HEAD(&ctx->tctx_list);
1209 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
1210 INIT_LIST_HEAD(&ctx->locked_free_list);
1211 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
1212 return ctx;
1213err:
1214 kfree(ctx->dummy_ubuf);
1215 kfree(ctx->cancel_hash);
1216 kfree(ctx);
1217 return NULL;
1218}
1219
1220static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1221{
1222 struct io_rings *r = ctx->rings;
1223
1224 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1225 ctx->cq_extra--;
1226}
1227
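/*
 * A drained request must wait until all previously submitted requests
 * have completed; compare its sequence against the current CQ tail
 * (adjusted by cq_extra).
 */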
1228static bool req_need_defer(struct io_kiocb *req, u32 seq)
1229{
1230 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1231 struct io_ring_ctx *ctx = req->ctx;
1232
1233 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
1234 }
1235
1236 return false;
1237}
1238
1239static void io_req_track_inflight(struct io_kiocb *req)
1240{
1241 if (!(req->flags & REQ_F_INFLIGHT)) {
1242 req->flags |= REQ_F_INFLIGHT;
 atomic_inc(&current->io_uring->inflight_tracked);
1244 }
1245}
1246
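/*
 * Prepare ->work for punting this request to io-wq: capture the
 * submitter's credentials and decide whether the work should be hashed
 * (serialised against the same file) or queued as unbounded work.
 */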
1247static void io_prep_async_work(struct io_kiocb *req)
1248{
1249 const struct io_op_def *def = &io_op_defs[req->opcode];
1250 struct io_ring_ctx *ctx = req->ctx;
1251
1252 if (!(req->flags & REQ_F_CREDS)) {
1253 req->flags |= REQ_F_CREDS;
1254 req->creds = get_current_cred();
1255 }
1256
1257 req->work.list.next = NULL;
1258 req->work.flags = 0;
1259 if (req->flags & REQ_F_FORCE_ASYNC)
1260 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1261
1262 if (req->flags & REQ_F_ISREG) {
1263 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1264 io_wq_hash_work(&req->work, file_inode(req->file));
1265 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1266 if (def->unbound_nonreg_file)
1267 req->work.flags |= IO_WQ_WORK_UNBOUND;
1268 }
1269
1270 switch (req->opcode) {
1271 case IORING_OP_SPLICE:
1272 case IORING_OP_TEE:
1273 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1274 req->work.flags |= IO_WQ_WORK_UNBOUND;
1275 break;
1276 }
1277}
1278
1279static void io_prep_async_link(struct io_kiocb *req)
1280{
1281 struct io_kiocb *cur;
1282
1283 if (req->flags & REQ_F_LINK_TIMEOUT) {
1284 struct io_ring_ctx *ctx = req->ctx;
1285
1286 spin_lock_irq(&ctx->completion_lock);
1287 io_for_each_link(cur, req)
1288 io_prep_async_work(cur);
1289 spin_unlock_irq(&ctx->completion_lock);
1290 } else {
1291 io_for_each_link(cur, req)
1292 io_prep_async_work(cur);
1293 }
1294}
1295
1296static void io_queue_async_work(struct io_kiocb *req)
1297{
1298 struct io_ring_ctx *ctx = req->ctx;
1299 struct io_kiocb *link = io_prep_linked_timeout(req);
1300 struct io_uring_task *tctx = req->task->io_uring;
1301
1302 BUG_ON(!tctx);
1303 BUG_ON(!tctx->io_wq);
1304
1305
1306 io_prep_async_link(req);
1307
1308
1309
1310
1311
1312
1313
1314
1315 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1316 req->work.flags |= IO_WQ_WORK_CANCEL;
1317
1318 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1319 &req->work, req->flags);
1320 io_wq_enqueue(tctx->io_wq, &req->work);
1321 if (link)
1322 io_queue_linked_timeout(link);
1323}
1324
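/*
 * Cancel a pending timeout and post its completion with @status; only
 * succeeds if the hrtimer callback has not already started running.
 */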
1325static void io_kill_timeout(struct io_kiocb *req, int status)
1326 __must_hold(&req->ctx->completion_lock)
1327{
1328 struct io_timeout_data *io = req->async_data;
1329
1330 if (hrtimer_try_to_cancel(&io->timer) != -1) {
1331 atomic_set(&req->ctx->cq_timeouts,
1332 atomic_read(&req->ctx->cq_timeouts) + 1);
1333 list_del_init(&req->timeout.list);
1334 io_cqring_fill_event(req->ctx, req->user_data, status, 0);
1335 io_put_req_deferred(req, 1);
1336 }
1337}
1338
1339static void io_queue_deferred(struct io_ring_ctx *ctx)
1340{
1341 while (!list_empty(&ctx->defer_list)) {
1342 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1343 struct io_defer_entry, list);
1344
1345 if (req_need_defer(de->req, de->seq))
1346 break;
1347 list_del_init(&de->list);
1348 io_req_task_queue(de->req);
1349 kfree(de);
1350 }
1351}
1352
1353static void io_flush_timeouts(struct io_ring_ctx *ctx)
1354{
1355 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1356
1357 while (!list_empty(&ctx->timeout_list)) {
1358 u32 events_needed, events_got;
1359 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
1360 struct io_kiocb, timeout.list);
1361
1362 if (io_is_timeout_noseq(req))
1363 break;

 /*
  * Since seq can easily wrap around over time, subtract
  * the last seq at which timeouts were flushed before comparing.
  * Assuming not more than 2^31-1 events have happened since,
  * these subtractions won't have wrapped, so we can check if
  * target is in [last_seq, current_seq] by comparing the two.
  */
1372 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1373 events_got = seq - ctx->cq_last_tm_flush;
1374 if (events_got < events_needed)
1375 break;
1376
1377 list_del_init(&req->timeout.list);
1378 io_kill_timeout(req, 0);
1379 }
1380 ctx->cq_last_tm_flush = seq;
1381}
1382
1383static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
1384{
1385 if (ctx->off_timeout_used)
1386 io_flush_timeouts(ctx);
1387 if (ctx->drain_active)
1388 io_queue_deferred(ctx);
1389}
1390
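/*
 * Publish any newly filled CQEs to userspace: flush deferred/drained
 * work and timeouts if needed, then update the CQ ring tail.
 */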
1391static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1392{
1393 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1394 __io_commit_cqring_flush(ctx);
1395
1396 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
1397}
1398
1399static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1400{
1401 struct io_rings *r = ctx->rings;
1402
1403 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
1404}
1405
1406static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1407{
1408 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1409}
1410
1411static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
1412{
1413 struct io_rings *rings = ctx->rings;
1414 unsigned tail, mask = ctx->cq_entries - 1;

 /*
  * Writes to the cq entry need to come after reading head; the
  * control dependency is enough as we're using WRITE_ONCE to
  * fill the cq entry.
  */
1421 if (__io_cqring_events(ctx) == ctx->cq_entries)
1422 return NULL;
1423
1424 tail = ctx->cached_cq_tail++;
1425 return &rings->cqes[tail & mask];
1426}
1427
1428static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1429{
1430 if (likely(!ctx->cq_ev_fd))
1431 return false;
1432 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1433 return false;
1434 return !ctx->eventfd_async || io_wq_current_is_worker();
1435}
1436
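/*
 * Wake up anyone waiting for completions: CQ waiters, the SQPOLL
 * thread, poll(2) waiters, and the registered eventfd if applicable.
 */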
1437static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1438{
1439
1440 smp_mb();
1441
1442 if (waitqueue_active(&ctx->cq_wait))
1443 wake_up(&ctx->cq_wait);
1444 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1445 wake_up(&ctx->sq_data->wait);
1446 if (io_should_trigger_evfd(ctx))
1447 eventfd_signal(ctx->cq_ev_fd, 1);
1448 if (waitqueue_active(&ctx->poll_wait)) {
1449 wake_up_interruptible(&ctx->poll_wait);
1450 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1451 }
1452}
1453
1454static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1455{
1456
1457 smp_mb();
1458
1459 if (ctx->flags & IORING_SETUP_SQPOLL) {
1460 if (waitqueue_active(&ctx->cq_wait))
1461 wake_up(&ctx->cq_wait);
1462 }
1463 if (io_should_trigger_evfd(ctx))
1464 eventfd_signal(ctx->cq_ev_fd, 1);
1465 if (waitqueue_active(&ctx->poll_wait)) {
1466 wake_up_interruptible(&ctx->poll_wait);
1467 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1468 }
1469}

/* Returns true if there are no backlogged entries after the flush */
1472static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1473{
1474 unsigned long flags;
1475 bool all_flushed, posted;
1476
1477 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
1478 return false;
1479
1480 posted = false;
1481 spin_lock_irqsave(&ctx->completion_lock, flags);
1482 while (!list_empty(&ctx->cq_overflow_list)) {
1483 struct io_uring_cqe *cqe = io_get_cqe(ctx);
1484 struct io_overflow_cqe *ocqe;
1485
1486 if (!cqe && !force)
1487 break;
1488 ocqe = list_first_entry(&ctx->cq_overflow_list,
1489 struct io_overflow_cqe, list);
1490 if (cqe)
1491 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1492 else
1493 io_account_cq_overflow(ctx);
1494
1495 posted = true;
1496 list_del(&ocqe->list);
1497 kfree(ocqe);
1498 }
1499
1500 all_flushed = list_empty(&ctx->cq_overflow_list);
1501 if (all_flushed) {
1502 clear_bit(0, &ctx->check_cq_overflow);
1503 WRITE_ONCE(ctx->rings->sq_flags,
1504 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
1505 }
1506
1507 if (posted)
1508 io_commit_cqring(ctx);
1509 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1510 if (posted)
1511 io_cqring_ev_posted(ctx);
1512 return all_flushed;
1513}
1514
1515static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1516{
1517 bool ret = true;
1518
1519 if (test_bit(0, &ctx->check_cq_overflow)) {
 /* iopoll syncs against uring_lock, not completion_lock */
1521 if (ctx->flags & IORING_SETUP_IOPOLL)
1522 mutex_lock(&ctx->uring_lock);
1523 ret = __io_cqring_overflow_flush(ctx, force);
1524 if (ctx->flags & IORING_SETUP_IOPOLL)
1525 mutex_unlock(&ctx->uring_lock);
1526 }
1527
1528 return ret;
1529}
1530
/*
 * Request reference counting helpers. The +127 check mirrors the page
 * refcount scheme: it triggers when the refcount is zero or has gone
 * (close to) negative.
 */
1535#define req_ref_zero_or_close_to_overflow(req) \
1536 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1537
1538static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1539{
1540 return atomic_inc_not_zero(&req->refs);
1541}
1542
1543static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
1544{
1545 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1546 return atomic_sub_and_test(refs, &req->refs);
1547}
1548
1549static inline bool req_ref_put_and_test(struct io_kiocb *req)
1550{
1551 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1552 return atomic_dec_and_test(&req->refs);
1553}
1554
1555static inline void req_ref_put(struct io_kiocb *req)
1556{
1557 WARN_ON_ONCE(req_ref_put_and_test(req));
1558}
1559
1560static inline void req_ref_get(struct io_kiocb *req)
1561{
1562 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1563 atomic_inc(&req->refs);
1564}
1565
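/*
 * The CQ ring is full, so stash the completion on the overflow list and
 * flag the ring (IORING_SQ_CQ_OVERFLOW) so userspace and the flush path
 * know there are backlogged entries.
 */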
1566static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1567 long res, unsigned int cflags)
1568{
1569 struct io_overflow_cqe *ocqe;
1570
1571 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1572 if (!ocqe) {
 /*
  * If we're in ring overflow flush mode, or in task cancel mode,
  * or cannot allocate an overflow entry, then we need to drop it
  * on the floor.
  */
1578 io_account_cq_overflow(ctx);
1579 return false;
1580 }
1581 if (list_empty(&ctx->cq_overflow_list)) {
1582 set_bit(0, &ctx->check_cq_overflow);
1583 WRITE_ONCE(ctx->rings->sq_flags,
1584 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1585
1586 }
1587 ocqe->cqe.user_data = user_data;
1588 ocqe->cqe.res = res;
1589 ocqe->cqe.flags = cflags;
1590 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1591 return true;
1592}
1593
1594static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1595 long res, unsigned int cflags)
1596{
1597 struct io_uring_cqe *cqe;
1598
1599 trace_io_uring_complete(ctx, user_data, res, cflags);

 /*
  * If we can't get a cq entry, userspace overflowed the
  * submission (by quite a lot); stash the completion on the
  * overflow list instead.
  */
1606 cqe = io_get_cqe(ctx);
1607 if (likely(cqe)) {
1608 WRITE_ONCE(cqe->user_data, user_data);
1609 WRITE_ONCE(cqe->res, res);
1610 WRITE_ONCE(cqe->flags, cflags);
1611 return true;
1612 }
1613 return io_cqring_event_overflow(ctx, user_data, res, cflags);
1614}
1615
1616
1617static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1618 long res, unsigned int cflags)
1619{
1620 return __io_cqring_fill_event(ctx, user_data, res, cflags);
1621}
1622
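/*
 * Post a completion for @req under ->completion_lock and, if this was
 * the last reference, recycle the request via the locked free list.
 */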
1623static void io_req_complete_post(struct io_kiocb *req, long res,
1624 unsigned int cflags)
1625{
1626 struct io_ring_ctx *ctx = req->ctx;
1627 unsigned long flags;
1628
1629 spin_lock_irqsave(&ctx->completion_lock, flags);
1630 __io_cqring_fill_event(ctx, req->user_data, res, cflags);
 /*
  * If we're the last reference to this request, add to our locked
  * free_list cache.
  */
1635 if (req_ref_put_and_test(req)) {
1636 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
1637 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
1638 io_disarm_next(req);
1639 if (req->link) {
1640 io_req_task_queue(req->link);
1641 req->link = NULL;
1642 }
1643 }
1644 io_dismantle_req(req);
1645 io_put_task(req->task, 1);
1646 list_add(&req->compl.list, &ctx->locked_free_list);
1647 ctx->locked_free_nr++;
1648 } else {
1649 if (!percpu_ref_tryget(&ctx->refs))
1650 req = NULL;
1651 }
1652 io_commit_cqring(ctx);
1653 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1654
1655 if (req) {
1656 io_cqring_ev_posted(ctx);
1657 percpu_ref_put(&ctx->refs);
1658 }
1659}
1660
1661static inline bool io_req_needs_clean(struct io_kiocb *req)
1662{
1663 return req->flags & IO_REQ_CLEAN_FLAGS;
1664}
1665
1666static void io_req_complete_state(struct io_kiocb *req, long res,
1667 unsigned int cflags)
1668{
1669 if (io_req_needs_clean(req))
1670 io_clean_op(req);
1671 req->result = res;
1672 req->compl.cflags = cflags;
1673 req->flags |= REQ_F_COMPLETE_INLINE;
1674}
1675
1676static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1677 long res, unsigned cflags)
1678{
1679 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1680 io_req_complete_state(req, res, cflags);
1681 else
1682 io_req_complete_post(req, res, cflags);
1683}
1684
1685static inline void io_req_complete(struct io_kiocb *req, long res)
1686{
1687 __io_req_complete(req, 0, res, 0);
1688}
1689
1690static void io_req_complete_failed(struct io_kiocb *req, long res)
1691{
1692 req_set_fail(req);
1693 io_put_req(req);
1694 io_req_complete_post(req, res, 0);
1695}
1696
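/*
 * Move requests freed under ->completion_lock over to the submission
 * side free cache so they can be reused without taking the lock again.
 */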
1697static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1698 struct io_comp_state *cs)
1699{
1700 spin_lock_irq(&ctx->completion_lock);
1701 list_splice_init(&ctx->locked_free_list, &cs->free_list);
1702 ctx->locked_free_nr = 0;
1703 spin_unlock_irq(&ctx->completion_lock);
1704}
1705
1706
1707static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
1708{
1709 struct io_submit_state *state = &ctx->submit_state;
1710 struct io_comp_state *cs = &state->comp;
1711 int nr;

 /*
  * If we have more than a batch's worth of requests in our IRQ side
  * locked cache, grab the lock and move them over to our submission
  * side cache.
  */
1718 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
1719 io_flush_cached_locked_reqs(ctx, cs);
1720
1721 nr = state->free_reqs;
1722 while (!list_empty(&cs->free_list)) {
1723 struct io_kiocb *req = list_first_entry(&cs->free_list,
1724 struct io_kiocb, compl.list);
1725
1726 list_del(&req->compl.list);
1727 state->reqs[nr++] = req;
1728 if (nr == ARRAY_SIZE(state->reqs))
1729 break;
1730 }
1731
1732 state->free_reqs = nr;
1733 return nr != 0;
1734}
1735
1736static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
1737{
1738 struct io_submit_state *state = &ctx->submit_state;
1739
1740 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
1741
1742 if (!state->free_reqs) {
1743 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1744 int ret, i;
1745
1746 if (io_flush_cached_reqs(ctx))
1747 goto got_req;
1748
1749 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1750 state->reqs);

 /*
  * Bulk alloc is all-or-nothing. If we fail to get a batch,
  * retry single alloc to be on the safe side.
  */
1756 if (unlikely(ret <= 0)) {
1757 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1758 if (!state->reqs[0])
1759 return NULL;
1760 ret = 1;
1761 }
1762
1763
1764
1765
1766
1767 for (i = 0; i < ret; i++) {
1768 struct io_kiocb *req = state->reqs[i];
1769
1770 req->ctx = ctx;
1771 req->link = NULL;
1772 req->async_data = NULL;
1773
1774 req->result = 0;
1775 }
1776 state->free_reqs = ret;
1777 }
1778got_req:
1779 state->free_reqs--;
1780 return state->reqs[state->free_reqs];
1781}
1782
1783static inline void io_put_file(struct file *file)
1784{
1785 if (file)
1786 fput(file);
1787}
1788
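/*
 * Release the resources a request holds (selected buffers, file, rsrc
 * node reference, async data) before it is freed or recycled.
 */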
1789static void io_dismantle_req(struct io_kiocb *req)
1790{
1791 unsigned int flags = req->flags;
1792
1793 if (io_req_needs_clean(req))
1794 io_clean_op(req);
1795 if (!(flags & REQ_F_FIXED_FILE))
1796 io_put_file(req->file);
1797 if (req->fixed_rsrc_refs)
1798 percpu_ref_put(req->fixed_rsrc_refs);
1799 if (req->async_data) {
1800 kfree(req->async_data);
1801 req->async_data = NULL;
1802 }
1803}
1804
/* must be called shortly after putting a request */
1806static inline void io_put_task(struct task_struct *task, int nr)
1807{
1808 struct io_uring_task *tctx = task->io_uring;
1809
1810 percpu_counter_sub(&tctx->inflight, nr);
1811 if (unlikely(atomic_read(&tctx->in_idle)))
1812 wake_up(&tctx->wait);
1813 put_task_struct_many(task, nr);
1814}
1815
1816static void __io_free_req(struct io_kiocb *req)
1817{
1818 struct io_ring_ctx *ctx = req->ctx;
1819
1820 io_dismantle_req(req);
1821 io_put_task(req->task, 1);
1822
1823 kmem_cache_free(req_cachep, req);
1824 percpu_ref_put(&ctx->refs);
1825}
1826
1827static inline void io_remove_next_linked(struct io_kiocb *req)
1828{
1829 struct io_kiocb *nxt = req->link;
1830
1831 req->link = nxt->link;
1832 nxt->link = NULL;
1833}
1834
1835static bool io_kill_linked_timeout(struct io_kiocb *req)
1836 __must_hold(&req->ctx->completion_lock)
1837{
1838 struct io_kiocb *link = req->link;
1839
 /*
  * Can happen if a linked timeout fired and link had been like
  * req -> link t-out -> link t-out [-> ...]
  */
1844 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1845 struct io_timeout_data *io = link->async_data;
1846
1847 io_remove_next_linked(req);
1848 link->timeout.head = NULL;
1849 if (hrtimer_try_to_cancel(&io->timer) != -1) {
1850 io_cqring_fill_event(link->ctx, link->user_data,
1851 -ECANCELED, 0);
1852 io_put_req_deferred(link, 1);
1853 return true;
1854 }
1855 }
1856 return false;
1857}
1858
1859static void io_fail_links(struct io_kiocb *req)
1860 __must_hold(&req->ctx->completion_lock)
1861{
1862 struct io_kiocb *nxt, *link = req->link;
1863
1864 req->link = NULL;
1865 while (link) {
1866 nxt = link->link;
1867 link->link = NULL;
1868
1869 trace_io_uring_fail_link(req, link);
1870 io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
1871 io_put_req_deferred(link, 2);
1872 link = nxt;
1873 }
1874}
1875
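/*
 * Disarm the linked timeout (if any) and, if this request failed, fail
 * the rest of its link chain. Returns true if any CQEs were posted and
 * the CQ ring therefore needs committing.
 */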
1876static bool io_disarm_next(struct io_kiocb *req)
1877 __must_hold(&req->ctx->completion_lock)
1878{
1879 bool posted = false;
1880
1881 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1882 posted = io_kill_linked_timeout(req);
1883 if (unlikely((req->flags & REQ_F_FAIL) &&
1884 !(req->flags & REQ_F_HARDLINK))) {
1885 posted |= (req->link != NULL);
1886 io_fail_links(req);
1887 }
1888 return posted;
1889}
1890
1891static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
1892{
1893 struct io_kiocb *nxt;
1894

 /*
  * If LINK is set, we have dependent requests in this chain. If we
  * didn't fail this request, queue the first one up, moving any other
  * dependencies to the next request. In case of failure, fail the rest
  * of the chain.
  */
1901 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
1902 struct io_ring_ctx *ctx = req->ctx;
1903 unsigned long flags;
1904 bool posted;
1905
1906 spin_lock_irqsave(&ctx->completion_lock, flags);
1907 posted = io_disarm_next(req);
1908 if (posted)
1909 io_commit_cqring(req->ctx);
1910 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1911 if (posted)
1912 io_cqring_ev_posted(ctx);
1913 }
1914 nxt = req->link;
1915 req->link = NULL;
1916 return nxt;
1917}
1918
1919static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1920{
1921 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
1922 return NULL;
1923 return __io_req_find_next(req);
1924}
1925
1926static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1927{
1928 if (!ctx)
1929 return;
1930 if (ctx->submit_state.comp.nr) {
1931 mutex_lock(&ctx->uring_lock);
1932 io_submit_flush_completions(ctx);
1933 mutex_unlock(&ctx->uring_lock);
1934 }
1935 percpu_ref_put(&ctx->refs);
1936}
1937
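/*
 * Run all task_work queued for this io_uring task: drain the list,
 * batching per-ctx completion flushing, until no more work is pending.
 */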
1938static void tctx_task_work(struct callback_head *cb)
1939{
1940 struct io_ring_ctx *ctx = NULL;
1941 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1942 task_work);
1943
1944 while (1) {
1945 struct io_wq_work_node *node;
1946
1947 spin_lock_irq(&tctx->task_lock);
1948 node = tctx->task_list.first;
1949 INIT_WQ_LIST(&tctx->task_list);
1950 spin_unlock_irq(&tctx->task_lock);
1951
1952 while (node) {
1953 struct io_wq_work_node *next = node->next;
1954 struct io_kiocb *req = container_of(node, struct io_kiocb,
1955 io_task_work.node);
1956
1957 if (req->ctx != ctx) {
1958 ctx_flush_and_put(ctx);
1959 ctx = req->ctx;
1960 percpu_ref_get(&ctx->refs);
1961 }
1962 req->io_task_work.func(req);
1963 node = next;
1964 }
1965 if (wq_list_empty(&tctx->task_list)) {
1966 spin_lock_irq(&tctx->task_lock);
1967 clear_bit(0, &tctx->task_state);
1968 if (wq_list_empty(&tctx->task_list)) {
1969 spin_unlock_irq(&tctx->task_lock);
1970 break;
1971 }
1972 spin_unlock_irq(&tctx->task_lock);
1973
1974 if (test_and_set_bit(0, &tctx->task_state))
1975 break;
1976 }
1977 cond_resched();
1978 }
1979
1980 ctx_flush_and_put(ctx);
1981}
1982
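/*
 * Queue @req on the task's io_uring task_work list and notify the task.
 * If the task is exiting and task_work can't be added, punt the request
 * to the per-ctx fallback workqueue instead so it still gets run.
 */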
1983static void io_req_task_work_add(struct io_kiocb *req)
1984{
1985 struct task_struct *tsk = req->task;
1986 struct io_uring_task *tctx = tsk->io_uring;
1987 enum task_work_notify_mode notify;
1988 struct io_wq_work_node *node;
1989 unsigned long flags;
1990
1991 WARN_ON_ONCE(!tctx);
1992
1993 spin_lock_irqsave(&tctx->task_lock, flags);
1994 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
1995 spin_unlock_irqrestore(&tctx->task_lock, flags);
1996
 /* task_work already pending, we're done */
1998 if (test_bit(0, &tctx->task_state) ||
1999 test_and_set_bit(0, &tctx->task_state))
2000 return;
2001 if (unlikely(tsk->flags & PF_EXITING))
2002 goto fail;
2003
 /*
  * SQPOLL kernel thread doesn't need notification, just a wakeup. For
  * all other cases, use TWA_SIGNAL unconditionally to ensure we're
  * processing task_work. There's no reliable way to tell if TWA_RESUME
  * will do the job.
  */
2010 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
2011 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2012 wake_up_process(tsk);
2013 return;
2014 }
2015fail:
2016 clear_bit(0, &tctx->task_state);
2017 spin_lock_irqsave(&tctx->task_lock, flags);
2018 node = tctx->task_list.first;
2019 INIT_WQ_LIST(&tctx->task_list);
2020 spin_unlock_irqrestore(&tctx->task_lock, flags);
2021
2022 while (node) {
2023 req = container_of(node, struct io_kiocb, io_task_work.node);
2024 node = node->next;
2025 if (llist_add(&req->io_task_work.fallback_node,
2026 &req->ctx->fallback_llist))
2027 schedule_delayed_work(&req->ctx->fallback_work, 1);
2028 }
2029}
2030
2031static void io_req_task_cancel(struct io_kiocb *req)
2032{
2033 struct io_ring_ctx *ctx = req->ctx;
2034
2035
2036 mutex_lock(&ctx->uring_lock);
2037 io_req_complete_failed(req, req->result);
2038 mutex_unlock(&ctx->uring_lock);
2039}
2040
2041static void io_req_task_submit(struct io_kiocb *req)
2042{
2043 struct io_ring_ctx *ctx = req->ctx;
2044
2045
2046 mutex_lock(&ctx->uring_lock);
2047 if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
2048 __io_queue_sqe(req);
2049 else
2050 io_req_complete_failed(req, -EFAULT);
2051 mutex_unlock(&ctx->uring_lock);
2052}
2053
2054static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2055{
2056 req->result = ret;
2057 req->io_task_work.func = io_req_task_cancel;
2058 io_req_task_work_add(req);
2059}
2060
2061static void io_req_task_queue(struct io_kiocb *req)
2062{
2063 req->io_task_work.func = io_req_task_submit;
2064 io_req_task_work_add(req);
2065}
2066
2067static void io_req_task_queue_reissue(struct io_kiocb *req)
2068{
2069 req->io_task_work.func = io_queue_async_work;
2070 io_req_task_work_add(req);
2071}
2072
2073static inline void io_queue_next(struct io_kiocb *req)
2074{
2075 struct io_kiocb *nxt = io_req_find_next(req);
2076
2077 if (nxt)
2078 io_req_task_queue(nxt);
2079}
2080
2081static void io_free_req(struct io_kiocb *req)
2082{
2083 io_queue_next(req);
2084 __io_free_req(req);
2085}
2086
2087struct req_batch {
2088 struct task_struct *task;
2089 int task_refs;
2090 int ctx_refs;
2091};
2092
2093static inline void io_init_req_batch(struct req_batch *rb)
2094{
2095 rb->task_refs = 0;
2096 rb->ctx_refs = 0;
2097 rb->task = NULL;
2098}
2099
2100static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2101 struct req_batch *rb)
2102{
2103 if (rb->task)
2104 io_put_task(rb->task, rb->task_refs);
2105 if (rb->ctx_refs)
2106 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2107}
2108
2109static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2110 struct io_submit_state *state)
2111{
2112 io_queue_next(req);
2113 io_dismantle_req(req);
2114
2115 if (req->task != rb->task) {
2116 if (rb->task)
2117 io_put_task(rb->task, rb->task_refs);
2118 rb->task = req->task;
2119 rb->task_refs = 0;
2120 }
2121 rb->task_refs++;
2122 rb->ctx_refs++;
2123
2124 if (state->free_reqs != ARRAY_SIZE(state->reqs))
2125 state->reqs[state->free_reqs++] = req;
2126 else
2127 list_add(&req->compl.list, &state->comp.free_list);
2128}
2129
2130static void io_submit_flush_completions(struct io_ring_ctx *ctx)
2131{
2132 struct io_comp_state *cs = &ctx->submit_state.comp;
2133 int i, nr = cs->nr;
2134 struct req_batch rb;
2135
2136 spin_lock_irq(&ctx->completion_lock);
2137 for (i = 0; i < nr; i++) {
2138 struct io_kiocb *req = cs->reqs[i];
2139
2140 __io_cqring_fill_event(ctx, req->user_data, req->result,
2141 req->compl.cflags);
2142 }
2143 io_commit_cqring(ctx);
2144 spin_unlock_irq(&ctx->completion_lock);
2145 io_cqring_ev_posted(ctx);
2146
2147 io_init_req_batch(&rb);
2148 for (i = 0; i < nr; i++) {
2149 struct io_kiocb *req = cs->reqs[i];
2150
 /* submission and completion refs */
2152 if (req_ref_sub_and_test(req, 2))
2153 io_req_free_batch(&rb, req, &ctx->submit_state);
2154 }
2155
2156 io_req_free_batch_finish(ctx, &rb);
2157 cs->nr = 0;
2158}
2159
/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
2164static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
2165{
2166 struct io_kiocb *nxt = NULL;
2167
2168 if (req_ref_put_and_test(req)) {
2169 nxt = io_req_find_next(req);
2170 __io_free_req(req);
2171 }
2172 return nxt;
2173}
2174
2175static inline void io_put_req(struct io_kiocb *req)
2176{
2177 if (req_ref_put_and_test(req))
2178 io_free_req(req);
2179}
2180
2181static void io_free_req_deferred(struct io_kiocb *req)
2182{
2183 req->io_task_work.func = io_free_req;
2184 io_req_task_work_add(req);
2185}
2186
2187static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2188{
2189 if (req_ref_sub_and_test(req, refs))
2190 io_free_req_deferred(req);
2191}
2192
2193static unsigned io_cqring_events(struct io_ring_ctx *ctx)
2194{
2195
2196 smp_rmb();
2197 return __io_cqring_events(ctx);
2198}
2199
2200static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2201{
2202 struct io_rings *rings = ctx->rings;
2203
2204
2205 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2206}
2207
2208static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
2209{
2210 unsigned int cflags;
2211
2212 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2213 cflags |= IORING_CQE_F_BUFFER;
2214 req->flags &= ~REQ_F_BUFFER_SELECTED;
2215 kfree(kbuf);
2216 return cflags;
2217}
2218
2219static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2220{
2221 struct io_buffer *kbuf;
2222
2223 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2224 return io_put_kbuf(req, kbuf);
2225}
2226
2227static inline bool io_run_task_work(void)
2228{
2229 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
2230 __set_current_state(TASK_RUNNING);
2231 tracehook_notify_signal();
2232 return true;
2233 }
2234
2235 return false;
2236}
2237
/*
 * Find and free completed poll iocbs.
 */
2241static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2242 struct list_head *done, bool resubmit)
2243{
2244 struct req_batch rb;
2245 struct io_kiocb *req;
2246
2247
2248 smp_rmb();
2249
2250 io_init_req_batch(&rb);
2251 while (!list_empty(done)) {
2252 int cflags = 0;
2253
2254 req = list_first_entry(done, struct io_kiocb, inflight_entry);
2255 list_del(&req->inflight_entry);
2256
2257 if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
2258 !(req->flags & REQ_F_DONT_REISSUE)) {
2259 req->iopoll_completed = 0;
2260 req_ref_get(req);
2261 io_req_task_queue_reissue(req);
2262 continue;
2263 }
2264
2265 if (req->flags & REQ_F_BUFFER_SELECTED)
2266 cflags = io_put_rw_kbuf(req);
2267
2268 __io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
2269 (*nr_events)++;
2270
2271 if (req_ref_put_and_test(req))
2272 io_req_free_batch(&rb, req, &ctx->submit_state);
2273 }
2274
2275 io_commit_cqring(ctx);
2276 io_cqring_ev_posted_iopoll(ctx);
2277 io_req_free_batch_finish(ctx, &rb);
2278}
2279
2280static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2281 long min, bool resubmit)
2282{
2283 struct io_kiocb *req, *tmp;
2284 LIST_HEAD(done);
2285 bool spin;
2286 int ret;
2287

 /*
  * Only spin for completions if we don't have multiple devices hanging
  * off our complete list, and we're under the requested amount.
  */
2292 spin = !ctx->poll_multi_queue && *nr_events < min;
2293
2294 ret = 0;
2295 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
2296 struct kiocb *kiocb = &req->rw.kiocb;
2297
 /*
  * Move completed and retryable entries to our local lists.
  * If we find a request that requires polling, break out
  * and complete those lists first, if we have entries there.
  */
2303 if (READ_ONCE(req->iopoll_completed)) {
2304 list_move_tail(&req->inflight_entry, &done);
2305 continue;
2306 }
2307 if (!list_empty(&done))
2308 break;
2309
2310 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2311 if (ret < 0)
2312 break;
2313
2314
2315 if (READ_ONCE(req->iopoll_completed))
2316 list_move_tail(&req->inflight_entry, &done);
2317
2318 if (ret && spin)
2319 spin = false;
2320 ret = 0;
2321 }
2322
2323 if (!list_empty(&done))
2324 io_iopoll_complete(ctx, nr_events, &done, resubmit);
2325
2326 return ret;
2327}
2328
/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
2333static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
2334{
2335 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2336 return;
2337
2338 mutex_lock(&ctx->uring_lock);
2339 while (!list_empty(&ctx->iopoll_list)) {
2340 unsigned int nr_events = 0;
2341
2342 io_do_iopoll(ctx, &nr_events, 0, false);
2343
2344
2345 if (nr_events == 0)
2346 break;
2347
2348
2349
2350
2351
2352 if (need_resched()) {
2353 mutex_unlock(&ctx->uring_lock);
2354 cond_resched();
2355 mutex_lock(&ctx->uring_lock);
2356 }
2357 }
2358 mutex_unlock(&ctx->uring_lock);
2359}
2360
2361static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
2362{
2363 unsigned int nr_events = 0;
2364 int ret = 0;
2365
2366
2367
2368
2369
2370
2371 mutex_lock(&ctx->uring_lock);
2372
2373
2374
2375
2376
2377 if (test_bit(0, &ctx->check_cq_overflow))
2378 __io_cqring_overflow_flush(ctx, false);
2379 if (io_cqring_events(ctx))
2380 goto out;
2381 do {
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392 if (list_empty(&ctx->iopoll_list)) {
2393 u32 tail = ctx->cached_cq_tail;
2394
2395 mutex_unlock(&ctx->uring_lock);
2396 io_run_task_work();
2397 mutex_lock(&ctx->uring_lock);
2398
2399
2400 if (tail != ctx->cached_cq_tail ||
2401 list_empty(&ctx->iopoll_list))
2402 break;
2403 }
2404 ret = io_do_iopoll(ctx, &nr_events, min, true);
2405 } while (!ret && nr_events < min && !need_resched());
2406out:
2407 mutex_unlock(&ctx->uring_lock);
2408 return ret;
2409}
2410
2411static void kiocb_end_write(struct io_kiocb *req)
2412{
 /*
  * Tell lockdep we inherited freeze protection from submission
  * thread.
  */
2417 if (req->flags & REQ_F_ISREG) {
2418 struct super_block *sb = file_inode(req->file)->i_sb;
2419
2420 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2421 sb_end_write(sb);
2422 }
2423}
2424
2425#ifdef CONFIG_BLOCK
2426static bool io_resubmit_prep(struct io_kiocb *req)
2427{
2428 struct io_async_rw *rw = req->async_data;
2429
2430 if (!rw)
2431 return !io_req_prep_async(req);
2432
2433 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2434 return true;
2435}
2436
2437static bool io_rw_should_reissue(struct io_kiocb *req)
2438{
2439 umode_t mode = file_inode(req->file)->i_mode;
2440 struct io_ring_ctx *ctx = req->ctx;
2441
2442 if (!S_ISBLK(mode) && !S_ISREG(mode))
2443 return false;
2444 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2445 !(ctx->flags & IORING_SETUP_IOPOLL)))
2446 return false;
2447
 /*
  * If the ring's refs are dying, we might be running the poll reap from
  * the exit work. Don't attempt to reissue from that path, just let it
  * fail with -EAGAIN.
  */
2452 if (percpu_ref_is_dying(&ctx->refs))
2453 return false;
2454
2455
2456
2457
2458 if (!same_thread_group(req->task, current) || !in_task())
2459 return false;
2460 return true;
2461}
2462#else
2463static bool io_resubmit_prep(struct io_kiocb *req)
2464{
2465 return false;
2466}
2467static bool io_rw_should_reissue(struct io_kiocb *req)
2468{
2469 return false;
2470}
2471#endif
2472
2473static void io_fallback_req_func(struct work_struct *work)
2474{
2475 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
2476 fallback_work.work);
2477 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
2478 struct io_kiocb *req, *tmp;
2479
2480 percpu_ref_get(&ctx->refs);
2481 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
2482 req->io_task_work.func(req);
2483 percpu_ref_put(&ctx->refs);
2484}
2485
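/*
 * Complete a read/write request: end write freeze protection for writes,
 * mark the request for reissue if the result warrants it, and post the
 * CQE (including any selected-buffer flags).
 */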
2486static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2487 unsigned int issue_flags)
2488{
2489 int cflags = 0;
2490
2491 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2492 kiocb_end_write(req);
2493 if (res != req->result) {
2494 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2495 io_rw_should_reissue(req)) {
2496 req->flags |= REQ_F_REISSUE;
2497 return;
2498 }
2499 req_set_fail(req);
2500 }
2501 if (req->flags & REQ_F_BUFFER_SELECTED)
2502 cflags = io_put_rw_kbuf(req);
2503 __io_req_complete(req, issue_flags, res, cflags);
2504}
2505
2506static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2507{
2508 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2509
2510 __io_complete_rw(req, res, res2, 0);
2511}
2512
2513static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2514{
2515 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2516
2517 if (kiocb->ki_flags & IOCB_WRITE)
2518 kiocb_end_write(req);
2519 if (unlikely(res != req->result)) {
2520 if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2521 io_resubmit_prep(req))) {
2522 req_set_fail(req);
2523 req->flags |= REQ_F_DONT_REISSUE;
2524 }
2525 }
2526
2527 WRITE_ONCE(req->result, res);
2528
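	/* ensure ->result is visible before ->iopoll_completed is set */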
2529 smp_wmb();
2530 WRITE_ONCE(req->iopoll_completed, 1);
2531}
2532
2538
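/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */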
2539static void io_iopoll_req_issued(struct io_kiocb *req)
2540{
2541 struct io_ring_ctx *ctx = req->ctx;
2542 const bool in_async = io_wq_current_is_worker();
2543
2544
2545 if (unlikely(in_async))
2546 mutex_lock(&ctx->uring_lock);
2547
2548
2549
2550
2551
2552
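	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */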
2553 if (list_empty(&ctx->iopoll_list)) {
2554 ctx->poll_multi_queue = false;
2555 } else if (!ctx->poll_multi_queue) {
2556 struct io_kiocb *list_req;
2557 unsigned int queue_num0, queue_num1;
2558
2559 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
2560 inflight_entry);
2561
2562 if (list_req->file != req->file) {
2563 ctx->poll_multi_queue = true;
2564 } else {
2565 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2566 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2567 if (queue_num0 != queue_num1)
2568 ctx->poll_multi_queue = true;
2569 }
2570 }
2571
2572
2573
2574
2575
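	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */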
2576 if (READ_ONCE(req->iopoll_completed))
2577 list_add(&req->inflight_entry, &ctx->iopoll_list);
2578 else
2579 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
2580
2581 if (unlikely(in_async)) {
2587
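		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
		 * in sq thread task context or in io worker task context. If
		 * the current task context is the sq thread itself, there is
		 * no need to check whether the sq thread should be woken.
		 */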
2588 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2589 wq_has_sleeper(&ctx->sq_data->wait))
2590 wake_up(&ctx->sq_data->wait);
2591
2592 mutex_unlock(&ctx->uring_lock);
2593 }
2594}
2595
2596static inline void io_state_file_put(struct io_submit_state *state)
2597{
2598 if (state->file_refs) {
2599 fput_many(state->file, state->file_refs);
2600 state->file_refs = 0;
2601 }
2602}
2603
2604
2605
2606
2607
2608
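/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */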
2609static struct file *__io_file_get(struct io_submit_state *state, int fd)
2610{
2611 if (!state)
2612 return fget(fd);
2613
2614 if (state->file_refs) {
2615 if (state->fd == fd) {
2616 state->file_refs--;
2617 return state->file;
2618 }
2619 io_state_file_put(state);
2620 }
2621 state->file = fget_many(fd, state->ios_left);
2622 if (unlikely(!state->file))
2623 return NULL;
2624
2625 state->fd = fd;
2626 state->file_refs = state->ios_left - 1;
2627 return state->file;
2628}
2629
2630static bool io_bdev_nowait(struct block_device *bdev)
2631{
2632 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
2633}
2634
2635
2636
2637
2638
2639
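/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */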
2640static bool __io_file_supports_async(struct file *file, int rw)
2641{
2642 umode_t mode = file_inode(file)->i_mode;
2643
2644 if (S_ISBLK(mode)) {
2645 if (IS_ENABLED(CONFIG_BLOCK) &&
2646 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
2647 return true;
2648 return false;
2649 }
2650 if (S_ISSOCK(mode))
2651 return true;
2652 if (S_ISREG(mode)) {
2653 if (IS_ENABLED(CONFIG_BLOCK) &&
2654 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2655 file->f_op != &io_uring_fops)
2656 return true;
2657 return false;
2658 }
2659
2660
2661 if (file->f_flags & O_NONBLOCK)
2662 return true;
2663
2664 if (!(file->f_mode & FMODE_NOWAIT))
2665 return false;
2666
2667 if (rw == READ)
2668 return file->f_op->read_iter != NULL;
2669
2670 return file->f_op->write_iter != NULL;
2671}
2672
2673static bool io_file_supports_async(struct io_kiocb *req, int rw)
2674{
2675 if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
2676 return true;
2677 else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
2678 return true;
2679
2680 return __io_file_supports_async(req->file, rw);
2681}
2682
2683static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2684{
2685 struct io_ring_ctx *ctx = req->ctx;
2686 struct kiocb *kiocb = &req->rw.kiocb;
2687 struct file *file = req->file;
2688 unsigned ioprio;
2689 int ret;
2690
2691 if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
2692 req->flags |= REQ_F_ISREG;
2693
2694 kiocb->ki_pos = READ_ONCE(sqe->off);
2695 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
2696 req->flags |= REQ_F_CUR_POS;
2697 kiocb->ki_pos = file->f_pos;
2698 }
2699 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
2700 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2701 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2702 if (unlikely(ret))
2703 return ret;
2704
2705
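	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */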
2706 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2707 req->flags |= REQ_F_NOWAIT;
2708
2709 ioprio = READ_ONCE(sqe->ioprio);
2710 if (ioprio) {
2711 ret = ioprio_check_cap(ioprio);
2712 if (ret)
2713 return ret;
2714
2715 kiocb->ki_ioprio = ioprio;
2716 } else
2717 kiocb->ki_ioprio = get_current_ioprio();
2718
2719 if (ctx->flags & IORING_SETUP_IOPOLL) {
2720 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2721 !kiocb->ki_filp->f_op->iopoll)
2722 return -EOPNOTSUPP;
2723
2724 kiocb->ki_flags |= IOCB_HIPRI;
2725 kiocb->ki_complete = io_complete_rw_iopoll;
2726 req->iopoll_completed = 0;
2727 } else {
2728 if (kiocb->ki_flags & IOCB_HIPRI)
2729 return -EINVAL;
2730 kiocb->ki_complete = io_complete_rw;
2731 }
2732
2733 if (req->opcode == IORING_OP_READ_FIXED ||
2734 req->opcode == IORING_OP_WRITE_FIXED) {
2735 req->imu = NULL;
2736 io_req_set_rsrc_node(req);
2737 }
2738
2739 req->rw.addr = READ_ONCE(sqe->addr);
2740 req->rw.len = READ_ONCE(sqe->len);
2741 req->buf_index = READ_ONCE(sqe->buf_index);
2742 return 0;
2743}
2744
2745static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2746{
2747 switch (ret) {
2748 case -EIOCBQUEUED:
2749 break;
2750 case -ERESTARTSYS:
2751 case -ERESTARTNOINTR:
2752 case -ERESTARTNOHAND:
2753 case -ERESTART_RESTARTBLOCK:
2754
2755
2756
2757
2758
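		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */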
2759 ret = -EINTR;
2760 fallthrough;
2761 default:
2762 kiocb->ki_complete(kiocb, ret, 0);
2763 }
2764}
2765
2766static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2767 unsigned int issue_flags)
2768{
2769 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2770 struct io_async_rw *io = req->async_data;
2771 bool check_reissue = kiocb->ki_complete == io_complete_rw;
2772
2773
2774 if (io && io->bytes_done > 0) {
2775 if (ret < 0)
2776 ret = io->bytes_done;
2777 else
2778 ret += io->bytes_done;
2779 }
2780
2781 if (req->flags & REQ_F_CUR_POS)
2782 req->file->f_pos = kiocb->ki_pos;
2783 if (ret >= 0 && check_reissue)
2784 __io_complete_rw(req, ret, 0, issue_flags);
2785 else
2786 io_rw_done(kiocb, ret);
2787
2788 if (check_reissue && (req->flags & REQ_F_REISSUE)) {
2789 req->flags &= ~REQ_F_REISSUE;
2790 if (io_resubmit_prep(req)) {
2791 req_ref_get(req);
2792 io_req_task_queue_reissue(req);
2793 } else {
2794 int cflags = 0;
2795
2796 req_set_fail(req);
2797 if (req->flags & REQ_F_BUFFER_SELECTED)
2798 cflags = io_put_rw_kbuf(req);
2799 __io_req_complete(req, issue_flags, ret, cflags);
2800 }
2801 }
2802}
2803
2804static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2805 struct io_mapped_ubuf *imu)
2806{
2807 size_t len = req->rw.len;
2808 u64 buf_end, buf_addr = req->rw.addr;
2809 size_t offset;
2810
2811 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
2812 return -EFAULT;
2813
2814 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
2815 return -EFAULT;
2816
2817
2818
2819
2820
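	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */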
2821 offset = buf_addr - imu->ubuf;
2822 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
2823
2824 if (offset) {
2840
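		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec, just use
		 * iov_iter_advance(), which skips that first segment (it may
		 * not be PAGE_SIZE aligned).
		 */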
2841 const struct bio_vec *bvec = imu->bvec;
2842
2843 if (offset <= bvec->bv_len) {
2844 iov_iter_advance(iter, offset);
2845 } else {
2846 unsigned long seg_skip;
2847
2848
2849 offset -= bvec->bv_len;
2850 seg_skip = 1 + (offset >> PAGE_SHIFT);
2851
2852 iter->bvec = bvec + seg_skip;
2853 iter->nr_segs -= seg_skip;
2854 iter->count -= bvec->bv_len + offset;
2855 iter->iov_offset = offset & ~PAGE_MASK;
2856 }
2857 }
2858
2859 return 0;
2860}
2861
2862static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2863{
2864 struct io_ring_ctx *ctx = req->ctx;
2865 struct io_mapped_ubuf *imu = req->imu;
2866 u16 index, buf_index = req->buf_index;
2867
2868 if (likely(!imu)) {
2869 if (unlikely(buf_index >= ctx->nr_user_bufs))
2870 return -EFAULT;
2871 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2872 imu = READ_ONCE(ctx->user_bufs[index]);
2873 req->imu = imu;
2874 }
2875 return __io_import_fixed(req, rw, iter, imu);
2876}
2877
2878static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2879{
2880 if (needs_lock)
2881 mutex_unlock(&ctx->uring_lock);
2882}
2883
2884static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2885{
2891
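	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */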
2892 if (needs_lock)
2893 mutex_lock(&ctx->uring_lock);
2894}
2895
2896static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2897 int bgid, struct io_buffer *kbuf,
2898 bool needs_lock)
2899{
2900 struct io_buffer *head;
2901
2902 if (req->flags & REQ_F_BUFFER_SELECTED)
2903 return kbuf;
2904
2905 io_ring_submit_lock(req->ctx, needs_lock);
2906
2907 lockdep_assert_held(&req->ctx->uring_lock);
2908
2909 head = xa_load(&req->ctx->io_buffers, bgid);
2910 if (head) {
2911 if (!list_empty(&head->list)) {
2912 kbuf = list_last_entry(&head->list, struct io_buffer,
2913 list);
2914 list_del(&kbuf->list);
2915 } else {
2916 kbuf = head;
2917 xa_erase(&req->ctx->io_buffers, bgid);
2918 }
2919 if (*len > kbuf->len)
2920 *len = kbuf->len;
2921 } else {
2922 kbuf = ERR_PTR(-ENOBUFS);
2923 }
2924
2925 io_ring_submit_unlock(req->ctx, needs_lock);
2926
2927 return kbuf;
2928}
2929
2930static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2931 bool needs_lock)
2932{
2933 struct io_buffer *kbuf;
2934 u16 bgid;
2935
2936 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2937 bgid = req->buf_index;
2938 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2939 if (IS_ERR(kbuf))
2940 return kbuf;
2941 req->rw.addr = (u64) (unsigned long) kbuf;
2942 req->flags |= REQ_F_BUFFER_SELECTED;
2943 return u64_to_user_ptr(kbuf->addr);
2944}
2945
2946#ifdef CONFIG_COMPAT
2947static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2948 bool needs_lock)
2949{
2950 struct compat_iovec __user *uiov;
2951 compat_ssize_t clen;
2952 void __user *buf;
2953 ssize_t len;
2954
2955 uiov = u64_to_user_ptr(req->rw.addr);
2956 if (!access_ok(uiov, sizeof(*uiov)))
2957 return -EFAULT;
2958 if (__get_user(clen, &uiov->iov_len))
2959 return -EFAULT;
2960 if (clen < 0)
2961 return -EINVAL;
2962
2963 len = clen;
2964 buf = io_rw_buffer_select(req, &len, needs_lock);
2965 if (IS_ERR(buf))
2966 return PTR_ERR(buf);
2967 iov[0].iov_base = buf;
2968 iov[0].iov_len = (compat_size_t) len;
2969 return 0;
2970}
2971#endif
2972
2973static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2974 bool needs_lock)
2975{
2976 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2977 void __user *buf;
2978 ssize_t len;
2979
2980 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2981 return -EFAULT;
2982
2983 len = iov[0].iov_len;
2984 if (len < 0)
2985 return -EINVAL;
2986 buf = io_rw_buffer_select(req, &len, needs_lock);
2987 if (IS_ERR(buf))
2988 return PTR_ERR(buf);
2989 iov[0].iov_base = buf;
2990 iov[0].iov_len = len;
2991 return 0;
2992}
2993
2994static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2995 bool needs_lock)
2996{
2997 if (req->flags & REQ_F_BUFFER_SELECTED) {
2998 struct io_buffer *kbuf;
2999
3000 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3001 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3002 iov[0].iov_len = kbuf->len;
3003 return 0;
3004 }
3005 if (req->rw.len != 1)
3006 return -EINVAL;
3007
3008#ifdef CONFIG_COMPAT
3009 if (req->ctx->compat)
3010 return io_compat_import(req, iov, needs_lock);
3011#endif
3012
3013 return __io_iov_buffer_select(req, iov, needs_lock);
3014}
3015
3016static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3017 struct iov_iter *iter, bool needs_lock)
3018{
3019 void __user *buf = u64_to_user_ptr(req->rw.addr);
3020 size_t sqe_len = req->rw.len;
3021 u8 opcode = req->opcode;
3022 ssize_t ret;
3023
3024 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
3025 *iovec = NULL;
3026 return io_import_fixed(req, rw, iter);
3027 }
3028
3029
3030 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
3031 return -EINVAL;
3032
3033 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
3034 if (req->flags & REQ_F_BUFFER_SELECT) {
3035 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
3036 if (IS_ERR(buf))
3037 return PTR_ERR(buf);
3038 req->rw.len = sqe_len;
3039 }
3040
3041 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3042 *iovec = NULL;
3043 return ret;
3044 }
3045
3046 if (req->flags & REQ_F_BUFFER_SELECT) {
3047 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3048 if (!ret)
3049 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
3050 *iovec = NULL;
3051 return ret;
3052 }
3053
3054 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3055 req->ctx->compat);
3056}
3057
3058static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3059{
3060 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
3061}
3062
3063
3064
3065
3066
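/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */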
3067static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
3068{
3069 struct kiocb *kiocb = &req->rw.kiocb;
3070 struct file *file = req->file;
3071 ssize_t ret = 0;
3072
3073
3074
3075
3076
3077
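	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */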
3078 if (kiocb->ki_flags & IOCB_HIPRI)
3079 return -EOPNOTSUPP;
3080 if (kiocb->ki_flags & IOCB_NOWAIT)
3081 return -EAGAIN;
3082
3083 while (iov_iter_count(iter)) {
3084 struct iovec iovec;
3085 ssize_t nr;
3086
3087 if (!iov_iter_is_bvec(iter)) {
3088 iovec = iov_iter_iovec(iter);
3089 } else {
3090 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3091 iovec.iov_len = req->rw.len;
3092 }
3093
3094 if (rw == READ) {
3095 nr = file->f_op->read(file, iovec.iov_base,
3096 iovec.iov_len, io_kiocb_ppos(kiocb));
3097 } else {
3098 nr = file->f_op->write(file, iovec.iov_base,
3099 iovec.iov_len, io_kiocb_ppos(kiocb));
3100 }
3101
3102 if (nr < 0) {
3103 if (!ret)
3104 ret = nr;
3105 break;
3106 }
3107 ret += nr;
3108 if (nr != iovec.iov_len)
3109 break;
3110 req->rw.len -= nr;
3111 req->rw.addr += nr;
3112 iov_iter_advance(iter, nr);
3113 }
3114
3115 return ret;
3116}
3117
3118static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3119 const struct iovec *fast_iov, struct iov_iter *iter)
3120{
3121 struct io_async_rw *rw = req->async_data;
3122
3123 memcpy(&rw->iter, iter, sizeof(*iter));
3124 rw->free_iovec = iovec;
3125 rw->bytes_done = 0;
3126
3127 if (iov_iter_is_bvec(iter))
3128 return;
3129 if (!iovec) {
3130 unsigned iov_off = 0;
3131
3132 rw->iter.iov = rw->fast_iov;
3133 if (iter->iov != fast_iov) {
3134 iov_off = iter->iov - fast_iov;
3135 rw->iter.iov += iov_off;
3136 }
3137 if (rw->fast_iov != fast_iov)
3138 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
3139 sizeof(struct iovec) * iter->nr_segs);
3140 } else {
3141 req->flags |= REQ_F_NEED_CLEANUP;
3142 }
3143}
3144
3145static inline int io_alloc_async_data(struct io_kiocb *req)
3146{
3147 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3148 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3149 return req->async_data == NULL;
3150}
3151
3152static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3153 const struct iovec *fast_iov,
3154 struct iov_iter *iter, bool force)
3155{
3156 if (!force && !io_op_defs[req->opcode].needs_async_setup)
3157 return 0;
3158 if (!req->async_data) {
3159 if (io_alloc_async_data(req)) {
3160 kfree(iovec);
3161 return -ENOMEM;
3162 }
3163
3164 io_req_map_rw(req, iovec, fast_iov, iter);
3165 }
3166 return 0;
3167}
3168
3169static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
3170{
3171 struct io_async_rw *iorw = req->async_data;
3172 struct iovec *iov = iorw->fast_iov;
3173 int ret;
3174
3175 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
3176 if (unlikely(ret < 0))
3177 return ret;
3178
3179 iorw->bytes_done = 0;
3180 iorw->free_iovec = iov;
3181 if (iov)
3182 req->flags |= REQ_F_NEED_CLEANUP;
3183 return 0;
3184}
3185
3186static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3187{
3188 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3189 return -EBADF;
3190 return io_prep_rw(req, sqe);
3191}
3192
3202
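/*
 * This is our waitqueue callback handler, registered through lock_page_async()
 * when we initially tried to do the IO with the iocb armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */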
3203static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3204 int sync, void *arg)
3205{
3206 struct wait_page_queue *wpq;
3207 struct io_kiocb *req = wait->private;
3208 struct wait_page_key *key = arg;
3209
3210 wpq = container_of(wait, struct wait_page_queue, wait);
3211
3212 if (!wake_page_match(wpq, key))
3213 return 0;
3214
3215 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
3216 list_del_init(&wait->entry);
3217
3218
3219 req_ref_get(req);
3220 io_req_task_queue(req);
3221 return 1;
3222}
3223
3235
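/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */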
3236static bool io_rw_should_retry(struct io_kiocb *req)
3237{
3238 struct io_async_rw *rw = req->async_data;
3239 struct wait_page_queue *wait = &rw->wpq;
3240 struct kiocb *kiocb = &req->rw.kiocb;
3241
3242
3243 if (req->flags & REQ_F_NOWAIT)
3244 return false;
3245
3246
3247 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
3248 return false;
3249
3250
3251
3252
3253
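	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */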
3254 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3255 return false;
3256
3257 wait->wait.func = io_async_buf_func;
3258 wait->wait.private = req;
3259 wait->wait.flags = 0;
3260 INIT_LIST_HEAD(&wait->wait.entry);
3261 kiocb->ki_flags |= IOCB_WAITQ;
3262 kiocb->ki_flags &= ~IOCB_NOWAIT;
3263 kiocb->ki_waitq = wait;
3264 return true;
3265}
3266
3267static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3268{
3269 if (req->file->f_op->read_iter)
3270 return call_read_iter(req->file, &req->rw.kiocb, iter);
3271 else if (req->file->f_op->read)
3272 return loop_rw_iter(READ, req, iter);
3273 else
3274 return -EINVAL;
3275}
3276
3277static int io_read(struct io_kiocb *req, unsigned int issue_flags)
3278{
3279 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3280 struct kiocb *kiocb = &req->rw.kiocb;
3281 struct iov_iter __iter, *iter = &__iter;
3282 struct io_async_rw *rw = req->async_data;
3283 ssize_t io_size, ret, ret2;
3284 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3285
3286 if (rw) {
3287 iter = &rw->iter;
3288 iovec = NULL;
3289 } else {
3290 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3291 if (ret < 0)
3292 return ret;
3293 }
3294 io_size = iov_iter_count(iter);
3295 req->result = io_size;
3296
3297
3298 if (!force_nonblock)
3299 kiocb->ki_flags &= ~IOCB_NOWAIT;
3300 else
3301 kiocb->ki_flags |= IOCB_NOWAIT;
3302
3303
3304 if (force_nonblock && !io_file_supports_async(req, READ)) {
3305 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3306 return ret ?: -EAGAIN;
3307 }
3308
3309 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
3310 if (unlikely(ret)) {
3311 kfree(iovec);
3312 return ret;
3313 }
3314
3315 ret = io_iter_do_read(req, iter);
3316
3317 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
3318 req->flags &= ~REQ_F_REISSUE;
3319
3320 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
3321 goto done;
3322
3323 if (req->flags & REQ_F_NOWAIT)
3324 goto done;
3325
3326 iov_iter_revert(iter, io_size - iov_iter_count(iter));
3327 ret = 0;
3328 } else if (ret == -EIOCBQUEUED) {
3329 goto out_free;
3330 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
3331 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
3332
3333 goto done;
3334 }
3335
3336 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3337 if (ret2)
3338 return ret2;
3339
3340 iovec = NULL;
3341 rw = req->async_data;
3342
3343 iter = &rw->iter;
3344
3345 do {
3346 io_size -= ret;
3347 rw->bytes_done += ret;
3348
3349 if (!io_rw_should_retry(req)) {
3350 kiocb->ki_flags &= ~IOCB_WAITQ;
3351 return -EAGAIN;
3352 }
3353
3359
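		/*
		 * Now retry the read with the IOCB_WAITQ parts set in the
		 * iocb. If we get -EIOCBQUEUED, then we'll get a notification
		 * when the desired page gets unlocked. We can also get a
		 * partial read here, and if we do, then just retry at the new
		 * offset.
		 */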
3360 ret = io_iter_do_read(req, iter);
3361 if (ret == -EIOCBQUEUED)
3362 return 0;
3363
3364 kiocb->ki_flags &= ~IOCB_WAITQ;
3365 } while (ret > 0 && ret < io_size);
3366done:
3367 kiocb_done(kiocb, ret, issue_flags);
3368out_free:
3369
3370 if (iovec)
3371 kfree(iovec);
3372 return 0;
3373}
3374
3375static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3376{
3377 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3378 return -EBADF;
3379 return io_prep_rw(req, sqe);
3380}
3381
3382static int io_write(struct io_kiocb *req, unsigned int issue_flags)
3383{
3384 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3385 struct kiocb *kiocb = &req->rw.kiocb;
3386 struct iov_iter __iter, *iter = &__iter;
3387 struct io_async_rw *rw = req->async_data;
3388 ssize_t ret, ret2, io_size;
3389 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3390
3391 if (rw) {
3392 iter = &rw->iter;
3393 iovec = NULL;
3394 } else {
3395 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3396 if (ret < 0)
3397 return ret;
3398 }
3399 io_size = iov_iter_count(iter);
3400 req->result = io_size;
3401
3402
3403 if (!force_nonblock)
3404 kiocb->ki_flags &= ~IOCB_NOWAIT;
3405 else
3406 kiocb->ki_flags |= IOCB_NOWAIT;
3407
3408
3409 if (force_nonblock && !io_file_supports_async(req, WRITE))
3410 goto copy_iov;
3411
3412
3413 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3414 (req->flags & REQ_F_ISREG))
3415 goto copy_iov;
3416
3417 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
3418 if (unlikely(ret))
3419 goto out_free;
3420
3427
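	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */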
3428 if (req->flags & REQ_F_ISREG) {
3429 sb_start_write(file_inode(req->file)->i_sb);
3430 __sb_writers_release(file_inode(req->file)->i_sb,
3431 SB_FREEZE_WRITE);
3432 }
3433 kiocb->ki_flags |= IOCB_WRITE;
3434
3435 if (req->file->f_op->write_iter)
3436 ret2 = call_write_iter(req->file, kiocb, iter);
3437 else if (req->file->f_op->write)
3438 ret2 = loop_rw_iter(WRITE, req, iter);
3439 else
3440 ret2 = -EINVAL;
3441
3442 if (req->flags & REQ_F_REISSUE) {
3443 req->flags &= ~REQ_F_REISSUE;
3444 ret2 = -EAGAIN;
3445 }
3446
3447
3448
3449
3450
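	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */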
3451 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3452 ret2 = -EAGAIN;
3453
3454 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
3455 goto done;
3456 if (!force_nonblock || ret2 != -EAGAIN) {
3457
3458 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3459 goto copy_iov;
3460done:
3461 kiocb_done(kiocb, ret2, issue_flags);
3462 } else {
3463copy_iov:
3464
3465 iov_iter_revert(iter, io_size - iov_iter_count(iter));
3466 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
3467 return ret ?: -EAGAIN;
3468 }
3469out_free:
3470
3471 if (iovec)
3472 kfree(iovec);
3473 return ret;
3474}
3475
3476static int io_renameat_prep(struct io_kiocb *req,
3477 const struct io_uring_sqe *sqe)
3478{
3479 struct io_rename *ren = &req->rename;
3480 const char __user *oldf, *newf;
3481
3482 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3483 return -EINVAL;
3484 if (sqe->ioprio || sqe->buf_index)
3485 return -EINVAL;
3486 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3487 return -EBADF;
3488
3489 ren->old_dfd = READ_ONCE(sqe->fd);
3490 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3491 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3492 ren->new_dfd = READ_ONCE(sqe->len);
3493 ren->flags = READ_ONCE(sqe->rename_flags);
3494
3495 ren->oldpath = getname(oldf);
3496 if (IS_ERR(ren->oldpath))
3497 return PTR_ERR(ren->oldpath);
3498
3499 ren->newpath = getname(newf);
3500 if (IS_ERR(ren->newpath)) {
3501 putname(ren->oldpath);
3502 return PTR_ERR(ren->newpath);
3503 }
3504
3505 req->flags |= REQ_F_NEED_CLEANUP;
3506 return 0;
3507}
3508
3509static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
3510{
3511 struct io_rename *ren = &req->rename;
3512 int ret;
3513
3514 if (issue_flags & IO_URING_F_NONBLOCK)
3515 return -EAGAIN;
3516
3517 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3518 ren->newpath, ren->flags);
3519
3520 req->flags &= ~REQ_F_NEED_CLEANUP;
3521 if (ret < 0)
3522 req_set_fail(req);
3523 io_req_complete(req, ret);
3524 return 0;
3525}
3526
3527static int io_unlinkat_prep(struct io_kiocb *req,
3528 const struct io_uring_sqe *sqe)
3529{
3530 struct io_unlink *un = &req->unlink;
3531 const char __user *fname;
3532
3533 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3534 return -EINVAL;
3535 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3536 return -EINVAL;
3537 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3538 return -EBADF;
3539
3540 un->dfd = READ_ONCE(sqe->fd);
3541
3542 un->flags = READ_ONCE(sqe->unlink_flags);
3543 if (un->flags & ~AT_REMOVEDIR)
3544 return -EINVAL;
3545
3546 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3547 un->filename = getname(fname);
3548 if (IS_ERR(un->filename))
3549 return PTR_ERR(un->filename);
3550
3551 req->flags |= REQ_F_NEED_CLEANUP;
3552 return 0;
3553}
3554
3555static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
3556{
3557 struct io_unlink *un = &req->unlink;
3558 int ret;
3559
3560 if (issue_flags & IO_URING_F_NONBLOCK)
3561 return -EAGAIN;
3562
3563 if (un->flags & AT_REMOVEDIR)
3564 ret = do_rmdir(un->dfd, un->filename);
3565 else
3566 ret = do_unlinkat(un->dfd, un->filename);
3567
3568 req->flags &= ~REQ_F_NEED_CLEANUP;
3569 if (ret < 0)
3570 req_set_fail(req);
3571 io_req_complete(req, ret);
3572 return 0;
3573}
3574
3575static int io_shutdown_prep(struct io_kiocb *req,
3576 const struct io_uring_sqe *sqe)
3577{
3578#if defined(CONFIG_NET)
3579 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3580 return -EINVAL;
3581 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3582 sqe->buf_index)
3583 return -EINVAL;
3584
3585 req->shutdown.how = READ_ONCE(sqe->len);
3586 return 0;
3587#else
3588 return -EOPNOTSUPP;
3589#endif
3590}
3591
3592static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
3593{
3594#if defined(CONFIG_NET)
3595 struct socket *sock;
3596 int ret;
3597
3598 if (issue_flags & IO_URING_F_NONBLOCK)
3599 return -EAGAIN;
3600
3601 sock = sock_from_file(req->file);
3602 if (unlikely(!sock))
3603 return -ENOTSOCK;
3604
3605 ret = __sys_shutdown_sock(sock, req->shutdown.how);
3606 if (ret < 0)
3607 req_set_fail(req);
3608 io_req_complete(req, ret);
3609 return 0;
3610#else
3611 return -EOPNOTSUPP;
3612#endif
3613}
3614
3615static int __io_splice_prep(struct io_kiocb *req,
3616 const struct io_uring_sqe *sqe)
3617{
3618 struct io_splice *sp = &req->splice;
3619 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
3620
3621 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3622 return -EINVAL;
3623
3624 sp->file_in = NULL;
3625 sp->len = READ_ONCE(sqe->len);
3626 sp->flags = READ_ONCE(sqe->splice_flags);
3627
3628 if (unlikely(sp->flags & ~valid_flags))
3629 return -EINVAL;
3630
3631 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3632 (sp->flags & SPLICE_F_FD_IN_FIXED));
3633 if (!sp->file_in)
3634 return -EBADF;
3635 req->flags |= REQ_F_NEED_CLEANUP;
3636 return 0;
3637}
3638
3639static int io_tee_prep(struct io_kiocb *req,
3640 const struct io_uring_sqe *sqe)
3641{
3642 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3643 return -EINVAL;
3644 return __io_splice_prep(req, sqe);
3645}
3646
3647static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
3648{
3649 struct io_splice *sp = &req->splice;
3650 struct file *in = sp->file_in;
3651 struct file *out = sp->file_out;
3652 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3653 long ret = 0;
3654
3655 if (issue_flags & IO_URING_F_NONBLOCK)
3656 return -EAGAIN;
3657 if (sp->len)
3658 ret = do_tee(in, out, sp->len, flags);
3659
3660 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3661 io_put_file(in);
3662 req->flags &= ~REQ_F_NEED_CLEANUP;
3663
3664 if (ret != sp->len)
3665 req_set_fail(req);
3666 io_req_complete(req, ret);
3667 return 0;
3668}
3669
3670static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3671{
3672 struct io_splice *sp = &req->splice;
3673
3674 sp->off_in = READ_ONCE(sqe->splice_off_in);
3675 sp->off_out = READ_ONCE(sqe->off);
3676 return __io_splice_prep(req, sqe);
3677}
3678
3679static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
3680{
3681 struct io_splice *sp = &req->splice;
3682 struct file *in = sp->file_in;
3683 struct file *out = sp->file_out;
3684 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3685 loff_t *poff_in, *poff_out;
3686 long ret = 0;
3687
3688 if (issue_flags & IO_URING_F_NONBLOCK)
3689 return -EAGAIN;
3690
3691 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3692 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
3693
3694 if (sp->len)
3695 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
3696
3697 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3698 io_put_file(in);
3699 req->flags &= ~REQ_F_NEED_CLEANUP;
3700
3701 if (ret != sp->len)
3702 req_set_fail(req);
3703 io_req_complete(req, ret);
3704 return 0;
3705}
3706
3707
3708
3709
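/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */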
3710static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
3711{
3712 struct io_ring_ctx *ctx = req->ctx;
3713
3714 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3715 return -EINVAL;
3716
3717 __io_req_complete(req, issue_flags, 0, 0);
3718 return 0;
3719}
3720
3721static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3722{
3723 struct io_ring_ctx *ctx = req->ctx;
3724
3725 if (!req->file)
3726 return -EBADF;
3727
3728 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3729 return -EINVAL;
3730 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3731 return -EINVAL;
3732
3733 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3734 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3735 return -EINVAL;
3736
3737 req->sync.off = READ_ONCE(sqe->off);
3738 req->sync.len = READ_ONCE(sqe->len);
3739 return 0;
3740}
3741
3742static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
3743{
3744 loff_t end = req->sync.off + req->sync.len;
3745 int ret;
3746
3747
3748 if (issue_flags & IO_URING_F_NONBLOCK)
3749 return -EAGAIN;
3750
3751 ret = vfs_fsync_range(req->file, req->sync.off,
3752 end > 0 ? end : LLONG_MAX,
3753 req->sync.flags & IORING_FSYNC_DATASYNC);
3754 if (ret < 0)
3755 req_set_fail(req);
3756 io_req_complete(req, ret);
3757 return 0;
3758}
3759
3760static int io_fallocate_prep(struct io_kiocb *req,
3761 const struct io_uring_sqe *sqe)
3762{
3763 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3764 return -EINVAL;
3765 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3766 return -EINVAL;
3767
3768 req->sync.off = READ_ONCE(sqe->off);
3769 req->sync.len = READ_ONCE(sqe->addr);
3770 req->sync.mode = READ_ONCE(sqe->len);
3771 return 0;
3772}
3773
3774static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
3775{
3776 int ret;
3777
3778
3779 if (issue_flags & IO_URING_F_NONBLOCK)
3780 return -EAGAIN;
3781 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3782 req->sync.len);
3783 if (ret < 0)
3784 req_set_fail(req);
3785 io_req_complete(req, ret);
3786 return 0;
3787}
3788
3789static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3790{
3791 const char __user *fname;
3792 int ret;
3793
3794 if (unlikely(sqe->ioprio || sqe->buf_index))
3795 return -EINVAL;
3796 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3797 return -EBADF;
3798
3799
3800 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
3801 req->open.how.flags |= O_LARGEFILE;
3802
3803 req->open.dfd = READ_ONCE(sqe->fd);
3804 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3805 req->open.filename = getname(fname);
3806 if (IS_ERR(req->open.filename)) {
3807 ret = PTR_ERR(req->open.filename);
3808 req->open.filename = NULL;
3809 return ret;
3810 }
3811 req->open.nofile = rlimit(RLIMIT_NOFILE);
3812 req->flags |= REQ_F_NEED_CLEANUP;
3813 return 0;
3814}
3815
3816static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3817{
3818 u64 flags, mode;
3819
3820 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3821 return -EINVAL;
3822 mode = READ_ONCE(sqe->len);
3823 flags = READ_ONCE(sqe->open_flags);
3824 req->open.how = build_open_how(flags, mode);
3825 return __io_openat_prep(req, sqe);
3826}
3827
3828static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3829{
3830 struct open_how __user *how;
3831 size_t len;
3832 int ret;
3833
3834 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3835 return -EINVAL;
3836 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3837 len = READ_ONCE(sqe->len);
3838 if (len < OPEN_HOW_SIZE_VER0)
3839 return -EINVAL;
3840
3841 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3842 len);
3843 if (ret)
3844 return ret;
3845
3846 return __io_openat_prep(req, sqe);
3847}
3848
3849static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
3850{
3851 struct open_flags op;
3852 struct file *file;
3853 bool nonblock_set;
3854 bool resolve_nonblock;
3855 int ret;
3856
3857 ret = build_open_flags(&req->open.how, &op);
3858 if (ret)
3859 goto err;
3860 nonblock_set = op.open_flag & O_NONBLOCK;
3861 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
3862 if (issue_flags & IO_URING_F_NONBLOCK) {
3863
3864
3865
3866
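		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always -EAGAIN.
		 */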
3867 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3868 return -EAGAIN;
3869 op.lookup_flags |= LOOKUP_CACHED;
3870 op.open_flag |= O_NONBLOCK;
3871 }
3872
3873 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
3874 if (ret < 0)
3875 goto err;
3876
3877 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3878 if (IS_ERR(file)) {
3879
3880
3881
3882
3883
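		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */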
3884 put_unused_fd(ret);
3885
3886 ret = PTR_ERR(file);
3887
3888 if (ret == -EAGAIN &&
3889 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
3890 return -EAGAIN;
3891 goto err;
3892 }
3893
3894 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3895 file->f_flags &= ~O_NONBLOCK;
3896 fsnotify_open(file);
3897 fd_install(ret, file);
3898err:
3899 putname(req->open.filename);
3900 req->flags &= ~REQ_F_NEED_CLEANUP;
3901 if (ret < 0)
3902 req_set_fail(req);
3903 __io_req_complete(req, issue_flags, ret, 0);
3904 return 0;
3905}
3906
3907static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
3908{
3909 return io_openat2(req, issue_flags);
3910}
3911
3912static int io_remove_buffers_prep(struct io_kiocb *req,
3913 const struct io_uring_sqe *sqe)
3914{
3915 struct io_provide_buf *p = &req->pbuf;
3916 u64 tmp;
3917
3918 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3919 return -EINVAL;
3920
3921 tmp = READ_ONCE(sqe->fd);
3922 if (!tmp || tmp > USHRT_MAX)
3923 return -EINVAL;
3924
3925 memset(p, 0, sizeof(*p));
3926 p->nbufs = tmp;
3927 p->bgid = READ_ONCE(sqe->buf_group);
3928 return 0;
3929}
3930
3931static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3932 int bgid, unsigned nbufs)
3933{
3934 unsigned i = 0;
3935
3936
3937 if (!nbufs)
3938 return 0;
3939
3940
3941 while (!list_empty(&buf->list)) {
3942 struct io_buffer *nxt;
3943
3944 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3945 list_del(&nxt->list);
3946 kfree(nxt);
3947 if (++i == nbufs)
3948 return i;
3949 }
3950 i++;
3951 kfree(buf);
3952 xa_erase(&ctx->io_buffers, bgid);
3953
3954 return i;
3955}
3956
3957static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
3958{
3959 struct io_provide_buf *p = &req->pbuf;
3960 struct io_ring_ctx *ctx = req->ctx;
3961 struct io_buffer *head;
3962 int ret = 0;
3963 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3964
3965 io_ring_submit_lock(ctx, !force_nonblock);
3966
3967 lockdep_assert_held(&ctx->uring_lock);
3968
3969 ret = -ENOENT;
3970 head = xa_load(&ctx->io_buffers, p->bgid);
3971 if (head)
3972 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3973 if (ret < 0)
3974 req_set_fail(req);
3975
3976
3977 __io_req_complete(req, issue_flags, ret, 0);
3978 io_ring_submit_unlock(ctx, !force_nonblock);
3979 return 0;
3980}
3981
3982static int io_provide_buffers_prep(struct io_kiocb *req,
3983 const struct io_uring_sqe *sqe)
3984{
3985 unsigned long size, tmp_check;
3986 struct io_provide_buf *p = &req->pbuf;
3987 u64 tmp;
3988
3989 if (sqe->ioprio || sqe->rw_flags)
3990 return -EINVAL;
3991
3992 tmp = READ_ONCE(sqe->fd);
3993 if (!tmp || tmp > USHRT_MAX)
3994 return -E2BIG;
3995 p->nbufs = tmp;
3996 p->addr = READ_ONCE(sqe->addr);
3997 p->len = READ_ONCE(sqe->len);
3998
3999 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4000 &size))
4001 return -EOVERFLOW;
4002 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4003 return -EOVERFLOW;
4004
4005 size = (unsigned long)p->len * p->nbufs;
4006 if (!access_ok(u64_to_user_ptr(p->addr), size))
4007 return -EFAULT;
4008
4009 p->bgid = READ_ONCE(sqe->buf_group);
4010 tmp = READ_ONCE(sqe->off);
4011 if (tmp > USHRT_MAX)
4012 return -E2BIG;
4013 p->bid = tmp;
4014 return 0;
4015}
4016
4017static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4018{
4019 struct io_buffer *buf;
4020 u64 addr = pbuf->addr;
4021 int i, bid = pbuf->bid;
4022
4023 for (i = 0; i < pbuf->nbufs; i++) {
4024 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4025 if (!buf)
4026 break;
4027
4028 buf->addr = addr;
4029 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
4030 buf->bid = bid;
4031 addr += pbuf->len;
4032 bid++;
4033 if (!*head) {
4034 INIT_LIST_HEAD(&buf->list);
4035 *head = buf;
4036 } else {
4037 list_add_tail(&buf->list, &(*head)->list);
4038 }
4039 }
4040
4041 return i ? i : -ENOMEM;
4042}
4043
4044static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
4045{
4046 struct io_provide_buf *p = &req->pbuf;
4047 struct io_ring_ctx *ctx = req->ctx;
4048 struct io_buffer *head, *list;
4049 int ret = 0;
4050 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4051
4052 io_ring_submit_lock(ctx, !force_nonblock);
4053
4054 lockdep_assert_held(&ctx->uring_lock);
4055
4056 list = head = xa_load(&ctx->io_buffers, p->bgid);
4057
4058 ret = io_add_buffers(p, &head);
4059 if (ret >= 0 && !list) {
4060 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4061 if (ret < 0)
4062 __io_remove_buffers(ctx, head, p->bgid, -1U);
4063 }
4064 if (ret < 0)
4065 req_set_fail(req);
4066
4067 __io_req_complete(req, issue_flags, ret, 0);
4068 io_ring_submit_unlock(ctx, !force_nonblock);
4069 return 0;
4070}
4071
4072static int io_epoll_ctl_prep(struct io_kiocb *req,
4073 const struct io_uring_sqe *sqe)
4074{
4075#if defined(CONFIG_EPOLL)
4076 if (sqe->ioprio || sqe->buf_index)
4077 return -EINVAL;
4078 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4079 return -EINVAL;
4080
4081 req->epoll.epfd = READ_ONCE(sqe->fd);
4082 req->epoll.op = READ_ONCE(sqe->len);
4083 req->epoll.fd = READ_ONCE(sqe->off);
4084
4085 if (ep_op_has_event(req->epoll.op)) {
4086 struct epoll_event __user *ev;
4087
4088 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4089 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4090 return -EFAULT;
4091 }
4092
4093 return 0;
4094#else
4095 return -EOPNOTSUPP;
4096#endif
4097}
4098
4099static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
4100{
4101#if defined(CONFIG_EPOLL)
4102 struct io_epoll *ie = &req->epoll;
4103 int ret;
4104 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4105
4106 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4107 if (force_nonblock && ret == -EAGAIN)
4108 return -EAGAIN;
4109
4110 if (ret < 0)
4111 req_set_fail(req);
4112 __io_req_complete(req, issue_flags, ret, 0);
4113 return 0;
4114#else
4115 return -EOPNOTSUPP;
4116#endif
4117}
4118
4119static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4120{
4121#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4122 if (sqe->ioprio || sqe->buf_index || sqe->off)
4123 return -EINVAL;
4124 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4125 return -EINVAL;
4126
4127 req->madvise.addr = READ_ONCE(sqe->addr);
4128 req->madvise.len = READ_ONCE(sqe->len);
4129 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4130 return 0;
4131#else
4132 return -EOPNOTSUPP;
4133#endif
4134}
4135
4136static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
4137{
4138#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4139 struct io_madvise *ma = &req->madvise;
4140 int ret;
4141
4142 if (issue_flags & IO_URING_F_NONBLOCK)
4143 return -EAGAIN;
4144
4145 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
4146 if (ret < 0)
4147 req_set_fail(req);
4148 io_req_complete(req, ret);
4149 return 0;
4150#else
4151 return -EOPNOTSUPP;
4152#endif
4153}
4154
4155static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4156{
4157 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4158 return -EINVAL;
4159 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4160 return -EINVAL;
4161
4162 req->fadvise.offset = READ_ONCE(sqe->off);
4163 req->fadvise.len = READ_ONCE(sqe->len);
4164 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4165 return 0;
4166}
4167
4168static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4169{
4170 struct io_fadvise *fa = &req->fadvise;
4171 int ret;
4172
4173 if (issue_flags & IO_URING_F_NONBLOCK) {
4174 switch (fa->advice) {
4175 case POSIX_FADV_NORMAL:
4176 case POSIX_FADV_RANDOM:
4177 case POSIX_FADV_SEQUENTIAL:
4178 break;
4179 default:
4180 return -EAGAIN;
4181 }
4182 }
4183
4184 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4185 if (ret < 0)
4186 req_set_fail(req);
4187 __io_req_complete(req, issue_flags, ret, 0);
4188 return 0;
4189}
4190
4191static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4192{
4193 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4194 return -EINVAL;
4195 if (sqe->ioprio || sqe->buf_index)
4196 return -EINVAL;
4197 if (req->flags & REQ_F_FIXED_FILE)
4198 return -EBADF;
4199
4200 req->statx.dfd = READ_ONCE(sqe->fd);
4201 req->statx.mask = READ_ONCE(sqe->len);
4202 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
4203 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4204 req->statx.flags = READ_ONCE(sqe->statx_flags);
4205
4206 return 0;
4207}
4208
4209static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
4210{
4211 struct io_statx *ctx = &req->statx;
4212 int ret;
4213
4214 if (issue_flags & IO_URING_F_NONBLOCK)
4215 return -EAGAIN;
4216
4217 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4218 ctx->buffer);
4219
4220 if (ret < 0)
4221 req_set_fail(req);
4222 io_req_complete(req, ret);
4223 return 0;
4224}
4225
4226static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4227{
4228 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4229 return -EINVAL;
4230 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4231 sqe->rw_flags || sqe->buf_index)
4232 return -EINVAL;
4233 if (req->flags & REQ_F_FIXED_FILE)
4234 return -EBADF;
4235
4236 req->close.fd = READ_ONCE(sqe->fd);
4237 return 0;
4238}
4239
4240static int io_close(struct io_kiocb *req, unsigned int issue_flags)
4241{
4242 struct files_struct *files = current->files;
4243 struct io_close *close = &req->close;
4244 struct fdtable *fdt;
4245 struct file *file = NULL;
4246 int ret = -EBADF;
4247
4248 spin_lock(&files->file_lock);
4249 fdt = files_fdtable(files);
4250 if (close->fd >= fdt->max_fds) {
4251 spin_unlock(&files->file_lock);
4252 goto err;
4253 }
4254 file = fdt->fd[close->fd];
4255 if (!file || file->f_op == &io_uring_fops) {
4256 spin_unlock(&files->file_lock);
4257 file = NULL;
4258 goto err;
4259 }
4260
4261
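	/* if the file has a flush method, be safe and punt to async */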
4262 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
4263 spin_unlock(&files->file_lock);
4264 return -EAGAIN;
4265 }
4266
4267 ret = __close_fd_get_file(close->fd, &file);
4268 spin_unlock(&files->file_lock);
4269 if (ret < 0) {
4270 if (ret == -ENOENT)
4271 ret = -EBADF;
4272 goto err;
4273 }
4274
4275
4276 ret = filp_close(file, current->files);
4277err:
4278 if (ret < 0)
4279 req_set_fail(req);
4280 if (file)
4281 fput(file);
4282 __io_req_complete(req, issue_flags, ret, 0);
4283 return 0;
4284}
4285
4286static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4287{
4288 struct io_ring_ctx *ctx = req->ctx;
4289
4290 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4291 return -EINVAL;
4292 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4293 return -EINVAL;
4294
4295 req->sync.off = READ_ONCE(sqe->off);
4296 req->sync.len = READ_ONCE(sqe->len);
4297 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
4298 return 0;
4299}
4300
4301static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
4302{
4303 int ret;
4304
4305
4306 if (issue_flags & IO_URING_F_NONBLOCK)
4307 return -EAGAIN;
4308
4309 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
4310 req->sync.flags);
4311 if (ret < 0)
4312 req_set_fail(req);
4313 io_req_complete(req, ret);
4314 return 0;
4315}
4316
4317#if defined(CONFIG_NET)
4318static int io_setup_async_msg(struct io_kiocb *req,
4319 struct io_async_msghdr *kmsg)
4320{
4321 struct io_async_msghdr *async_msg = req->async_data;
4322
4323 if (async_msg)
4324 return -EAGAIN;
4325 if (io_alloc_async_data(req)) {
4326 kfree(kmsg->free_iov);
4327 return -ENOMEM;
4328 }
4329 async_msg = req->async_data;
4330 req->flags |= REQ_F_NEED_CLEANUP;
4331 memcpy(async_msg, kmsg, sizeof(*kmsg));
4332 async_msg->msg.msg_name = &async_msg->addr;
4333
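	/* if the iter was using the on-stack fast_iov, repoint it at the async copy */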
4334 if (!async_msg->free_iov)
4335 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4336
4337 return -EAGAIN;
4338}
4339
4340static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4341 struct io_async_msghdr *iomsg)
4342{
4343 iomsg->msg.msg_name = &iomsg->addr;
4344 iomsg->free_iov = iomsg->fast_iov;
4345 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4346 req->sr_msg.msg_flags, &iomsg->free_iov);
4347}
4348
4349static int io_sendmsg_prep_async(struct io_kiocb *req)
4350{
4351 int ret;
4352
4353 ret = io_sendmsg_copy_hdr(req, req->async_data);
4354 if (!ret)
4355 req->flags |= REQ_F_NEED_CLEANUP;
4356 return ret;
4357}
4358
4359static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4360{
4361 struct io_sr_msg *sr = &req->sr_msg;
4362
4363 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4364 return -EINVAL;
4365
4366 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4367 sr->len = READ_ONCE(sqe->len);
4368 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4369 if (sr->msg_flags & MSG_DONTWAIT)
4370 req->flags |= REQ_F_NOWAIT;
4371
4372#ifdef CONFIG_COMPAT
4373 if (req->ctx->compat)
4374 sr->msg_flags |= MSG_CMSG_COMPAT;
4375#endif
4376 return 0;
4377}
4378
4379static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
4380{
4381 struct io_async_msghdr iomsg, *kmsg;
4382 struct socket *sock;
4383 unsigned flags;
4384 int min_ret = 0;
4385 int ret;
4386
4387 sock = sock_from_file(req->file);
4388 if (unlikely(!sock))
4389 return -ENOTSOCK;
4390
4391 kmsg = req->async_data;
4392 if (!kmsg) {
4393 ret = io_sendmsg_copy_hdr(req, &iomsg);
4394 if (ret)
4395 return ret;
4396 kmsg = &iomsg;
4397 }
4398
4399 flags = req->sr_msg.msg_flags;
4400 if (issue_flags & IO_URING_F_NONBLOCK)
4401 flags |= MSG_DONTWAIT;
4402 if (flags & MSG_WAITALL)
4403 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4404
4405 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4406 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4407 return io_setup_async_msg(req, kmsg);
4408 if (ret == -ERESTARTSYS)
4409 ret = -EINTR;
4410
4411
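	/* fast path, check for non-NULL to avoid function call */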
4412 if (kmsg->free_iov)
4413 kfree(kmsg->free_iov);
4414 req->flags &= ~REQ_F_NEED_CLEANUP;
4415 if (ret < min_ret)
4416 req_set_fail(req);
4417 __io_req_complete(req, issue_flags, ret, 0);
4418 return 0;
4419}
4420
4421static int io_send(struct io_kiocb *req, unsigned int issue_flags)
4422{
4423 struct io_sr_msg *sr = &req->sr_msg;
4424 struct msghdr msg;
4425 struct iovec iov;
4426 struct socket *sock;
4427 unsigned flags;
4428 int min_ret = 0;
4429 int ret;
4430
4431 sock = sock_from_file(req->file);
4432 if (unlikely(!sock))
4433 return -ENOTSOCK;
4434
4435 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4436 if (unlikely(ret))
4437 return ret;
4438
4439 msg.msg_name = NULL;
4440 msg.msg_control = NULL;
4441 msg.msg_controllen = 0;
4442 msg.msg_namelen = 0;
4443
4444 flags = req->sr_msg.msg_flags;
4445 if (issue_flags & IO_URING_F_NONBLOCK)
4446 flags |= MSG_DONTWAIT;
4447 if (flags & MSG_WAITALL)
4448 min_ret = iov_iter_count(&msg.msg_iter);
4449
4450 msg.msg_flags = flags;
4451 ret = sock_sendmsg(sock, &msg);
4452 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4453 return -EAGAIN;
4454 if (ret == -ERESTARTSYS)
4455 ret = -EINTR;
4456
4457 if (ret < min_ret)
4458 req_set_fail(req);
4459 __io_req_complete(req, issue_flags, ret, 0);
4460 return 0;
4461}
4462
4463static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4464 struct io_async_msghdr *iomsg)
4465{
4466 struct io_sr_msg *sr = &req->sr_msg;
4467 struct iovec __user *uiov;
4468 size_t iov_len;
4469 int ret;
4470
4471 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4472 &iomsg->uaddr, &uiov, &iov_len);
4473 if (ret)
4474 return ret;
4475
4476 if (req->flags & REQ_F_BUFFER_SELECT) {
4477 if (iov_len > 1)
4478 return -EINVAL;
4479 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
4480 return -EFAULT;
4481 sr->len = iomsg->fast_iov[0].iov_len;
4482 iomsg->free_iov = NULL;
4483 } else {
4484 iomsg->free_iov = iomsg->fast_iov;
4485 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4486 &iomsg->free_iov, &iomsg->msg.msg_iter,
4487 false);
4488 if (ret > 0)
4489 ret = 0;
4490 }
4491
4492 return ret;
4493}
4494
4495#ifdef CONFIG_COMPAT
4496static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4497 struct io_async_msghdr *iomsg)
4498{
4499 struct io_sr_msg *sr = &req->sr_msg;
4500 struct compat_iovec __user *uiov;
4501 compat_uptr_t ptr;
4502 compat_size_t len;
4503 int ret;
4504
4505 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4506 &ptr, &len);
4507 if (ret)
4508 return ret;
4509
4510 uiov = compat_ptr(ptr);
4511 if (req->flags & REQ_F_BUFFER_SELECT) {
4512 compat_ssize_t clen;
4513
4514 if (len > 1)
4515 return -EINVAL;
4516 if (!access_ok(uiov, sizeof(*uiov)))
4517 return -EFAULT;
4518 if (__get_user(clen, &uiov->iov_len))
4519 return -EFAULT;
4520 if (clen < 0)
4521 return -EINVAL;
4522 sr->len = clen;
4523 iomsg->free_iov = NULL;
4524 } else {
4525 iomsg->free_iov = iomsg->fast_iov;
4526 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4527 UIO_FASTIOV, &iomsg->free_iov,
4528 &iomsg->msg.msg_iter, true);
4529 if (ret < 0)
4530 return ret;
4531 }
4532
4533 return 0;
4534}
4535#endif
4536
4537static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4538 struct io_async_msghdr *iomsg)
4539{
4540 iomsg->msg.msg_name = &iomsg->addr;
4541
4542#ifdef CONFIG_COMPAT
4543 if (req->ctx->compat)
4544 return __io_compat_recvmsg_copy_hdr(req, iomsg);
4545#endif
4546
4547 return __io_recvmsg_copy_hdr(req, iomsg);
4548}
4549
4550static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4551 bool needs_lock)
4552{
4553 struct io_sr_msg *sr = &req->sr_msg;
4554 struct io_buffer *kbuf;
4555
4556 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4557 if (IS_ERR(kbuf))
4558 return kbuf;
4559
4560 sr->kbuf = kbuf;
4561 req->flags |= REQ_F_BUFFER_SELECTED;
4562 return kbuf;
4563}
4564
4565static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4566{
4567 return io_put_kbuf(req, req->sr_msg.kbuf);
4568}
4569
4570static int io_recvmsg_prep_async(struct io_kiocb *req)
4571{
4572 int ret;
4573
4574 ret = io_recvmsg_copy_hdr(req, req->async_data);
4575 if (!ret)
4576 req->flags |= REQ_F_NEED_CLEANUP;
4577 return ret;
4578}
4579
4580static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4581{
4582 struct io_sr_msg *sr = &req->sr_msg;
4583
4584 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4585 return -EINVAL;
4586
4587 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4588 sr->len = READ_ONCE(sqe->len);
4589 sr->bgid = READ_ONCE(sqe->buf_group);
4590 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4591 if (sr->msg_flags & MSG_DONTWAIT)
4592 req->flags |= REQ_F_NOWAIT;
4593
4594#ifdef CONFIG_COMPAT
4595 if (req->ctx->compat)
4596 sr->msg_flags |= MSG_CMSG_COMPAT;
4597#endif
4598 return 0;
4599}
4600
4601static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
4602{
4603 struct io_async_msghdr iomsg, *kmsg;
4604 struct socket *sock;
4605 struct io_buffer *kbuf;
4606 unsigned flags;
4607 int min_ret = 0;
4608 int ret, cflags = 0;
4609 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4610
4611 sock = sock_from_file(req->file);
4612 if (unlikely(!sock))
4613 return -ENOTSOCK;
4614
4615 kmsg = req->async_data;
4616 if (!kmsg) {
4617 ret = io_recvmsg_copy_hdr(req, &iomsg);
4618 if (ret)
4619 return ret;
4620 kmsg = &iomsg;
4621 }
4622
4623 if (req->flags & REQ_F_BUFFER_SELECT) {
4624 kbuf = io_recv_buffer_select(req, !force_nonblock);
4625 if (IS_ERR(kbuf))
4626 return PTR_ERR(kbuf);
4627 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4628 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4629 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
4630 1, req->sr_msg.len);
4631 }
4632
4633 flags = req->sr_msg.msg_flags;
4634 if (force_nonblock)
4635 flags |= MSG_DONTWAIT;
4636 if (flags & MSG_WAITALL)
4637 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4638
4639 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4640 kmsg->uaddr, flags);
4641 if (force_nonblock && ret == -EAGAIN)
4642 return io_setup_async_msg(req, kmsg);
4643 if (ret == -ERESTARTSYS)
4644 ret = -EINTR;
4645
4646 if (req->flags & REQ_F_BUFFER_SELECTED)
4647 cflags = io_put_recv_kbuf(req);
4648
4649 if (kmsg->free_iov)
4650 kfree(kmsg->free_iov);
4651 req->flags &= ~REQ_F_NEED_CLEANUP;
4652 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4653 req_set_fail(req);
4654 __io_req_complete(req, issue_flags, ret, cflags);
4655 return 0;
4656}
4657
4658static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
4659{
4660 struct io_buffer *kbuf;
4661 struct io_sr_msg *sr = &req->sr_msg;
4662 struct msghdr msg;
4663 void __user *buf = sr->buf;
4664 struct socket *sock;
4665 struct iovec iov;
4666 unsigned flags;
4667 int min_ret = 0;
4668 int ret, cflags = 0;
4669 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4670
4671 sock = sock_from_file(req->file);
4672 if (unlikely(!sock))
4673 return -ENOTSOCK;
4674
4675 if (req->flags & REQ_F_BUFFER_SELECT) {
4676 kbuf = io_recv_buffer_select(req, !force_nonblock);
4677 if (IS_ERR(kbuf))
4678 return PTR_ERR(kbuf);
4679 buf = u64_to_user_ptr(kbuf->addr);
4680 }
4681
4682 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
4683 if (unlikely(ret))
4684 goto out_free;
4685
4686 msg.msg_name = NULL;
4687 msg.msg_control = NULL;
4688 msg.msg_controllen = 0;
4689 msg.msg_namelen = 0;
4690 msg.msg_iocb = NULL;
4691 msg.msg_flags = 0;
4692
4693 flags = req->sr_msg.msg_flags;
4694 if (force_nonblock)
4695 flags |= MSG_DONTWAIT;
4696 if (flags & MSG_WAITALL)
4697 min_ret = iov_iter_count(&msg.msg_iter);
4698
4699 ret = sock_recvmsg(sock, &msg, flags);
4700 if (force_nonblock && ret == -EAGAIN)
4701 return -EAGAIN;
4702 if (ret == -ERESTARTSYS)
4703 ret = -EINTR;
4704out_free:
4705 if (req->flags & REQ_F_BUFFER_SELECTED)
4706 cflags = io_put_recv_kbuf(req);
4707 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4708 req_set_fail(req);
4709 __io_req_complete(req, issue_flags, ret, cflags);
4710 return 0;
4711}
4712
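/*
 * Descriptive note (added): prep for IORING_OP_ACCEPT. sqe->addr/addr2 carry
 * the userspace sockaddr and length pointers, sqe->accept_flags the accept4()
 * flags; unused sqe fields must be zero.
 */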
4713static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4714{
4715 struct io_accept *accept = &req->accept;
4716
4717 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4718 return -EINVAL;
4719 if (sqe->ioprio || sqe->len || sqe->buf_index)
4720 return -EINVAL;
4721
4722 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4723 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4724 accept->flags = READ_ONCE(sqe->accept_flags);
4725 accept->nofile = rlimit(RLIMIT_NOFILE);
4726 return 0;
4727}
4728
4729static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
4730{
4731 struct io_accept *accept = &req->accept;
4732 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4733 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
4734 int ret;
4735
4736 if (req->file->f_flags & O_NONBLOCK)
4737 req->flags |= REQ_F_NOWAIT;
4738
4739 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
4740 accept->addr_len, accept->flags,
4741 accept->nofile);
4742 if (ret == -EAGAIN && force_nonblock)
4743 return -EAGAIN;
4744 if (ret < 0) {
4745 if (ret == -ERESTARTSYS)
4746 ret = -EINTR;
4747 req_set_fail(req);
4748 }
4749 __io_req_complete(req, issue_flags, ret, 0);
4750 return 0;
4751}
4752
4753static int io_connect_prep_async(struct io_kiocb *req)
4754{
4755 struct io_async_connect *io = req->async_data;
4756 struct io_connect *conn = &req->connect;
4757
4758 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4759}
4760
4761static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4762{
4763 struct io_connect *conn = &req->connect;
4764
4765 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4766 return -EINVAL;
4767 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4768 return -EINVAL;
4769
4770 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4771 conn->addr_len = READ_ONCE(sqe->addr2);
4772 return 0;
4773}
4774
4775static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
4776{
4777 struct io_async_connect __io, *io;
4778 unsigned file_flags;
4779 int ret;
4780 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4781
4782 if (req->async_data) {
4783 io = req->async_data;
4784 } else {
4785 ret = move_addr_to_kernel(req->connect.addr,
4786 req->connect.addr_len,
4787 &__io.address);
4788 if (ret)
4789 goto out;
4790 io = &__io;
4791 }
4792
4793 file_flags = force_nonblock ? O_NONBLOCK : 0;
4794
4795 ret = __sys_connect_file(req->file, &io->address,
4796 req->connect.addr_len, file_flags);
4797 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
4798 if (req->async_data)
4799 return -EAGAIN;
4800 if (io_alloc_async_data(req)) {
4801 ret = -ENOMEM;
4802 goto out;
4803 }
4804 memcpy(req->async_data, &__io, sizeof(__io));
4805 return -EAGAIN;
4806 }
4807 if (ret == -ERESTARTSYS)
4808 ret = -EINTR;
4809out:
4810 if (ret < 0)
4811 req_set_fail(req);
4812 __io_req_complete(req, issue_flags, ret, 0);
4813 return 0;
4814}
4815#else
4816#define IO_NETOP_FN(op) \
4817static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4818{ \
4819 return -EOPNOTSUPP; \
4820}
4821
4822#define IO_NETOP_PREP(op) \
4823IO_NETOP_FN(op) \
4824static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4825{ \
4826 return -EOPNOTSUPP; \
4827} \
4828
4829#define IO_NETOP_PREP_ASYNC(op) \
4830IO_NETOP_PREP(op) \
4831static int io_##op##_prep_async(struct io_kiocb *req) \
4832{ \
4833 return -EOPNOTSUPP; \
4834}
4835
4836IO_NETOP_PREP_ASYNC(sendmsg);
4837IO_NETOP_PREP_ASYNC(recvmsg);
4838IO_NETOP_PREP_ASYNC(connect);
4839IO_NETOP_PREP(accept);
4840IO_NETOP_FN(send);
4841IO_NETOP_FN(recv);
4842#endif
4843
4844struct io_poll_table {
4845 struct poll_table_struct pt;
4846 struct io_kiocb *req;
4847 int nr_entries;
4848 int error;
4849};
4850
4851static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4852 __poll_t mask, io_req_tw_func_t func)
4853{
4854 /* for instances that support it, check for an event match first */
4855 if (mask && !(mask & poll->events))
4856 return 0;
4857
4858 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4859
4860 list_del_init(&poll->wait.entry);
4861
4862 req->result = mask;
4863 req->io_task_work.func = func;
4864
4865 /*
4866 * If this fails, the task is exiting. When a task exits, its task_work
4867 * gets canceled, so just cancel this request as well instead of
4868 * executing it. We can't safely execute it anyway, as we may not have
4869 * the necessary access to the workqueue.
4870 */
4871 io_req_task_work_add(req);
4872 return 1;
4873}
4874
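/*
 * Descriptive note (added): re-check poll state from task_work context. If
 * the request hasn't been canceled and no events are pending, re-add the wait
 * queue entry and return true (still waiting). Returns with ->completion_lock
 * held either way.
 */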
4875static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4876 __acquires(&req->ctx->completion_lock)
4877{
4878 struct io_ring_ctx *ctx = req->ctx;
4879
4880 if (unlikely(req->task->flags & PF_EXITING))
4881 WRITE_ONCE(poll->canceled, true);
4882
4883 if (!req->result && !READ_ONCE(poll->canceled)) {
4884 struct poll_table_struct pt = { ._key = poll->events };
4885
4886 req->result = vfs_poll(req->file, &pt) & poll->events;
4887 }
4888
4889 spin_lock_irq(&ctx->completion_lock);
4890 if (!req->result && !READ_ONCE(poll->canceled)) {
4891 add_wait_queue(poll->head, &poll->wait);
4892 return true;
4893 }
4894
4895 return false;
4896}
4897
4898static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
4899{
4900 /* pure poll stashes the double entry in ->async_data, poll driven retry uses ->apoll */
4901 if (req->opcode == IORING_OP_POLL_ADD)
4902 return req->async_data;
4903 return req->apoll->double_poll;
4904}
4905
4906static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4907{
4908 if (req->opcode == IORING_OP_POLL_ADD)
4909 return &req->poll;
4910 return &req->apoll->poll;
4911}
4912
4913static void io_poll_remove_double(struct io_kiocb *req)
4914 __must_hold(&req->ctx->completion_lock)
4915{
4916 struct io_poll_iocb *poll = io_poll_get_double(req);
4917
4918 lockdep_assert_held(&req->ctx->completion_lock);
4919
4920 if (poll && poll->head) {
4921 struct wait_queue_head *head = poll->head;
4922
4923 spin_lock(&head->lock);
4924 list_del_init(&poll->wait.entry);
4925 if (poll->wait.private)
4926 req_ref_put(req);
4927 poll->head = NULL;
4928 spin_unlock(&head->lock);
4929 }
4930}
4931
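/*
 * Descriptive note (added): post a CQE for a poll request. Multishot polls
 * keep IORING_CQE_F_MORE set unless the request was canceled, flagged
 * EPOLLONESHOT, or the CQE could not be posted. Returns true when no further
 * events will be delivered for this request.
 */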
4932static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
4933 __must_hold(&req->ctx->completion_lock)
4934{
4935 struct io_ring_ctx *ctx = req->ctx;
4936 unsigned flags = IORING_CQE_F_MORE;
4937 int error;
4938
4939 if (READ_ONCE(req->poll.canceled)) {
4940 error = -ECANCELED;
4941 req->poll.events |= EPOLLONESHOT;
4942 } else {
4943 error = mangle_poll(mask);
4944 }
4945 if (req->poll.events & EPOLLONESHOT)
4946 flags = 0;
4947 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
4948 req->poll.done = true;
4949 flags = 0;
4950 }
4951 if (flags & IORING_CQE_F_MORE)
4952 ctx->cq_extra++;
4953
4954 io_commit_cqring(ctx);
4955 return !(flags & IORING_CQE_F_MORE);
4956}
4957
4958static void io_poll_task_func(struct io_kiocb *req)
4959{
4960 struct io_ring_ctx *ctx = req->ctx;
4961 struct io_kiocb *nxt;
4962
4963 if (io_poll_rewait(req, &req->poll)) {
4964 spin_unlock_irq(&ctx->completion_lock);
4965 } else {
4966 bool done;
4967
4968 done = io_poll_complete(req, req->result);
4969 if (done) {
4970 io_poll_remove_double(req);
4971 hash_del(&req->hash_node);
4972 } else {
4973 req->result = 0;
4974 add_wait_queue(req->poll.head, &req->poll.wait);
4975 }
4976 spin_unlock_irq(&ctx->completion_lock);
4977 io_cqring_ev_posted(ctx);
4978
4979 if (done) {
4980 nxt = io_put_req_find_next(req);
4981 if (nxt)
4982 io_req_task_submit(nxt);
4983 }
4984 }
4985}
4986
4987static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4988 int sync, void *key)
4989{
4990 struct io_kiocb *req = wait->private;
4991 struct io_poll_iocb *poll = io_poll_get_single(req);
4992 __poll_t mask = key_to_poll(key);
4993
4994 /* for instances that support it, check for an event match first */
4995 if (mask && !(mask & poll->events))
4996 return 0;
4997 if (!(poll->events & EPOLLONESHOT))
4998 return poll->wait.func(&poll->wait, mode, sync, key);
4999
5000 list_del_init(&wait->entry);
5001
5002 if (poll->head) {
5003 bool done;
5004
5005 spin_lock(&poll->head->lock);
5006 done = list_empty(&poll->wait.entry);
5007 if (!done)
5008 list_del_init(&poll->wait.entry);
5009 /* make sure a double remove sees this entry as already gone */
5010 wait->private = NULL;
5011 spin_unlock(&poll->head->lock);
5012 if (!done) {
5013 /* use the registered wake function, so it matches the request type */
5014 poll->wait.func(&poll->wait, mode, sync, key);
5015 }
5016 }
5017 req_ref_put(req);
5018 return 1;
5019}
5020
5021static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5022 wait_queue_func_t wake_func)
5023{
5024 poll->head = NULL;
5025 poll->done = false;
5026 poll->canceled = false;
5027#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5028 /* mask in events that we always want/need */
5029 poll->events = events | IO_POLL_UNMASK;
5030 INIT_LIST_HEAD(&poll->wait.entry);
5031 init_waitqueue_func_entry(&poll->wait, wake_func);
5032}
5033
5034static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
5035 struct wait_queue_head *head,
5036 struct io_poll_iocb **poll_ptr)
5037{
5038 struct io_kiocb *req = pt->req;
5039
5040 /*
5041 * The file being polled uses multiple waitqueues for poll handling
5042 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5043 * if this happens.
5044 */
5045 if (unlikely(pt->nr_entries)) {
5046 struct io_poll_iocb *poll_one = poll;
5047
5048 /* already have a second entry, fail a third attempt */
5049 if (*poll_ptr) {
5050 pt->error = -EINVAL;
5051 return;
5052 }
5053 /*
5054 * Can't handle multishot on the second (double) wait queue for
5055 * now, turn it into one-shot mode.
5056 */
5057 if (!(poll_one->events & EPOLLONESHOT))
5058 poll_one->events |= EPOLLONESHOT;
5059 /* double add on the same waitqueue head, ignore */
5060 if (poll_one->head == head)
5061 return;
5062 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5063 if (!poll) {
5064 pt->error = -ENOMEM;
5065 return;
5066 }
5067 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
5068 req_ref_get(req);
5069 poll->wait.private = req;
5070 *poll_ptr = poll;
5071 }
5072
5073 pt->nr_entries++;
5074 poll->head = head;
5075
5076 if (poll->events & EPOLLEXCLUSIVE)
5077 add_wait_queue_exclusive(head, &poll->wait);
5078 else
5079 add_wait_queue(head, &poll->wait);
5080}
5081
5082static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5083 struct poll_table_struct *p)
5084{
5085 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5086 struct async_poll *apoll = pt->req->apoll;
5087
5088 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
5089}
5090
5091static void io_async_task_func(struct io_kiocb *req)
5092{
5093 struct async_poll *apoll = req->apoll;
5094 struct io_ring_ctx *ctx = req->ctx;
5095
5096 trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
5097
5098 if (io_poll_rewait(req, &apoll->poll)) {
5099 spin_unlock_irq(&ctx->completion_lock);
5100 return;
5101 }
5102
5103 hash_del(&req->hash_node);
5104 io_poll_remove_double(req);
5105 spin_unlock_irq(&ctx->completion_lock);
5106
5107 if (!READ_ONCE(apoll->poll.canceled))
5108 io_req_task_submit(req);
5109 else
5110 io_req_complete_failed(req, -ECANCELED);
5111}
5112
5113static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5114 void *key)
5115{
5116 struct io_kiocb *req = wait->private;
5117 struct io_poll_iocb *poll = &req->apoll->poll;
5118
5119 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5120 key_to_poll(key));
5121
5122 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5123}
5124
5125static void io_poll_req_insert(struct io_kiocb *req)
5126{
5127 struct io_ring_ctx *ctx = req->ctx;
5128 struct hlist_head *list;
5129
5130 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5131 hlist_add_head(&req->hash_node, list);
5132}
5133
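/*
 * Descriptive note (added): common helper that arms a poll handler for a
 * request. It initialises the io_poll_iocb, registers on the file's
 * waitqueue(s) through vfs_poll() and the queue proc, and returns any mask
 * that is already pending. Returns with ->completion_lock held.
 */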
5134static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5135 struct io_poll_iocb *poll,
5136 struct io_poll_table *ipt, __poll_t mask,
5137 wait_queue_func_t wake_func)
5138 __acquires(&ctx->completion_lock)
5139{
5140 struct io_ring_ctx *ctx = req->ctx;
5141 bool cancel = false;
5142
5143 INIT_HLIST_NODE(&req->hash_node);
5144 io_init_poll_iocb(poll, mask, wake_func);
5145 poll->file = req->file;
5146 poll->wait.private = req;
5147
5148 ipt->pt._key = mask;
5149 ipt->req = req;
5150 ipt->error = 0;
5151 ipt->nr_entries = 0;
5152
5153 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5154 if (unlikely(!ipt->nr_entries) && !ipt->error)
5155 ipt->error = -EINVAL;
5156
5157 spin_lock_irq(&ctx->completion_lock);
5158 if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
5159 io_poll_remove_double(req);
5160 if (likely(poll->head)) {
5161 spin_lock(&poll->head->lock);
5162 if (unlikely(list_empty(&poll->wait.entry))) {
5163 if (ipt->error)
5164 cancel = true;
5165 ipt->error = 0;
5166 mask = 0;
5167 }
5168 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
5169 list_del_init(&poll->wait.entry);
5170 else if (cancel)
5171 WRITE_ONCE(poll->canceled, true);
5172 else if (!poll->done)
5173 io_poll_req_insert(req);
5174 spin_unlock(&poll->head->lock);
5175 }
5176
5177 return mask;
5178}
5179
5180enum {
5181 IO_APOLL_OK,
5182 IO_APOLL_ABORTED,
5183 IO_APOLL_READY
5184};
5185
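/*
 * Descriptive note (added): try to arm poll-driven async retry instead of
 * punting the request to io-wq. Only pollable files with a pollin/pollout
 * opcode qualify. Returns IO_APOLL_OK if the poll was armed, IO_APOLL_READY
 * if events already triggered, or IO_APOLL_ABORTED if the caller should fall
 * back to io-wq.
 */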
5186static int io_arm_poll_handler(struct io_kiocb *req)
5187{
5188 const struct io_op_def *def = &io_op_defs[req->opcode];
5189 struct io_ring_ctx *ctx = req->ctx;
5190 struct async_poll *apoll;
5191 struct io_poll_table ipt;
5192 __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
5193 int rw;
5194
5195 if (!req->file || !file_can_poll(req->file))
5196 return IO_APOLL_ABORTED;
5197 if (req->flags & REQ_F_POLLED)
5198 return IO_APOLL_ABORTED;
5199 if (!def->pollin && !def->pollout)
5200 return IO_APOLL_ABORTED;
5201
5202 if (def->pollin) {
5203 rw = READ;
5204 mask |= POLLIN | POLLRDNORM;
5205
5206 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5207 if ((req->opcode == IORING_OP_RECVMSG) &&
5208 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5209 mask &= ~POLLIN;
5210 } else {
5211 rw = WRITE;
5212 mask |= POLLOUT | POLLWRNORM;
5213 }
5214
5215 /* if we can't nonblock try, then no point in arming a poll handler */
5216 if (!io_file_supports_async(req, rw))
5217 return IO_APOLL_ABORTED;
5218
5219 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5220 if (unlikely(!apoll))
5221 return IO_APOLL_ABORTED;
5222 apoll->double_poll = NULL;
5223 req->apoll = apoll;
5224 req->flags |= REQ_F_POLLED;
5225 ipt.pt._qproc = io_async_queue_proc;
5226
5227 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5228 io_async_wake);
5229 if (ret || ipt.error) {
5230 spin_unlock_irq(&ctx->completion_lock);
5231 if (ret)
5232 return IO_APOLL_READY;
5233 return IO_APOLL_ABORTED;
5234 }
5235 spin_unlock_irq(&ctx->completion_lock);
5236 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5237 mask, apoll->poll.events);
5238 return IO_APOLL_OK;
5239}
5240
5241static bool __io_poll_remove_one(struct io_kiocb *req,
5242 struct io_poll_iocb *poll, bool do_cancel)
5243 __must_hold(&req->ctx->completion_lock)
5244{
5245 bool do_complete = false;
5246
5247 if (!poll->head)
5248 return false;
5249 spin_lock(&poll->head->lock);
5250 if (do_cancel)
5251 WRITE_ONCE(poll->canceled, true);
5252 if (!list_empty(&poll->wait.entry)) {
5253 list_del_init(&poll->wait.entry);
5254 do_complete = true;
5255 }
5256 spin_unlock(&poll->head->lock);
5257 hash_del(&req->hash_node);
5258 return do_complete;
5259}
5260
5261static bool io_poll_remove_waitqs(struct io_kiocb *req)
5262 __must_hold(&req->ctx->completion_lock)
5263{
5264 bool do_complete;
5265
5266 io_poll_remove_double(req);
5267 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
5268
5269 if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
5270 /* non-poll requests still hold the submit reference */
5271 req_ref_put(req);
5272 }
5273 return do_complete;
5274}
5275
5276static bool io_poll_remove_one(struct io_kiocb *req)
5277 __must_hold(&req->ctx->completion_lock)
5278{
5279 bool do_complete;
5280
5281 do_complete = io_poll_remove_waitqs(req);
5282 if (do_complete) {
5283 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
5284 io_commit_cqring(req->ctx);
5285 req_set_fail(req);
5286 io_put_req_deferred(req, 1);
5287 }
5288
5289 return do_complete;
5290}
5291
5292/*
5293 * Returns true if we found and killed one or more poll requests
5294 */
5295static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5296 bool cancel_all)
5297{
5298 struct hlist_node *tmp;
5299 struct io_kiocb *req;
5300 int posted = 0, i;
5301
5302 spin_lock_irq(&ctx->completion_lock);
5303 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5304 struct hlist_head *list;
5305
5306 list = &ctx->cancel_hash[i];
5307 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5308 if (io_match_task(req, tsk, cancel_all))
5309 posted += io_poll_remove_one(req);
5310 }
5311 }
5312 spin_unlock_irq(&ctx->completion_lock);
5313
5314 if (posted)
5315 io_cqring_ev_posted(ctx);
5316
5317 return posted != 0;
5318}
5319
5320static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5321 bool poll_only)
5322 __must_hold(&ctx->completion_lock)
5323{
5324 struct hlist_head *list;
5325 struct io_kiocb *req;
5326
5327 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5328 hlist_for_each_entry(req, list, hash_node) {
5329 if (sqe_addr != req->user_data)
5330 continue;
5331 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5332 continue;
5333 return req;
5334 }
5335 return NULL;
5336}
5337
5338static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5339 bool poll_only)
5340 __must_hold(&ctx->completion_lock)
5341{
5342 struct io_kiocb *req;
5343
5344 req = io_poll_find(ctx, sqe_addr, poll_only);
5345 if (!req)
5346 return -ENOENT;
5347 if (io_poll_remove_one(req))
5348 return 0;
5349
5350 return -EALREADY;
5351}
5352
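/*
 * Descriptive note (added): translate the poll mask from the sqe into kernel
 * EPOLL* bits, fixing up endianness and forcing EPOLLONESHOT unless multishot
 * was requested.
 */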
5353static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5354 unsigned int flags)
5355{
5356 u32 events;
5357
5358 events = READ_ONCE(sqe->poll32_events);
5359#ifdef __BIG_ENDIAN
5360 events = swahw32(events);
5361#endif
5362 if (!(flags & IORING_POLL_ADD_MULTI))
5363 events |= EPOLLONESHOT;
5364 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5365}
5366
5367static int io_poll_update_prep(struct io_kiocb *req,
5368 const struct io_uring_sqe *sqe)
5369{
5370 struct io_poll_update *upd = &req->poll_update;
5371 u32 flags;
5372
5373 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5374 return -EINVAL;
5375 if (sqe->ioprio || sqe->buf_index)
5376 return -EINVAL;
5377 flags = READ_ONCE(sqe->len);
5378 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5379 IORING_POLL_ADD_MULTI))
5380 return -EINVAL;
5381
5382 if (flags == IORING_POLL_ADD_MULTI)
5383 return -EINVAL;
5384
5385 upd->old_user_data = READ_ONCE(sqe->addr);
5386 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5387 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
5388
5389 upd->new_user_data = READ_ONCE(sqe->off);
5390 if (!upd->update_user_data && upd->new_user_data)
5391 return -EINVAL;
5392 if (upd->update_events)
5393 upd->events = io_poll_parse_events(sqe, flags);
5394 else if (sqe->poll32_events)
5395 return -EINVAL;
5396
5397 return 0;
5398}
5399
5400static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5401 void *key)
5402{
5403 struct io_kiocb *req = wait->private;
5404 struct io_poll_iocb *poll = &req->poll;
5405
5406 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
5407}
5408
5409static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5410 struct poll_table_struct *p)
5411{
5412 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5413
5414 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
5415}
5416
5417static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5418{
5419 struct io_poll_iocb *poll = &req->poll;
5420 u32 flags;
5421
5422 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5423 return -EINVAL;
5424 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
5425 return -EINVAL;
5426 flags = READ_ONCE(sqe->len);
5427 if (flags & ~IORING_POLL_ADD_MULTI)
5428 return -EINVAL;
5429
5430 poll->events = io_poll_parse_events(sqe, flags);
5431 return 0;
5432}
5433
5434static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
5435{
5436 struct io_poll_iocb *poll = &req->poll;
5437 struct io_ring_ctx *ctx = req->ctx;
5438 struct io_poll_table ipt;
5439 __poll_t mask;
5440
5441 ipt.pt._qproc = io_poll_queue_proc;
5442
5443 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5444 io_poll_wake);
5445
5446 if (mask) {
5447 ipt.error = 0;
5448 io_poll_complete(req, mask);
5449 }
5450 spin_unlock_irq(&ctx->completion_lock);
5451
5452 if (mask) {
5453 io_cqring_ev_posted(ctx);
5454 if (poll->events & EPOLLONESHOT)
5455 io_put_req(req);
5456 }
5457 return ipt.error;
5458}
5459
5460static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
5461{
5462 struct io_ring_ctx *ctx = req->ctx;
5463 struct io_kiocb *preq;
5464 bool completing;
5465 int ret;
5466
5467 spin_lock_irq(&ctx->completion_lock);
5468 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
5469 if (!preq) {
5470 ret = -ENOENT;
5471 goto err;
5472 }
5473
5474 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5475 completing = true;
5476 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5477 goto err;
5478 }
5479
5480 /*
5481 * Don't allow racy completion with singleshot, as we cannot safely
5482 * update those. For multishot, if we're racing with completion, just
5483 * let completion re-add it.
5484 */
5485 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5486 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5487 ret = -EALREADY;
5488 goto err;
5489 }
5490
5491 ret = 0;
5492err:
5493 if (ret < 0) {
5494 spin_unlock_irq(&ctx->completion_lock);
5495 req_set_fail(req);
5496 io_req_complete(req, ret);
5497 return 0;
5498 }
5499
5500 if (req->poll_update.update_events) {
5501 preq->poll.events &= ~0xffff;
5502 preq->poll.events |= req->poll_update.events & 0xffff;
5503 preq->poll.events |= IO_POLL_UNMASK;
5504 }
5505 if (req->poll_update.update_user_data)
5506 preq->user_data = req->poll_update.new_user_data;
5507 spin_unlock_irq(&ctx->completion_lock);
5508
5509 /* complete the update request, we're done with it */
5510 io_req_complete(req, ret);
5511
5512 if (!completing) {
5513 ret = io_poll_add(preq, issue_flags);
5514 if (ret < 0) {
5515 req_set_fail(preq);
5516 io_req_complete(preq, ret);
5517 }
5518 }
5519 return 0;
5520}
5521
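/*
 * Descriptive note (added): hrtimer callback for IORING_OP_TIMEOUT. Removes
 * the request from the timeout list, bumps cq_timeouts, and posts a -ETIME
 * completion.
 */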
5522static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5523{
5524 struct io_timeout_data *data = container_of(timer,
5525 struct io_timeout_data, timer);
5526 struct io_kiocb *req = data->req;
5527 struct io_ring_ctx *ctx = req->ctx;
5528 unsigned long flags;
5529
5530 spin_lock_irqsave(&ctx->completion_lock, flags);
5531 list_del_init(&req->timeout.list);
5532 atomic_set(&req->ctx->cq_timeouts,
5533 atomic_read(&req->ctx->cq_timeouts) + 1);
5534
5535 io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
5536 io_commit_cqring(ctx);
5537 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5538
5539 io_cqring_ev_posted(ctx);
5540 req_set_fail(req);
5541 io_put_req(req);
5542 return HRTIMER_NORESTART;
5543}
5544
5545static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5546 __u64 user_data)
5547 __must_hold(&ctx->completion_lock)
5548{
5549 struct io_timeout_data *io;
5550 struct io_kiocb *req;
5551 bool found = false;
5552
5553 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5554 found = user_data == req->user_data;
5555 if (found)
5556 break;
5557 }
5558 if (!found)
5559 return ERR_PTR(-ENOENT);
5560
5561 io = req->async_data;
5562 if (hrtimer_try_to_cancel(&io->timer) == -1)
5563 return ERR_PTR(-EALREADY);
5564 list_del_init(&req->timeout.list);
5565 return req;
5566}
5567
5568static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5569 __must_hold(&ctx->completion_lock)
5570{
5571 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5572
5573 if (IS_ERR(req))
5574 return PTR_ERR(req);
5575
5576 req_set_fail(req);
5577 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
5578 io_put_req_deferred(req, 1);
5579 return 0;
5580}
5581
5582static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5583 struct timespec64 *ts, enum hrtimer_mode mode)
5584 __must_hold(&ctx->completion_lock)
5585{
5586 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5587 struct io_timeout_data *data;
5588
5589 if (IS_ERR(req))
5590 return PTR_ERR(req);
5591
5592 req->timeout.off = 0;
5593 data = req->async_data;
5594 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5595 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5596 data->timer.function = io_timeout_fn;
5597 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5598 return 0;
5599}
5600
5601static int io_timeout_remove_prep(struct io_kiocb *req,
5602 const struct io_uring_sqe *sqe)
5603{
5604 struct io_timeout_rem *tr = &req->timeout_rem;
5605
5606 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5607 return -EINVAL;
5608 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5609 return -EINVAL;
5610 if (sqe->ioprio || sqe->buf_index || sqe->len)
5611 return -EINVAL;
5612
5613 tr->addr = READ_ONCE(sqe->addr);
5614 tr->flags = READ_ONCE(sqe->timeout_flags);
5615 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5616 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5617 return -EINVAL;
5618 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5619 return -EFAULT;
5620 } else if (tr->flags) {
5621 /* timeout removal doesn't support flags */
5622 return -EINVAL;
5623 }
5624
5625 return 0;
5626}
5627
5628static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5629{
5630 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5631 : HRTIMER_MODE_REL;
5632}
5633
5634/*
5635 * Remove or update an existing timeout command
5636 */
5637static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
5638{
5639 struct io_timeout_rem *tr = &req->timeout_rem;
5640 struct io_ring_ctx *ctx = req->ctx;
5641 int ret;
5642
5643 spin_lock_irq(&ctx->completion_lock);
5644 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
5645 ret = io_timeout_cancel(ctx, tr->addr);
5646 else
5647 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5648 io_translate_timeout_mode(tr->flags));
5649
5650 io_cqring_fill_event(ctx, req->user_data, ret, 0);
5651 io_commit_cqring(ctx);
5652 spin_unlock_irq(&ctx->completion_lock);
5653 io_cqring_ev_posted(ctx);
5654 if (ret < 0)
5655 req_set_fail(req);
5656 io_put_req(req);
5657 return 0;
5658}
5659
5660static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5661 bool is_timeout_link)
5662{
5663 struct io_timeout_data *data;
5664 unsigned flags;
5665 u32 off = READ_ONCE(sqe->off);
5666
5667 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5668 return -EINVAL;
5669 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
5670 return -EINVAL;
5671 if (off && is_timeout_link)
5672 return -EINVAL;
5673 flags = READ_ONCE(sqe->timeout_flags);
5674 if (flags & ~IORING_TIMEOUT_ABS)
5675 return -EINVAL;
5676
5677 req->timeout.off = off;
5678 if (unlikely(off && !req->ctx->off_timeout_used))
5679 req->ctx->off_timeout_used = true;
5680
5681 if (!req->async_data && io_alloc_async_data(req))
5682 return -ENOMEM;
5683
5684 data = req->async_data;
5685 data->req = req;
5686
5687 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5688 return -EFAULT;
5689
5690 data->mode = io_translate_timeout_mode(flags);
5691 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5692 if (is_timeout_link)
5693 io_req_track_inflight(req);
5694 return 0;
5695}
5696
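/*
 * Descriptive note (added): issue IORING_OP_TIMEOUT. Compute the target CQ
 * sequence from sqe->off and insert the request into the sorted timeout list
 * before starting the hrtimer; pure timeouts with no sequence are appended at
 * the tail.
 */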
5697static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
5698{
5699 struct io_ring_ctx *ctx = req->ctx;
5700 struct io_timeout_data *data = req->async_data;
5701 struct list_head *entry;
5702 u32 tail, off = req->timeout.off;
5703
5704 spin_lock_irq(&ctx->completion_lock);
5705
5706 /*
5707 * sqe->off holds how many CQ events must occur before this timeout
5708 * fires. If it isn't set, this is a pure timeout request and the
5709 * sequence isn't used.
5710 */
5711 if (io_is_timeout_noseq(req)) {
5712 entry = ctx->timeout_list.prev;
5713 goto add;
5714 }
5715
5716 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5717 req->timeout.target_seq = tail + off;
5718
5719 /* Update the last flush seq here in case io_flush_timeouts() hasn't.
5720 * This is safe because ->completion_lock is held, and submissions
5721 * and completions are never mixed in the same locked section.
5722 */
5723 ctx->cq_last_tm_flush = tail;
5724
5725 /*
5726 * Insertion sort, ensuring the first entry in the list is always
5727 * the one we need first.
5728 */
5729 list_for_each_prev(entry, &ctx->timeout_list) {
5730 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5731 timeout.list);
5732
5733 if (io_is_timeout_noseq(nxt))
5734 continue;
5735
5736 if (off >= nxt->timeout.target_seq - tail)
5737 break;
5738 }
5739add:
5740 list_add(&req->timeout.list, entry);
5741 data->timer.function = io_timeout_fn;
5742 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5743 spin_unlock_irq(&ctx->completion_lock);
5744 return 0;
5745}
5746
5747struct io_cancel_data {
5748 struct io_ring_ctx *ctx;
5749 u64 user_data;
5750};
5751
5752static bool io_cancel_cb(struct io_wq_work *work, void *data)
5753{
5754 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5755 struct io_cancel_data *cd = data;
5756
5757 return req->ctx == cd->ctx && req->user_data == cd->user_data;
5758}
5759
5760static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5761 struct io_ring_ctx *ctx)
5762{
5763 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
5764 enum io_wq_cancel cancel_ret;
5765 int ret = 0;
5766
5767 if (!tctx || !tctx->io_wq)
5768 return -ENOENT;
5769
5770 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
5771 switch (cancel_ret) {
5772 case IO_WQ_CANCEL_OK:
5773 ret = 0;
5774 break;
5775 case IO_WQ_CANCEL_RUNNING:
5776 ret = -EALREADY;
5777 break;
5778 case IO_WQ_CANCEL_NOTFOUND:
5779 ret = -ENOENT;
5780 break;
5781 }
5782
5783 return ret;
5784}
5785
5786static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5787 struct io_kiocb *req, __u64 sqe_addr,
5788 int success_ret)
5789{
5790 unsigned long flags;
5791 int ret;
5792
5793 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5794 spin_lock_irqsave(&ctx->completion_lock, flags);
5795 if (ret != -ENOENT)
5796 goto done;
5797 ret = io_timeout_cancel(ctx, sqe_addr);
5798 if (ret != -ENOENT)
5799 goto done;
5800 ret = io_poll_cancel(ctx, sqe_addr, false);
5801done:
5802 if (!ret)
5803 ret = success_ret;
5804 io_cqring_fill_event(ctx, req->user_data, ret, 0);
5805 io_commit_cqring(ctx);
5806 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5807 io_cqring_ev_posted(ctx);
5808
5809 if (ret < 0)
5810 req_set_fail(req);
5811}
5812
5813static int io_async_cancel_prep(struct io_kiocb *req,
5814 const struct io_uring_sqe *sqe)
5815{
5816 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5817 return -EINVAL;
5818 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5819 return -EINVAL;
5820 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
5821 return -EINVAL;
5822
5823 req->cancel.addr = READ_ONCE(sqe->addr);
5824 return 0;
5825}
5826
5827static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
5828{
5829 struct io_ring_ctx *ctx = req->ctx;
5830 u64 sqe_addr = req->cancel.addr;
5831 struct io_tctx_node *node;
5832 int ret;
5833
5834 /* tasks should wait for their io-wq threads, so safe w/o sync */
5835 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5836 spin_lock_irq(&ctx->completion_lock);
5837 if (ret != -ENOENT)
5838 goto done;
5839 ret = io_timeout_cancel(ctx, sqe_addr);
5840 if (ret != -ENOENT)
5841 goto done;
5842 ret = io_poll_cancel(ctx, sqe_addr, false);
5843 if (ret != -ENOENT)
5844 goto done;
5845 spin_unlock_irq(&ctx->completion_lock);
5846
5847 /* slow path, try all io-wq's */
5848 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5849 ret = -ENOENT;
5850 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5851 struct io_uring_task *tctx = node->task->io_uring;
5852
5853 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5854 if (ret != -ENOENT)
5855 break;
5856 }
5857 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5858
5859 spin_lock_irq(&ctx->completion_lock);
5860done:
5861 io_cqring_fill_event(ctx, req->user_data, ret, 0);
5862 io_commit_cqring(ctx);
5863 spin_unlock_irq(&ctx->completion_lock);
5864 io_cqring_ev_posted(ctx);
5865
5866 if (ret < 0)
5867 req_set_fail(req);
5868 io_put_req(req);
5869 return 0;
5870}
5871
5872static int io_rsrc_update_prep(struct io_kiocb *req,
5873 const struct io_uring_sqe *sqe)
5874{
5875 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5876 return -EINVAL;
5877 if (sqe->ioprio || sqe->rw_flags)
5878 return -EINVAL;
5879
5880 req->rsrc_update.offset = READ_ONCE(sqe->off);
5881 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5882 if (!req->rsrc_update.nr_args)
5883 return -EINVAL;
5884 req->rsrc_update.arg = READ_ONCE(sqe->addr);
5885 return 0;
5886}
5887
5888static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
5889{
5890 struct io_ring_ctx *ctx = req->ctx;
5891 struct io_uring_rsrc_update2 up;
5892 int ret;
5893
5894 if (issue_flags & IO_URING_F_NONBLOCK)
5895 return -EAGAIN;
5896
5897 up.offset = req->rsrc_update.offset;
5898 up.data = req->rsrc_update.arg;
5899 up.nr = 0;
5900 up.tags = 0;
5901 up.resv = 0;
5902
5903 mutex_lock(&ctx->uring_lock);
5904 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
5905 &up, req->rsrc_update.nr_args);
5906 mutex_unlock(&ctx->uring_lock);
5907
5908 if (ret < 0)
5909 req_set_fail(req);
5910 __io_req_complete(req, issue_flags, ret, 0);
5911 return 0;
5912}
5913
5914static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5915{
5916 switch (req->opcode) {
5917 case IORING_OP_NOP:
5918 return 0;
5919 case IORING_OP_READV:
5920 case IORING_OP_READ_FIXED:
5921 case IORING_OP_READ:
5922 return io_read_prep(req, sqe);
5923 case IORING_OP_WRITEV:
5924 case IORING_OP_WRITE_FIXED:
5925 case IORING_OP_WRITE:
5926 return io_write_prep(req, sqe);
5927 case IORING_OP_POLL_ADD:
5928 return io_poll_add_prep(req, sqe);
5929 case IORING_OP_POLL_REMOVE:
5930 return io_poll_update_prep(req, sqe);
5931 case IORING_OP_FSYNC:
5932 return io_fsync_prep(req, sqe);
5933 case IORING_OP_SYNC_FILE_RANGE:
5934 return io_sfr_prep(req, sqe);
5935 case IORING_OP_SENDMSG:
5936 case IORING_OP_SEND:
5937 return io_sendmsg_prep(req, sqe);
5938 case IORING_OP_RECVMSG:
5939 case IORING_OP_RECV:
5940 return io_recvmsg_prep(req, sqe);
5941 case IORING_OP_CONNECT:
5942 return io_connect_prep(req, sqe);
5943 case IORING_OP_TIMEOUT:
5944 return io_timeout_prep(req, sqe, false);
5945 case IORING_OP_TIMEOUT_REMOVE:
5946 return io_timeout_remove_prep(req, sqe);
5947 case IORING_OP_ASYNC_CANCEL:
5948 return io_async_cancel_prep(req, sqe);
5949 case IORING_OP_LINK_TIMEOUT:
5950 return io_timeout_prep(req, sqe, true);
5951 case IORING_OP_ACCEPT:
5952 return io_accept_prep(req, sqe);
5953 case IORING_OP_FALLOCATE:
5954 return io_fallocate_prep(req, sqe);
5955 case IORING_OP_OPENAT:
5956 return io_openat_prep(req, sqe);
5957 case IORING_OP_CLOSE:
5958 return io_close_prep(req, sqe);
5959 case IORING_OP_FILES_UPDATE:
5960 return io_rsrc_update_prep(req, sqe);
5961 case IORING_OP_STATX:
5962 return io_statx_prep(req, sqe);
5963 case IORING_OP_FADVISE:
5964 return io_fadvise_prep(req, sqe);
5965 case IORING_OP_MADVISE:
5966 return io_madvise_prep(req, sqe);
5967 case IORING_OP_OPENAT2:
5968 return io_openat2_prep(req, sqe);
5969 case IORING_OP_EPOLL_CTL:
5970 return io_epoll_ctl_prep(req, sqe);
5971 case IORING_OP_SPLICE:
5972 return io_splice_prep(req, sqe);
5973 case IORING_OP_PROVIDE_BUFFERS:
5974 return io_provide_buffers_prep(req, sqe);
5975 case IORING_OP_REMOVE_BUFFERS:
5976 return io_remove_buffers_prep(req, sqe);
5977 case IORING_OP_TEE:
5978 return io_tee_prep(req, sqe);
5979 case IORING_OP_SHUTDOWN:
5980 return io_shutdown_prep(req, sqe);
5981 case IORING_OP_RENAMEAT:
5982 return io_renameat_prep(req, sqe);
5983 case IORING_OP_UNLINKAT:
5984 return io_unlinkat_prep(req, sqe);
5985 }
5986
5987 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5988 req->opcode);
5989 return -EINVAL;
5990}
5991
5992static int io_req_prep_async(struct io_kiocb *req)
5993{
5994 if (!io_op_defs[req->opcode].needs_async_setup)
5995 return 0;
5996 if (WARN_ON_ONCE(req->async_data))
5997 return -EFAULT;
5998 if (io_alloc_async_data(req))
5999 return -EAGAIN;
6000
6001 switch (req->opcode) {
6002 case IORING_OP_READV:
6003 return io_rw_prep_async(req, READ);
6004 case IORING_OP_WRITEV:
6005 return io_rw_prep_async(req, WRITE);
6006 case IORING_OP_SENDMSG:
6007 return io_sendmsg_prep_async(req);
6008 case IORING_OP_RECVMSG:
6009 return io_recvmsg_prep_async(req);
6010 case IORING_OP_CONNECT:
6011 return io_connect_prep_async(req);
6012 }
6013 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6014 req->opcode);
6015 return -EFAULT;
6016}
6017
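/*
 * Descriptive note (added): work out the sequence number used for drain
 * ordering - the cached SQ head adjusted down by one for each request in this
 * link chain.
 */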
6018static u32 io_get_sequence(struct io_kiocb *req)
6019{
6020 u32 seq = req->ctx->cached_sq_head;
6021
6022
6023 io_for_each_link(req, req)
6024 seq--;
6025 return seq;
6026}
6027
6028static bool io_drain_req(struct io_kiocb *req)
6029{
6030 struct io_kiocb *pos;
6031 struct io_ring_ctx *ctx = req->ctx;
6032 struct io_defer_entry *de;
6033 int ret;
6034 u32 seq;
6035
6036 /*
6037 * If a previous submission flagged drain_next, this request must drain.
6038 * If any request linked behind this one is marked REQ_F_IO_DRAIN,
6039 * drain the head as well and flag drain_next for whatever follows the
6040 * link chain.
6041 */
6042 if (ctx->drain_next) {
6043 req->flags |= REQ_F_IO_DRAIN;
6044 ctx->drain_next = false;
6045 }
6046
6047 io_for_each_link(pos, req->link) {
6048 if (pos->flags & REQ_F_IO_DRAIN) {
6049 ctx->drain_next = true;
6050 req->flags |= REQ_F_IO_DRAIN;
6051 break;
6052 }
6053 }
6054
6055 /* Still need defer if there is pending req in defer list. */
6056 if (likely(list_empty_careful(&ctx->defer_list) &&
6057 !(req->flags & REQ_F_IO_DRAIN))) {
6058 ctx->drain_active = false;
6059 return false;
6060 }
6061
6062 seq = io_get_sequence(req);
6063
6064 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
6065 return false;
6066
6067 ret = io_req_prep_async(req);
6068 if (ret)
6069 goto fail;
6070 io_prep_async_link(req);
6071 de = kmalloc(sizeof(*de), GFP_KERNEL);
6072 if (!de) {
6073 ret = -ENOMEM;
6074fail:
6075 io_req_complete_failed(req, ret);
6076 return true;
6077 }
6078
6079 spin_lock_irq(&ctx->completion_lock);
6080 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
6081 spin_unlock_irq(&ctx->completion_lock);
6082 kfree(de);
6083 io_queue_async_work(req);
6084 return true;
6085 }
6086
6087 trace_io_uring_defer(ctx, req, req->user_data);
6088 de->req = req;
6089 de->seq = seq;
6090 list_add_tail(&de->list, &ctx->defer_list);
6091 spin_unlock_irq(&ctx->completion_lock);
6092 return true;
6093}
6094
6095static void io_clean_op(struct io_kiocb *req)
6096{
6097 if (req->flags & REQ_F_BUFFER_SELECTED) {
6098 switch (req->opcode) {
6099 case IORING_OP_READV:
6100 case IORING_OP_READ_FIXED:
6101 case IORING_OP_READ:
6102 kfree((void *)(unsigned long)req->rw.addr);
6103 break;
6104 case IORING_OP_RECVMSG:
6105 case IORING_OP_RECV:
6106 kfree(req->sr_msg.kbuf);
6107 break;
6108 }
6109 }
6110
6111 if (req->flags & REQ_F_NEED_CLEANUP) {
6112 switch (req->opcode) {
6113 case IORING_OP_READV:
6114 case IORING_OP_READ_FIXED:
6115 case IORING_OP_READ:
6116 case IORING_OP_WRITEV:
6117 case IORING_OP_WRITE_FIXED:
6118 case IORING_OP_WRITE: {
6119 struct io_async_rw *io = req->async_data;
6120
6121 kfree(io->free_iovec);
6122 break;
6123 }
6124 case IORING_OP_RECVMSG:
6125 case IORING_OP_SENDMSG: {
6126 struct io_async_msghdr *io = req->async_data;
6127
6128 kfree(io->free_iov);
6129 break;
6130 }
6131 case IORING_OP_SPLICE:
6132 case IORING_OP_TEE:
6133 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
6134 io_put_file(req->splice.file_in);
6135 break;
6136 case IORING_OP_OPENAT:
6137 case IORING_OP_OPENAT2:
6138 if (req->open.filename)
6139 putname(req->open.filename);
6140 break;
6141 case IORING_OP_RENAMEAT:
6142 putname(req->rename.oldpath);
6143 putname(req->rename.newpath);
6144 break;
6145 case IORING_OP_UNLINKAT:
6146 putname(req->unlink.filename);
6147 break;
6148 }
6149 }
6150 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6151 kfree(req->apoll->double_poll);
6152 kfree(req->apoll);
6153 req->apoll = NULL;
6154 }
6155 if (req->flags & REQ_F_INFLIGHT) {
6156 struct io_uring_task *tctx = req->task->io_uring;
6157
6158 atomic_dec(&tctx->inflight_tracked);
6159 }
6160 if (req->flags & REQ_F_CREDS)
6161 put_cred(req->creds);
6162
6163 req->flags &= ~IO_REQ_CLEAN_FLAGS;
6164}
6165
6166static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
6167{
6168 struct io_ring_ctx *ctx = req->ctx;
6169 const struct cred *creds = NULL;
6170 int ret;
6171
6172 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
6173 creds = override_creds(req->creds);
6174
6175 switch (req->opcode) {
6176 case IORING_OP_NOP:
6177 ret = io_nop(req, issue_flags);
6178 break;
6179 case IORING_OP_READV:
6180 case IORING_OP_READ_FIXED:
6181 case IORING_OP_READ:
6182 ret = io_read(req, issue_flags);
6183 break;
6184 case IORING_OP_WRITEV:
6185 case IORING_OP_WRITE_FIXED:
6186 case IORING_OP_WRITE:
6187 ret = io_write(req, issue_flags);
6188 break;
6189 case IORING_OP_FSYNC:
6190 ret = io_fsync(req, issue_flags);
6191 break;
6192 case IORING_OP_POLL_ADD:
6193 ret = io_poll_add(req, issue_flags);
6194 break;
6195 case IORING_OP_POLL_REMOVE:
6196 ret = io_poll_update(req, issue_flags);
6197 break;
6198 case IORING_OP_SYNC_FILE_RANGE:
6199 ret = io_sync_file_range(req, issue_flags);
6200 break;
6201 case IORING_OP_SENDMSG:
6202 ret = io_sendmsg(req, issue_flags);
6203 break;
6204 case IORING_OP_SEND:
6205 ret = io_send(req, issue_flags);
6206 break;
6207 case IORING_OP_RECVMSG:
6208 ret = io_recvmsg(req, issue_flags);
6209 break;
6210 case IORING_OP_RECV:
6211 ret = io_recv(req, issue_flags);
6212 break;
6213 case IORING_OP_TIMEOUT:
6214 ret = io_timeout(req, issue_flags);
6215 break;
6216 case IORING_OP_TIMEOUT_REMOVE:
6217 ret = io_timeout_remove(req, issue_flags);
6218 break;
6219 case IORING_OP_ACCEPT:
6220 ret = io_accept(req, issue_flags);
6221 break;
6222 case IORING_OP_CONNECT:
6223 ret = io_connect(req, issue_flags);
6224 break;
6225 case IORING_OP_ASYNC_CANCEL:
6226 ret = io_async_cancel(req, issue_flags);
6227 break;
6228 case IORING_OP_FALLOCATE:
6229 ret = io_fallocate(req, issue_flags);
6230 break;
6231 case IORING_OP_OPENAT:
6232 ret = io_openat(req, issue_flags);
6233 break;
6234 case IORING_OP_CLOSE:
6235 ret = io_close(req, issue_flags);
6236 break;
6237 case IORING_OP_FILES_UPDATE:
6238 ret = io_files_update(req, issue_flags);
6239 break;
6240 case IORING_OP_STATX:
6241 ret = io_statx(req, issue_flags);
6242 break;
6243 case IORING_OP_FADVISE:
6244 ret = io_fadvise(req, issue_flags);
6245 break;
6246 case IORING_OP_MADVISE:
6247 ret = io_madvise(req, issue_flags);
6248 break;
6249 case IORING_OP_OPENAT2:
6250 ret = io_openat2(req, issue_flags);
6251 break;
6252 case IORING_OP_EPOLL_CTL:
6253 ret = io_epoll_ctl(req, issue_flags);
6254 break;
6255 case IORING_OP_SPLICE:
6256 ret = io_splice(req, issue_flags);
6257 break;
6258 case IORING_OP_PROVIDE_BUFFERS:
6259 ret = io_provide_buffers(req, issue_flags);
6260 break;
6261 case IORING_OP_REMOVE_BUFFERS:
6262 ret = io_remove_buffers(req, issue_flags);
6263 break;
6264 case IORING_OP_TEE:
6265 ret = io_tee(req, issue_flags);
6266 break;
6267 case IORING_OP_SHUTDOWN:
6268 ret = io_shutdown(req, issue_flags);
6269 break;
6270 case IORING_OP_RENAMEAT:
6271 ret = io_renameat(req, issue_flags);
6272 break;
6273 case IORING_OP_UNLINKAT:
6274 ret = io_unlinkat(req, issue_flags);
6275 break;
6276 default:
6277 ret = -EINVAL;
6278 break;
6279 }
6280
6281 if (creds)
6282 revert_creds(creds);
6283 if (ret)
6284 return ret;
6285
6286 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6287 io_iopoll_req_issued(req);
6288
6289 return 0;
6290}
6291
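/*
 * Descriptive note (added): io-wq worker entry point. Arm any linked timeout,
 * then issue the request in blocking mode, retrying on -EAGAIN until it
 * completes or is canceled.
 */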
6292static void io_wq_submit_work(struct io_wq_work *work)
6293{
6294 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6295 struct io_kiocb *timeout;
6296 int ret = 0;
6297
6298 timeout = io_prep_linked_timeout(req);
6299 if (timeout)
6300 io_queue_linked_timeout(timeout);
6301
6302 if (work->flags & IO_WQ_WORK_CANCEL)
6303 ret = -ECANCELED;
6304
6305 if (!ret) {
6306 do {
6307 ret = io_issue_sqe(req, 0);
6308
6309
6310
6311
6312
6313 if (ret != -EAGAIN)
6314 break;
6315 cond_resched();
6316 } while (1);
6317 }
6318
6319 /* avoid locking problems by failing it from a clean context */
6320 if (ret) {
6321 /* io-wq is going to take one reference down */
6322 req_ref_get(req);
6323 io_req_task_queue_fail(req, ret);
6324 }
6325}
6326
6327#define FFS_ASYNC_READ 0x1UL
6328#define FFS_ASYNC_WRITE 0x2UL
6329#ifdef CONFIG_64BIT
6330#define FFS_ISREG 0x4UL
6331#else
6332#define FFS_ISREG 0x0UL
6333#endif
6334#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
6335
6336static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
6337 unsigned i)
6338{
6339 struct io_fixed_file *table_l2;
6340
6341 table_l2 = table->files[i >> IORING_FILE_TABLE_SHIFT];
6342 return &table_l2[i & IORING_FILE_TABLE_MASK];
6343}
6344
6345static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6346 int index)
6347{
6348 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
6349
6350 return (struct file *) (slot->file_ptr & FFS_MASK);
6351}
6352
6353static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
6354{
6355 unsigned long file_ptr = (unsigned long) file;
6356
6357 if (__io_file_supports_async(file, READ))
6358 file_ptr |= FFS_ASYNC_READ;
6359 if (__io_file_supports_async(file, WRITE))
6360 file_ptr |= FFS_ASYNC_WRITE;
6361 if (S_ISREG(file_inode(file)->i_mode))
6362 file_ptr |= FFS_ISREG;
6363 file_slot->file_ptr = file_ptr;
6364}
6365
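/*
 * Descriptive note (added): resolve the file for a request. Either look up a
 * registered (fixed) file by index, folding its cached async/ISREG hints into
 * req->flags, or take a normal fd reference via the submission state cache.
 * io_uring files obtained through plain fds are tracked as inflight.
 */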
6366static struct file *io_file_get(struct io_submit_state *state,
6367 struct io_kiocb *req, int fd, bool fixed)
6368{
6369 struct io_ring_ctx *ctx = req->ctx;
6370 struct file *file;
6371
6372 if (fixed) {
6373 unsigned long file_ptr;
6374
6375 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6376 return NULL;
6377 fd = array_index_nospec(fd, ctx->nr_user_files);
6378 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
6379 file = (struct file *) (file_ptr & FFS_MASK);
6380 file_ptr &= ~FFS_MASK;
6381 /* mask in overlapping REQ_F and FFS bits */
6382 req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
6383 io_req_set_rsrc_node(req);
6384 } else {
6385 trace_io_uring_file_get(ctx, fd);
6386 file = __io_file_get(state, fd);
6387
6388 /* we don't allow fixed io_uring files */
6389 if (file && unlikely(file->f_op == &io_uring_fops))
6390 io_req_track_inflight(req);
6391 }
6392
6393 return file;
6394}
6395
6396static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6397{
6398 struct io_timeout_data *data = container_of(timer,
6399 struct io_timeout_data, timer);
6400 struct io_kiocb *prev, *req = data->req;
6401 struct io_ring_ctx *ctx = req->ctx;
6402 unsigned long flags;
6403
6404 spin_lock_irqsave(&ctx->completion_lock, flags);
6405 prev = req->timeout.head;
6406 req->timeout.head = NULL;
6407
6408 /*
6409 * We don't expect the list to be empty, that will only happen if we
6410 * race with the completion of the linked work.
6411 */
6412 if (prev) {
6413 io_remove_next_linked(prev);
6414 if (!req_ref_inc_not_zero(prev))
6415 prev = NULL;
6416 }
6417 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6418
6419 if (prev) {
6420 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
6421 io_put_req_deferred(prev, 1);
6422 io_put_req_deferred(req, 1);
6423 } else {
6424 io_req_complete_post(req, -ETIME, 0);
6425 }
6426 return HRTIMER_NORESTART;
6427}
6428
6429static void io_queue_linked_timeout(struct io_kiocb *req)
6430{
6431 struct io_ring_ctx *ctx = req->ctx;
6432
6433 spin_lock_irq(&ctx->completion_lock);
6434 /*
6435 * If the back reference is NULL, then our linked request finished
6436 * before we got a chance to set up the timer.
6437 */
6438 if (req->timeout.head) {
6439 struct io_timeout_data *data = req->async_data;
6440
6441 data->timer.function = io_link_timeout_fn;
6442 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6443 data->mode);
6444 }
6445 spin_unlock_irq(&ctx->completion_lock);
6446 /* drop submission reference */
6447 io_put_req(req);
6448}
6449
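/*
 * Descriptive note (added): if the next request in the link is a
 * LINK_TIMEOUT, mark it active and return it so the caller can start the
 * timer once this request has been issued.
 */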
6450static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
6451{
6452 struct io_kiocb *nxt = req->link;
6453
6454 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6455 nxt->opcode != IORING_OP_LINK_TIMEOUT)
6456 return NULL;
6457
6458 nxt->timeout.head = req;
6459 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
6460 req->flags |= REQ_F_LINK_TIMEOUT;
6461 return nxt;
6462}
6463
6464static void __io_queue_sqe(struct io_kiocb *req)
6465{
6466 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
6467 int ret;
6468
6469issue_sqe:
6470 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
6471
6472 /*
6473 * We async punt it if the file wasn't marked NOWAIT, or if the file
6474 * doesn't support non-blocking read/write attempts.
6475 */
6476 if (likely(!ret)) {
6477 /* drop submission reference */
6478 if (req->flags & REQ_F_COMPLETE_INLINE) {
6479 struct io_ring_ctx *ctx = req->ctx;
6480 struct io_comp_state *cs = &ctx->submit_state.comp;
6481
6482 cs->reqs[cs->nr++] = req;
6483 if (cs->nr == ARRAY_SIZE(cs->reqs))
6484 io_submit_flush_completions(ctx);
6485 } else {
6486 io_put_req(req);
6487 }
6488 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
6489 switch (io_arm_poll_handler(req)) {
6490 case IO_APOLL_READY:
6491 goto issue_sqe;
6492 case IO_APOLL_ABORTED:
6493 /*
6494 * Poll arming wasn't possible (or raced); fall back to punting
6495 * the request to io-wq for blocking execution.
6496 */
6497 io_queue_async_work(req);
6498 break;
6499 }
6500 } else {
6501 io_req_complete_failed(req, ret);
6502 }
6503 if (linked_timeout)
6504 io_queue_linked_timeout(linked_timeout);
6505}
6506
6507static inline void io_queue_sqe(struct io_kiocb *req)
6508{
6509 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
6510 return;
6511
6512 if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
6513 __io_queue_sqe(req);
6514 } else {
6515 int ret = io_req_prep_async(req);
6516
6517 if (unlikely(ret))
6518 io_req_complete_failed(req, ret);
6519 else
6520 io_queue_async_work(req);
6521 }
6522}
6523
6524/*
6525 * Check SQE restrictions (opcode and flags).
6526 *
6527 * Returns 'true' if SQE is allowed, 'false' otherwise.
6528 */
6529static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6530 struct io_kiocb *req,
6531 unsigned int sqe_flags)
6532{
6533 if (likely(!ctx->restricted))
6534 return true;
6535
6536 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6537 return false;
6538
6539 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6540 ctx->restrictions.sqe_flags_required)
6541 return false;
6542
6543 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6544 ctx->restrictions.sqe_flags_required))
6545 return false;
6546
6547 return true;
6548}
6549
6550static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6551 const struct io_uring_sqe *sqe)
6552{
6553 struct io_submit_state *state;
6554 unsigned int sqe_flags;
6555 int personality, ret = 0;
6556
6557 req->opcode = READ_ONCE(sqe->opcode);
6558 /* same numerical values as the corresponding REQ_F_* bits, safe to copy */
6559 req->flags = sqe_flags = READ_ONCE(sqe->flags);
6560 req->user_data = READ_ONCE(sqe->user_data);
6561 req->file = NULL;
6562 req->fixed_rsrc_refs = NULL;
6563
6564 atomic_set(&req->refs, 2);
6565 req->task = current;
6566
6567 /* enforce forwards compatibility on users */
6568 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6569 return -EINVAL;
6570 if (unlikely(req->opcode >= IORING_OP_LAST))
6571 return -EINVAL;
6572 if (!io_check_restriction(ctx, req, sqe_flags))
6573 return -EACCES;
6574
6575 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6576 !io_op_defs[req->opcode].buffer_select)
6577 return -EOPNOTSUPP;
6578 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
6579 ctx->drain_active = true;
6580
6581 personality = READ_ONCE(sqe->personality);
6582 if (personality) {
6583 req->creds = xa_load(&ctx->personalities, personality);
6584 if (!req->creds)
6585 return -EINVAL;
6586 get_cred(req->creds);
6587 req->flags |= REQ_F_CREDS;
6588 }
6589 state = &ctx->submit_state;
6590
6591 /*
6592 * Plug now if we have more than 1 IO left after this, and the target
6593 * is potentially a read/write to block based storage.
6594 */
6595 if (!state->plug_started && state->ios_left > 1 &&
6596 io_op_defs[req->opcode].plug) {
6597 blk_start_plug(&state->plug);
6598 state->plug_started = true;
6599 }
6600
6601 if (io_op_defs[req->opcode].needs_file) {
6602 bool fixed = req->flags & REQ_F_FIXED_FILE;
6603
6604 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
6605 if (unlikely(!req->file))
6606 ret = -EBADF;
6607 }
6608
6609 state->ios_left--;
6610 return ret;
6611}
6612
6613static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
6614 const struct io_uring_sqe *sqe)
6615{
6616 struct io_submit_link *link = &ctx->submit_state.link;
6617 int ret;
6618
6619 ret = io_init_req(ctx, req, sqe);
6620 if (unlikely(ret)) {
6621fail_req:
6622 if (link->head) {
6623 /* fail even hard links since we don't submit */
6624 req_set_fail(link->head);
6625 io_req_complete_failed(link->head, -ECANCELED);
6626 link->head = NULL;
6627 }
6628 io_req_complete_failed(req, ret);
6629 return ret;
6630 }
6631
6632 ret = io_req_prep(req, sqe);
6633 if (unlikely(ret))
6634 goto fail_req;
6635
6636
6637 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
6638 req->flags, true,
6639 ctx->flags & IORING_SETUP_SQPOLL);
6640
6641 /*
6642 * If we already have a head request, queue this one for async
6643 * submittal once the head completes. If we don't have a head but
6644 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6645 * submitted sync once the chain is complete. If none of those
6646 * conditions are true (normal request), then just queue it.
6647 */
6648 if (link->head) {
6649 struct io_kiocb *head = link->head;
6650
6651 ret = io_req_prep_async(req);
6652 if (unlikely(ret))
6653 goto fail_req;
6654 trace_io_uring_link(ctx, req, head);
6655 link->last->link = req;
6656 link->last = req;
6657
6658 /* last request of a link, enqueue the link */
6659 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
6660 link->head = NULL;
6661 io_queue_sqe(head);
6662 }
6663 } else {
6664 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
6665 link->head = req;
6666 link->last = req;
6667 } else {
6668 io_queue_sqe(req);
6669 }
6670 }
6671
6672 return 0;
6673}
6674
6675/*
6676 * Batched submission is done, ensure local IO is flushed out.
6677 */
6678static void io_submit_state_end(struct io_submit_state *state,
6679 struct io_ring_ctx *ctx)
6680{
6681 if (state->link.head)
6682 io_queue_sqe(state->link.head);
6683 if (state->comp.nr)
6684 io_submit_flush_completions(ctx);
6685 if (state->plug_started)
6686 blk_finish_plug(&state->plug);
6687 io_state_file_put(state);
6688}
6689
6690/*
6691 * Start submission side cache.
6692 */
6693static void io_submit_state_start(struct io_submit_state *state,
6694 unsigned int max_ios)
6695{
6696 state->plug_started = false;
6697 state->ios_left = max_ios;
6698
6699 state->link.head = NULL;
6700}
6701
6702static void io_commit_sqring(struct io_ring_ctx *ctx)
6703{
6704 struct io_rings *rings = ctx->rings;
6705
6706 /*
6707 * Ensure any loads from the SQEs are done at this point,
6708 * since once we write the new head, the application could
6709 * write new data to them.
6710 */
6711 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
6712}
6713
6714/*
6715 * Fetch an sqe, if one is available. Note this returns a pointer to memory
6716 * that is mapped by userspace. This means that care needs to be taken to
6717 * ensure that reads are stable, as we cannot rely on userspace always
6718 * being a good citizen. If members of the sqe are validated and then later
6719 * used, it's important that those reads are done through READ_ONCE() to
6720 * prevent a re-load down the line.
6721 */
6722static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
6723{
6724 unsigned head, mask = ctx->sq_entries - 1;
6725 unsigned sq_idx = ctx->cached_sq_head++ & mask;
6726
6727 /*
6728 * The cached sq head (or cq tail) serves two purposes:
6729 *
6730 * 1) allows us to batch the cost of updating the user visible
6731 *    head updates.
6732 * 2) allows the kernel side to track the head on its own, even
6733 *    though the application is the one updating it.
6734 */
6735 head = READ_ONCE(ctx->sq_array[sq_idx]);
6736 if (likely(head < ctx->sq_entries))
6737 return &ctx->sq_sqes[head];
6738
6739 /* drop invalid entries */
6740 ctx->cq_extra--;
6741 WRITE_ONCE(ctx->rings->sq_dropped,
6742 READ_ONCE(ctx->rings->sq_dropped) + 1);
6743 return NULL;
6744}
6745
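/*
 * Descriptive note (added): main submission loop. Take ctx and task
 * references up front for up to 'nr' sqes, then allocate, init and dispatch
 * each one, returning any unused references before flushing batched
 * completions and committing the new SQ head.
 */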
6746static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
6747{
6748 struct io_uring_task *tctx;
6749 int submitted = 0;
6750
6751 /* make sure the SQ entry isn't read before the tail */
6752 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
6753 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6754 return -EAGAIN;
6755
6756 tctx = current->io_uring;
6757 tctx->cached_refs -= nr;
6758 if (unlikely(tctx->cached_refs < 0)) {
6759 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
6760
6761 percpu_counter_add(&tctx->inflight, refill);
6762 refcount_add(refill, &current->usage);
6763 tctx->cached_refs += refill;
6764 }
6765 io_submit_state_start(&ctx->submit_state, nr);
6766
6767 while (submitted < nr) {
6768 const struct io_uring_sqe *sqe;
6769 struct io_kiocb *req;
6770
6771 req = io_alloc_req(ctx);
6772 if (unlikely(!req)) {
6773 if (!submitted)
6774 submitted = -EAGAIN;
6775 break;
6776 }
6777 sqe = io_get_sqe(ctx);
6778 if (unlikely(!sqe)) {
6779 kmem_cache_free(req_cachep, req);
6780 break;
6781 }
6782
6783 submitted++;
6784 if (io_submit_sqe(ctx, req, sqe))
6785 break;
6786 }
6787
6788 if (unlikely(submitted != nr)) {
6789 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6790 int unused = nr - ref_used;
6791
6792 current->io_uring->cached_refs += unused;
6793 percpu_ref_put_many(&ctx->refs, unused);
6794 }
6795
6796 io_submit_state_end(&ctx->submit_state, ctx);
6797
6798 io_commit_sqring(ctx);
6799
6800 return submitted;
6801}
6802
6803static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
6804{
6805 return READ_ONCE(sqd->state);
6806}
6807
6808static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6809{
6810 /* Tell userspace we may need a wakeup call */
6811 spin_lock_irq(&ctx->completion_lock);
6812 WRITE_ONCE(ctx->rings->sq_flags,
6813 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
6814 spin_unlock_irq(&ctx->completion_lock);
6815}
6816
6817static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6818{
6819 spin_lock_irq(&ctx->completion_lock);
6820 WRITE_ONCE(ctx->rings->sq_flags,
6821 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
6822 spin_unlock_irq(&ctx->completion_lock);
6823}
6824
6825static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6826{
6827 unsigned int to_submit;
6828 int ret = 0;
6829
6830 to_submit = io_sqring_entries(ctx);
6831
6832 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
6833 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
6834
6835 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6836 unsigned nr_events = 0;
6837 const struct cred *creds = NULL;
6838
6839 if (ctx->sq_creds != current_cred())
6840 creds = override_creds(ctx->sq_creds);
6841
6842 mutex_lock(&ctx->uring_lock);
6843 if (!list_empty(&ctx->iopoll_list))
6844 io_do_iopoll(ctx, &nr_events, 0, true);
6845
6846 /*
6847 * Don't submit if refs are dying; this is relied upon by
6848 * io_uring_register() and io_ring_exit_work().
6849 */
6850 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6851 !(ctx->flags & IORING_SETUP_R_DISABLED))
6852 ret = io_submit_sqes(ctx, to_submit);
6853 mutex_unlock(&ctx->uring_lock);
6854
6855 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
6856 wake_up(&ctx->sqo_sq_wait);
6857 if (creds)
6858 revert_creds(creds);
6859 }
6860
6861 return ret;
6862}
6863
6864static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6865{
6866 struct io_ring_ctx *ctx;
6867 unsigned sq_thread_idle = 0;
6868
6869 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6870 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
6871 sqd->sq_thread_idle = sq_thread_idle;
6872}
6873
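/*
 * Handle park/stop requests and pending signals for the SQPOLL thread.
 * Drops and re-takes sqd->lock around signal handling; returns true if
 * the thread received a fatal signal or was asked to stop.
 */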
6874static bool io_sqd_handle_event(struct io_sq_data *sqd)
6875{
6876 bool did_sig = false;
6877 struct ksignal ksig;
6878
6879 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6880 signal_pending(current)) {
6881 mutex_unlock(&sqd->lock);
6882 if (signal_pending(current))
6883 did_sig = get_signal(&ksig);
6884 cond_resched();
6885 mutex_lock(&sqd->lock);
6886 }
6887 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
6888}
6889
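/*
 * Main loop of the SQPOLL kernel thread: submit SQEs and reap iopoll
 * completions for every ring attached to this io_sq_data, spinning for
 * sq_thread_idle jiffies before setting IORING_SQ_NEED_WAKEUP and going
 * to sleep.
 */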
6890static int io_sq_thread(void *data)
6891{
6892 struct io_sq_data *sqd = data;
6893 struct io_ring_ctx *ctx;
6894 unsigned long timeout = 0;
6895 char buf[TASK_COMM_LEN];
6896 DEFINE_WAIT(wait);
6897
6898 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
6899 set_task_comm(current, buf);
6900
6901 if (sqd->sq_cpu != -1)
6902 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6903 else
6904 set_cpus_allowed_ptr(current, cpu_online_mask);
6905 current->flags |= PF_NO_SETAFFINITY;
6906
6907 mutex_lock(&sqd->lock);
6908 while (1) {
6909 bool cap_entries, sqt_spin = false;
6910
6911 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
6912 if (io_sqd_handle_event(sqd))
6913 break;
6914 timeout = jiffies + sqd->sq_thread_idle;
6915 }
6916
6917 cap_entries = !list_is_singular(&sqd->ctx_list);
6918 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6919 int ret = __io_sq_thread(ctx, cap_entries);
6920
6921 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6922 sqt_spin = true;
6923 }
6924 if (io_run_task_work())
6925 sqt_spin = true;
6926
6927 if (sqt_spin || !time_after(jiffies, timeout)) {
6928 cond_resched();
6929 if (sqt_spin)
6930 timeout = jiffies + sqd->sq_thread_idle;
6931 continue;
6932 }
6933
6934 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
6935 if (!io_sqd_events_pending(sqd) && !current->task_works) {
6936 bool needs_sched = true;
6937
6938 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6939 io_ring_set_wakeup_flag(ctx);
6940
6941 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6942 !list_empty_careful(&ctx->iopoll_list)) {
6943 needs_sched = false;
6944 break;
6945 }
6946 if (io_sqring_entries(ctx)) {
6947 needs_sched = false;
6948 break;
6949 }
6950 }
6951
6952 if (needs_sched) {
6953 mutex_unlock(&sqd->lock);
6954 schedule();
6955 mutex_lock(&sqd->lock);
6956 }
6957 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6958 io_ring_clear_wakeup_flag(ctx);
6959 }
6960
6961 finish_wait(&sqd->wait, &wait);
6962 timeout = jiffies + sqd->sq_thread_idle;
6963 }
6964
6965 io_uring_cancel_generic(true, sqd);
6966 sqd->thread = NULL;
6967 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6968 io_ring_set_wakeup_flag(ctx);
6969 io_run_task_work();
6970 mutex_unlock(&sqd->lock);
6971
6972 complete(&sqd->exited);
6973 do_exit(0);
6974}
6975
6976struct io_wait_queue {
6977 struct wait_queue_entry wq;
6978 struct io_ring_ctx *ctx;
6979 unsigned to_wait;
6980 unsigned nr_timeouts;
6981};
6982
6983static inline bool io_should_wake(struct io_wait_queue *iowq)
6984{
6985 struct io_ring_ctx *ctx = iowq->ctx;
6986
6987 /*
6988 * Wake up if we have enough events, or if a timeout occurred since we
6989 * started waiting. For timeouts, we always want to return to userspace,
6990 * regardless of event count.
6991 */
6992 return io_cqring_events(ctx) >= iowq->to_wait ||
6993 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6994}
6995
6996static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6997 int wake_flags, void *key)
6998{
6999 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7000 wq);
7001
7002 /*
7003 * Cannot safely flush overflowed CQEs from here, ensure we wake up
7004 * the task, and the next invocation will do it.
7005 */
7006 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
7007 return autoremove_wake_function(curr, mode, wake_flags, key);
7008 return -1;
7009}
7010
7011static int io_run_task_work_sig(void)
7012{
7013 if (io_run_task_work())
7014 return 1;
7015 if (!signal_pending(current))
7016 return 0;
7017 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
7018 return -ERESTARTSYS;
7019 return -EINTR;
7020}
7021
7022/* when returns >0, the caller should retry */
7023static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7024 struct io_wait_queue *iowq,
7025 signed long *timeout)
7026{
7027 int ret;
7028
7029 /* make sure we run task_work before checking for signals */
7030 ret = io_run_task_work_sig();
7031 if (ret || io_should_wake(iowq))
7032 return ret;
7033
7034 if (test_bit(0, &ctx->check_cq_overflow))
7035 return 1;
7036
7037 *timeout = schedule_timeout(*timeout);
7038 return !*timeout ? -ETIME : 1;
7039}
7040
7041/*
7042 * Wait until events become available, if we don't already have some. The
7043 * application must reap them itself, as they reside on the shared cq ring.
7044 */
7045static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
7046 const sigset_t __user *sig, size_t sigsz,
7047 struct __kernel_timespec __user *uts)
7048{
7049 struct io_wait_queue iowq = {
7050 .wq = {
7051 .private = current,
7052 .func = io_wake_function,
7053 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7054 },
7055 .ctx = ctx,
7056 .to_wait = min_events,
7057 };
7058 struct io_rings *rings = ctx->rings;
7059 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7060 int ret;
7061
7062 do {
7063 io_cqring_overflow_flush(ctx, false);
7064 if (io_cqring_events(ctx) >= min_events)
7065 return 0;
7066 if (!io_run_task_work())
7067 break;
7068 } while (1);
7069
7070 if (sig) {
7071#ifdef CONFIG_COMPAT
7072 if (in_compat_syscall())
7073 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
7074 sigsz);
7075 else
7076#endif
7077 ret = set_user_sigmask(sig, sigsz);
7078
7079 if (ret)
7080 return ret;
7081 }
7082
7083 if (uts) {
7084 struct timespec64 ts;
7085
7086 if (get_timespec64(&ts, uts))
7087 return -EFAULT;
7088 timeout = timespec64_to_jiffies(&ts);
7089 }
7090
7091 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
7092 trace_io_uring_cqring_wait(ctx, min_events);
7093 do {
7094 /* if we can't even flush overflow, don't wait for more */
7095 if (!io_cqring_overflow_flush(ctx, false)) {
7096 ret = -EBUSY;
7097 break;
7098 }
7099 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
7100 TASK_INTERRUPTIBLE);
7101 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
7102 finish_wait(&ctx->cq_wait, &iowq.wq);
7103 cond_resched();
7104 } while (ret > 0);
7105
7106 restore_saved_sigmask_unless(ret == -EINTR);
7107
7108 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
7109}
7110
7111static void io_free_page_table(void **table, size_t size)
7112{
7113 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7114
7115 for (i = 0; i < nr_tables; i++)
7116 kfree(table[i]);
7117 kfree(table);
7118}
7119
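/*
 * Allocate a table as an array of page-sized chunks rather than one large
 * allocation, so registering many resources doesn't require high-order pages.
 */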
7120static void **io_alloc_page_table(size_t size)
7121{
7122 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7123 size_t init_size = size;
7124 void **table;
7125
7126 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
7127 if (!table)
7128 return NULL;
7129
7130 for (i = 0; i < nr_tables; i++) {
7131 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
7132
7133 table[i] = kzalloc(this_size, GFP_KERNEL);
7134 if (!table[i]) {
7135 io_free_page_table(table, init_size);
7136 return NULL;
7137 }
7138 size -= this_size;
7139 }
7140 return table;
7141}
7142
7143static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7144{
7145 percpu_ref_exit(&ref_node->refs);
7146 kfree(ref_node);
7147}
7148
7149static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7150 struct io_rsrc_data *data_to_kill)
7151{
7152 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7153 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
7154
7155 if (data_to_kill) {
7156 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
7157
7158 rsrc_node->rsrc_data = data_to_kill;
7159 spin_lock_irq(&ctx->rsrc_ref_lock);
7160 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
7161 spin_unlock_irq(&ctx->rsrc_ref_lock);
7162
7163 atomic_inc(&data_to_kill->refs);
7164 percpu_ref_kill(&rsrc_node->refs);
7165 ctx->rsrc_node = NULL;
7166 }
7167
7168 if (!ctx->rsrc_node) {
7169 ctx->rsrc_node = ctx->rsrc_backup_node;
7170 ctx->rsrc_backup_node = NULL;
7171 }
7172}
7173
7174static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
7175{
7176 if (ctx->rsrc_backup_node)
7177 return 0;
7178 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
7179 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7180}
7181
7182static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
7183{
7184 int ret;
7185
7186 /* As we may drop ->uring_lock, other task may have started quiesce */
7187 if (data->quiesce)
7188 return -ENXIO;
7189
7190 data->quiesce = true;
7191 do {
7192 ret = io_rsrc_node_switch_start(ctx);
7193 if (ret)
7194 break;
7195 io_rsrc_node_switch(ctx, data);
7196
7197 /* kill initial ref, already quiesced if zero */
7198 if (atomic_dec_and_test(&data->refs))
7199 break;
7200 mutex_unlock(&ctx->uring_lock);
7201 flush_delayed_work(&ctx->rsrc_put_work);
7202 ret = wait_for_completion_interruptible(&data->done);
7203 if (!ret) {
7204 mutex_lock(&ctx->uring_lock);
7205 break;
7206 }
7207
7208 atomic_inc(&data->refs);
7209 /* wait for all works potentially completing data->done */
7210 flush_delayed_work(&ctx->rsrc_put_work);
7211 reinit_completion(&data->done);
7212
7213 ret = io_run_task_work_sig();
7214 mutex_lock(&ctx->uring_lock);
7215 } while (ret >= 0);
7216 data->quiesce = false;
7217
7218 return ret;
7219}
7220
7221static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7222{
7223 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7224 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7225
7226 return &data->tags[table_idx][off];
7227}
7228
7229static void io_rsrc_data_free(struct io_rsrc_data *data)
7230{
7231 size_t size = data->nr * sizeof(data->tags[0][0]);
7232
7233 if (data->tags)
7234 io_free_page_table((void **)data->tags, size);
7235 kfree(data);
7236}
7237
7238static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7239 u64 __user *utags, unsigned nr,
7240 struct io_rsrc_data **pdata)
7241{
7242 struct io_rsrc_data *data;
7243 int ret = -ENOMEM;
7244 unsigned i;
7245
7246 data = kzalloc(sizeof(*data), GFP_KERNEL);
7247 if (!data)
7248 return -ENOMEM;
7249 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
7250 if (!data->tags) {
7251 kfree(data);
7252 return -ENOMEM;
7253 }
7254
7255 data->nr = nr;
7256 data->ctx = ctx;
7257 data->do_put = do_put;
7258 if (utags) {
7259 ret = -EFAULT;
7260 for (i = 0; i < nr; i++) {
7261 u64 *tag_slot = io_get_tag_slot(data, i);
7262
7263 if (copy_from_user(tag_slot, &utags[i],
7264 sizeof(*tag_slot)))
7265 goto fail;
7266 }
7267 }
7268
7269 atomic_set(&data->refs, 1);
7270 init_completion(&data->done);
7271 *pdata = data;
7272 return 0;
7273fail:
7274 io_rsrc_data_free(data);
7275 return ret;
7276}
7277
7278static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7279{
7280 size_t size = nr_files * sizeof(struct io_fixed_file);
7281
7282 table->files = (struct io_fixed_file **)io_alloc_page_table(size);
7283 return !!table->files;
7284}
7285
7286static void io_free_file_tables(struct io_file_table *table, unsigned nr_files)
7287{
7288 size_t size = nr_files * sizeof(struct io_fixed_file);
7289
7290 io_free_page_table((void **)table->files, size);
7291 table->files = NULL;
7292}
7293
7294static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7295{
7296#if defined(CONFIG_UNIX)
7297 if (ctx->ring_sock) {
7298 struct sock *sock = ctx->ring_sock->sk;
7299 struct sk_buff *skb;
7300
7301 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7302 kfree_skb(skb);
7303 }
7304#else
7305 int i;
7306
7307 for (i = 0; i < ctx->nr_user_files; i++) {
7308 struct file *file;
7309
7310 file = io_file_from_index(ctx, i);
7311 if (file)
7312 fput(file);
7313 }
7314#endif
7315 io_free_file_tables(&ctx->file_table, ctx->nr_user_files);
7316 io_rsrc_data_free(ctx->file_data);
7317 ctx->file_data = NULL;
7318 ctx->nr_user_files = 0;
7319}
7320
7321static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7322{
7323 int ret;
7324
7325 if (!ctx->file_data)
7326 return -ENXIO;
7327 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
7328 if (!ret)
7329 __io_sqe_files_unregister(ctx);
7330 return ret;
7331}
7332
7333static void io_sq_thread_unpark(struct io_sq_data *sqd)
7334 __releases(&sqd->lock)
7335{
7336 WARN_ON_ONCE(sqd->thread == current);
7337
7338 /*
7339 * Do the dance but not conditional clear_bit() because it'd race with
7340 * other threads incrementing park_pending and setting the bit.
7341 */
7342 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
7343 if (atomic_dec_return(&sqd->park_pending))
7344 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
7345 mutex_unlock(&sqd->lock);
7346}
7347
7348static void io_sq_thread_park(struct io_sq_data *sqd)
7349 __acquires(&sqd->lock)
7350{
7351 WARN_ON_ONCE(sqd->thread == current);
7352
7353 atomic_inc(&sqd->park_pending);
7354 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
7355 mutex_lock(&sqd->lock);
7356 if (sqd->thread)
7357 wake_up_process(sqd->thread);
7358}
7359
7360static void io_sq_thread_stop(struct io_sq_data *sqd)
7361{
7362 WARN_ON_ONCE(sqd->thread == current);
7363 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
7364
7365 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7366 mutex_lock(&sqd->lock);
7367 if (sqd->thread)
7368 wake_up_process(sqd->thread);
7369 mutex_unlock(&sqd->lock);
7370 wait_for_completion(&sqd->exited);
7371}
7372
7373static void io_put_sq_data(struct io_sq_data *sqd)
7374{
7375 if (refcount_dec_and_test(&sqd->refs)) {
7376 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7377
7378 io_sq_thread_stop(sqd);
7379 kfree(sqd);
7380 }
7381}
7382
7383static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7384{
7385 struct io_sq_data *sqd = ctx->sq_data;
7386
7387 if (sqd) {
7388 io_sq_thread_park(sqd);
7389 list_del_init(&ctx->sqd_list);
7390 io_sqd_update_thread_idle(sqd);
7391 io_sq_thread_unpark(sqd);
7392
7393 io_put_sq_data(sqd);
7394 ctx->sq_data = NULL;
7395 }
7396}
7397
7398static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7399{
7400 struct io_ring_ctx *ctx_attach;
7401 struct io_sq_data *sqd;
7402 struct fd f;
7403
7404 f = fdget(p->wq_fd);
7405 if (!f.file)
7406 return ERR_PTR(-ENXIO);
7407 if (f.file->f_op != &io_uring_fops) {
7408 fdput(f);
7409 return ERR_PTR(-EINVAL);
7410 }
7411
7412 ctx_attach = f.file->private_data;
7413 sqd = ctx_attach->sq_data;
7414 if (!sqd) {
7415 fdput(f);
7416 return ERR_PTR(-EINVAL);
7417 }
7418 if (sqd->task_tgid != current->tgid) {
7419 fdput(f);
7420 return ERR_PTR(-EPERM);
7421 }
7422
7423 refcount_inc(&sqd->refs);
7424 fdput(f);
7425 return sqd;
7426}
7427
7428static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7429 bool *attached)
7430{
7431 struct io_sq_data *sqd;
7432
7433 *attached = false;
7434 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7435 sqd = io_attach_sq_data(p);
7436 if (!IS_ERR(sqd)) {
7437 *attached = true;
7438 return sqd;
7439 }
7440 /* fall through for EPERM case, setup new sqd/task */
7441 if (PTR_ERR(sqd) != -EPERM)
7442 return sqd;
7443 }
7444
7445 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7446 if (!sqd)
7447 return ERR_PTR(-ENOMEM);
7448
7449 atomic_set(&sqd->park_pending, 0);
7450 refcount_set(&sqd->refs, 1);
7451 INIT_LIST_HEAD(&sqd->ctx_list);
7452 mutex_init(&sqd->lock);
7453 init_waitqueue_head(&sqd->wait);
7454 init_completion(&sqd->exited);
7455 return sqd;
7456}
7457
7458#if defined(CONFIG_UNIX)
7459/*
7460 * Ensure the UNIX gc is aware of our file set, so we are certain that
7461 * the io_uring can be safely unregistered on process exit, even if we have
7462 * loops in the file referencing.
7463 */
7464static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7465{
7466 struct sock *sk = ctx->ring_sock->sk;
7467 struct scm_fp_list *fpl;
7468 struct sk_buff *skb;
7469 int i, nr_files;
7470
7471 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7472 if (!fpl)
7473 return -ENOMEM;
7474
7475 skb = alloc_skb(0, GFP_KERNEL);
7476 if (!skb) {
7477 kfree(fpl);
7478 return -ENOMEM;
7479 }
7480
7481 skb->sk = sk;
7482
7483 nr_files = 0;
7484 fpl->user = get_uid(current_user());
7485 for (i = 0; i < nr; i++) {
7486 struct file *file = io_file_from_index(ctx, i + offset);
7487
7488 if (!file)
7489 continue;
7490 fpl->fp[nr_files] = get_file(file);
7491 unix_inflight(fpl->user, fpl->fp[nr_files]);
7492 nr_files++;
7493 }
7494
7495 if (nr_files) {
7496 fpl->max = SCM_MAX_FD;
7497 fpl->count = nr_files;
7498 UNIXCB(skb).fp = fpl;
7499 skb->destructor = unix_destruct_scm;
7500 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7501 skb_queue_head(&sk->sk_receive_queue, skb);
7502
7503 for (i = 0; i < nr_files; i++)
7504 fput(fpl->fp[i]);
7505 } else {
7506 kfree_skb(skb);
7507 kfree(fpl);
7508 }
7509
7510 return 0;
7511}
7512
7513/*
7514 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7515 * causes regular reference counting to break down. We rely on the UNIX
7516 * garbage collection to take care of this problem for us.
7517 */
7518static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7519{
7520 unsigned left, total;
7521 int ret = 0;
7522
7523 total = 0;
7524 left = ctx->nr_user_files;
7525 while (left) {
7526 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
7527
7528 ret = __io_sqe_files_scm(ctx, this_files, total);
7529 if (ret)
7530 break;
7531 left -= this_files;
7532 total += this_files;
7533 }
7534
7535 if (!ret)
7536 return 0;
7537
7538 while (total < ctx->nr_user_files) {
7539 struct file *file = io_file_from_index(ctx, total);
7540
7541 if (file)
7542 fput(file);
7543 total++;
7544 }
7545
7546 return ret;
7547}
7548#else
7549static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7550{
7551 return 0;
7552}
7553#endif
7554
7555static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
7556{
7557 struct file *file = prsrc->file;
7558#if defined(CONFIG_UNIX)
7559 struct sock *sock = ctx->ring_sock->sk;
7560 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7561 struct sk_buff *skb;
7562 int i;
7563
7564 __skb_queue_head_init(&list);
7565
7566 /*
7567 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7568 * remove this entry and rearrange the file array.
7569 */
7570 skb = skb_dequeue(head);
7571 while (skb) {
7572 struct scm_fp_list *fp;
7573
7574 fp = UNIXCB(skb).fp;
7575 for (i = 0; i < fp->count; i++) {
7576 int left;
7577
7578 if (fp->fp[i] != file)
7579 continue;
7580
7581 unix_notinflight(fp->user, fp->fp[i]);
7582 left = fp->count - 1 - i;
7583 if (left) {
7584 memmove(&fp->fp[i], &fp->fp[i + 1],
7585 left * sizeof(struct file *));
7586 }
7587 fp->count--;
7588 if (!fp->count) {
7589 kfree_skb(skb);
7590 skb = NULL;
7591 } else {
7592 __skb_queue_tail(&list, skb);
7593 }
7594 fput(file);
7595 file = NULL;
7596 break;
7597 }
7598
7599 if (!file)
7600 break;
7601
7602 __skb_queue_tail(&list, skb);
7603
7604 skb = skb_dequeue(head);
7605 }
7606
7607 if (skb_peek(&list)) {
7608 spin_lock_irq(&head->lock);
7609 while ((skb = __skb_dequeue(&list)) != NULL)
7610 __skb_queue_tail(head, skb);
7611 spin_unlock_irq(&head->lock);
7612 }
7613#else
7614 fput(file);
7615#endif
7616}
7617
7618static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
7619{
7620 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
7621 struct io_ring_ctx *ctx = rsrc_data->ctx;
7622 struct io_rsrc_put *prsrc, *tmp;
7623
7624 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7625 list_del(&prsrc->list);
7626
7627 if (prsrc->tag) {
7628 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
7629
7630 io_ring_submit_lock(ctx, lock_ring);
7631 spin_lock_irq(&ctx->completion_lock);
7632 io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
7633 ctx->cq_extra++;
7634 io_commit_cqring(ctx);
7635 spin_unlock_irq(&ctx->completion_lock);
7636 io_cqring_ev_posted(ctx);
7637 io_ring_submit_unlock(ctx, lock_ring);
7638 }
7639
7640 rsrc_data->do_put(ctx, prsrc);
7641 kfree(prsrc);
7642 }
7643
7644 io_rsrc_node_destroy(ref_node);
7645 if (atomic_dec_and_test(&rsrc_data->refs))
7646 complete(&rsrc_data->done);
7647}
7648
7649static void io_rsrc_put_work(struct work_struct *work)
7650{
7651 struct io_ring_ctx *ctx;
7652 struct llist_node *node;
7653
7654 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7655 node = llist_del_all(&ctx->rsrc_put_llist);
7656
7657 while (node) {
7658 struct io_rsrc_node *ref_node;
7659 struct llist_node *next = node->next;
7660
7661 ref_node = llist_entry(node, struct io_rsrc_node, llist);
7662 __io_rsrc_put_work(ref_node);
7663 node = next;
7664 }
7665}
7666
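/*
 * Called when an rsrc node's percpu refs hit zero. Nodes must be flushed in
 * the order they were switched out, so only queue consecutive completed nodes
 * from the head of rsrc_ref_list for the delayed put work.
 */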
7667static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
7668{
7669 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
7670 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
7671 unsigned long flags;
7672 bool first_add = false;
7673
7674 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
7675 node->done = true;
7676
7677 while (!list_empty(&ctx->rsrc_ref_list)) {
7678 node = list_first_entry(&ctx->rsrc_ref_list,
7679 struct io_rsrc_node, node);
7680 /* recycle ref nodes in order */
7681 if (!node->done)
7682 break;
7683 list_del(&node->node);
7684 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
7685 }
7686 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
7687
7688 if (first_add)
7689 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
7690}
7691
7692static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
7693{
7694 struct io_rsrc_node *ref_node;
7695
7696 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7697 if (!ref_node)
7698 return NULL;
7699
7700 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7701 0, GFP_KERNEL)) {
7702 kfree(ref_node);
7703 return NULL;
7704 }
7705 INIT_LIST_HEAD(&ref_node->node);
7706 INIT_LIST_HEAD(&ref_node->rsrc_list);
7707 ref_node->done = false;
7708 return ref_node;
7709}
7710
7711static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7712 unsigned nr_args, u64 __user *tags)
7713{
7714 __s32 __user *fds = (__s32 __user *) arg;
7715 struct file *file;
7716 int fd, ret;
7717 unsigned i;
7718
7719 if (ctx->file_data)
7720 return -EBUSY;
7721 if (!nr_args)
7722 return -EINVAL;
7723 if (nr_args > IORING_MAX_FIXED_FILES)
7724 return -EMFILE;
7725 ret = io_rsrc_node_switch_start(ctx);
7726 if (ret)
7727 return ret;
7728 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
7729 &ctx->file_data);
7730 if (ret)
7731 return ret;
7732
7733 ret = -ENOMEM;
7734 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
7735 goto out_free;
7736
7737 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
7738 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7739 ret = -EFAULT;
7740 goto out_fput;
7741 }
7742 /* allow sparse sets */
7743 if (fd == -1) {
7744 ret = -EINVAL;
7745 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
7746 goto out_fput;
7747 continue;
7748 }
7749
7750 file = fget(fd);
7751 ret = -EBADF;
7752 if (unlikely(!file))
7753 goto out_fput;
7754
7755 /*
7756 * Don't allow io_uring instances to be registered. If UNIX
7757 * isn't enabled, then this causes a reference cycle and this
7758 * instance can never get freed. If UNIX is enabled we'll
7759 * handle it just fine, but there's still no point in allowing
7760 * a ring fd as it doesn't support regular read/write anyway.
7761 */
7762 if (file->f_op == &io_uring_fops) {
7763 fput(file);
7764 goto out_fput;
7765 }
7766 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
7767 }
7768
7769 ret = io_sqe_files_scm(ctx);
7770 if (ret) {
7771 __io_sqe_files_unregister(ctx);
7772 return ret;
7773 }
7774
7775 io_rsrc_node_switch(ctx, NULL);
7776 return ret;
7777out_fput:
7778 for (i = 0; i < ctx->nr_user_files; i++) {
7779 file = io_file_from_index(ctx, i);
7780 if (file)
7781 fput(file);
7782 }
7783 io_free_file_tables(&ctx->file_table, nr_args);
7784 ctx->nr_user_files = 0;
7785out_free:
7786 io_rsrc_data_free(ctx->file_data);
7787 ctx->file_data = NULL;
7788 return ret;
7789}
7790
7791static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7792 int index)
7793{
7794#if defined(CONFIG_UNIX)
7795 struct sock *sock = ctx->ring_sock->sk;
7796 struct sk_buff_head *head = &sock->sk_receive_queue;
7797 struct sk_buff *skb;
7798
7799 /*
7800 * See if we can merge this file into an existing skb SCM_RIGHTS
7801 * file set. If there's no room, fall back to allocating a new skb
7802 * and filling it in.
7803 */
7804 spin_lock_irq(&head->lock);
7805 skb = skb_peek(head);
7806 if (skb) {
7807 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7808
7809 if (fpl->count < SCM_MAX_FD) {
7810 __skb_unlink(skb, head);
7811 spin_unlock_irq(&head->lock);
7812 fpl->fp[fpl->count] = get_file(file);
7813 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7814 fpl->count++;
7815 spin_lock_irq(&head->lock);
7816 __skb_queue_head(head, skb);
7817 } else {
7818 skb = NULL;
7819 }
7820 }
7821 spin_unlock_irq(&head->lock);
7822
7823 if (skb) {
7824 fput(file);
7825 return 0;
7826 }
7827
7828 return __io_sqe_files_scm(ctx, 1, index);
7829#else
7830 return 0;
7831#endif
7832}
7833
7834static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
7835 struct io_rsrc_node *node, void *rsrc)
7836{
7837 struct io_rsrc_put *prsrc;
7838
7839 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7840 if (!prsrc)
7841 return -ENOMEM;
7842
7843 prsrc->tag = *io_get_tag_slot(data, idx);
7844 prsrc->rsrc = rsrc;
7845 list_add(&prsrc->list, &node->rsrc_list);
7846 return 0;
7847}
7848
7849static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7850 struct io_uring_rsrc_update2 *up,
7851 unsigned nr_args)
7852{
7853 u64 __user *tags = u64_to_user_ptr(up->tags);
7854 __s32 __user *fds = u64_to_user_ptr(up->data);
7855 struct io_rsrc_data *data = ctx->file_data;
7856 struct io_fixed_file *file_slot;
7857 struct file *file;
7858 int fd, i, err = 0;
7859 unsigned int done;
7860 bool needs_switch = false;
7861
7862 if (!ctx->file_data)
7863 return -ENXIO;
7864 if (up->offset + nr_args > ctx->nr_user_files)
7865 return -EINVAL;
7866
7867 for (done = 0; done < nr_args; done++) {
7868 u64 tag = 0;
7869
7870 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
7871 copy_from_user(&fd, &fds[done], sizeof(fd))) {
7872 err = -EFAULT;
7873 break;
7874 }
7875 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
7876 err = -EINVAL;
7877 break;
7878 }
7879 if (fd == IORING_REGISTER_FILES_SKIP)
7880 continue;
7881
7882 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
7883 file_slot = io_fixed_file_slot(&ctx->file_table, i);
7884
7885 if (file_slot->file_ptr) {
7886 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
7887 err = io_queue_rsrc_removal(data, up->offset + done,
7888 ctx->rsrc_node, file);
7889 if (err)
7890 break;
7891 file_slot->file_ptr = 0;
7892 needs_switch = true;
7893 }
7894 if (fd != -1) {
7895 file = fget(fd);
7896 if (!file) {
7897 err = -EBADF;
7898 break;
7899 }
7900 /*
7901 * Don't allow io_uring instances to be registered. If UNIX
7902 * isn't enabled, then this causes a reference cycle and this
7903 * instance can never get freed. If UNIX is enabled we'll
7904 * handle it just fine, but there's still no point in allowing
7905 * a ring fd as it doesn't support regular read/write anyway.
7906 */
7908 if (file->f_op == &io_uring_fops) {
7909 fput(file);
7910 err = -EBADF;
7911 break;
7912 }
7913 *io_get_tag_slot(data, up->offset + done) = tag;
7914 io_fixed_file_set(file_slot, file);
7915 err = io_sqe_file_register(ctx, file, i);
7916 if (err) {
7917 file_slot->file_ptr = 0;
7918 fput(file);
7919 break;
7920 }
7921 }
7922 }
7923
7924 if (needs_switch)
7925 io_rsrc_node_switch(ctx, data);
7926 return done ? done : err;
7927}
7928
7929static struct io_wq_work *io_free_work(struct io_wq_work *work)
7930{
7931 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7932
7933 req = io_put_req_find_next(req);
7934 return req ? &req->work : NULL;
7935}
7936
7937static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
7938 struct task_struct *task)
7939{
7940 struct io_wq_hash *hash;
7941 struct io_wq_data data;
7942 unsigned int concurrency;
7943
7944 mutex_lock(&ctx->uring_lock);
7945 hash = ctx->hash_map;
7946 if (!hash) {
7947 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7948 if (!hash) {
7949 mutex_unlock(&ctx->uring_lock);
7950 return ERR_PTR(-ENOMEM);
7951 }
7952 refcount_set(&hash->refs, 1);
7953 init_waitqueue_head(&hash->wait);
7954 ctx->hash_map = hash;
7955 }
7956 mutex_unlock(&ctx->uring_lock);
7957
7958 data.hash = hash;
7959 data.task = task;
7960 data.free_work = io_free_work;
7961 data.do_work = io_wq_submit_work;
7962
7963 /* Do QD, or 4 * CPUS, whatever is smallest */
7964 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7965
7966 return io_wq_create(concurrency, &data);
7967}
7968
7969static int io_uring_alloc_task_context(struct task_struct *task,
7970 struct io_ring_ctx *ctx)
7971{
7972 struct io_uring_task *tctx;
7973 int ret;
7974
7975 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
7976 if (unlikely(!tctx))
7977 return -ENOMEM;
7978
7979 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7980 if (unlikely(ret)) {
7981 kfree(tctx);
7982 return ret;
7983 }
7984
7985 tctx->io_wq = io_init_wq_offload(ctx, task);
7986 if (IS_ERR(tctx->io_wq)) {
7987 ret = PTR_ERR(tctx->io_wq);
7988 percpu_counter_destroy(&tctx->inflight);
7989 kfree(tctx);
7990 return ret;
7991 }
7992
7993 xa_init(&tctx->xa);
7994 init_waitqueue_head(&tctx->wait);
7995 atomic_set(&tctx->in_idle, 0);
7996 atomic_set(&tctx->inflight_tracked, 0);
7997 task->io_uring = tctx;
7998 spin_lock_init(&tctx->task_lock);
7999 INIT_WQ_LIST(&tctx->task_list);
8000 init_task_work(&tctx->task_work, tctx_task_work);
8001 return 0;
8002}
8003
8004void __io_uring_free(struct task_struct *tsk)
8005{
8006 struct io_uring_task *tctx = tsk->io_uring;
8007
8008 WARN_ON_ONCE(!xa_empty(&tctx->xa));
8009 WARN_ON_ONCE(tctx->io_wq);
8010 WARN_ON_ONCE(tctx->cached_refs);
8011
8012 percpu_counter_destroy(&tctx->inflight);
8013 kfree(tctx);
8014 tsk->io_uring = NULL;
8015}
8016
8017static int io_sq_offload_create(struct io_ring_ctx *ctx,
8018 struct io_uring_params *p)
8019{
8020 int ret;
8021
8022 /* Retain compatibility with failing for an invalid attach attempt */
8023 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8024 IORING_SETUP_ATTACH_WQ) {
8025 struct fd f;
8026
8027 f = fdget(p->wq_fd);
8028 if (!f.file)
8029 return -ENXIO;
8030 if (f.file->f_op != &io_uring_fops) {
8031 fdput(f);
8032 return -EINVAL;
8033 }
8034 fdput(f);
8035 }
8036 if (ctx->flags & IORING_SETUP_SQPOLL) {
8037 struct task_struct *tsk;
8038 struct io_sq_data *sqd;
8039 bool attached;
8040
8041 sqd = io_get_sq_data(p, &attached);
8042 if (IS_ERR(sqd)) {
8043 ret = PTR_ERR(sqd);
8044 goto err;
8045 }
8046
8047 ctx->sq_creds = get_current_cred();
8048 ctx->sq_data = sqd;
8049 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8050 if (!ctx->sq_thread_idle)
8051 ctx->sq_thread_idle = HZ;
8052
8053 io_sq_thread_park(sqd);
8054 list_add(&ctx->sqd_list, &sqd->ctx_list);
8055 io_sqd_update_thread_idle(sqd);
8056
8057 ret = (attached && !sqd->thread) ? -ENXIO : 0;
8058 io_sq_thread_unpark(sqd);
8059
8060 if (ret < 0)
8061 goto err;
8062 if (attached)
8063 return 0;
8064
8065 if (p->flags & IORING_SETUP_SQ_AFF) {
8066 int cpu = p->sq_thread_cpu;
8067
8068 ret = -EINVAL;
8069 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
8070 goto err_sqpoll;
8071 sqd->sq_cpu = cpu;
8072 } else {
8073 sqd->sq_cpu = -1;
8074 }
8075
8076 sqd->task_pid = current->pid;
8077 sqd->task_tgid = current->tgid;
8078 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8079 if (IS_ERR(tsk)) {
8080 ret = PTR_ERR(tsk);
8081 goto err_sqpoll;
8082 }
8083
8084 sqd->thread = tsk;
8085 ret = io_uring_alloc_task_context(tsk, ctx);
8086 wake_up_new_task(tsk);
8087 if (ret)
8088 goto err;
8089 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8090 /* Can't have SQ_AFF without SQPOLL */
8091 ret = -EINVAL;
8092 goto err;
8093 }
8094
8095 return 0;
8096err_sqpoll:
8097 complete(&ctx->sq_data->exited);
8098err:
8099 io_sq_thread_finish(ctx);
8100 return ret;
8101}
8102
8103static inline void __io_unaccount_mem(struct user_struct *user,
8104 unsigned long nr_pages)
8105{
8106 atomic_long_sub(nr_pages, &user->locked_vm);
8107}
8108
8109static inline int __io_account_mem(struct user_struct *user,
8110 unsigned long nr_pages)
8111{
8112 unsigned long page_limit, cur_pages, new_pages;
8113
8114 /* Don't allow more pages than we can safely lock */
8115 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8116
8117 do {
8118 cur_pages = atomic_long_read(&user->locked_vm);
8119 new_pages = cur_pages + nr_pages;
8120 if (new_pages > page_limit)
8121 return -ENOMEM;
8122 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8123 new_pages) != cur_pages);
8124
8125 return 0;
8126}
8127
8128static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
8129{
8130 if (ctx->user)
8131 __io_unaccount_mem(ctx->user, nr_pages);
8132
8133 if (ctx->mm_account)
8134 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
8135}
8136
8137static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
8138{
8139 int ret;
8140
8141 if (ctx->user) {
8142 ret = __io_account_mem(ctx->user, nr_pages);
8143 if (ret)
8144 return ret;
8145 }
8146
8147 if (ctx->mm_account)
8148 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
8149
8150 return 0;
8151}
8152
8153static void io_mem_free(void *ptr)
8154{
8155 struct page *page;
8156
8157 if (!ptr)
8158 return;
8159
8160 page = virt_to_head_page(ptr);
8161 if (put_page_testzero(page))
8162 free_compound_page(page);
8163}
8164
8165static void *io_mem_alloc(size_t size)
8166{
8167 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
8168 __GFP_NORETRY | __GFP_ACCOUNT;
8169
8170 return (void *) __get_free_pages(gfp_flags, get_order(size));
8171}
8172
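/*
 * Size of the rings allocation: the io_rings header and CQE array, padded to
 * a cacheline, followed by the SQ index array. Returns SIZE_MAX on overflow
 * and stores the SQ array offset in *sq_offset.
 */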
8173static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8174 size_t *sq_offset)
8175{
8176 struct io_rings *rings;
8177 size_t off, sq_array_size;
8178
8179 off = struct_size(rings, cqes, cq_entries);
8180 if (off == SIZE_MAX)
8181 return SIZE_MAX;
8182
8183#ifdef CONFIG_SMP
8184 off = ALIGN(off, SMP_CACHE_BYTES);
8185 if (off == 0)
8186 return SIZE_MAX;
8187#endif
8188
8189 if (sq_offset)
8190 *sq_offset = off;
8191
8192 sq_array_size = array_size(sizeof(u32), sq_entries);
8193 if (sq_array_size == SIZE_MAX)
8194 return SIZE_MAX;
8195
8196 if (check_add_overflow(off, sq_array_size, &off))
8197 return SIZE_MAX;
8198
8199 return off;
8200}
8201
8202static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
8203{
8204 struct io_mapped_ubuf *imu = *slot;
8205 unsigned int i;
8206
8207 if (imu != ctx->dummy_ubuf) {
8208 for (i = 0; i < imu->nr_bvecs; i++)
8209 unpin_user_page(imu->bvec[i].bv_page);
8210 if (imu->acct_pages)
8211 io_unaccount_mem(ctx, imu->acct_pages);
8212 kvfree(imu);
8213 }
8214 *slot = NULL;
8215}
8216
8217static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
8218{
8219 io_buffer_unmap(ctx, &prsrc->buf);
8220 prsrc->buf = NULL;
8221}
8222
8223static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8224{
8225 unsigned int i;
8226
8227 for (i = 0; i < ctx->nr_user_bufs; i++)
8228 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
8229 kfree(ctx->user_bufs);
8230 io_rsrc_data_free(ctx->buf_data);
8231 ctx->user_bufs = NULL;
8232 ctx->buf_data = NULL;
8233 ctx->nr_user_bufs = 0;
8234}
8235
8236static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8237{
8238 int ret;
8239
8240 if (!ctx->buf_data)
8241 return -ENXIO;
8242
8243 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
8244 if (!ret)
8245 __io_sqe_buffers_unregister(ctx);
8246 return ret;
8247}
8248
8249static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8250 void __user *arg, unsigned index)
8251{
8252 struct iovec __user *src;
8253
8254#ifdef CONFIG_COMPAT
8255 if (ctx->compat) {
8256 struct compat_iovec __user *ciovs;
8257 struct compat_iovec ciov;
8258
8259 ciovs = (struct compat_iovec __user *) arg;
8260 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8261 return -EFAULT;
8262
8263 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
8264 dst->iov_len = ciov.iov_len;
8265 return 0;
8266 }
8267#endif
8268 src = (struct iovec __user *) arg;
8269 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8270 return -EFAULT;
8271 return 0;
8272}
8273
8274/*
8275 * Not super efficient, but this is just a registration time. And we do cache
8276 * the last compound head, so generally we'll only do a full search if we don't
8277 * match that one.
8278 *
8279 * We check if the given compound head page has already been accounted, to
8280 * avoid double accounting it. This allows us to account the full size of the
8281 * page, not just the constituent pages of a huge page.
8282 */
8283static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8284 int nr_pages, struct page *hpage)
8285{
8286 int i, j;
8287
8288 /* check current page array */
8289 for (i = 0; i < nr_pages; i++) {
8290 if (!PageCompound(pages[i]))
8291 continue;
8292 if (compound_head(pages[i]) == hpage)
8293 return true;
8294 }
8295
8296 /* check previously registered pages */
8297 for (i = 0; i < ctx->nr_user_bufs; i++) {
8298 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
8299
8300 for (j = 0; j < imu->nr_bvecs; j++) {
8301 if (!PageCompound(imu->bvec[j].bv_page))
8302 continue;
8303 if (compound_head(imu->bvec[j].bv_page) == hpage)
8304 return true;
8305 }
8306 }
8307
8308 return false;
8309}
8310
8311static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8312 int nr_pages, struct io_mapped_ubuf *imu,
8313 struct page **last_hpage)
8314{
8315 int i, ret;
8316
8317 imu->acct_pages = 0;
8318 for (i = 0; i < nr_pages; i++) {
8319 if (!PageCompound(pages[i])) {
8320 imu->acct_pages++;
8321 } else {
8322 struct page *hpage;
8323
8324 hpage = compound_head(pages[i]);
8325 if (hpage == *last_hpage)
8326 continue;
8327 *last_hpage = hpage;
8328 if (headpage_already_acct(ctx, pages, i, hpage))
8329 continue;
8330 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8331 }
8332 }
8333
8334 if (!imu->acct_pages)
8335 return 0;
8336
8337 ret = io_account_mem(ctx, imu->acct_pages);
8338 if (ret)
8339 imu->acct_pages = 0;
8340 return ret;
8341}
8342
8343static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8344 struct io_mapped_ubuf **pimu,
8345 struct page **last_hpage)
8346{
8347 struct io_mapped_ubuf *imu = NULL;
8348 struct vm_area_struct **vmas = NULL;
8349 struct page **pages = NULL;
8350 unsigned long off, start, end, ubuf;
8351 size_t size;
8352 int ret, pret, nr_pages, i;
8353
8354 if (!iov->iov_base) {
8355 *pimu = ctx->dummy_ubuf;
8356 return 0;
8357 }
8358
8359 ubuf = (unsigned long) iov->iov_base;
8360 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8361 start = ubuf >> PAGE_SHIFT;
8362 nr_pages = end - start;
8363
8364 *pimu = NULL;
8365 ret = -ENOMEM;
8366
8367 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8368 if (!pages)
8369 goto done;
8370
8371 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8372 GFP_KERNEL);
8373 if (!vmas)
8374 goto done;
8375
8376 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
8377 if (!imu)
8378 goto done;
8379
8380 ret = 0;
8381 mmap_read_lock(current->mm);
8382 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8383 pages, vmas);
8384 if (pret == nr_pages) {
8385 /* don't support file backed memory */
8386 for (i = 0; i < nr_pages; i++) {
8387 struct vm_area_struct *vma = vmas[i];
8388
8389 if (vma_is_shmem(vma))
8390 continue;
8391 if (vma->vm_file &&
8392 !is_file_hugepages(vma->vm_file)) {
8393 ret = -EOPNOTSUPP;
8394 break;
8395 }
8396 }
8397 } else {
8398 ret = pret < 0 ? pret : -EFAULT;
8399 }
8400 mmap_read_unlock(current->mm);
8401 if (ret) {
8402 /*
8403 * if we did partial map, or found file backed vmas for
8404 * any of the segments, release any pages we did get
8405 */
8406 if (pret > 0)
8407 unpin_user_pages(pages, pret);
8408 goto done;
8409 }
8410
8411 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8412 if (ret) {
8413 unpin_user_pages(pages, pret);
8414 goto done;
8415 }
8416
8417 off = ubuf & ~PAGE_MASK;
8418 size = iov->iov_len;
8419 for (i = 0; i < nr_pages; i++) {
8420 size_t vec_len;
8421
8422 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8423 imu->bvec[i].bv_page = pages[i];
8424 imu->bvec[i].bv_len = vec_len;
8425 imu->bvec[i].bv_offset = off;
8426 off = 0;
8427 size -= vec_len;
8428 }
8429
8430 imu->ubuf = ubuf;
8431 imu->ubuf_end = ubuf + iov->iov_len;
8432 imu->nr_bvecs = nr_pages;
8433 *pimu = imu;
8434 ret = 0;
8435done:
8436 if (ret)
8437 kvfree(imu);
8438 kvfree(pages);
8439 kvfree(vmas);
8440 return ret;
8441}
8442
8443static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
8444{
8445 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
8446 return ctx->user_bufs ? 0 : -ENOMEM;
8447}
8448
8449static int io_buffer_validate(struct iovec *iov)
8450{
8451 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
8452
8453 /*
8454 * Don't impose further limits on the size and buffer
8455 * constraints here, we'll -EINVAL later when IO is
8456 * submitted if they are wrong.
8457 */
8458 if (!iov->iov_base)
8459 return iov->iov_len ? -EFAULT : 0;
8460 if (!iov->iov_len)
8461 return -EFAULT;
8462
8463 /* arbitrary limit, but we need something */
8464 if (iov->iov_len > SZ_1G)
8465 return -EFAULT;
8466
8467 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
8468 return -EOVERFLOW;
8469
8470 return 0;
8471}
8472
8473static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8474 unsigned int nr_args, u64 __user *tags)
8475{
8476 struct page *last_hpage = NULL;
8477 struct io_rsrc_data *data;
8478 int i, ret;
8479 struct iovec iov;
8480
8481 if (ctx->user_bufs)
8482 return -EBUSY;
8483 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
8484 return -EINVAL;
8485 ret = io_rsrc_node_switch_start(ctx);
8486 if (ret)
8487 return ret;
8488 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
8489 if (ret)
8490 return ret;
8491 ret = io_buffers_map_alloc(ctx, nr_args);
8492 if (ret) {
8493 io_rsrc_data_free(data);
8494 return ret;
8495 }
8496
8497 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
8498 ret = io_copy_iov(ctx, &iov, arg, i);
8499 if (ret)
8500 break;
8501 ret = io_buffer_validate(&iov);
8502 if (ret)
8503 break;
8504 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
8505 ret = -EINVAL;
8506 break;
8507 }
8508
8509 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
8510 &last_hpage);
8511 if (ret)
8512 break;
8513 }
8514
8515 WARN_ON_ONCE(ctx->buf_data);
8516
8517 ctx->buf_data = data;
8518 if (ret)
8519 __io_sqe_buffers_unregister(ctx);
8520 else
8521 io_rsrc_node_switch(ctx, NULL);
8522 return ret;
8523}
8524
8525static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
8526 struct io_uring_rsrc_update2 *up,
8527 unsigned int nr_args)
8528{
8529 u64 __user *tags = u64_to_user_ptr(up->tags);
8530 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
8531 struct page *last_hpage = NULL;
8532 bool needs_switch = false;
8533 __u32 done;
8534 int i, err;
8535
8536 if (!ctx->buf_data)
8537 return -ENXIO;
8538 if (up->offset + nr_args > ctx->nr_user_bufs)
8539 return -EINVAL;
8540
8541 for (done = 0; done < nr_args; done++) {
8542 struct io_mapped_ubuf *imu;
8543 int offset = up->offset + done;
8544 u64 tag = 0;
8545
8546 err = io_copy_iov(ctx, &iov, iovs, done);
8547 if (err)
8548 break;
8549 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
8550 err = -EFAULT;
8551 break;
8552 }
8553 err = io_buffer_validate(&iov);
8554 if (err)
8555 break;
8556 if (!iov.iov_base && tag) {
8557 err = -EINVAL;
8558 break;
8559 }
8560 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
8561 if (err)
8562 break;
8563
8564 i = array_index_nospec(offset, ctx->nr_user_bufs);
8565 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
8566 err = io_queue_rsrc_removal(ctx->buf_data, offset,
8567 ctx->rsrc_node, ctx->user_bufs[i]);
8568 if (unlikely(err)) {
8569 io_buffer_unmap(ctx, &imu);
8570 break;
8571 }
8572 ctx->user_bufs[i] = NULL;
8573 needs_switch = true;
8574 }
8575
8576 ctx->user_bufs[i] = imu;
8577 *io_get_tag_slot(ctx->buf_data, offset) = tag;
8578 }
8579
8580 if (needs_switch)
8581 io_rsrc_node_switch(ctx, ctx->buf_data);
8582 return done ? done : err;
8583}
8584
8585static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8586{
8587 __s32 __user *fds = arg;
8588 int fd;
8589
8590 if (ctx->cq_ev_fd)
8591 return -EBUSY;
8592
8593 if (copy_from_user(&fd, fds, sizeof(*fds)))
8594 return -EFAULT;
8595
8596 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8597 if (IS_ERR(ctx->cq_ev_fd)) {
8598 int ret = PTR_ERR(ctx->cq_ev_fd);
8599
8600 ctx->cq_ev_fd = NULL;
8601 return ret;
8602 }
8603
8604 return 0;
8605}
8606
8607static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8608{
8609 if (ctx->cq_ev_fd) {
8610 eventfd_ctx_put(ctx->cq_ev_fd);
8611 ctx->cq_ev_fd = NULL;
8612 return 0;
8613 }
8614
8615 return -ENXIO;
8616}
8617
8618static void io_destroy_buffers(struct io_ring_ctx *ctx)
8619{
8620 struct io_buffer *buf;
8621 unsigned long index;
8622
8623 xa_for_each(&ctx->io_buffers, index, buf)
8624 __io_remove_buffers(ctx, buf, index, -1U);
8625}
8626
8627static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
8628{
8629 struct io_kiocb *req, *nxt;
8630
8631 list_for_each_entry_safe(req, nxt, list, compl.list) {
8632 if (tsk && req->task != tsk)
8633 continue;
8634 list_del(&req->compl.list);
8635 kmem_cache_free(req_cachep, req);
8636 }
8637}
8638
8639static void io_req_caches_free(struct io_ring_ctx *ctx)
8640{
8641 struct io_submit_state *submit_state = &ctx->submit_state;
8642 struct io_comp_state *cs = &ctx->submit_state.comp;
8643
8644 mutex_lock(&ctx->uring_lock);
8645
8646 if (submit_state->free_reqs) {
8647 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8648 submit_state->reqs);
8649 submit_state->free_reqs = 0;
8650 }
8651
8652 io_flush_cached_locked_reqs(ctx, cs);
8653 io_req_cache_free(&cs->free_list, NULL);
8654 mutex_unlock(&ctx->uring_lock);
8655}
8656
8657static void io_wait_rsrc_data(struct io_rsrc_data *data)
8658{
8659 if (data && !atomic_dec_and_test(&data->refs))
8660 wait_for_completion(&data->done);
8661}
8662
8663static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8664{
8665 io_sq_thread_finish(ctx);
8666
8667 if (ctx->mm_account) {
8668 mmdrop(ctx->mm_account);
8669 ctx->mm_account = NULL;
8670 }
8671
8672 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
8673 io_wait_rsrc_data(ctx->buf_data);
8674 io_wait_rsrc_data(ctx->file_data);
8675
8676 mutex_lock(&ctx->uring_lock);
8677 if (ctx->buf_data)
8678 __io_sqe_buffers_unregister(ctx);
8679 if (ctx->file_data)
8680 __io_sqe_files_unregister(ctx);
8681 if (ctx->rings)
8682 __io_cqring_overflow_flush(ctx, true);
8683 mutex_unlock(&ctx->uring_lock);
8684 io_eventfd_unregister(ctx);
8685 io_destroy_buffers(ctx);
8686 if (ctx->sq_creds)
8687 put_cred(ctx->sq_creds);
8688
8689 /* there are no registered resources left, nobody uses it */
8690 if (ctx->rsrc_node)
8691 io_rsrc_node_destroy(ctx->rsrc_node);
8692 if (ctx->rsrc_backup_node)
8693 io_rsrc_node_destroy(ctx->rsrc_backup_node);
8694 flush_delayed_work(&ctx->rsrc_put_work);
8695
8696 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
8697 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
8698
8699#if defined(CONFIG_UNIX)
8700 if (ctx->ring_sock) {
8701 ctx->ring_sock->file = NULL;
8702 sock_release(ctx->ring_sock);
8703 }
8704#endif
8705
8706 io_mem_free(ctx->rings);
8707 io_mem_free(ctx->sq_sqes);
8708
8709 percpu_ref_exit(&ctx->refs);
8710 free_uid(ctx->user);
8711 io_req_caches_free(ctx);
8712 if (ctx->hash_map)
8713 io_wq_put_hash(ctx->hash_map);
8714 kfree(ctx->cancel_hash);
8715 kfree(ctx->dummy_ubuf);
8716 kfree(ctx);
8717}
8718
8719static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8720{
8721 struct io_ring_ctx *ctx = file->private_data;
8722 __poll_t mask = 0;
8723
8724 poll_wait(file, &ctx->poll_wait, wait);
8725 /*
8726 * synchronizes with barrier from wq_has_sleeper call in
8727 * io_commit_cqring
8728 */
8729 smp_rmb();
8730 if (!io_sqring_full(ctx))
8731 mask |= EPOLLOUT | EPOLLWRNORM;
8732
8733 /*
8734 * Don't flush cqring overflow list here, just do a simple check.
8735 * Otherwise there could possibly be an ABBA deadlock:
8736 *      CPU0                    CPU1
8737 *      ----                    ----
8738 * lock(&ctx->uring_lock);
8739 *                              lock(&ep->mtx);
8740 *                              lock(&ctx->uring_lock);
8741 * lock(&ep->mtx);
8742 *
8743 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
8744 * pushes them to do the flush.
8745 */
8746 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
8747 mask |= EPOLLIN | EPOLLRDNORM;
8748
8749 return mask;
8750}
8751
8752static int io_uring_fasync(int fd, struct file *file, int on)
8753{
8754 struct io_ring_ctx *ctx = file->private_data;
8755
8756 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8757}
8758
8759static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
8760{
8761 const struct cred *creds;
8762
8763 creds = xa_erase(&ctx->personalities, id);
8764 if (creds) {
8765 put_cred(creds);
8766 return 0;
8767 }
8768
8769 return -EINVAL;
8770}
8771
8772struct io_tctx_exit {
8773 struct callback_head task_work;
8774 struct completion completion;
8775 struct io_ring_ctx *ctx;
8776};
8777
8778static void io_tctx_exit_cb(struct callback_head *cb)
8779{
8780 struct io_uring_task *tctx = current->io_uring;
8781 struct io_tctx_exit *work;
8782
8783 work = container_of(cb, struct io_tctx_exit, task_work);
8784
8785 /* When @in_idle, we're in cancellation and it's racy to remove the
8786 * node; it'll be removed by the end of cancellation, so just ignore
8787 * it here. */
8788 if (!atomic_read(&tctx->in_idle))
8789 io_uring_del_tctx_node((unsigned long)work->ctx);
8790 complete(&work->completion);
8791}
8792
8793static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8794{
8795 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8796
8797 return req->ctx == data;
8798}
8799
8800static void io_ring_exit_work(struct work_struct *work)
8801{
8802 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
8803 unsigned long timeout = jiffies + HZ * 60 * 5;
8804 struct io_tctx_exit exit;
8805 struct io_tctx_node *node;
8806 int ret;
8807
8808 /*
8809 * If we're doing polled IO and end up having requests being
8810 * submitted async (out-of-line), then completions can come in while
8811 * we're waiting for refs to drop. We need to reap these manually,
8812 * as nobody else will be looking for them.
8813 */
8814 do {
8815 io_uring_try_cancel_requests(ctx, NULL, true);
8816 if (ctx->sq_data) {
8817 struct io_sq_data *sqd = ctx->sq_data;
8818 struct task_struct *tsk;
8819
8820 io_sq_thread_park(sqd);
8821 tsk = sqd->thread;
8822 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
8823 io_wq_cancel_cb(tsk->io_uring->io_wq,
8824 io_cancel_ctx_cb, ctx, true);
8825 io_sq_thread_unpark(sqd);
8826 }
8827
8828 WARN_ON_ONCE(time_after(jiffies, timeout));
8829 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
8830
8831 init_completion(&exit.completion);
8832 init_task_work(&exit.task_work, io_tctx_exit_cb);
8833 exit.ctx = ctx;
8834
8835 /*
8836 * Some may use context even when all refs and requests have been put,
8837 * and they are free to do so while still holding uring_lock or
8838 * completion_lock. This lock/unlock section also waits for them to finish.
8839 */
8840 mutex_lock(&ctx->uring_lock);
8841 while (!list_empty(&ctx->tctx_list)) {
8842 WARN_ON_ONCE(time_after(jiffies, timeout));
8843
8844 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8845 ctx_node);
8846
8847 list_rotate_left(&ctx->tctx_list);
8848 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8849 if (WARN_ON_ONCE(ret))
8850 continue;
8851 wake_up_process(node->task);
8852
8853 mutex_unlock(&ctx->uring_lock);
8854 wait_for_completion(&exit.completion);
8855 mutex_lock(&ctx->uring_lock);
8856 }
8857 mutex_unlock(&ctx->uring_lock);
8858 spin_lock_irq(&ctx->completion_lock);
8859 spin_unlock_irq(&ctx->completion_lock);
8860
8861 io_ring_ctx_free(ctx);
8862}
8863
8864/* Returns true if we found and killed one or more timeouts */
8865static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
8866 bool cancel_all)
8867{
8868 struct io_kiocb *req, *tmp;
8869 int canceled = 0;
8870
8871 spin_lock_irq(&ctx->completion_lock);
8872 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
8873 if (io_match_task(req, tsk, cancel_all)) {
8874 io_kill_timeout(req, -ECANCELED);
8875 canceled++;
8876 }
8877 }
8878 if (canceled != 0)
8879 io_commit_cqring(ctx);
8880 spin_unlock_irq(&ctx->completion_lock);
8881 if (canceled != 0)
8882 io_cqring_ev_posted(ctx);
8883 return canceled != 0;
8884}
8885
8886static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8887{
8888 unsigned long index;
8889 struct creds *creds;
8890
8891 mutex_lock(&ctx->uring_lock);
8892 percpu_ref_kill(&ctx->refs);
8893 if (ctx->rings)
8894 __io_cqring_overflow_flush(ctx, true);
8895 xa_for_each(&ctx->personalities, index, creds)
8896 io_unregister_personality(ctx, index);
8897 mutex_unlock(&ctx->uring_lock);
8898
8899 io_kill_timeouts(ctx, NULL, true);
8900 io_poll_remove_all(ctx, NULL, true);
8901
8902 /* if we failed setting up the ctx, we might not have any rings */
8903 io_iopoll_try_reap_events(ctx);
8904
8905 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
8906 /*
8907 * Use system_unbound_wq to avoid spawning tons of event kworkers
8908 * if we're exiting a ton of rings at the same time. It just adds
8909 * noise and overhead, there's no discernable change in runtime
8910 * over using system_wq.
8911 */
8912 queue_work(system_unbound_wq, &ctx->exit_work);
8913}
8914
8915static int io_uring_release(struct inode *inode, struct file *file)
8916{
8917 struct io_ring_ctx *ctx = file->private_data;
8918
8919 file->private_data = NULL;
8920 io_ring_ctx_wait_and_kill(ctx);
8921 return 0;
8922}
8923
8924struct io_task_cancel {
8925 struct task_struct *task;
8926 bool all;
8927};
8928
8929static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
8930{
8931 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8932 struct io_task_cancel *cancel = data;
8933 bool ret;
8934
8935 if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
8936 unsigned long flags;
8937 struct io_ring_ctx *ctx = req->ctx;
8938
8939 /* protect against races with linked timeouts */
8940 spin_lock_irqsave(&ctx->completion_lock, flags);
8941 ret = io_match_task(req, cancel->task, cancel->all);
8942 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8943 } else {
8944 ret = io_match_task(req, cancel->task, cancel->all);
8945 }
8946 return ret;
8947}
8948
8949static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
8950 struct task_struct *task, bool cancel_all)
8951{
8952 struct io_defer_entry *de;
8953 LIST_HEAD(list);
8954
8955 spin_lock_irq(&ctx->completion_lock);
8956 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
8957 if (io_match_task(de->req, task, cancel_all)) {
8958 list_cut_position(&list, &ctx->defer_list, &de->list);
8959 break;
8960 }
8961 }
8962 spin_unlock_irq(&ctx->completion_lock);
8963 if (list_empty(&list))
8964 return false;
8965
8966 while (!list_empty(&list)) {
8967 de = list_first_entry(&list, struct io_defer_entry, list);
8968 list_del_init(&de->list);
8969 io_req_complete_failed(de->req, -ECANCELED);
8970 kfree(de);
8971 }
8972 return true;
8973}
8974
8975static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8976{
8977 struct io_tctx_node *node;
8978 enum io_wq_cancel cret;
8979 bool ret = false;
8980
8981 mutex_lock(&ctx->uring_lock);
8982 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8983 struct io_uring_task *tctx = node->task->io_uring;
8984
8985 /*
8986 * io_wq will stay alive while we hold uring_lock, because it's
8987 * killed after ctx nodes, which requires to take the lock.
8988 */
8989 if (!tctx || !tctx->io_wq)
8990 continue;
8991 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8992 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8993 }
8994 mutex_unlock(&ctx->uring_lock);
8995
8996 return ret;
8997}
8998
8999static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
9000 struct task_struct *task,
9001 bool cancel_all)
9002{
9003 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
9004 struct io_uring_task *tctx = task ? task->io_uring : NULL;
9005
9006 while (1) {
9007 enum io_wq_cancel cret;
9008 bool ret = false;
9009
9010 if (!task) {
9011 ret |= io_uring_try_cancel_iowq(ctx);
9012 } else if (tctx && tctx->io_wq) {
9013 /*
9014 * Cancels requests of all rings, not only @ctx, but
9015 * it's fine as the task is in exit/exec.
9016 */
9017 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
9018 &cancel, true);
9019 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9020 }
9021
9022 /* SQPOLL thread does its own polling */
9023 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
9024 (ctx->sq_data && ctx->sq_data->thread == current)) {
9025 while (!list_empty_careful(&ctx->iopoll_list)) {
9026 io_iopoll_try_reap_events(ctx);
9027 ret = true;
9028 }
9029 }
9030
9031 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9032 ret |= io_poll_remove_all(ctx, task, cancel_all);
9033 ret |= io_kill_timeouts(ctx, task, cancel_all);
9034 if (task)
9035 ret |= io_run_task_work();
9036 if (!ret)
9037 break;
9038 cond_resched();
9039 }
9040}
9041
9042static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
9043{
9044 struct io_uring_task *tctx = current->io_uring;
9045 struct io_tctx_node *node;
9046 int ret;
9047
9048 if (unlikely(!tctx)) {
9049 ret = io_uring_alloc_task_context(current, ctx);
9050 if (unlikely(ret))
9051 return ret;
9052 tctx = current->io_uring;
9053 }
9054 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9055 node = kmalloc(sizeof(*node), GFP_KERNEL);
9056 if (!node)
9057 return -ENOMEM;
9058 node->ctx = ctx;
9059 node->task = current;
9060
9061 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9062 node, GFP_KERNEL));
9063 if (ret) {
9064 kfree(node);
9065 return ret;
9066 }
9067
9068 mutex_lock(&ctx->uring_lock);
9069 list_add(&node->ctx_node, &ctx->tctx_list);
9070 mutex_unlock(&ctx->uring_lock);
9071 }
9072 tctx->last = ctx;
9073 return 0;
9074}
9075
9076/*
9077 * Note that this task has used io_uring. We use it for cancelation purposes.
9078 */
9079static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
9080{
9081 struct io_uring_task *tctx = current->io_uring;
9082
9083 if (likely(tctx && tctx->last == ctx))
9084 return 0;
9085 return __io_uring_add_tctx_node(ctx);
9086}
9087
9088/*
9089 * Remove this io_uring 'file' from our task file table.
9090 */
9091static void io_uring_del_tctx_node(unsigned long index)
9092{
9093 struct io_uring_task *tctx = current->io_uring;
9094 struct io_tctx_node *node;
9095
9096 if (!tctx)
9097 return;
9098 node = xa_erase(&tctx->xa, index);
9099 if (!node)
9100 return;
9101
9102 WARN_ON_ONCE(current != node->task);
9103 WARN_ON_ONCE(list_empty(&node->ctx_node));
9104
9105 mutex_lock(&node->ctx->uring_lock);
9106 list_del(&node->ctx_node);
9107 mutex_unlock(&node->ctx->uring_lock);
9108
9109 if (tctx->last == node->ctx)
9110 tctx->last = NULL;
9111 kfree(node);
9112}
9113
9114static void io_uring_clean_tctx(struct io_uring_task *tctx)
9115{
9116 struct io_wq *wq = tctx->io_wq;
9117 struct io_tctx_node *node;
9118 unsigned long index;
9119
9120 xa_for_each(&tctx->xa, index, node)
9121 io_uring_del_tctx_node(index);
9122 if (wq) {
9123 /*
9124 * Must be after io_uring_del_tctx_node() (removes nodes under
9125 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9126 */
9127 tctx->io_wq = NULL;
9128 io_wq_put_and_exit(wq);
9129 }
9130}
9131
9132static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
9133{
9134 if (tracked)
9135 return atomic_read(&tctx->inflight_tracked);
9136 return percpu_counter_sum(&tctx->inflight);
9137}
9138
9139static void io_uring_drop_tctx_refs(struct task_struct *task)
9140{
9141 struct io_uring_task *tctx = task->io_uring;
9142 unsigned int refs = tctx->cached_refs;
9143
9144 tctx->cached_refs = 0;
9145 percpu_counter_sub(&tctx->inflight, refs);
9146 put_task_struct_many(task, refs);
9147}
9148
9149/*
9150 * Find any io_uring ctx that this task has registered or done IO on, and cancel
9151 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
9152 */
9153static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
9154{
9155 struct io_uring_task *tctx = current->io_uring;
9156 struct io_ring_ctx *ctx;
9157 s64 inflight;
9158 DEFINE_WAIT(wait);
9159
9160 WARN_ON_ONCE(sqd && sqd->thread != current);
9161
9162 if (!current->io_uring)
9163 return;
9164 if (tctx->io_wq)
9165 io_wq_exit_start(tctx->io_wq);
9166
9167 io_uring_drop_tctx_refs(current);
9168 atomic_inc(&tctx->in_idle);
9169 do {
9170 /* read completions before cancelations */
9171 inflight = tctx_inflight(tctx, !cancel_all);
9172 if (!inflight)
9173 break;
9174
9175 if (!sqd) {
9176 struct io_tctx_node *node;
9177 unsigned long index;
9178
9179 xa_for_each(&tctx->xa, index, node) {
9180 /* sqpoll task will cancel all its requests */
9181 if (node->ctx->sq_data)
9182 continue;
9183 io_uring_try_cancel_requests(node->ctx, current,
9184 cancel_all);
9185 }
9186 } else {
9187 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9188 io_uring_try_cancel_requests(ctx, current,
9189 cancel_all);
9190 }
9191
9192 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
9198 if (inflight == tctx_inflight(tctx, !cancel_all))
9199 schedule();
9200 finish_wait(&tctx->wait, &wait);
9201 } while (1);
9202 atomic_dec(&tctx->in_idle);
9203
9204 io_uring_clean_tctx(tctx);
9205 if (cancel_all) {
		/* all requests are gone, it's safe to free the task context */
9207 __io_uring_free(current);
9208 }
9209}
9210
9211void __io_uring_cancel(struct files_struct *files)
9212{
9213 io_uring_cancel_generic(!files, NULL);
9214}
9215
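/*
 * Map an mmap() offset (IORING_OFF_SQ_RING, IORING_OFF_CQ_RING or
 * IORING_OFF_SQES) to the kernel address of the backing ring memory,
 * rejecting requests larger than the underlying allocation.
 */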
9216static void *io_uring_validate_mmap_request(struct file *file,
9217 loff_t pgoff, size_t sz)
9218{
9219 struct io_ring_ctx *ctx = file->private_data;
9220 loff_t offset = pgoff << PAGE_SHIFT;
9221 struct page *page;
9222 void *ptr;
9223
9224 switch (offset) {
9225 case IORING_OFF_SQ_RING:
9226 case IORING_OFF_CQ_RING:
9227 ptr = ctx->rings;
9228 break;
9229 case IORING_OFF_SQES:
9230 ptr = ctx->sq_sqes;
9231 break;
9232 default:
9233 return ERR_PTR(-EINVAL);
9234 }
9235
9236 page = virt_to_head_page(ptr);
9237 if (sz > page_size(page))
9238 return ERR_PTR(-EINVAL);
9239
9240 return ptr;
9241}
9242
9243#ifdef CONFIG_MMU
9244
9245static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9246{
9247 size_t sz = vma->vm_end - vma->vm_start;
9248 unsigned long pfn;
9249 void *ptr;
9250
9251 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9252 if (IS_ERR(ptr))
9253 return PTR_ERR(ptr);
9254
9255 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9256 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9257}
9258
9259#else
9260
9261static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9262{
9263 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9264}
9265
9266static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9267{
9268 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9269}
9270
9271static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9272 unsigned long addr, unsigned long len,
9273 unsigned long pgoff, unsigned long flags)
9274{
9275 void *ptr;
9276
9277 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9278 if (IS_ERR(ptr))
9279 return PTR_ERR(ptr);
9280
9281 return (unsigned long) ptr;
9282}
9283
9284#endif
9285
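/*
 * IORING_ENTER_SQ_WAIT: block until the SQ ring has room for another
 * submission. A pending signal simply ends the wait; 0 is returned either way.
 */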
9286static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
9287{
9288 DEFINE_WAIT(wait);
9289
9290 do {
9291 if (!io_sqring_full(ctx))
9292 break;
9293 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9294
9295 if (!io_sqring_full(ctx))
9296 break;
9297 schedule();
9298 } while (!signal_pending(current));
9299
9300 finish_wait(&ctx->sqo_sq_wait, &wait);
9301 return 0;
9302}
9303
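/*
 * Decode the last two io_uring_enter() arguments into a sigmask and an
 * optional timeout, depending on whether IORING_ENTER_EXT_ARG is set.
 */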
9304static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9305 struct __kernel_timespec __user **ts,
9306 const sigset_t __user **sig)
9307{
9308 struct io_uring_getevents_arg arg;
9309
	/*
	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
	 * is just a pointer to the sigset_t.
	 */
9314 if (!(flags & IORING_ENTER_EXT_ARG)) {
9315 *sig = (const sigset_t __user *) argp;
9316 *ts = NULL;
9317 return 0;
9318 }
9319
	/*
	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
	 * timespec and sigset_t pointers if good.
	 */
9324 if (*argsz != sizeof(arg))
9325 return -EINVAL;
9326 if (copy_from_user(&arg, argp, sizeof(arg)))
9327 return -EFAULT;
9328 *sig = u64_to_user_ptr(arg.sigmask);
9329 *argsz = arg.sigmask_sz;
9330 *ts = u64_to_user_ptr(arg.ts);
9331 return 0;
9332}
9333
9334SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
9335 u32, min_complete, u32, flags, const void __user *, argp,
9336 size_t, argsz)
9337{
9338 struct io_ring_ctx *ctx;
9339 int submitted = 0;
9340 struct fd f;
9341 long ret;
9342
9343 io_run_task_work();
9344
9345 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9346 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
9347 return -EINVAL;
9348
9349 f = fdget(fd);
9350 if (unlikely(!f.file))
9351 return -EBADF;
9352
9353 ret = -EOPNOTSUPP;
9354 if (unlikely(f.file->f_op != &io_uring_fops))
9355 goto out_fput;
9356
9357 ret = -ENXIO;
9358 ctx = f.file->private_data;
9359 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
9360 goto out_fput;
9361
9362 ret = -EBADFD;
9363 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
9364 goto out;
9365
	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
9371 ret = 0;
9372 if (ctx->flags & IORING_SETUP_SQPOLL) {
9373 io_cqring_overflow_flush(ctx, false);
9374
9375 if (unlikely(ctx->sq_data->thread == NULL)) {
9376 ret = -EOWNERDEAD;
9377 goto out;
9378 }
9379 if (flags & IORING_ENTER_SQ_WAKEUP)
9380 wake_up(&ctx->sq_data->wait);
9381 if (flags & IORING_ENTER_SQ_WAIT) {
9382 ret = io_sqpoll_wait_sq(ctx);
9383 if (ret)
9384 goto out;
9385 }
9386 submitted = to_submit;
9387 } else if (to_submit) {
9388 ret = io_uring_add_tctx_node(ctx);
9389 if (unlikely(ret))
9390 goto out;
9391 mutex_lock(&ctx->uring_lock);
9392 submitted = io_submit_sqes(ctx, to_submit);
9393 mutex_unlock(&ctx->uring_lock);
9394
9395 if (submitted != to_submit)
9396 goto out;
9397 }
9398 if (flags & IORING_ENTER_GETEVENTS) {
9399 const sigset_t __user *sig;
9400 struct __kernel_timespec __user *ts;
9401
9402 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9403 if (unlikely(ret))
9404 goto out;
9405
9406 min_complete = min(min_complete, ctx->cq_entries);

		/*
		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
		 * space applications don't need to do io completion events
		 * polling again, they can rely on io_sq_thread to do polling
		 * work, which can reduce cpu usage and uring_lock contention.
		 */
9414 if (ctx->flags & IORING_SETUP_IOPOLL &&
9415 !(ctx->flags & IORING_SETUP_SQPOLL)) {
9416 ret = io_iopoll_check(ctx, min_complete);
9417 } else {
9418 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
9419 }
9420 }
9421
9422out:
9423 percpu_ref_put(&ctx->refs);
9424out_fput:
9425 fdput(f);
9426 return submitted ? submitted : ret;
9427}
9428
9429#ifdef CONFIG_PROC_FS
9430static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9431 const struct cred *cred)
9432{
9433 struct user_namespace *uns = seq_user_ns(m);
9434 struct group_info *gi;
9435 kernel_cap_t cap;
9436 unsigned __capi;
9437 int g;
9438
9439 seq_printf(m, "%5d\n", id);
9440 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9441 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9442 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9443 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9444 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9445 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9446 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9447 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9448 seq_puts(m, "\n\tGroups:\t");
9449 gi = cred->group_info;
9450 for (g = 0; g < gi->ngroups; g++) {
9451 seq_put_decimal_ull(m, g ? " " : "",
9452 from_kgid_munged(uns, gi->gid[g]));
9453 }
9454 seq_puts(m, "\n\tCapEff:\t");
9455 cap = cred->cap_effective;
9456 CAP_FOR_EACH_U32(__capi)
9457 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9458 seq_putc(m, '\n');
9459 return 0;
9460}
9461
9462static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9463{
9464 struct io_sq_data *sq = NULL;
9465 bool has_lock;
9466 int i;
9467
	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since the fdinfo case grabs them in the opposite direction of normal
	 * use cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
9474 has_lock = mutex_trylock(&ctx->uring_lock);
9475
9476 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
9477 sq = ctx->sq_data;
9478 if (!sq->thread)
9479 sq = NULL;
9480 }
9481
9482 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9483 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
9484 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
9485 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
9486 struct file *f = io_file_from_index(ctx, i);
9487
9488 if (f)
9489 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9490 else
9491 seq_printf(m, "%5u: <none>\n", i);
9492 }
9493 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
9494 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
9495 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
9496 unsigned int len = buf->ubuf_end - buf->ubuf;
9497
9498 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
9499 }
9500 if (has_lock && !xa_empty(&ctx->personalities)) {
9501 unsigned long index;
9502 const struct cred *cred;
9503
9504 seq_printf(m, "Personalities:\n");
9505 xa_for_each(&ctx->personalities, index, cred)
9506 io_uring_show_cred(m, index, cred);
9507 }
9508 seq_printf(m, "PollList:\n");
9509 spin_lock_irq(&ctx->completion_lock);
9510 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9511 struct hlist_head *list = &ctx->cancel_hash[i];
9512 struct io_kiocb *req;
9513
9514 hlist_for_each_entry(req, list, hash_node)
9515 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9516 req->task->task_works != NULL);
9517 }
9518 spin_unlock_irq(&ctx->completion_lock);
9519 if (has_lock)
9520 mutex_unlock(&ctx->uring_lock);
9521}
9522
9523static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9524{
9525 struct io_ring_ctx *ctx = f->private_data;
9526
9527 if (percpu_ref_tryget(&ctx->refs)) {
9528 __io_uring_show_fdinfo(ctx, m);
9529 percpu_ref_put(&ctx->refs);
9530 }
9531}
9532#endif
9533
9534static const struct file_operations io_uring_fops = {
9535 .release = io_uring_release,
9536 .mmap = io_uring_mmap,
9537#ifndef CONFIG_MMU
9538 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9539 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9540#endif
9541 .poll = io_uring_poll,
9542 .fasync = io_uring_fasync,
9543#ifdef CONFIG_PROC_FS
9544 .show_fdinfo = io_uring_show_fdinfo,
9545#endif
9546};
9547
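/*
 * Allocate the shared SQ/CQ ring memory and the SQE array for this ring,
 * and initialise the ring masks and sizes from the setup parameters.
 */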
9548static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9549 struct io_uring_params *p)
9550{
9551 struct io_rings *rings;
9552 size_t size, sq_array_offset;
9553
	/* these have already been validated and clamped by io_uring_create() */
9555 ctx->sq_entries = p->sq_entries;
9556 ctx->cq_entries = p->cq_entries;
9557
9558 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9559 if (size == SIZE_MAX)
9560 return -EOVERFLOW;
9561
9562 rings = io_mem_alloc(size);
9563 if (!rings)
9564 return -ENOMEM;
9565
9566 ctx->rings = rings;
9567 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9568 rings->sq_ring_mask = p->sq_entries - 1;
9569 rings->cq_ring_mask = p->cq_entries - 1;
9570 rings->sq_ring_entries = p->sq_entries;
9571 rings->cq_ring_entries = p->cq_entries;
9572
9573 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
9574 if (size == SIZE_MAX) {
9575 io_mem_free(ctx->rings);
9576 ctx->rings = NULL;
9577 return -EOVERFLOW;
9578 }
9579
9580 ctx->sq_sqes = io_mem_alloc(size);
9581 if (!ctx->sq_sqes) {
9582 io_mem_free(ctx->rings);
9583 ctx->rings = NULL;
9584 return -ENOMEM;
9585 }
9586
9587 return 0;
9588}
9589
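/*
 * Reserve a file descriptor and register the ring with the current task
 * before making the fd visible via fd_install().
 */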
9590static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9591{
9592 int ret, fd;
9593
9594 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9595 if (fd < 0)
9596 return fd;
9597
9598 ret = io_uring_add_tctx_node(ctx);
9599 if (ret) {
9600 put_unused_fd(fd);
9601 return ret;
9602 }
9603 fd_install(fd, file);
9604 return fd;
9605}
9606
/*
 * Allocate an anonymous file, which is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this file
 * to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * the file is tied to a socket for file garbage collection purposes.
 */
9613static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
9614{
9615 struct file *file;
9616#if defined(CONFIG_UNIX)
9617 int ret;
9618
9619 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9620 &ctx->ring_sock);
9621 if (ret)
9622 return ERR_PTR(ret);
9623#endif
9624
9625 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9626 O_RDWR | O_CLOEXEC);
9627#if defined(CONFIG_UNIX)
9628 if (IS_ERR(file)) {
9629 sock_release(ctx->ring_sock);
9630 ctx->ring_sock = NULL;
9631 } else {
9632 ctx->ring_sock->file = file;
9633 }
9634#endif
9635 return file;
9636}
9637
9638static int io_uring_create(unsigned entries, struct io_uring_params *p,
9639 struct io_uring_params __user *params)
9640{
9641 struct io_ring_ctx *ctx;
9642 struct file *file;
9643 int ret;
9644
9645 if (!entries)
9646 return -EINVAL;
9647 if (entries > IORING_MAX_ENTRIES) {
9648 if (!(p->flags & IORING_SETUP_CLAMP))
9649 return -EINVAL;
9650 entries = IORING_MAX_ENTRIES;
9651 }
9652
	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
9661 p->sq_entries = roundup_pow_of_two(entries);
9662 if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, round the requested CQ size
		 * up to a power of two as well, and reject it if it ends up
		 * smaller than the SQ ring.
		 */
9668 if (!p->cq_entries)
9669 return -EINVAL;
9670 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9671 if (!(p->flags & IORING_SETUP_CLAMP))
9672 return -EINVAL;
9673 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9674 }
9675 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9676 if (p->cq_entries < p->sq_entries)
9677 return -EINVAL;
9678 } else {
9679 p->cq_entries = 2 * p->sq_entries;
9680 }
9681
9682 ctx = io_ring_ctx_alloc(p);
9683 if (!ctx)
9684 return -ENOMEM;
9685 ctx->compat = in_compat_syscall();
9686 if (!capable(CAP_IPC_LOCK))
9687 ctx->user = get_uid(current_user());
9688
	/*
	 * The mm is grabbed purely for memory accounting. When a process
	 * exits, its mm is dropped before its files, so we need to hang on
	 * to it here to be able to unaccount locked/pinned memory when the
	 * ring goes away. It is not used for anything else.
	 */
9695 mmgrab(current->mm);
9696 ctx->mm_account = current->mm;
9697
9698 ret = io_allocate_scq_urings(ctx, p);
9699 if (ret)
9700 goto err;
9701
9702 ret = io_sq_offload_create(ctx, p);
9703 if (ret)
9704 goto err;
9705
9706 ret = io_rsrc_node_switch_start(ctx);
9707 if (ret)
9708 goto err;
9709 io_rsrc_node_switch(ctx, NULL);
9710
9711 memset(&p->sq_off, 0, sizeof(p->sq_off));
9712 p->sq_off.head = offsetof(struct io_rings, sq.head);
9713 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9714 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9715 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9716 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9717 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9718 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
9719
9720 memset(&p->cq_off, 0, sizeof(p->cq_off));
9721 p->cq_off.head = offsetof(struct io_rings, cq.head);
9722 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9723 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9724 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9725 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9726 p->cq_off.cqes = offsetof(struct io_rings, cqes);
9727 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
9728
9729 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9730 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
9731 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
9732 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9733 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
9734 IORING_FEAT_RSRC_TAGS;
9735
9736 if (copy_to_user(params, p, sizeof(*p))) {
9737 ret = -EFAULT;
9738 goto err;
9739 }
9740
9741 file = io_uring_get_file(ctx);
9742 if (IS_ERR(file)) {
9743 ret = PTR_ERR(file);
9744 goto err;
9745 }
9746
	/*
	 * Install the ring fd as the very last thing, so we don't risk
	 * someone having closed it before we finish setup.
	 */
9751 ret = io_uring_install_fd(ctx, file);
9752 if (ret < 0) {
		/* fput() of the file tears down the ctx */
9754 fput(file);
9755 return ret;
9756 }
9757
9758 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
9759 return ret;
9760err:
9761 io_ring_ctx_wait_and_kill(ctx);
9762 return ret;
9763}
9764
/*
 * Set up an io_uring context and return its fd. The application asks for a
 * ring size; we return the actual SQ/CQ ring sizes (among other things) in
 * the params structure passed in.
 */
9770static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9771{
9772 struct io_uring_params p;
9773 int i;
9774
9775 if (copy_from_user(&p, params, sizeof(p)))
9776 return -EFAULT;
9777 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9778 if (p.resv[i])
9779 return -EINVAL;
9780 }
9781
9782 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
9783 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
9784 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9785 IORING_SETUP_R_DISABLED))
9786 return -EINVAL;
9787
9788 return io_uring_create(entries, &p, params);
9789}
9790
9791SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9792 struct io_uring_params __user *, params)
9793{
9794 return io_uring_setup(entries, params);
9795}
9796
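/*
 * IORING_REGISTER_PROBE: report to userspace which opcodes this kernel
 * supports, up to @nr_args entries.
 */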
9797static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9798{
9799 struct io_uring_probe *p;
9800 size_t size;
9801 int i, ret;
9802
9803 size = struct_size(p, ops, nr_args);
9804 if (size == SIZE_MAX)
9805 return -EOVERFLOW;
9806 p = kzalloc(size, GFP_KERNEL);
9807 if (!p)
9808 return -ENOMEM;
9809
9810 ret = -EFAULT;
9811 if (copy_from_user(p, arg, size))
9812 goto out;
9813 ret = -EINVAL;
9814 if (memchr_inv(p, 0, size))
9815 goto out;
9816
9817 p->last_op = IORING_OP_LAST - 1;
9818 if (nr_args > IORING_OP_LAST)
9819 nr_args = IORING_OP_LAST;
9820
9821 for (i = 0; i < nr_args; i++) {
9822 p->ops[i].op = i;
9823 if (!io_op_defs[i].not_supported)
9824 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9825 }
9826 p->ops_len = i;
9827
9828 ret = 0;
9829 if (copy_to_user(arg, p, size))
9830 ret = -EFAULT;
9831out:
9832 kfree(p);
9833 return ret;
9834}
9835
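/*
 * IORING_REGISTER_PERSONALITY: stash the caller's credentials and hand back
 * an id that subsequent SQEs can reference via sqe->personality.
 */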
9836static int io_register_personality(struct io_ring_ctx *ctx)
9837{
9838 const struct cred *creds;
9839 u32 id;
9840 int ret;
9841
9842 creds = get_current_cred();
9843
9844 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9845 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9846 if (ret < 0) {
9847 put_cred(creds);
9848 return ret;
9849 }
9850 return id;
9851}
9852
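/*
 * IORING_REGISTER_RESTRICTIONS: limit which register opcodes, SQE opcodes
 * and SQE flags are accepted on this ring.
 */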
9853static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9854 unsigned int nr_args)
9855{
9856 struct io_uring_restriction *res;
9857 size_t size;
9858 int i, ret;
9859
	/* Restrictions are allowed only while the ring is still disabled */
9861 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9862 return -EBADFD;
9863
	/* Only a single restrictions registration is allowed */
9865 if (ctx->restrictions.registered)
9866 return -EBUSY;
9867
9868 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9869 return -EINVAL;
9870
9871 size = array_size(nr_args, sizeof(*res));
9872 if (size == SIZE_MAX)
9873 return -EOVERFLOW;
9874
9875 res = memdup_user(arg, size);
9876 if (IS_ERR(res))
9877 return PTR_ERR(res);
9878
9879 ret = 0;
9880
9881 for (i = 0; i < nr_args; i++) {
9882 switch (res[i].opcode) {
9883 case IORING_RESTRICTION_REGISTER_OP:
9884 if (res[i].register_op >= IORING_REGISTER_LAST) {
9885 ret = -EINVAL;
9886 goto out;
9887 }
9888
9889 __set_bit(res[i].register_op,
9890 ctx->restrictions.register_op);
9891 break;
9892 case IORING_RESTRICTION_SQE_OP:
9893 if (res[i].sqe_op >= IORING_OP_LAST) {
9894 ret = -EINVAL;
9895 goto out;
9896 }
9897
9898 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9899 break;
9900 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9901 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9902 break;
9903 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9904 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9905 break;
9906 default:
9907 ret = -EINVAL;
9908 goto out;
9909 }
9910 }
9911
9912out:
	/* Reset all restrictions if an error happened */
9914 if (ret != 0)
9915 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9916 else
9917 ctx->restrictions.registered = true;
9918
9919 kfree(res);
9920 return ret;
9921}
9922
9923static int io_register_enable_rings(struct io_ring_ctx *ctx)
9924{
9925 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9926 return -EBADFD;
9927
9928 if (ctx->restrictions.registered)
9929 ctx->restricted = 1;
9930
9931 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9932 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9933 wake_up(&ctx->sq_data->wait);
9934 return 0;
9935}
9936
9937static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
9938 struct io_uring_rsrc_update2 *up,
9939 unsigned nr_args)
9940{
9941 __u32 tmp;
9942 int err;
9943
9944 if (up->resv)
9945 return -EINVAL;
9946 if (check_add_overflow(up->offset, nr_args, &tmp))
9947 return -EOVERFLOW;
9948 err = io_rsrc_node_switch_start(ctx);
9949 if (err)
9950 return err;
9951
9952 switch (type) {
9953 case IORING_RSRC_FILE:
9954 return __io_sqe_files_update(ctx, up, nr_args);
9955 case IORING_RSRC_BUFFER:
9956 return __io_sqe_buffers_update(ctx, up, nr_args);
9957 }
9958 return -EINVAL;
9959}
9960
9961static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
9962 unsigned nr_args)
9963{
9964 struct io_uring_rsrc_update2 up;
9965
9966 if (!nr_args)
9967 return -EINVAL;
9968 memset(&up, 0, sizeof(up));
9969 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
9970 return -EFAULT;
9971 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
9972}
9973
9974static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
9975 unsigned size, unsigned type)
9976{
9977 struct io_uring_rsrc_update2 up;
9978
9979 if (size != sizeof(up))
9980 return -EINVAL;
9981 if (copy_from_user(&up, arg, sizeof(up)))
9982 return -EFAULT;
9983 if (!up.nr || up.resv)
9984 return -EINVAL;
9985 return __io_register_rsrc_update(ctx, type, &up, up.nr);
9986}
9987
9988static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
9989 unsigned int size, unsigned int type)
9990{
9991 struct io_uring_rsrc_register rr;
9992
	/* the size is passed in explicitly so the struct can be extended later */
9994 if (size != sizeof(rr))
9995 return -EINVAL;
9996
9997 memset(&rr, 0, sizeof(rr));
9998 if (copy_from_user(&rr, arg, size))
9999 return -EFAULT;
10000 if (!rr.nr || rr.resv || rr.resv2)
10001 return -EINVAL;
10002
10003 switch (type) {
10004 case IORING_RSRC_FILE:
10005 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
10006 rr.nr, u64_to_user_ptr(rr.tags));
10007 case IORING_RSRC_BUFFER:
10008 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
10009 rr.nr, u64_to_user_ptr(rr.tags));
10010 }
10011 return -EINVAL;
10012}
10013
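/*
 * IORING_REGISTER_IOWQ_AFF: copy in a cpumask from userspace and apply it
 * as the CPU affinity for this task's io-wq workers.
 */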
10014static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
10015 unsigned len)
10016{
10017 struct io_uring_task *tctx = current->io_uring;
10018 cpumask_var_t new_mask;
10019 int ret;
10020
10021 if (!tctx || !tctx->io_wq)
10022 return -EINVAL;
10023
10024 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
10025 return -ENOMEM;
10026
10027 cpumask_clear(new_mask);
10028 if (len > cpumask_size())
10029 len = cpumask_size();
10030
10031 if (copy_from_user(new_mask, arg, len)) {
10032 free_cpumask_var(new_mask);
10033 return -EFAULT;
10034 }
10035
10036 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
10037 free_cpumask_var(new_mask);
10038 return ret;
10039}
10040
10041static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
10042{
10043 struct io_uring_task *tctx = current->io_uring;
10044
10045 if (!tctx || !tctx->io_wq)
10046 return -EINVAL;
10047
10048 return io_wq_cpu_affinity(tctx->io_wq, NULL);
10049}
10050
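/*
 * Return true if the register opcode needs the ctx quiesced (percpu refs
 * killed and drained) before it may run; the opcodes listed here are safe
 * to execute without that.
 */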
10051static bool io_register_op_must_quiesce(int op)
10052{
10053 switch (op) {
10054 case IORING_REGISTER_BUFFERS:
10055 case IORING_UNREGISTER_BUFFERS:
10056 case IORING_REGISTER_FILES:
10057 case IORING_UNREGISTER_FILES:
10058 case IORING_REGISTER_FILES_UPDATE:
10059 case IORING_REGISTER_PROBE:
10060 case IORING_REGISTER_PERSONALITY:
10061 case IORING_UNREGISTER_PERSONALITY:
10062 case IORING_REGISTER_FILES2:
10063 case IORING_REGISTER_FILES_UPDATE2:
10064 case IORING_REGISTER_BUFFERS2:
10065 case IORING_REGISTER_BUFFERS_UPDATE:
10066 case IORING_REGISTER_IOWQ_AFF:
10067 case IORING_UNREGISTER_IOWQ_AFF:
10068 return false;
10069 default:
10070 return true;
10071 }
10072}
10073
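/*
 * Dispatch a single io_uring_register() opcode. Called with uring_lock held;
 * the quiescing path temporarily drops and re-acquires it.
 */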
10074static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10075 void __user *arg, unsigned nr_args)
10076 __releases(ctx->uring_lock)
10077 __acquires(ctx->uring_lock)
10078{
10079 int ret;
10080
	/*
	 * We're inside the ring mutex: if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
10086 if (percpu_ref_is_dying(&ctx->refs))
10087 return -ENXIO;
10088
10089 if (ctx->restricted) {
10090 if (opcode >= IORING_REGISTER_LAST)
10091 return -EINVAL;
10092 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10093 if (!test_bit(opcode, ctx->restrictions.register_op))
10094 return -EACCES;
10095 }
10096
10097 if (io_register_op_must_quiesce(opcode)) {
10098 percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
10108 mutex_unlock(&ctx->uring_lock);
10109 do {
10110 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10111 if (!ret)
10112 break;
10113 ret = io_run_task_work_sig();
10114 if (ret < 0)
10115 break;
10116 } while (1);
10117 mutex_lock(&ctx->uring_lock);
10118
10119 if (ret) {
10120 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10121 return ret;
10122 }
10123 }
10124
10125 switch (opcode) {
10126 case IORING_REGISTER_BUFFERS:
10127 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
10128 break;
10129 case IORING_UNREGISTER_BUFFERS:
10130 ret = -EINVAL;
10131 if (arg || nr_args)
10132 break;
10133 ret = io_sqe_buffers_unregister(ctx);
10134 break;
10135 case IORING_REGISTER_FILES:
10136 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
10137 break;
10138 case IORING_UNREGISTER_FILES:
10139 ret = -EINVAL;
10140 if (arg || nr_args)
10141 break;
10142 ret = io_sqe_files_unregister(ctx);
10143 break;
10144 case IORING_REGISTER_FILES_UPDATE:
10145 ret = io_register_files_update(ctx, arg, nr_args);
10146 break;
10147 case IORING_REGISTER_EVENTFD:
10148 case IORING_REGISTER_EVENTFD_ASYNC:
10149 ret = -EINVAL;
10150 if (nr_args != 1)
10151 break;
10152 ret = io_eventfd_register(ctx, arg);
10153 if (ret)
10154 break;
10155 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10156 ctx->eventfd_async = 1;
10157 else
10158 ctx->eventfd_async = 0;
10159 break;
10160 case IORING_UNREGISTER_EVENTFD:
10161 ret = -EINVAL;
10162 if (arg || nr_args)
10163 break;
10164 ret = io_eventfd_unregister(ctx);
10165 break;
10166 case IORING_REGISTER_PROBE:
10167 ret = -EINVAL;
10168 if (!arg || nr_args > 256)
10169 break;
10170 ret = io_probe(ctx, arg, nr_args);
10171 break;
10172 case IORING_REGISTER_PERSONALITY:
10173 ret = -EINVAL;
10174 if (arg || nr_args)
10175 break;
10176 ret = io_register_personality(ctx);
10177 break;
10178 case IORING_UNREGISTER_PERSONALITY:
10179 ret = -EINVAL;
10180 if (arg)
10181 break;
10182 ret = io_unregister_personality(ctx, nr_args);
10183 break;
10184 case IORING_REGISTER_ENABLE_RINGS:
10185 ret = -EINVAL;
10186 if (arg || nr_args)
10187 break;
10188 ret = io_register_enable_rings(ctx);
10189 break;
10190 case IORING_REGISTER_RESTRICTIONS:
10191 ret = io_register_restrictions(ctx, arg, nr_args);
10192 break;
10193 case IORING_REGISTER_FILES2:
10194 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
10195 break;
10196 case IORING_REGISTER_FILES_UPDATE2:
10197 ret = io_register_rsrc_update(ctx, arg, nr_args,
10198 IORING_RSRC_FILE);
10199 break;
10200 case IORING_REGISTER_BUFFERS2:
10201 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
10202 break;
10203 case IORING_REGISTER_BUFFERS_UPDATE:
10204 ret = io_register_rsrc_update(ctx, arg, nr_args,
10205 IORING_RSRC_BUFFER);
10206 break;
10207 case IORING_REGISTER_IOWQ_AFF:
10208 ret = -EINVAL;
10209 if (!arg || !nr_args)
10210 break;
10211 ret = io_register_iowq_aff(ctx, arg, nr_args);
10212 break;
10213 case IORING_UNREGISTER_IOWQ_AFF:
10214 ret = -EINVAL;
10215 if (arg || nr_args)
10216 break;
10217 ret = io_unregister_iowq_aff(ctx);
10218 break;
10219 default:
10220 ret = -EINVAL;
10221 break;
10222 }
10223
10224 if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
10226 percpu_ref_reinit(&ctx->refs);
10227 reinit_completion(&ctx->ref_comp);
10228 }
10229 return ret;
10230}
10231
10232SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10233 void __user *, arg, unsigned int, nr_args)
10234{
10235 struct io_ring_ctx *ctx;
10236 long ret = -EBADF;
10237 struct fd f;
10238
10239 f = fdget(fd);
10240 if (!f.file)
10241 return -EBADF;
10242
10243 ret = -EOPNOTSUPP;
10244 if (f.file->f_op != &io_uring_fops)
10245 goto out_fput;
10246
10247 ctx = f.file->private_data;
10248
10249 io_run_task_work();
10250
10251 mutex_lock(&ctx->uring_lock);
10252 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10253 mutex_unlock(&ctx->uring_lock);
10254 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10255 ctx->cq_ev_fd != NULL, ret);
10256out_fput:
10257 fdput(f);
10258 return ret;
10259}
10260
10261static int __init io_uring_init(void)
10262{
10263#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10264 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10265 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10266} while (0)
10267
10268#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10269 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10270 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10271 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10272 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10273 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10274 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10275 BUILD_BUG_SQE_ELEM(8, __u64, off);
10276 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10277 BUILD_BUG_SQE_ELEM(16, __u64, addr);
10278 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
10279 BUILD_BUG_SQE_ELEM(24, __u32, len);
10280 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10281 BUILD_BUG_SQE_ELEM(28, int, rw_flags);
10282 BUILD_BUG_SQE_ELEM(28, __u32, rw_flags);
10283 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
10284 BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
10285 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
10286 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10287 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10288 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10289 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10290 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10291 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10292 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10293 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
10294 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
10295 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10296 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
10297 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
10298 BUILD_BUG_SQE_ELEM(42, __u16, personality);
10299 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
10300
10301 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
10302 sizeof(struct io_uring_rsrc_update));
10303 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
10304 sizeof(struct io_uring_rsrc_update2));
10305
10306 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
10307
10308 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
10309 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
10310
10311 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10312 SLAB_ACCOUNT);
10313 return 0;
10314};
10315__initcall(io_uring_init);
10316