linux/fs/io_uring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs an smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between the two.
 *
 * Also see the examples in the liburing library:
 *
 *      git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * on data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
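
/*
 * As a purely illustrative sketch of the pairing described above (the
 * sq/cq field names here are hypothetical; real applications should use
 * liburing), a userspace submitter and completer look roughly like:
 *
 *	// submit: make sure the consumer has made room (the acquire pairs
 *	// with the kernel's store-release of the SQ head) ...
 *	head = smp_load_acquire(sq->khead);
 *	if (sq->sqe_tail - head < *sq->kring_entries) {
 *		sqes[sq->sqe_tail & *sq->kring_mask] = *sqe;
 *		// ... and publish the entry before the new tail
 *		smp_store_release(sq->ktail, ++sq->sqe_tail);
 *	}
 *
 *	// complete: acquire the CQ tail, read the entry, then release
 *	// the new head back to the kernel
 *	tail = smp_load_acquire(cq->ktail);
 *	if (*cq->khead != tail) {
 *		cqe = &cq->cqes[*cq->khead & *cq->kring_mask];
 *		smp_store_release(cq->khead, *cq->khead + 1);
 *	}
 */
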
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES      32768
#define IORING_MAX_CQ_ENTRIES   (2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs:
 * 512 * sizeof(struct file *) = 512 * 8 = 4096 bytes.
 */
#define IORING_FILE_TABLE_SHIFT 9
#define IORING_MAX_FILES_TABLE  (1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK  (IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES  (64 * IORING_MAX_FILES_TABLE)

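/*
 * Head and tail are kept on separate cache lines: the side updating one
 * index must not cause false sharing with the side polling the other.
 */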
struct io_uring {
        u32 head ____cacheline_aligned_in_smp;
        u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
        /*
         * Head and tail offsets into the ring; the offsets need to be
         * masked to get valid indices.
         *
         * The kernel controls the head of the sq ring and the tail of the
         * cq ring, and the application controls the tail of the sq ring
         * and the head of the cq ring.
         */
        struct io_uring         sq, cq;
        /*
         * Bitmasks to apply to head and tail offsets (constant, equals
         * ring_entries - 1)
         */
        u32                     sq_ring_mask, cq_ring_mask;
        /* Ring sizes (constant, power of 2) */
        u32                     sq_ring_entries, cq_ring_entries;
        /*
         * Number of invalid entries dropped by the kernel due to an
         * invalid index stored in the sq array.
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get the number of "new events" by comparing
         * to a cached value).
         *
         * After a new SQ head value has been read by the application,
         * this counter includes all submissions that were dropped before
         * reaching the new SQ head (and possibly more).
         */
        u32                     sq_dropped;
        /*
         * Runtime SQ flags
         *
         * Written by the kernel, shouldn't be modified by the
         * application.
         *
         * The application needs a full memory barrier before checking
         * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
         */
        u32                     sq_flags;
        /*
         * Runtime CQ flags
         *
         * Written by the application, shouldn't be modified by the
         * kernel.
         */
        u32                     cq_flags;
        /*
         * Number of completion events lost because the queue was full;
         * this should be avoided by the application by making sure
         * there are never more requests pending than there is space in
         * the completion queue.
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get the number of "new events" by comparing
         * to a cached value).
         *
         * As completion events come in out of order, this counter is not
         * ordered with any other data.
         */
        u32                     cq_overflow;
        /*
         * Ring buffer of completion events.
         *
         * The kernel writes completion events fresh every time they are
         * produced, so the application is allowed to modify pending
         * entries.
         */
        struct io_uring_cqe     cqes[] ____cacheline_aligned_in_smp;
};
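
/*
 * A minimal sketch of the userspace side, assuming a hypothetical
 * "sq_ptr" obtained by mmap()ing the ring fd at IORING_OFF_SQ_RING and
 * a struct io_sqring_offsets "off" filled in by io_uring_setup() (real
 * applications should use liburing):
 *
 *	khead      = sq_ptr + off.head;
 *	ktail      = sq_ptr + off.tail;
 *	kring_mask = sq_ptr + off.ring_mask;
 *	array      = sq_ptr + off.array;
 *	// a ring slot is always addressed as (index & *kring_mask)
 *	array[*ktail & *kring_mask] = sqe_index;
 */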

struct io_mapped_ubuf {
        u64             ubuf;
        size_t          len;
        struct bio_vec  *bvec;
        unsigned int    nr_bvecs;
};

struct fixed_file_table {
        struct file             **files;
};

struct fixed_file_ref_node {
        struct percpu_ref               refs;
        struct list_head                node;
        struct list_head                file_list;
        struct fixed_file_data          *file_data;
        struct llist_node               llist;
};

struct fixed_file_data {
        struct fixed_file_table         *table;
        struct io_ring_ctx              *ctx;

        struct percpu_ref               *cur_refs;
        struct percpu_ref               refs;
        struct completion               done;
        struct list_head                ref_list;
        spinlock_t                      lock;
};

struct io_buffer {
        struct list_head list;
        __u64 addr;
        __s32 len;
        __u16 bid;
};

struct io_ring_ctx {
        struct {
                struct percpu_ref       refs;
        } ____cacheline_aligned_in_smp;

        struct {
                unsigned int            flags;
                unsigned int            compat: 1;
                unsigned int            account_mem: 1;
                unsigned int            cq_overflow_flushed: 1;
                unsigned int            drain_next: 1;
                unsigned int            eventfd_async: 1;

                /*
                 * Ring buffer of indices into array of io_uring_sqe, which is
                 * mmapped by the application using the IORING_OFF_SQES offset.
                 *
                 * This indirection could e.g. be used to assign fixed
                 * io_uring_sqe entries to operations and only submit them to
                 * the queue when needed.
                 *
                 * The kernel modifies neither the indices array nor the entries
                 * array.
                 */
                u32                     *sq_array;
                unsigned                cached_sq_head;
                unsigned                sq_entries;
                unsigned                sq_mask;
                unsigned                sq_thread_idle;
                unsigned                cached_sq_dropped;
                atomic_t                cached_cq_overflow;
                unsigned long           sq_check_overflow;

                struct list_head        defer_list;
                struct list_head        timeout_list;
                struct list_head        cq_overflow_list;

                wait_queue_head_t       inflight_wait;
                struct io_uring_sqe     *sq_sqes;
        } ____cacheline_aligned_in_smp;

        struct io_rings *rings;

        /* IO offload */
        struct io_wq            *io_wq;
        struct task_struct      *sqo_thread;    /* if using sq thread polling */
        struct mm_struct        *sqo_mm;
        wait_queue_head_t       sqo_wait;

        /*
         * If used, fixed file set. Writers must ensure that ->refs is dead,
         * readers must ensure that ->refs is alive as long as the file* is
         * used. Only updated through io_uring_register(2).
         */
        struct fixed_file_data  *file_data;
        unsigned                nr_user_files;
        int                     ring_fd;
        struct file             *ring_file;

        /* if used, fixed mapped user buffers */
        unsigned                nr_user_bufs;
        struct io_mapped_ubuf   *user_bufs;

        struct user_struct      *user;

        const struct cred       *creds;

        struct completion       ref_comp;
        struct completion       sq_thread_comp;

        /* if all else fails... */
        struct io_kiocb         *fallback_req;

#if defined(CONFIG_UNIX)
        struct socket           *ring_sock;
#endif

        struct idr              io_buffer_idr;

        struct idr              personality_idr;

        struct {
                unsigned                cached_cq_tail;
                unsigned                cq_entries;
                unsigned                cq_mask;
                atomic_t                cq_timeouts;
                unsigned long           cq_check_overflow;
                struct wait_queue_head  cq_wait;
                struct fasync_struct    *cq_fasync;
                struct eventfd_ctx      *cq_ev_fd;
        } ____cacheline_aligned_in_smp;

        struct {
                struct mutex            uring_lock;
                wait_queue_head_t       wait;
        } ____cacheline_aligned_in_smp;

        struct {
                spinlock_t              completion_lock;

                /*
                 * ->poll_list is protected by the ctx->uring_lock for
                 * io_uring instances that don't use IORING_SETUP_SQPOLL.
                 * For SQPOLL, only the single threaded io_sq_thread() will
                 * manipulate the list, hence no extra locking is needed there.
                 */
                struct list_head        poll_list;
                struct hlist_head       *cancel_hash;
                unsigned                cancel_hash_bits;
                bool                    poll_multi_file;

                spinlock_t              inflight_lock;
                struct list_head        inflight_list;
        } ____cacheline_aligned_in_smp;

        struct delayed_work             file_put_work;
        struct llist_head               file_put_llist;

        struct work_struct              exit_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
        struct file                     *file;
        union {
                struct wait_queue_head  *head;
                u64                     addr;
        };
        __poll_t                        events;
        bool                            done;
        bool                            canceled;
        struct wait_queue_entry         wait;
};

struct io_close {
        struct file                     *file;
        struct file                     *put_file;
        int                             fd;
};

struct io_timeout_data {
        struct io_kiocb                 *req;
        struct hrtimer                  timer;
        struct timespec64               ts;
        enum hrtimer_mode               mode;
};

struct io_accept {
        struct file                     *file;
        struct sockaddr __user          *addr;
        int __user                      *addr_len;
        int                             flags;
        unsigned long                   nofile;
};

struct io_sync {
        struct file                     *file;
        loff_t                          len;
        loff_t                          off;
        int                             flags;
        int                             mode;
};

struct io_cancel {
        struct file                     *file;
        u64                             addr;
};

struct io_timeout {
        struct file                     *file;
        u64                             addr;
        int                             flags;
        u32                             off;
        u32                             target_seq;
};

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
        u64                             len;
};

struct io_connect {
        struct file                     *file;
        struct sockaddr __user          *addr;
        int                             addr_len;
};

struct io_sr_msg {
        struct file                     *file;
        union {
                struct user_msghdr __user *msg;
                void __user             *buf;
        };
        int                             msg_flags;
        int                             bgid;
        size_t                          len;
        struct io_buffer                *kbuf;
};

struct io_open {
        struct file                     *file;
        int                             dfd;
        struct filename                 *filename;
        struct open_how                 how;
        unsigned long                   nofile;
};

struct io_files_update {
        struct file                     *file;
        u64                             arg;
        u32                             nr_args;
        u32                             offset;
};

struct io_fadvise {
        struct file                     *file;
        u64                             offset;
        u32                             len;
        u32                             advice;
};

struct io_madvise {
        struct file                     *file;
        u64                             addr;
        u32                             len;
        u32                             advice;
};

struct io_epoll {
        struct file                     *file;
        int                             epfd;
        int                             op;
        int                             fd;
        struct epoll_event              event;
};

struct io_splice {
        struct file                     *file_out;
        struct file                     *file_in;
        loff_t                          off_out;
        loff_t                          off_in;
        u64                             len;
        unsigned int                    flags;
};

struct io_provide_buf {
        struct file                     *file;
        __u64                           addr;
        __s32                           len;
        __u32                           bgid;
        __u16                           nbufs;
        __u16                           bid;
};

struct io_statx {
        struct file                     *file;
        int                             dfd;
        unsigned int                    mask;
        unsigned int                    flags;
        const char __user               *filename;
        struct statx __user             *buffer;
};

struct io_async_connect {
        struct sockaddr_storage         address;
};

struct io_async_msghdr {
        struct iovec                    fast_iov[UIO_FASTIOV];
        struct iovec                    *iov;
        struct sockaddr __user          *uaddr;
        struct msghdr                   msg;
        struct sockaddr_storage         addr;
};

struct io_async_rw {
        struct iovec                    fast_iov[UIO_FASTIOV];
        struct iovec                    *iov;
        ssize_t                         nr_segs;
        ssize_t                         size;
};

struct io_async_ctx {
        union {
                struct io_async_rw      rw;
                struct io_async_msghdr  msg;
                struct io_async_connect connect;
                struct io_timeout_data  timeout;
        };
};

enum {
        REQ_F_FIXED_FILE_BIT    = IOSQE_FIXED_FILE_BIT,
        REQ_F_IO_DRAIN_BIT      = IOSQE_IO_DRAIN_BIT,
        REQ_F_LINK_BIT          = IOSQE_IO_LINK_BIT,
        REQ_F_HARDLINK_BIT      = IOSQE_IO_HARDLINK_BIT,
        REQ_F_FORCE_ASYNC_BIT   = IOSQE_ASYNC_BIT,
        REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,

        REQ_F_LINK_HEAD_BIT,
        REQ_F_LINK_NEXT_BIT,
        REQ_F_FAIL_LINK_BIT,
        REQ_F_INFLIGHT_BIT,
        REQ_F_CUR_POS_BIT,
        REQ_F_NOWAIT_BIT,
        REQ_F_LINK_TIMEOUT_BIT,
        REQ_F_TIMEOUT_BIT,
        REQ_F_ISREG_BIT,
        REQ_F_MUST_PUNT_BIT,
        REQ_F_TIMEOUT_NOSEQ_BIT,
        REQ_F_COMP_LOCKED_BIT,
        REQ_F_NEED_CLEANUP_BIT,
        REQ_F_OVERFLOW_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_QUEUE_TIMEOUT_BIT,
        REQ_F_WORK_INITIALIZED_BIT,
        REQ_F_TASK_PINNED_BIT,

        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
};

enum {
        /* ctx owns file */
        REQ_F_FIXED_FILE        = BIT(REQ_F_FIXED_FILE_BIT),
        /* drain existing IO first */
        REQ_F_IO_DRAIN          = BIT(REQ_F_IO_DRAIN_BIT),
        /* linked sqes */
        REQ_F_LINK              = BIT(REQ_F_LINK_BIT),
        /* doesn't sever on completion < 0 */
        REQ_F_HARDLINK          = BIT(REQ_F_HARDLINK_BIT),
        /* IOSQE_ASYNC */
        REQ_F_FORCE_ASYNC       = BIT(REQ_F_FORCE_ASYNC_BIT),
        /* IOSQE_BUFFER_SELECT */
        REQ_F_BUFFER_SELECT     = BIT(REQ_F_BUFFER_SELECT_BIT),

        /* head of a link */
        REQ_F_LINK_HEAD         = BIT(REQ_F_LINK_HEAD_BIT),
        /* already grabbed next link */
        REQ_F_LINK_NEXT         = BIT(REQ_F_LINK_NEXT_BIT),
        /* fail rest of links */
        REQ_F_FAIL_LINK         = BIT(REQ_F_FAIL_LINK_BIT),
        /* on inflight list */
        REQ_F_INFLIGHT          = BIT(REQ_F_INFLIGHT_BIT),
        /* read/write uses file position */
        REQ_F_CUR_POS           = BIT(REQ_F_CUR_POS_BIT),
        /* must not punt to workers */
        REQ_F_NOWAIT            = BIT(REQ_F_NOWAIT_BIT),
        /* has linked timeout */
        REQ_F_LINK_TIMEOUT      = BIT(REQ_F_LINK_TIMEOUT_BIT),
        /* timeout request */
        REQ_F_TIMEOUT           = BIT(REQ_F_TIMEOUT_BIT),
        /* regular file */
        REQ_F_ISREG             = BIT(REQ_F_ISREG_BIT),
        /* must be punted even for NONBLOCK */
        REQ_F_MUST_PUNT         = BIT(REQ_F_MUST_PUNT_BIT),
        /* no timeout sequence */
        REQ_F_TIMEOUT_NOSEQ     = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
        /* completion under lock */
        REQ_F_COMP_LOCKED       = BIT(REQ_F_COMP_LOCKED_BIT),
        /* needs cleanup */
        REQ_F_NEED_CLEANUP      = BIT(REQ_F_NEED_CLEANUP_BIT),
        /* in overflow list */
        REQ_F_OVERFLOW          = BIT(REQ_F_OVERFLOW_BIT),
        /* already went through poll handler */
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
        /* doesn't need file table for this request */
        REQ_F_NO_FILE_TABLE     = BIT(REQ_F_NO_FILE_TABLE_BIT),
        /* needs to queue linked timeout */
        REQ_F_QUEUE_TIMEOUT     = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
        /* io_wq_work is initialized */
        REQ_F_WORK_INITIALIZED  = BIT(REQ_F_WORK_INITIALIZED_BIT),
        /* req->task is refcounted */
        REQ_F_TASK_PINNED       = BIT(REQ_F_TASK_PINNED_BIT),
};

struct async_poll {
        struct io_poll_iocb     poll;
        struct io_poll_iocb     *double_poll;
        struct io_wq_work       work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'file' in this struct.
 */
struct io_kiocb {
        union {
                struct file             *file;
                struct io_rw            rw;
                struct io_poll_iocb     poll;
                struct io_accept        accept;
                struct io_sync          sync;
                struct io_cancel        cancel;
                struct io_timeout       timeout;
                struct io_connect       connect;
                struct io_sr_msg        sr_msg;
                struct io_open          open;
                struct io_close         close;
                struct io_files_update  files_update;
                struct io_fadvise       fadvise;
                struct io_madvise       madvise;
                struct io_epoll         epoll;
                struct io_splice        splice;
                struct io_provide_buf   pbuf;
                struct io_statx         statx;
        };

        struct io_async_ctx             *io;
        int                             cflags;
        u8                              opcode;
        /* polled IO has completed */
        u8                              iopoll_completed;

        u16                             buf_index;

        struct io_ring_ctx      *ctx;
        struct list_head        list;
        unsigned int            flags;
        refcount_t              refs;
        struct task_struct      *task;
        unsigned long           fsize;
        u64                     user_data;
        u32                     result;
        u32                     sequence;

        struct list_head        link_list;

        struct list_head        inflight_entry;

        struct percpu_ref       *fixed_file_refs;

        union {
                /*
                 * Only commands that never go async can use the below fields,
                 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
                 * async armed poll handlers for regular commands. The latter
                 * restore the work, if needed.
                 */
                struct {
                        struct callback_head    task_work;
                        struct hlist_node       hash_node;
                        struct async_poll       *apoll;
                };
                struct io_wq_work       work;
        };
};
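
/*
 * Since the file pointer is the first member of every union arm above,
 * the following (purely illustrative) accesses all name the same memory:
 *
 *	req->file == req->rw.kiocb.ki_filp == req->poll.file
 */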

#define IO_PLUG_THRESHOLD               2
#define IO_IOPOLL_BATCH                 8

struct io_submit_state {
        struct blk_plug         plug;

        /*
         * io_kiocb alloc cache
         */
        void                    *reqs[IO_IOPOLL_BATCH];
        unsigned int            free_reqs;

        /*
         * File reference cache
         */
        struct file             *file;
        unsigned int            fd;
        unsigned int            has_refs;
        unsigned int            used_refs;
        unsigned int            ios_left;
};

struct io_op_def {
        /* needs req->io allocated for deferral/async */
        unsigned                async_ctx : 1;
        /* needs current->mm setup, does mm access */
        unsigned                needs_mm : 1;
        /* needs req->file assigned */
        unsigned                needs_file : 1;
        /* don't fail if file grab fails */
        unsigned                needs_file_no_error : 1;
        /* hash wq insertion if file is a regular file */
        unsigned                hash_reg_file : 1;
        /* unbound wq insertion if file is a non-regular file */
        unsigned                unbound_nonreg_file : 1;
        /* opcode is not supported by this kernel */
        unsigned                not_supported : 1;
        /* needs file table */
        unsigned                file_table : 1;
        /* needs ->fs */
        unsigned                needs_fs : 1;
        /* set if opcode supports polled "wait" */
        unsigned                pollin : 1;
        unsigned                pollout : 1;
        /* op supports buffer selection */
        unsigned                buffer_select : 1;
};

static const struct io_op_def io_op_defs[] = {
        [IORING_OP_NOP] = {},
        [IORING_OP_READV] = {
                .async_ctx              = 1,
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
        },
        [IORING_OP_WRITEV] = {
                .async_ctx              = 1,
                .needs_mm               = 1,
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
        },
        [IORING_OP_FSYNC] = {
                .needs_file             = 1,
        },
        [IORING_OP_READ_FIXED] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
        },
        [IORING_OP_WRITE_FIXED] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
        },
        [IORING_OP_POLL_ADD] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
        },
        [IORING_OP_POLL_REMOVE] = {},
        [IORING_OP_SYNC_FILE_RANGE] = {
                .needs_file             = 1,
        },
        [IORING_OP_SENDMSG] = {
                .async_ctx              = 1,
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .needs_fs               = 1,
                .pollout                = 1,
        },
        [IORING_OP_RECVMSG] = {
                .async_ctx              = 1,
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .needs_fs               = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
        },
        [IORING_OP_TIMEOUT] = {
                .async_ctx              = 1,
                .needs_mm               = 1,
        },
        [IORING_OP_TIMEOUT_REMOVE] = {},
        [IORING_OP_ACCEPT] = {
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .file_table             = 1,
                .pollin                 = 1,
        },
        [IORING_OP_ASYNC_CANCEL] = {},
        [IORING_OP_LINK_TIMEOUT] = {
                .async_ctx              = 1,
                .needs_mm               = 1,
        },
        [IORING_OP_CONNECT] = {
                .async_ctx              = 1,
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
        },
        [IORING_OP_FALLOCATE] = {
                .needs_file             = 1,
        },
        [IORING_OP_OPENAT] = {
                .file_table             = 1,
                .needs_fs               = 1,
        },
        [IORING_OP_CLOSE] = {
                .needs_file             = 1,
                .needs_file_no_error    = 1,
                .file_table             = 1,
        },
        [IORING_OP_FILES_UPDATE] = {
                .needs_mm               = 1,
                .file_table             = 1,
        },
        [IORING_OP_STATX] = {
                .needs_mm               = 1,
                .needs_fs               = 1,
                .file_table             = 1,
        },
        [IORING_OP_READ] = {
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
        },
        [IORING_OP_WRITE] = {
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
        },
        [IORING_OP_FADVISE] = {
                .needs_file             = 1,
        },
        [IORING_OP_MADVISE] = {
                .needs_mm               = 1,
        },
        [IORING_OP_SEND] = {
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
        },
        [IORING_OP_RECV] = {
                .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
        },
        [IORING_OP_OPENAT2] = {
                .file_table             = 1,
                .needs_fs               = 1,
        },
        [IORING_OP_EPOLL_CTL] = {
                .unbound_nonreg_file    = 1,
                .file_table             = 1,
        },
        [IORING_OP_SPLICE] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
        },
        [IORING_OP_PROVIDE_BUFFERS] = {},
        [IORING_OP_REMOVE_BUFFERS] = {},
        [IORING_OP_TEE] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
        },
};
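
/*
 * Illustrative sketch of how this table is consulted (see
 * io_prep_async_work() below for a real example):
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->unbound_nonreg_file)
 *		req->work.flags |= IO_WQ_WORK_UNBOUND;
 */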

static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_files_update *ip,
                                 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_complete_rw_common(struct kiocb *kiocb, long res);
static void io_cleanup_req(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
                       int fd, struct file **out_file, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req,
                           const struct io_uring_sqe *sqe);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
        if (file->f_op == &io_uring_fops) {
                struct io_ring_ctx *ctx = file->private_data;

                return ctx->ring_sock->sk;
        }
#endif
        return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_get_req_task(struct io_kiocb *req)
{
        if (req->flags & REQ_F_TASK_PINNED)
                return;
        get_task_struct(req->task);
        req->flags |= REQ_F_TASK_PINNED;
}

/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
static void __io_put_req_task(struct io_kiocb *req)
{
        if (req->flags & REQ_F_TASK_PINNED)
                put_task_struct(req->task);
}

static void io_file_put_work(struct work_struct *work);

/*
 * Note: io_req_init_async() must be called before the first time any
 * member of io_wq_work is touched.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
        if (req->flags & REQ_F_WORK_INITIALIZED)
                return;

        memset(&req->work, 0, sizeof(req->work));
        req->flags |= REQ_F_WORK_INITIALIZED;
}

static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
        return ctx->flags & IORING_SETUP_SQPOLL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
        struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

        complete(&ctx->ref_comp);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
        struct io_ring_ctx *ctx;
        int hash_bits;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
        if (!ctx->fallback_req)
                goto err;

        /*
         * Use 5 bits less than the max cq entries; that should give us around
         * 32 entries per hash list if totally full and uniformly spread
         * (e.g. cq_entries == 4096 gives hash_bits == 7, i.e. 128 lists).
         */
        hash_bits = ilog2(p->cq_entries);
        hash_bits -= 5;
        if (hash_bits <= 0)
                hash_bits = 1;
        ctx->cancel_hash_bits = hash_bits;
        ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
                                        GFP_KERNEL);
        if (!ctx->cancel_hash)
                goto err;
        __hash_init(ctx->cancel_hash, 1U << hash_bits);

        if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
                            PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
                goto err;

        ctx->flags = p->flags;
        init_waitqueue_head(&ctx->sqo_wait);
        init_waitqueue_head(&ctx->cq_wait);
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
        init_completion(&ctx->ref_comp);
        init_completion(&ctx->sq_thread_comp);
        idr_init(&ctx->io_buffer_idr);
        idr_init(&ctx->personality_idr);
        mutex_init(&ctx->uring_lock);
        init_waitqueue_head(&ctx->wait);
        spin_lock_init(&ctx->completion_lock);
        INIT_LIST_HEAD(&ctx->poll_list);
        INIT_LIST_HEAD(&ctx->defer_list);
        INIT_LIST_HEAD(&ctx->timeout_list);
        init_waitqueue_head(&ctx->inflight_wait);
        spin_lock_init(&ctx->inflight_lock);
        INIT_LIST_HEAD(&ctx->inflight_list);
        INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
        init_llist_head(&ctx->file_put_llist);
        return ctx;
err:
        if (ctx->fallback_req)
                kmem_cache_free(req_cachep, ctx->fallback_req);
        kfree(ctx->cancel_hash);
        kfree(ctx);
        return NULL;
}

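/*
 * IOSQE_IO_DRAIN: a drained request may only be issued once every request
 * submitted before it has completed. req->sequence records its place in
 * the submission order; the request stays deferred until the CQ tail plus
 * the overflowed-completion count has caught up with that sequence.
 */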
static inline bool __req_need_defer(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;

        return req->sequence != ctx->cached_cq_tail
                                + atomic_read(&ctx->cached_cq_overflow);
}

static inline bool req_need_defer(struct io_kiocb *req)
{
        if (unlikely(req->flags & REQ_F_IO_DRAIN))
                return __req_need_defer(req);

        return false;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;

        /* order cqe stores with ring update */
        smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

        if (wq_has_sleeper(&ctx->cq_wait)) {
                wake_up_interruptible(&ctx->cq_wait);
                kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
        }
}

static inline void io_req_work_grab_env(struct io_kiocb *req,
                                        const struct io_op_def *def)
{
        if (!req->work.mm && def->needs_mm) {
                mmgrab(current->mm);
                req->work.mm = current->mm;
        }
        if (!req->work.creds)
                req->work.creds = get_current_cred();
        if (!req->work.fs && def->needs_fs) {
                spin_lock(&current->fs->lock);
                if (!current->fs->in_exec) {
                        req->work.fs = current->fs;
                        req->work.fs->users++;
                } else {
                        req->work.flags |= IO_WQ_WORK_CANCEL;
                }
                spin_unlock(&current->fs->lock);
        }
}

static inline void io_req_work_drop_env(struct io_kiocb *req)
{
        if (!(req->flags & REQ_F_WORK_INITIALIZED))
                return;

        if (req->work.mm) {
                mmdrop(req->work.mm);
                req->work.mm = NULL;
        }
        if (req->work.creds) {
                put_cred(req->work.creds);
                req->work.creds = NULL;
        }
        if (req->work.fs) {
                struct fs_struct *fs = req->work.fs;

                spin_lock(&req->work.fs->lock);
                if (--fs->users)
                        fs = NULL;
                spin_unlock(&req->work.fs->lock);
                if (fs)
                        free_fs_struct(fs);
        }
}

static inline void io_prep_async_work(struct io_kiocb *req,
                                      struct io_kiocb **link)
{
        const struct io_op_def *def = &io_op_defs[req->opcode];

        io_req_init_async(req);

        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file)
                        io_wq_hash_work(&req->work, file_inode(req->file));
        } else {
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }

        io_req_work_grab_env(req, def);

        *link = io_prep_linked_timeout(req);
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *link;

        io_prep_async_work(req, &link);

        trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
                                        &req->work, req->flags);
        io_wq_enqueue(ctx->io_wq, &req->work);

        if (link)
                io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
        int ret;

        ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
        if (ret != -1) {
                atomic_inc(&req->ctx->cq_timeouts);
                list_del_init(&req->list);
                req->flags |= REQ_F_COMP_LOCKED;
                io_cqring_fill_event(req, 0);
                io_put_req(req);
        }
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req, *tmp;

        spin_lock_irq(&ctx->completion_lock);
        list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
                io_kill_timeout(req);
        spin_unlock_irq(&ctx->completion_lock);
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
        do {
                struct io_kiocb *req = list_first_entry(&ctx->defer_list,
                                                        struct io_kiocb, list);

                if (req_need_defer(req))
                        break;
                list_del_init(&req->list);
                io_queue_async_work(req);
        } while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
        while (!list_empty(&ctx->timeout_list)) {
                struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
                                                        struct io_kiocb, list);

                if (req->flags & REQ_F_TIMEOUT_NOSEQ)
                        break;
                if (req->timeout.target_seq != ctx->cached_cq_tail
                                        - atomic_read(&ctx->cq_timeouts))
                        break;

                list_del_init(&req->list);
                io_kill_timeout(req);
        }
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
        io_flush_timeouts(ctx);
        __io_commit_cqring(ctx);

        if (unlikely(!list_empty(&ctx->defer_list)))
                __io_queue_deferred(ctx);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;
        unsigned tail;

        tail = ctx->cached_cq_tail;
        /*
         * writes to the cq entry need to come after reading head; the
         * control dependency is enough as we're using WRITE_ONCE to
         * fill the cq entry
         */
        if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
                return NULL;

        ctx->cached_cq_tail++;
        return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
        if (!ctx->cq_ev_fd)
                return false;
        if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
                return false;
        if (!ctx->eventfd_async)
                return true;
        return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
        if (waitqueue_active(&ctx->sqo_wait))
                wake_up(&ctx->sqo_wait);
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
        struct io_rings *rings = ctx->rings;
        struct io_uring_cqe *cqe;
        struct io_kiocb *req;
        unsigned long flags;
        LIST_HEAD(list);

        if (!force) {
                if (list_empty_careful(&ctx->cq_overflow_list))
                        return true;
                if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
                    rings->cq_ring_entries))
                        return false;
        }

        spin_lock_irqsave(&ctx->completion_lock, flags);

        /* if force is set, the ring is going away. always drop after that */
        if (force)
                ctx->cq_overflow_flushed = 1;

        cqe = NULL;
        while (!list_empty(&ctx->cq_overflow_list)) {
                cqe = io_get_cqring(ctx);
                if (!cqe && !force)
                        break;

                req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
                                                list);
                list_move(&req->list, &list);
                req->flags &= ~REQ_F_OVERFLOW;
                if (cqe) {
                        WRITE_ONCE(cqe->user_data, req->user_data);
                        WRITE_ONCE(cqe->res, req->result);
                        WRITE_ONCE(cqe->flags, req->cflags);
                } else {
                        WRITE_ONCE(ctx->rings->cq_overflow,
                                atomic_inc_return(&ctx->cached_cq_overflow));
                }
        }

        io_commit_cqring(ctx);
        if (cqe) {
                clear_bit(0, &ctx->sq_check_overflow);
                clear_bit(0, &ctx->cq_check_overflow);
                ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
        }
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
        io_cqring_ev_posted(ctx);

        while (!list_empty(&list)) {
                req = list_first_entry(&list, struct io_kiocb, list);
                list_del(&req->list);
                io_put_req(req);
        }

        return cqe != NULL;
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_uring_cqe *cqe;

        trace_io_uring_complete(ctx, req->user_data, res);

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        cqe = io_get_cqring(ctx);
        if (likely(cqe)) {
                WRITE_ONCE(cqe->user_data, req->user_data);
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, cflags);
        } else if (ctx->cq_overflow_flushed) {
                WRITE_ONCE(ctx->rings->cq_overflow,
                                atomic_inc_return(&ctx->cached_cq_overflow));
        } else {
                if (list_empty(&ctx->cq_overflow_list)) {
                        set_bit(0, &ctx->sq_check_overflow);
                        set_bit(0, &ctx->cq_check_overflow);
                        ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
                }
                req->flags |= REQ_F_OVERFLOW;
                refcount_inc(&req->refs);
                req->result = res;
                req->cflags = cflags;
                list_add_tail(&req->list, &ctx->cq_overflow_list);
        }
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
        __io_cqring_fill_event(req, res, 0);
}

static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;

        spin_lock_irqsave(&ctx->completion_lock, flags);
        __io_cqring_fill_event(req, res, cflags);
        io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);

        io_cqring_ev_posted(ctx);
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
        __io_cqring_add_event(req, res, 0);
}

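/*
 * ctx->fallback_req is handed out at most once at a time; bit 0 of the
 * pointer value doubles as a "taken" lock, set in io_get_fallback_req()
 * and cleared in __io_free_req(), hence the masking with ~1UL below.
 */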
static inline bool io_is_fallback_req(struct io_kiocb *req)
{
        return req == (struct io_kiocb *)
                        ((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = ctx->fallback_req;
        if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
                return req;

        return NULL;
}

static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
                                     struct io_submit_state *state)
{
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct io_kiocb *req;

        if (!state) {
                req = kmem_cache_alloc(req_cachep, gfp);
                if (unlikely(!req))
                        goto fallback;
        } else if (!state->free_reqs) {
                size_t sz;
                int ret;

                sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
                ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

                /*
                 * Bulk alloc is all-or-nothing. If we fail to get a batch,
                 * retry single alloc to be on the safe side.
                 */
                if (unlikely(ret <= 0)) {
                        state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
                        if (!state->reqs[0])
                                goto fallback;
                        ret = 1;
                }
                state->free_reqs = ret - 1;
                req = state->reqs[ret - 1];
        } else {
                state->free_reqs--;
                req = state->reqs[state->free_reqs];
        }

        return req;
fallback:
        return io_get_fallback_req(ctx);
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
                          bool fixed)
{
        if (fixed)
                percpu_ref_put(req->fixed_file_refs);
        else
                fput(file);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
        if (req->flags & REQ_F_NEED_CLEANUP)
                io_cleanup_req(req);

        kfree(req->io);
        if (req->file)
                io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
        __io_put_req_task(req);
        io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
        __io_req_aux_free(req);

        if (req->flags & REQ_F_INFLIGHT) {
                struct io_ring_ctx *ctx = req->ctx;
                unsigned long flags;

                spin_lock_irqsave(&ctx->inflight_lock, flags);
                list_del(&req->inflight_entry);
                if (waitqueue_active(&ctx->inflight_wait))
                        wake_up(&ctx->inflight_wait);
                spin_unlock_irqrestore(&ctx->inflight_lock, flags);
        }

        percpu_ref_put(&req->ctx->refs);
        if (likely(!io_is_fallback_req(req)))
                kmem_cache_free(req_cachep, req);
        else
                clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
}

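/*
 * Completion-side batching: requests are parked in a req_batch and then
 * released in one go via kmem_cache_free_bulk(), amortizing allocator
 * work and the ctx refcount drops (percpu_ref_put_many()).
 */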
1450        void *reqs[IO_IOPOLL_BATCH];
1451        int to_free;
1452        int need_iter;
1453};
1454
1455static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
1456{
1457        if (!rb->to_free)
1458                return;
1459        if (rb->need_iter) {
1460                int i, inflight = 0;
1461                unsigned long flags;
1462
1463                for (i = 0; i < rb->to_free; i++) {
1464                        struct io_kiocb *req = rb->reqs[i];
1465
1466                        if (req->flags & REQ_F_INFLIGHT)
1467                                inflight++;
1468                        __io_req_aux_free(req);
1469                }
1470                if (!inflight)
1471                        goto do_free;
1472
1473                spin_lock_irqsave(&ctx->inflight_lock, flags);
1474                for (i = 0; i < rb->to_free; i++) {
1475                        struct io_kiocb *req = rb->reqs[i];
1476
1477                        if (req->flags & REQ_F_INFLIGHT) {
1478                                list_del(&req->inflight_entry);
1479                                if (!--inflight)
1480                                        break;
1481                        }
1482                }
1483                spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1484
1485                if (waitqueue_active(&ctx->inflight_wait))
1486                        wake_up(&ctx->inflight_wait);
1487        }
1488do_free:
1489        kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
1490        percpu_ref_put_many(&ctx->refs, rb->to_free);
1491        rb->to_free = rb->need_iter = 0;
1492}
1493
1494static bool io_link_cancel_timeout(struct io_kiocb *req)
1495{
1496        struct io_ring_ctx *ctx = req->ctx;
1497        int ret;
1498
1499        ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
1500        if (ret != -1) {
1501                io_cqring_fill_event(req, -ECANCELED);
1502                io_commit_cqring(ctx);
1503                req->flags &= ~REQ_F_LINK_HEAD;
1504                io_put_req(req);
1505                return true;
1506        }
1507
1508        return false;
1509}
1510
1511static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
1512{
1513        struct io_ring_ctx *ctx = req->ctx;
1514        bool wake_ev = false;
1515
1516        /* Already got next link */
1517        if (req->flags & REQ_F_LINK_NEXT)
1518                return;
1519
1520        /*
1521         * The list should never be empty when we are called here. But it
1522         * could potentially happen if the chain is messed up, so check to
1523         * be on the safe side.
1524         */
1525        while (!list_empty(&req->link_list)) {
1526                struct io_kiocb *nxt = list_first_entry(&req->link_list,
1527                                                struct io_kiocb, link_list);
1528
1529                if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
1530                             (nxt->flags & REQ_F_TIMEOUT))) {
1531                        list_del_init(&nxt->link_list);
1532                        wake_ev |= io_link_cancel_timeout(nxt);
1533                        req->flags &= ~REQ_F_LINK_TIMEOUT;
1534                        continue;
1535                }
1536
1537                list_del_init(&req->link_list);
1538                if (!list_empty(&nxt->link_list))
1539                        nxt->flags |= REQ_F_LINK_HEAD;
1540                *nxtptr = nxt;
1541                break;
1542        }
1543
1544        req->flags |= REQ_F_LINK_NEXT;
1545        if (wake_ev)
1546                io_cqring_ev_posted(ctx);
1547}
1548
1549/*
1550 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
1551 */
1552static void io_fail_links(struct io_kiocb *req)
1553{
1554        struct io_ring_ctx *ctx = req->ctx;
1555        unsigned long flags;
1556
1557        spin_lock_irqsave(&ctx->completion_lock, flags);
1558
1559        while (!list_empty(&req->link_list)) {
1560                struct io_kiocb *link = list_first_entry(&req->link_list,
1561                                                struct io_kiocb, link_list);
1562
1563                list_del_init(&link->link_list);
1564                trace_io_uring_fail_link(req, link);
1565
1566                if ((req->flags & REQ_F_LINK_TIMEOUT) &&
1567                    link->opcode == IORING_OP_LINK_TIMEOUT) {
1568                        io_link_cancel_timeout(link);
1569                } else {
1570                        io_cqring_fill_event(link, -ECANCELED);
1571                        __io_double_put_req(link);
1572                }
1573                req->flags &= ~REQ_F_LINK_TIMEOUT;
1574        }
1575
1576        io_commit_cqring(ctx);
1577        spin_unlock_irqrestore(&ctx->completion_lock, flags);
1578        io_cqring_ev_posted(ctx);
1579}
1580
1581static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
1582{
1583        if (likely(!(req->flags & REQ_F_LINK_HEAD)))
1584                return;
1585
1586        /*
1587         * If LINK is set, we have dependent requests in this chain. If we
1588         * didn't fail this request, queue the first one up, moving any other
1589         * dependencies to the next request. In case of failure, fail the rest
1590         * of the chain.
1591         */
1592        if (req->flags & REQ_F_FAIL_LINK) {
1593                io_fail_links(req);
1594        } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
1595                        REQ_F_LINK_TIMEOUT) {
1596                struct io_ring_ctx *ctx = req->ctx;
1597                unsigned long flags;
1598
1599                /*
1600                 * If this is a timeout link, we could be racing with the
1601                 * timeout timer. Grab the completion lock for this case to
1602                 * protect against that.
1603                 */
1604                spin_lock_irqsave(&ctx->completion_lock, flags);
1605                io_req_link_next(req, nxt);
1606                spin_unlock_irqrestore(&ctx->completion_lock, flags);
1607        } else {
1608                io_req_link_next(req, nxt);
1609        }
1610}
1611
1612static void io_free_req(struct io_kiocb *req)
1613{
1614        struct io_kiocb *nxt = NULL;
1615
1616        io_req_find_next(req, &nxt);
1617        __io_free_req(req);
1618
1619        if (nxt)
1620                io_queue_async_work(nxt);
1621}
1622
1623static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
1624{
1625        struct io_kiocb *link;
1626        const struct io_op_def *def = &io_op_defs[nxt->opcode];
1627
1628        if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
1629                io_wq_hash_work(&nxt->work, file_inode(nxt->file));
1630
1631        *workptr = &nxt->work;
1632        link = io_prep_linked_timeout(nxt);
1633        if (link)
1634                nxt->flags |= REQ_F_QUEUE_TIMEOUT;
1635}
1636
1637/*
1638 * Drop reference to request, return next in chain (if there is one) if this
1639 * was the last reference to this request.
1640 */
1641__attribute__((nonnull))
1642static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
1643{
1644        if (refcount_dec_and_test(&req->refs)) {
1645                io_req_find_next(req, nxtptr);
1646                __io_free_req(req);
1647        }
1648}
1649
1650static void io_put_req(struct io_kiocb *req)
1651{
1652        if (refcount_dec_and_test(&req->refs))
1653                io_free_req(req);
1654}
1655
1656static void io_steal_work(struct io_kiocb *req,
1657                          struct io_wq_work **workptr)
1658{
1659        /*
1660         * It's in an io-wq worker, so there should always be at least
1661         * one reference, which will be dropped in io_put_work() just
1662         * after the current handler returns.
1663         *
1664         * It also means that if the counter dropped to 1, then there are
1665         * no asynchronous users left, so it's safe to steal the next work.
1666         */
1667        if (refcount_read(&req->refs) == 1) {
1668                struct io_kiocb *nxt = NULL;
1669
1670                io_req_find_next(req, &nxt);
1671                if (nxt)
1672                        io_wq_assign_next(workptr, nxt);
1673        }
1674}
1675
1676/*
1677 * Must only be used if we don't need to care about links, usually from
1678 * within the completion handling itself.
1679 */
1680static void __io_double_put_req(struct io_kiocb *req)
1681{
1682        /* drop both submit and complete references */
1683        if (refcount_sub_and_test(2, &req->refs))
1684                __io_free_req(req);
1685}
1686
1687static void io_double_put_req(struct io_kiocb *req)
1688{
1689        /* drop both submit and complete references */
1690        if (refcount_sub_and_test(2, &req->refs))
1691                io_free_req(req);
1692}
1693
1694static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
1695{
1696        struct io_rings *rings = ctx->rings;
1697
1698        if (test_bit(0, &ctx->cq_check_overflow)) {
1699                /*
1700                 * noflush == true is from the waitqueue handler, just ensure
1701                 * we wake up the task, and the next invocation will flush the
1702                 * entries. We cannot safely do it from here.
1703                 */
1704                if (noflush && !list_empty(&ctx->cq_overflow_list))
1705                        return -1U;
1706
1707                io_cqring_overflow_flush(ctx, false);
1708        }
1709
1710        /* See comment at the top of this file */
1711        smp_rmb();
1712        return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
1713}
1714
1715static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
1716{
1717        struct io_rings *rings = ctx->rings;
1718
1719        /* make sure SQ entry isn't read before tail */
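        /*
         * Illustrative application-side counterpart (a sketch, not code from
         * this file): the submitter pairs with this acquire by publishing
         * the SQE contents before a release store of the new tail, e.g.
         * filling sqes[tail & ring_mask] and then doing
         * smp_store_release(&rings->sq.tail, tail + 1).
         */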
1720        return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
1721}
1722
1723static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
1724{
1725        if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
1726                return false;
1727
1728        if (req->file || req->io)
1729                rb->need_iter++;
1730
1731        rb->reqs[rb->to_free++] = req;
1732        if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
1733                io_free_req_many(req->ctx, rb);
1734        return true;
1735}
1736
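/*
 * Hand a selected buffer back to the application: free the kernel-side
 * io_buffer and encode its ID in the CQE flags, tagged with
 * IORING_CQE_F_BUFFER, so userspace knows which buffer was consumed.
 */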
1737static int io_put_kbuf(struct io_kiocb *req)
1738{
1739        struct io_buffer *kbuf;
1740        int cflags;
1741
1742        kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
1743        cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
1744        cflags |= IORING_CQE_F_BUFFER;
1745        req->rw.addr = 0;
1746        kfree(kbuf);
1747        return cflags;
1748}
1749
1750static void io_iopoll_queue(struct list_head *again)
1751{
1752        struct io_kiocb *req;
1753
1754        do {
1755                req = list_first_entry(again, struct io_kiocb, list);
1756                list_del(&req->list);
1757
1758                /* shouldn't happen unless io_uring is dying, cancel reqs */
1759                if (unlikely(!current->mm)) {
1760                        io_complete_rw_common(&req->rw.kiocb, -EAGAIN);
1761                        io_put_req(req);
1762                        continue;
1763                }
1764
1765                refcount_inc(&req->refs);
1766                io_queue_async_work(req);
1767        } while (!list_empty(again));
1768}
1769
1770/*
1771 * Find and free completed poll iocbs
1772 */
1773static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1774                               struct list_head *done)
1775{
1776        struct req_batch rb;
1777        struct io_kiocb *req;
1778        LIST_HEAD(again);
1779
1780        /* order with ->result store in io_complete_rw_iopoll() */
1781        smp_rmb();
1782
1783        rb.to_free = rb.need_iter = 0;
1784        while (!list_empty(done)) {
1785                int cflags = 0;
1786
1787                req = list_first_entry(done, struct io_kiocb, list);
1788                if (READ_ONCE(req->result) == -EAGAIN) {
1789                        req->iopoll_completed = 0;
1790                        list_move_tail(&req->list, &again);
1791                        continue;
1792                }
1793                list_del(&req->list);
1794
1795                if (req->flags & REQ_F_BUFFER_SELECTED)
1796                        cflags = io_put_kbuf(req);
1797
1798                __io_cqring_fill_event(req, req->result, cflags);
1799                (*nr_events)++;
1800
1801                if (refcount_dec_and_test(&req->refs) &&
1802                    !io_req_multi_free(&rb, req))
1803                        io_free_req(req);
1804        }
1805
1806        io_commit_cqring(ctx);
1807        if (ctx->flags & IORING_SETUP_SQPOLL)
1808                io_cqring_ev_posted(ctx);
1809        io_free_req_many(ctx, &rb);
1810
1811        if (!list_empty(&again))
1812                io_iopoll_queue(&again);
1813}
1814
1815static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
1816                        long min)
1817{
1818        struct io_kiocb *req, *tmp;
1819        LIST_HEAD(done);
1820        bool spin;
1821        int ret;
1822
1823        /*
1824         * Only spin for completions if we don't have multiple devices hanging
1825         * off our complete list, and we're under the requested amount.
1826         */
1827        spin = !ctx->poll_multi_file && *nr_events < min;
1828
1829        ret = 0;
1830        list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
1831                struct kiocb *kiocb = &req->rw.kiocb;
1832
1833                /*
1834                 * Move completed and retryable entries to our local lists.
1835                 * If we find a request that requires polling, break out
1836                 * and complete those lists first, if we have entries there.
1837                 */
1838                if (READ_ONCE(req->iopoll_completed)) {
1839                        list_move_tail(&req->list, &done);
1840                        continue;
1841                }
1842                if (!list_empty(&done))
1843                        break;
1844
1845                ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
1846                if (ret < 0)
1847                        break;
1848
1849                if (ret && spin)
1850                        spin = false;
1851                ret = 0;
1852        }
1853
1854        if (!list_empty(&done))
1855                io_iopoll_complete(ctx, nr_events, &done);
1856
1857        return ret;
1858}
1859
1860/*
1861 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
1862 * non-spinning poll check - we'll still enter the driver poll loop, but only
1863 * as a non-spinning completion check.
1864 */
1865static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1866                                long min)
1867{
1868        while (!list_empty(&ctx->poll_list) && !need_resched()) {
1869                int ret;
1870
1871                ret = io_do_iopoll(ctx, nr_events, min);
1872                if (ret < 0)
1873                        return ret;
1874                if (!min || *nr_events >= min)
1875                        return 0;
1876        }
1877
1878        return 1;
1879}
1880
1881/*
1882 * We can't just wait for polled events to come to us, we have to actively
1883 * find and complete them.
1884 */
1885static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1886{
1887        if (!(ctx->flags & IORING_SETUP_IOPOLL))
1888                return;
1889
1890        mutex_lock(&ctx->uring_lock);
1891        while (!list_empty(&ctx->poll_list)) {
1892                unsigned int nr_events = 0;
1893
1894                io_iopoll_getevents(ctx, &nr_events, 1);
1895
1896                /*
1897                 * Ensure we allow local-to-the-cpu processing to take place;
1898                 * in this case we also need to ensure that we reap all events.
1899                 */
1900                cond_resched();
1901        }
1902        mutex_unlock(&ctx->uring_lock);
1903}
1904
1905static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1906                           long min)
1907{
1908        int iters = 0, ret = 0;
1909
1910        /*
1911         * We disallow the app entering submit/complete with polling, but we
1912         * still need to lock the ring to prevent racing with polled issue
1913         * that got punted to a workqueue.
1914         */
1915        mutex_lock(&ctx->uring_lock);
1916        do {
1917                int tmin = 0;
1918
1919                /*
1920                 * Don't enter poll loop if we already have events pending.
1921                 * If we do, we can potentially be spinning for commands that
1922                 * already triggered a CQE (eg in error).
1923                 */
1924                if (io_cqring_events(ctx, false))
1925                        break;
1926
1927                /*
1928                 * If a submit got punted to a workqueue, we can have the
1929                 * application entering polling for a command before it gets
1930                 * issued. That app will hold the uring_lock for the duration
1931                 * of the poll right here, so we need to take a breather every
1932                 * now and then to ensure that the issue has a chance to add
1933                 * the poll to the issued list. Otherwise we can spin here
1934                 * forever, while the workqueue is stuck trying to acquire the
1935                 * very same mutex.
1936                 */
1937                if (!(++iters & 7)) {
1938                        mutex_unlock(&ctx->uring_lock);
1939                        mutex_lock(&ctx->uring_lock);
1940                }
1941
1942                if (*nr_events < min)
1943                        tmin = min - *nr_events;
1944
1945                ret = io_iopoll_getevents(ctx, nr_events, tmin);
1946                if (ret <= 0)
1947                        break;
1948                ret = 0;
1949        } while (min && !*nr_events && !need_resched());
1950
1951        mutex_unlock(&ctx->uring_lock);
1952        return ret;
1953}
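/*
 * Note for the userspace side (informational): with IORING_SETUP_IOPOLL,
 * completions are only reaped by this path, so the application must call
 * io_uring_enter() with IORING_ENTER_GETEVENTS (or submit new IO) rather
 * than waiting for CQEs to appear on their own.
 */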
1954
1955static void kiocb_end_write(struct io_kiocb *req)
1956{
1957        /*
1958         * Tell lockdep we inherited freeze protection from submission
1959         * thread.
1960         */
1961        if (req->flags & REQ_F_ISREG) {
1962                struct inode *inode = file_inode(req->file);
1963
1964                __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
1965        }
1966        file_end_write(req->file);
1967}
1968
1969static inline void req_set_fail_links(struct io_kiocb *req)
1970{
1971        if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1972                req->flags |= REQ_F_FAIL_LINK;
1973}
1974
1975static void io_complete_rw_common(struct kiocb *kiocb, long res)
1976{
1977        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
1978        int cflags = 0;
1979
1980        if (kiocb->ki_flags & IOCB_WRITE)
1981                kiocb_end_write(req);
1982
1983        if (res != req->result)
1984                req_set_fail_links(req);
1985        if (req->flags & REQ_F_BUFFER_SELECTED)
1986                cflags = io_put_kbuf(req);
1987        __io_cqring_add_event(req, res, cflags);
1988}
1989
1990static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
1991{
1992        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
1993
1994        io_complete_rw_common(kiocb, res);
1995        io_put_req(req);
1996}
1997
1998static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1999{
2000        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2001
2002        if (kiocb->ki_flags & IOCB_WRITE)
2003                kiocb_end_write(req);
2004
2005        if (res != -EAGAIN && res != req->result)
2006                req_set_fail_links(req);
2007
2008        WRITE_ONCE(req->result, res);
2009        /* order with io_poll_complete() checking ->result */
2010        smp_wmb();
2011        WRITE_ONCE(req->iopoll_completed, 1);
2012}
2013
2014/*
2015 * After the iocb has been issued, it's safe to be found on the poll list.
2016 * Adding the kiocb to the list AFTER submission ensures that we don't
2017 * find it from an io_iopoll_getevents() thread before the issuer is done
2018 * accessing the kiocb cookie.
2019 */
2020static void io_iopoll_req_issued(struct io_kiocb *req)
2021{
2022        struct io_ring_ctx *ctx = req->ctx;
2023
2024        /*
2025         * Track whether we have multiple files in our lists. This will impact
2026         * how we do polling later on: we won't spin if we're on potentially
2027         * different devices.
2028         */
2029        if (list_empty(&ctx->poll_list)) {
2030                ctx->poll_multi_file = false;
2031        } else if (!ctx->poll_multi_file) {
2032                struct io_kiocb *list_req;
2033
2034                list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
2035                                                list);
2036                if (list_req->file != req->file)
2037                        ctx->poll_multi_file = true;
2038        }
2039
2040        /*
2041         * For fast devices, IO may have already completed. If it has, add
2042         * it to the front so we find it first.
2043         */
2044        if (READ_ONCE(req->iopoll_completed))
2045                list_add(&req->list, &ctx->poll_list);
2046        else
2047                list_add_tail(&req->list, &ctx->poll_list);
2048
2049        if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2050            wq_has_sleeper(&ctx->sqo_wait))
2051                wake_up(&ctx->sqo_wait);
2052}
2053
2054static void __io_state_file_put(struct io_submit_state *state)
2055{
2056        int diff = state->has_refs - state->used_refs;
2057
2058        if (diff)
2059                fput_many(state->file, diff);
2060        state->file = NULL;
2061}
2062
2063static inline void io_state_file_put(struct io_submit_state *state)
2064{
2065        if (state->file)
2066                __io_state_file_put(state);
2067}
2068
2069/*
2070 * Get as many references to a file as we have IOs left in this submission,
2071 * assuming most submissions are for one file, or at least that each file
2072 * has more than one submission.
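 *
 * Example (illustrative): with ios_left == 8 and every SQE targeting the
 * same fd, the first call takes all eight references via fget_many() and
 * the next seven only bump used_refs; __io_state_file_put() later returns
 * the has_refs - used_refs surplus through fput_many().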
2073 */
2074static struct file *__io_file_get(struct io_submit_state *state, int fd)
2075{
2076        if (!state)
2077                return fget(fd);
2078
2079        if (state->file) {
2080                if (state->fd == fd) {
2081                        state->used_refs++;
2082                        state->ios_left--;
2083                        return state->file;
2084                }
2085                __io_state_file_put(state);
2086        }
2087        state->file = fget_many(fd, state->ios_left);
2088        if (!state->file)
2089                return NULL;
2090
2091        state->fd = fd;
2092        state->has_refs = state->ios_left;
2093        state->used_refs = 1;
2094        state->ios_left--;
2095        return state->file;
2096}
2097
2098/*
2099 * If we tracked the file through the SCM inflight mechanism, we could support
2100 * any file. For now, just ensure that anything potentially problematic is done
2101 * inline.
2102 */
2103static bool io_file_supports_async(struct file *file, int rw)
2104{
2105        umode_t mode = file_inode(file)->i_mode;
2106
2107        if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
2108                return true;
2109        if (S_ISREG(mode) && file->f_op != &io_uring_fops)
2110                return true;
2111
2112        /* any ->read/write should understand O_NONBLOCK */
2113        if (file->f_flags & O_NONBLOCK)
2114                return true;
2115
2116        if (!(file->f_mode & FMODE_NOWAIT))
2117                return false;
2118
2119        if (rw == READ)
2120                return file->f_op->read_iter != NULL;
2121
2122        return file->f_op->write_iter != NULL;
2123}
2124
2125static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2126                      bool force_nonblock)
2127{
2128        struct io_ring_ctx *ctx = req->ctx;
2129        struct kiocb *kiocb = &req->rw.kiocb;
2130        unsigned ioprio;
2131        int ret;
2132
2133        if (S_ISREG(file_inode(req->file)->i_mode))
2134                req->flags |= REQ_F_ISREG;
2135
2136        kiocb->ki_pos = READ_ONCE(sqe->off);
2137        if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2138                req->flags |= REQ_F_CUR_POS;
2139                kiocb->ki_pos = req->file->f_pos;
2140        }
2141        kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
2142        kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2143        ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2144        if (unlikely(ret))
2145                return ret;
2146
2147        ioprio = READ_ONCE(sqe->ioprio);
2148        if (ioprio) {
2149                ret = ioprio_check_cap(ioprio);
2150                if (ret)
2151                        return ret;
2152
2153                kiocb->ki_ioprio = ioprio;
2154        } else
2155                kiocb->ki_ioprio = get_current_ioprio();
2156
2157        /* don't allow async punt if RWF_NOWAIT was requested */
2158        if (kiocb->ki_flags & IOCB_NOWAIT)
2159                req->flags |= REQ_F_NOWAIT;
2160
2161        if (force_nonblock)
2162                kiocb->ki_flags |= IOCB_NOWAIT;
2163
2164        if (ctx->flags & IORING_SETUP_IOPOLL) {
2165                if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2166                    !kiocb->ki_filp->f_op->iopoll)
2167                        return -EOPNOTSUPP;
2168
2169                kiocb->ki_flags |= IOCB_HIPRI;
2170                kiocb->ki_complete = io_complete_rw_iopoll;
2171                req->result = 0;
2172                req->iopoll_completed = 0;
2173        } else {
2174                if (kiocb->ki_flags & IOCB_HIPRI)
2175                        return -EINVAL;
2176                kiocb->ki_complete = io_complete_rw;
2177        }
2178
2179        req->rw.addr = READ_ONCE(sqe->addr);
2180        req->rw.len = READ_ONCE(sqe->len);
2181        req->buf_index = READ_ONCE(sqe->buf_index);
2182        return 0;
2183}
2184
2185static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2186{
2187        switch (ret) {
2188        case -EIOCBQUEUED:
2189                break;
2190        case -ERESTARTSYS:
2191        case -ERESTARTNOINTR:
2192        case -ERESTARTNOHAND:
2193        case -ERESTART_RESTARTBLOCK:
2194                /*
2195                 * We can't just restart the syscall, since previously
2196                 * submitted sqes may already be in progress. Just fail this
2197                 * IO with EINTR.
2198                 */
2199                ret = -EINTR;
2200                /* fall through */
2201        default:
2202                kiocb->ki_complete(kiocb, ret, 0);
2203        }
2204}
2205
2206static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
2207{
2208        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2209
2210        if (req->flags & REQ_F_CUR_POS)
2211                req->file->f_pos = kiocb->ki_pos;
2212        if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
2213                io_complete_rw(kiocb, ret, 0);
2214        else
2215                io_rw_done(kiocb, ret);
2216}
2217
2218static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
2219                               struct iov_iter *iter)
2220{
2221        struct io_ring_ctx *ctx = req->ctx;
2222        size_t len = req->rw.len;
2223        struct io_mapped_ubuf *imu;
2224        u16 index, buf_index;
2225        size_t offset;
2226        u64 buf_addr;
2227
2228        /* attempt to use fixed buffers without having provided iovecs */
2229        if (unlikely(!ctx->user_bufs))
2230                return -EFAULT;
2231
2232        buf_index = req->buf_index;
2233        if (unlikely(buf_index >= ctx->nr_user_bufs))
2234                return -EFAULT;
2235
2236        index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2237        imu = &ctx->user_bufs[index];
2238        buf_addr = req->rw.addr;
2239
2240        /* overflow */
2241        if (buf_addr + len < buf_addr)
2242                return -EFAULT;
2243        /* not inside the mapped region */
2244        if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2245                return -EFAULT;
2246
2247        /*
2248         * May not be a start of buffer, set size appropriately
2249         * and advance us to the beginning.
2250         */
2251        offset = buf_addr - imu->ubuf;
2252        iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
2253
2254        if (offset) {
2255                /*
2256                 * Don't use iov_iter_advance() here, as it's really slow for
2257                 * using the latter parts of a big fixed buffer - it iterates
2258                 * over each segment manually. We can cheat a bit here, because
2259                 * we know that:
2260                 *
2261                 * 1) it's a BVEC iter, we set it up
2262                 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2263                 *    first and last bvec
2264                 *
2265                 * So just find our index, and adjust the iterator afterwards.
2266                 * If the offset is within the first bvec (or the whole first
2267                 * bvec), just use iov_iter_advance(). This makes it easier
2268                 * since we can just skip the first segment, which may not
2269                 * be PAGE_SIZE aligned.
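                 *
                 * Illustrative arithmetic (assuming 4K pages and a full-page
                 * first bvec): offset = 9000 skips the 4096-byte first bvec,
                 * leaving 4904, so seg_skip = 1 + (4904 >> PAGE_SHIFT) = 2
                 * and the iterator resumes 4904 & (PAGE_SIZE - 1) = 808
                 * bytes into bvec[2].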
2270                 */
2271                const struct bio_vec *bvec = imu->bvec;
2272
2273                if (offset <= bvec->bv_len) {
2274                        iov_iter_advance(iter, offset);
2275                } else {
2276                        unsigned long seg_skip;
2277
2278                        /* skip first vec */
2279                        offset -= bvec->bv_len;
2280                        seg_skip = 1 + (offset >> PAGE_SHIFT);
2281
2282                        iter->bvec = bvec + seg_skip;
2283                        iter->nr_segs -= seg_skip;
2284                        iter->count -= bvec->bv_len + offset;
2285                        iter->iov_offset = offset & ~PAGE_MASK;
2286                }
2287        }
2288
2289        return len;
2290}
2291
2292static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2293{
2294        if (needs_lock)
2295                mutex_unlock(&ctx->uring_lock);
2296}
2297
2298static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2299{
2300        /*
2301         * "Normal" inline submissions always hold the uring_lock, since we
2302         * grab it from the system call. Same is true for the SQPOLL offload.
2303         * The only exception is when we've detached the request and issue it
2304         * from an async worker thread; grab the lock in that case.
2305         */
2306        if (needs_lock)
2307                mutex_lock(&ctx->uring_lock);
2308}
2309
2310static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2311                                          int bgid, struct io_buffer *kbuf,
2312                                          bool needs_lock)
2313{
2314        struct io_buffer *head;
2315
2316        if (req->flags & REQ_F_BUFFER_SELECTED)
2317                return kbuf;
2318
2319        io_ring_submit_lock(req->ctx, needs_lock);
2320
2321        lockdep_assert_held(&req->ctx->uring_lock);
2322
2323        head = idr_find(&req->ctx->io_buffer_idr, bgid);
2324        if (head) {
2325                if (!list_empty(&head->list)) {
2326                        kbuf = list_last_entry(&head->list, struct io_buffer,
2327                                                        list);
2328                        list_del(&kbuf->list);
2329                } else {
2330                        kbuf = head;
2331                        idr_remove(&req->ctx->io_buffer_idr, bgid);
2332                }
2333                if (*len > kbuf->len)
2334                        *len = kbuf->len;
2335        } else {
2336                kbuf = ERR_PTR(-ENOBUFS);
2337        }
2338
2339        io_ring_submit_unlock(req->ctx, needs_lock);
2340
2341        return kbuf;
2342}
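/*
 * Illustrative userspace flow (a sketch, assuming liburing): the app
 * first publishes a group of buffers, then lets the kernel pick one at
 * completion time:
 *
 *	io_uring_prep_provide_buffers(sqe, bufs, buf_len, nbufs, bgid, 0);
 *	...
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *
 * The consumed buffer ID is reported via IORING_CQE_F_BUFFER and
 * cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 */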
2343
2344static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2345                                        bool needs_lock)
2346{
2347        struct io_buffer *kbuf;
2348        u16 bgid;
2349
2350        kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2351        bgid = req->buf_index;
2352        kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2353        if (IS_ERR(kbuf))
2354                return kbuf;
2355        req->rw.addr = (u64) (unsigned long) kbuf;
2356        req->flags |= REQ_F_BUFFER_SELECTED;
2357        return u64_to_user_ptr(kbuf->addr);
2358}
2359
2360#ifdef CONFIG_COMPAT
2361static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2362                                bool needs_lock)
2363{
2364        struct compat_iovec __user *uiov;
2365        compat_ssize_t clen;
2366        void __user *buf;
2367        ssize_t len;
2368
2369        uiov = u64_to_user_ptr(req->rw.addr);
2370        if (!access_ok(uiov, sizeof(*uiov)))
2371                return -EFAULT;
2372        if (__get_user(clen, &uiov->iov_len))
2373                return -EFAULT;
2374        if (clen < 0)
2375                return -EINVAL;
2376
2377        len = clen;
2378        buf = io_rw_buffer_select(req, &len, needs_lock);
2379        if (IS_ERR(buf))
2380                return PTR_ERR(buf);
2381        iov[0].iov_base = buf;
2382        iov[0].iov_len = (compat_size_t) len;
2383        return 0;
2384}
2385#endif
2386
2387static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2388                                      bool needs_lock)
2389{
2390        struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2391        void __user *buf;
2392        ssize_t len;
2393
2394        if (copy_from_user(iov, uiov, sizeof(*uiov)))
2395                return -EFAULT;
2396
2397        len = iov[0].iov_len;
2398        if (len < 0)
2399                return -EINVAL;
2400        buf = io_rw_buffer_select(req, &len, needs_lock);
2401        if (IS_ERR(buf))
2402                return PTR_ERR(buf);
2403        iov[0].iov_base = buf;
2404        iov[0].iov_len = len;
2405        return 0;
2406}
2407
2408static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2409                                    bool needs_lock)
2410{
2411        if (req->flags & REQ_F_BUFFER_SELECTED) {
2412                struct io_buffer *kbuf;
2413
2414                kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2415                iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2416                iov[0].iov_len = kbuf->len;
2417                return 0;
2418        }
2419        if (!req->rw.len)
2420                return 0;
2421        else if (req->rw.len > 1)
2422                return -EINVAL;
2423
2424#ifdef CONFIG_COMPAT
2425        if (req->ctx->compat)
2426                return io_compat_import(req, iov, needs_lock);
2427#endif
2428
2429        return __io_iov_buffer_select(req, iov, needs_lock);
2430}
2431
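/*
 * Resolve the data source/sink of an rw request into an iov_iter: a
 * registered fixed buffer, a provided buffer, the inline address of
 * READ/WRITE, an already-imported async copy in req->io, or a user
 * iovec array for READV/WRITEV.
 */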
2432static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
2433                               struct iovec **iovec, struct iov_iter *iter,
2434                               bool needs_lock)
2435{
2436        void __user *buf = u64_to_user_ptr(req->rw.addr);
2437        size_t sqe_len = req->rw.len;
2438        ssize_t ret;
2439        u8 opcode;
2440
2441        opcode = req->opcode;
2442        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
2443                *iovec = NULL;
2444                return io_import_fixed(req, rw, iter);
2445        }
2446
2447        /* buffer index only valid with fixed read/write, or buffer select */
2448        if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
2449                return -EINVAL;
2450
2451        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
2452                if (req->flags & REQ_F_BUFFER_SELECT) {
2453                        buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
2454                        if (IS_ERR(buf)) {
2455                                *iovec = NULL;
2456                                return PTR_ERR(buf);
2457                        }
2458                        req->rw.len = sqe_len;
2459                }
2460
2461                ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2462                *iovec = NULL;
2463                return ret < 0 ? ret : sqe_len;
2464        }
2465
2466        if (req->io) {
2467                struct io_async_rw *iorw = &req->io->rw;
2468
2469                *iovec = iorw->iov;
2470                iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
2471                if (iorw->iov == iorw->fast_iov)
2472                        *iovec = NULL;
2473                return iorw->size;
2474        }
2475
2476        if (req->flags & REQ_F_BUFFER_SELECT) {
2477                ret = io_iov_buffer_select(req, *iovec, needs_lock);
2478                if (!ret) {
2479                        ret = (*iovec)->iov_len;
2480                        iov_iter_init(iter, rw, *iovec, 1, ret);
2481                }
2482                *iovec = NULL;
2483                return ret;
2484        }
2485
2486#ifdef CONFIG_COMPAT
2487        if (req->ctx->compat)
2488                return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2489                                                iovec, iter);
2490#endif
2491
2492        return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
2493}
2494
2495/*
2496 * For files that don't have ->read_iter() and ->write_iter(), handle them
2497 * by looping over ->read() or ->write() manually.
2498 */
2499static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2500                           struct iov_iter *iter)
2501{
2502        ssize_t ret = 0;
2503
2504        /*
2505         * Don't support polled IO through this interface, and we can't
2506         * support non-blocking either. For the latter, this just causes
2507         * the kiocb to be handled from an async context.
2508         */
2509        if (kiocb->ki_flags & IOCB_HIPRI)
2510                return -EOPNOTSUPP;
2511        if (kiocb->ki_flags & IOCB_NOWAIT)
2512                return -EAGAIN;
2513
2514        while (iov_iter_count(iter)) {
2515                struct iovec iovec;
2516                ssize_t nr;
2517
2518                if (!iov_iter_is_bvec(iter)) {
2519                        iovec = iov_iter_iovec(iter);
2520                } else {
2521                        /* fixed buffers import bvec */
2522                        iovec.iov_base = kmap(iter->bvec->bv_page)
2523                                                + iter->iov_offset;
2524                        iovec.iov_len = min(iter->count,
2525                                        iter->bvec->bv_len - iter->iov_offset);
2526                }
2527
2528                if (rw == READ) {
2529                        nr = file->f_op->read(file, iovec.iov_base,
2530                                              iovec.iov_len, &kiocb->ki_pos);
2531                } else {
2532                        nr = file->f_op->write(file, iovec.iov_base,
2533                                               iovec.iov_len, &kiocb->ki_pos);
2534                }
2535
2536                if (iov_iter_is_bvec(iter))
2537                        kunmap(iter->bvec->bv_page);
2538
2539                if (nr < 0) {
2540                        if (!ret)
2541                                ret = nr;
2542                        break;
2543                }
2544                ret += nr;
2545                if (nr != iovec.iov_len)
2546                        break;
2547                iov_iter_advance(iter, nr);
2548        }
2549
2550        return ret;
2551}
2552
2553static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
2554                          struct iovec *iovec, struct iovec *fast_iov,
2555                          struct iov_iter *iter)
2556{
2557        req->io->rw.nr_segs = iter->nr_segs;
2558        req->io->rw.size = io_size;
2559        req->io->rw.iov = iovec;
2560        if (!req->io->rw.iov) {
2561                req->io->rw.iov = req->io->rw.fast_iov;
2562                if (req->io->rw.iov != fast_iov)
2563                        memcpy(req->io->rw.iov, fast_iov,
2564                               sizeof(struct iovec) * iter->nr_segs);
2565        } else {
2566                req->flags |= REQ_F_NEED_CLEANUP;
2567        }
2568}
2569
2570static inline int __io_alloc_async_ctx(struct io_kiocb *req)
2571{
2572        req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
2573        return req->io == NULL;
2574}
2575
2576static int io_alloc_async_ctx(struct io_kiocb *req)
2577{
2578        if (!io_op_defs[req->opcode].async_ctx)
2579                return 0;
2580
2581        return __io_alloc_async_ctx(req);
2582}
2583
2584static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2585                             struct iovec *iovec, struct iovec *fast_iov,
2586                             struct iov_iter *iter)
2587{
2588        if (!io_op_defs[req->opcode].async_ctx)
2589                return 0;
2590        if (!req->io) {
2591                if (__io_alloc_async_ctx(req))
2592                        return -ENOMEM;
2593
2594                io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2595        }
2596        return 0;
2597}
2598
2599static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2600                        bool force_nonblock)
2601{
2602        struct io_async_ctx *io;
2603        struct iov_iter iter;
2604        ssize_t ret;
2605
2606        ret = io_prep_rw(req, sqe, force_nonblock);
2607        if (ret)
2608                return ret;
2609
2610        if (unlikely(!(req->file->f_mode & FMODE_READ)))
2611                return -EBADF;
2612
2613        /* either don't need iovec imported or already have it */
2614        if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
2615                return 0;
2616
2617        io = req->io;
2618        io->rw.iov = io->rw.fast_iov;
2619        req->io = NULL;
2620        ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
2621        req->io = io;
2622        if (ret < 0)
2623                return ret;
2624
2625        io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2626        return 0;
2627}
2628
2629static int io_read(struct io_kiocb *req, bool force_nonblock)
2630{
2631        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2632        struct kiocb *kiocb = &req->rw.kiocb;
2633        struct iov_iter iter;
2634        size_t iov_count;
2635        ssize_t io_size, ret;
2636
2637        ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
2638        if (ret < 0)
2639                return ret;
2640
2641        /* Ensure we clear previously set non-block flag */
2642        if (!force_nonblock)
2643                kiocb->ki_flags &= ~IOCB_NOWAIT;
2644
2645        req->result = 0;
2646        io_size = ret;
2647        if (req->flags & REQ_F_LINK_HEAD)
2648                req->result = io_size;
2649
2650        /*
2651         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2652         * we know to async punt it even if it was opened O_NONBLOCK
2653         */
2654        if (force_nonblock && !io_file_supports_async(req->file, READ))
2655                goto copy_iov;
2656
2657        iov_count = iov_iter_count(&iter);
2658        ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
2659        if (!ret) {
2660                ssize_t ret2;
2661
2662                if (req->file->f_op->read_iter)
2663                        ret2 = call_read_iter(req->file, kiocb, &iter);
2664                else
2665                        ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
2666
2667                /* Catch -EAGAIN return for forced non-blocking submission */
2668                if (!force_nonblock || ret2 != -EAGAIN) {
2669                        kiocb_done(kiocb, ret2);
2670                } else {
2671copy_iov:
2672                        ret = io_setup_async_rw(req, io_size, iovec,
2673                                                inline_vecs, &iter);
2674                        if (ret)
2675                                goto out_free;
2676                        /* any defer here is final, must do a blocking retry */
2677                        if (!(req->flags & REQ_F_NOWAIT) &&
2678                            !file_can_poll(req->file))
2679                                req->flags |= REQ_F_MUST_PUNT;
2680                        return -EAGAIN;
2681                }
2682        }
2683out_free:
2684        if (!(req->flags & REQ_F_NEED_CLEANUP))
2685                kfree(iovec);
2686        return ret;
2687}
2688
2689static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2690                         bool force_nonblock)
2691{
2692        struct io_async_ctx *io;
2693        struct iov_iter iter;
2694        ssize_t ret;
2695
2696        ret = io_prep_rw(req, sqe, force_nonblock);
2697        if (ret)
2698                return ret;
2699
2700        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
2701                return -EBADF;
2702
2703        req->fsize = rlimit(RLIMIT_FSIZE);
2704
2705        /* either don't need iovec imported or already have it */
2706        if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
2707                return 0;
2708
2709        io = req->io;
2710        io->rw.iov = io->rw.fast_iov;
2711        req->io = NULL;
2712        ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
2713        req->io = io;
2714        if (ret < 0)
2715                return ret;
2716
2717        io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2718        return 0;
2719}
2720
2721static int io_write(struct io_kiocb *req, bool force_nonblock)
2722{
2723        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2724        struct kiocb *kiocb = &req->rw.kiocb;
2725        struct iov_iter iter;
2726        size_t iov_count;
2727        ssize_t ret, io_size;
2728
2729        ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
2730        if (ret < 0)
2731                return ret;
2732
2733        /* Ensure we clear previously set non-block flag */
2734        if (!force_nonblock)
2735                req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
2736
2737        req->result = 0;
2738        io_size = ret;
2739        if (req->flags & REQ_F_LINK_HEAD)
2740                req->result = io_size;
2741
2742        /*
2743         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2744         * we know to async punt it even if it was opened O_NONBLOCK
2745         */
2746        if (force_nonblock && !io_file_supports_async(req->file, WRITE))
2747                goto copy_iov;
2748
2749        /* file path doesn't support NOWAIT for non-direct IO */
2750        if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
2751            (req->flags & REQ_F_ISREG))
2752                goto copy_iov;
2753
2754        iov_count = iov_iter_count(&iter);
2755        ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
2756        if (!ret) {
2757                ssize_t ret2;
2758
2759                /*
2760                 * Open-code file_start_write here to grab freeze protection,
2761                 * which will be released by another thread in
2762                 * io_complete_rw().  Fool lockdep by telling it the lock got
2763                 * released so that it doesn't complain about the held lock when
2764                 * we return to userspace.
2765                 */
2766                if (req->flags & REQ_F_ISREG) {
2767                        __sb_start_write(file_inode(req->file)->i_sb,
2768                                                SB_FREEZE_WRITE, true);
2769                        __sb_writers_release(file_inode(req->file)->i_sb,
2770                                                SB_FREEZE_WRITE);
2771                }
2772                kiocb->ki_flags |= IOCB_WRITE;
2773
2774                if (!force_nonblock)
2775                        current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2776
2777                if (req->file->f_op->write_iter)
2778                        ret2 = call_write_iter(req->file, kiocb, &iter);
2779                else
2780                        ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
2781
2782                if (!force_nonblock)
2783                        current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2784
2785                /*
2786                 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
2787                 * retry them without IOCB_NOWAIT.
2788                 */
2789                if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
2790                        ret2 = -EAGAIN;
2791                if (!force_nonblock || ret2 != -EAGAIN) {
2792                        kiocb_done(kiocb, ret2);
2793                } else {
2794copy_iov:
2795                        ret = io_setup_async_rw(req, io_size, iovec,
2796                                                inline_vecs, &iter);
2797                        if (ret)
2798                                goto out_free;
2799                        /* any defer here is final, must do a blocking retry */
2800                        if (!(req->flags & REQ_F_NOWAIT) &&
2801                            !file_can_poll(req->file))
2802                                req->flags |= REQ_F_MUST_PUNT;
2803                        return -EAGAIN;
2804                }
2805        }
2806out_free:
2807        if (!(req->flags & REQ_F_NEED_CLEANUP))
2808                kfree(iovec);
2809        return ret;
2810}
2811
2812static int __io_splice_prep(struct io_kiocb *req,
2813                            const struct io_uring_sqe *sqe)
2814{
2815        struct io_splice *sp = &req->splice;
2816        unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
2817        int ret;
2818
2819        if (req->flags & REQ_F_NEED_CLEANUP)
2820                return 0;
2821        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2822                return -EINVAL;
2823
2824        sp->file_in = NULL;
2825        sp->len = READ_ONCE(sqe->len);
2826        sp->flags = READ_ONCE(sqe->splice_flags);
2827
2828        if (unlikely(sp->flags & ~valid_flags))
2829                return -EINVAL;
2830
2831        ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
2832                          (sp->flags & SPLICE_F_FD_IN_FIXED));
2833        if (ret)
2834                return ret;
2835        req->flags |= REQ_F_NEED_CLEANUP;
2836
2837        if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
2838                /*
2839                 * The splice operation will be punted async, and we need to
2840                 * modify io_wq_work.flags here, so initialize io_wq_work first.
2841                 */
2842                io_req_init_async(req);
2843                req->work.flags |= IO_WQ_WORK_UNBOUND;
2844        }
2845
2846        return 0;
2847}
2848
2849static int io_tee_prep(struct io_kiocb *req,
2850                       const struct io_uring_sqe *sqe)
2851{
2852        if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
2853                return -EINVAL;
2854        return __io_splice_prep(req, sqe);
2855}
2856
2857static int io_tee(struct io_kiocb *req, bool force_nonblock)
2858{
2859        struct io_splice *sp = &req->splice;
2860        struct file *in = sp->file_in;
2861        struct file *out = sp->file_out;
2862        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2863        long ret = 0;
2864
2865        if (force_nonblock)
2866                return -EAGAIN;
2867        if (sp->len)
2868                ret = do_tee(in, out, sp->len, flags);
2869
2870        io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2871        req->flags &= ~REQ_F_NEED_CLEANUP;
2872
2873        io_cqring_add_event(req, ret);
2874        if (ret != sp->len)
2875                req_set_fail_links(req);
2876        io_put_req(req);
2877        return 0;
2878}
2879
2880static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2881{
2882        struct io_splice *sp = &req->splice;
2883
2884        sp->off_in = READ_ONCE(sqe->splice_off_in);
2885        sp->off_out = READ_ONCE(sqe->off);
2886        return __io_splice_prep(req, sqe);
2887}
2888
2889static int io_splice(struct io_kiocb *req, bool force_nonblock)
2890{
2891        struct io_splice *sp = &req->splice;
2892        struct file *in = sp->file_in;
2893        struct file *out = sp->file_out;
2894        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2895        loff_t *poff_in, *poff_out;
2896        long ret = 0;
2897
2898        if (force_nonblock)
2899                return -EAGAIN;
2900
2901        poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
2902        poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
2903
2904        if (sp->len)
2905                ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
2906
2907        io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2908        req->flags &= ~REQ_F_NEED_CLEANUP;
2909
2910        io_cqring_add_event(req, ret);
2911        if (ret != sp->len)
2912                req_set_fail_links(req);
2913        io_put_req(req);
2914        return 0;
2915}
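/*
 * Illustrative submission (a sketch, assuming liburing): splice nbytes
 * from a pipe into a file through the ring, where -1 means "no offset":
 *
 *	io_uring_prep_splice(sqe, pipe_fd, -1, file_fd, file_off, nbytes, 0);
 */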
2916
2917/*
2918 * IORING_OP_NOP just posts a completion event, nothing else.
2919 */
2920static int io_nop(struct io_kiocb *req)
2921{
2922        struct io_ring_ctx *ctx = req->ctx;
2923
2924        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2925                return -EINVAL;
2926
2927        io_cqring_add_event(req, 0);
2928        io_put_req(req);
2929        return 0;
2930}
2931
2932static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2933{
2934        struct io_ring_ctx *ctx = req->ctx;
2935
2936        if (!req->file)
2937                return -EBADF;
2938
2939        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2940                return -EINVAL;
2941        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
2942                return -EINVAL;
2943
2944        req->sync.flags = READ_ONCE(sqe->fsync_flags);
2945        if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
2946                return -EINVAL;
2947
2948        req->sync.off = READ_ONCE(sqe->off);
2949        req->sync.len = READ_ONCE(sqe->len);
2950        return 0;
2951}
2952
2953static int io_fsync(struct io_kiocb *req, bool force_nonblock)
2954{
2955        loff_t end = req->sync.off + req->sync.len;
2956        int ret;
2957
2958        /* fsync always requires a blocking context */
2959        if (force_nonblock)
2960                return -EAGAIN;
2961
2962        ret = vfs_fsync_range(req->file, req->sync.off,
2963                                end > 0 ? end : LLONG_MAX,
2964                                req->sync.flags & IORING_FSYNC_DATASYNC);
2965        if (ret < 0)
2966                req_set_fail_links(req);
2967        io_cqring_add_event(req, ret);
2968        io_put_req(req);
2969        return 0;
2970}
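/*
 * Illustrative submission (a sketch, assuming liburing):
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *
 * A nonzero off/len pair in the SQE narrows the sync to that byte range,
 * as the vfs_fsync_range() call above shows.
 */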
2971
2972static int io_fallocate_prep(struct io_kiocb *req,
2973                             const struct io_uring_sqe *sqe)
2974{
2975        if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
2976                return -EINVAL;
2977        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2978                return -EINVAL;
2979
2980        req->sync.off = READ_ONCE(sqe->off);
2981        req->sync.len = READ_ONCE(sqe->addr);
2982        req->sync.mode = READ_ONCE(sqe->len);
2983        req->fsize = rlimit(RLIMIT_FSIZE);
2984        return 0;
2985}
2986
2987static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
2988{
2989        int ret;
2990
2991        /* fallocate always requires a blocking context */
2992        if (force_nonblock)
2993                return -EAGAIN;
2994
2995        current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2996        ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2997                                req->sync.len);
2998        current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2999        if (ret < 0)
3000                req_set_fail_links(req);
3001        io_cqring_add_event(req, ret);
3002        io_put_req(req);
3003        return 0;
3004}
3005
3006static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3007{
3008        const char __user *fname;
3009        int ret;
3010
3011        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3012                return -EINVAL;
3013        if (unlikely(sqe->ioprio || sqe->buf_index))
3014                return -EINVAL;
3015        if (unlikely(req->flags & REQ_F_FIXED_FILE))
3016                return -EBADF;
3017
3018        /* open.how should be already initialised */
3019        if (!(req->open.how.flags & O_PATH) && force_o_largefile())
3020                req->open.how.flags |= O_LARGEFILE;
3021
3022        req->open.dfd = READ_ONCE(sqe->fd);
3023        fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3024        req->open.filename = getname(fname);
3025        if (IS_ERR(req->open.filename)) {
3026                ret = PTR_ERR(req->open.filename);
3027                req->open.filename = NULL;
3028                return ret;
3029        }
3030        req->open.nofile = rlimit(RLIMIT_NOFILE);
3031        req->flags |= REQ_F_NEED_CLEANUP;
3032        return 0;
3033}
3034
3035static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3036{
3037        u64 flags, mode;
3038
3039        if (req->flags & REQ_F_NEED_CLEANUP)
3040                return 0;
3041        mode = READ_ONCE(sqe->len);
3042        flags = READ_ONCE(sqe->open_flags);
3043        req->open.how = build_open_how(flags, mode);
3044        return __io_openat_prep(req, sqe);
3045}
3046
3047static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3048{
3049        struct open_how __user *how;
3050        size_t len;
3051        int ret;
3052
3053        if (req->flags & REQ_F_NEED_CLEANUP)
3054                return 0;
3055        how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3056        len = READ_ONCE(sqe->len);
3057        if (len < OPEN_HOW_SIZE_VER0)
3058                return -EINVAL;
3059
3060        ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3061                                        len);
3062        if (ret)
3063                return ret;
3064
3065        return __io_openat_prep(req, sqe);
3066}
3067
3068static int io_openat2(struct io_kiocb *req, bool force_nonblock)
3069{
3070        struct open_flags op;
3071        struct file *file;
3072        int ret;
3073
3074        if (force_nonblock)
3075                return -EAGAIN;
3076
3077        ret = build_open_flags(&req->open.how, &op);
3078        if (ret)
3079                goto err;
3080
3081        ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
3082        if (ret < 0)
3083                goto err;
3084
3085        file = do_filp_open(req->open.dfd, req->open.filename, &op);
3086        if (IS_ERR(file)) {
3087                put_unused_fd(ret);
3088                ret = PTR_ERR(file);
3089        } else {
3090                fsnotify_open(file);
3091                fd_install(ret, file);
3092        }
3093err:
3094        putname(req->open.filename);
3095        req->flags &= ~REQ_F_NEED_CLEANUP;
3096        if (ret < 0)
3097                req_set_fail_links(req);
3098        io_cqring_add_event(req, ret);
3099        io_put_req(req);
3100        return 0;
3101}
3102
3103static int io_openat(struct io_kiocb *req, bool force_nonblock)
3104{
3105        return io_openat2(req, force_nonblock);
3106}
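
/*
 * Both opcodes funnel into io_openat2(): IORING_OP_OPENAT builds the
 * open_how from sqe->open_flags and sqe->len, while IORING_OP_OPENAT2
 * copies a struct open_how from userspace (copy_struct_from_user()
 * tolerates older/newer struct sizes). Illustrative setup for the
 * latter (not kernel code; 'sqe', 'path' and 'how' are placeholders):
 *
 *        struct open_how how = { .flags = O_RDWR | O_CLOEXEC };
 *
 *        sqe->opcode = IORING_OP_OPENAT2;
 *        sqe->fd = AT_FDCWD;
 *        sqe->addr = (unsigned long) path;
 *        sqe->addr2 = (unsigned long) &how;
 *        sqe->len = sizeof(how);
 */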
3107
3108static int io_remove_buffers_prep(struct io_kiocb *req,
3109                                  const struct io_uring_sqe *sqe)
3110{
3111        struct io_provide_buf *p = &req->pbuf;
3112        u64 tmp;
3113
3114        if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3115                return -EINVAL;
3116
3117        tmp = READ_ONCE(sqe->fd);
3118        if (!tmp || tmp > USHRT_MAX)
3119                return -EINVAL;
3120
3121        memset(p, 0, sizeof(*p));
3122        p->nbufs = tmp;
3123        p->bgid = READ_ONCE(sqe->buf_group);
3124        return 0;
3125}
3126
3127static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3128                               int bgid, unsigned nbufs)
3129{
3130        unsigned i = 0;
3131
3132        /* shouldn't happen */
3133        if (!nbufs)
3134                return 0;
3135
3136        /* the head kbuf is the list itself */
3137        while (!list_empty(&buf->list)) {
3138                struct io_buffer *nxt;
3139
3140                nxt = list_first_entry(&buf->list, struct io_buffer, list);
3141                list_del(&nxt->list);
3142                kfree(nxt);
3143                if (++i == nbufs)
3144                        return i;
3145        }
3146        i++;
3147        kfree(buf);
3148        idr_remove(&ctx->io_buffer_idr, bgid);
3149
3150        return i;
3151}
3152
3153static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
3154{
3155        struct io_provide_buf *p = &req->pbuf;
3156        struct io_ring_ctx *ctx = req->ctx;
3157        struct io_buffer *head;
3158        int ret = 0;
3159
3160        io_ring_submit_lock(ctx, !force_nonblock);
3161
3162        lockdep_assert_held(&ctx->uring_lock);
3163
3164        ret = -ENOENT;
3165        head = idr_find(&ctx->io_buffer_idr, p->bgid);
3166        if (head)
3167                ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3168
3169        io_ring_submit_unlock(ctx, !force_nonblock);
3170        if (ret < 0)
3171                req_set_fail_links(req);
3172        io_cqring_add_event(req, ret);
3173        io_put_req(req);
3174        return 0;
3175}
3176
3177static int io_provide_buffers_prep(struct io_kiocb *req,
3178                                   const struct io_uring_sqe *sqe)
3179{
3180        struct io_provide_buf *p = &req->pbuf;
3181        u64 tmp;
3182
3183        if (sqe->ioprio || sqe->rw_flags)
3184                return -EINVAL;
3185
3186        tmp = READ_ONCE(sqe->fd);
3187        if (!tmp || tmp > USHRT_MAX)
3188                return -E2BIG;
3189        p->nbufs = tmp;
3190        p->addr = READ_ONCE(sqe->addr);
3191        p->len = READ_ONCE(sqe->len);
3192
3193        if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
3194                return -EFAULT;
3195
3196        p->bgid = READ_ONCE(sqe->buf_group);
3197        tmp = READ_ONCE(sqe->off);
3198        if (tmp > USHRT_MAX)
3199                return -E2BIG;
3200        p->bid = tmp;
3201        return 0;
3202}
3203
3204static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3205{
3206        struct io_buffer *buf;
3207        u64 addr = pbuf->addr;
3208        int i, bid = pbuf->bid;
3209
3210        for (i = 0; i < pbuf->nbufs; i++) {
3211                buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3212                if (!buf)
3213                        break;
3214
3215                buf->addr = addr;
3216                buf->len = pbuf->len;
3217                buf->bid = bid;
3218                addr += pbuf->len;
3219                bid++;
3220                if (!*head) {
3221                        INIT_LIST_HEAD(&buf->list);
3222                        *head = buf;
3223                } else {
3224                        list_add_tail(&buf->list, &(*head)->list);
3225                }
3226        }
3227
3228        return i ? i : -ENOMEM;
3229}
3230
3231static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
3232{
3233        struct io_provide_buf *p = &req->pbuf;
3234        struct io_ring_ctx *ctx = req->ctx;
3235        struct io_buffer *head, *list;
3236        int ret = 0;
3237
3238        io_ring_submit_lock(ctx, !force_nonblock);
3239
3240        lockdep_assert_held(&ctx->uring_lock);
3241
3242        list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3243
3244        ret = io_add_buffers(p, &head);
3245        if (ret < 0)
3246                goto out;
3247
3248        if (!list) {
3249                ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3250                                        GFP_KERNEL);
3251                if (ret < 0) {
3252                        __io_remove_buffers(ctx, head, p->bgid, -1U);
3253                        goto out;
3254                }
3255        }
3256out:
3257        io_ring_submit_unlock(ctx, !force_nonblock);
3258        if (ret < 0)
3259                req_set_fail_links(req);
3260        io_cqring_add_event(req, ret);
3261        io_put_req(req);
3262        return 0;
3263}
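
/*
 * Illustrative flow (not kernel code; all names are placeholders):
 * userspace registers 'nbufs' equally sized chunks of one region under
 * group id 'bgid', then issues reads/receives with IOSQE_BUFFER_SELECT
 * set and ->buf_group = bgid; the kernel picks a buffer and reports its
 * id back in cqe->flags (see io_recv_buffer_select() below):
 *
 *        sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
 *        sqe->fd = nbufs;                     // the count lives in ->fd
 *        sqe->addr = (unsigned long) base;    // start of the region
 *        sqe->len = buf_len;                  // size of each buffer
 *        sqe->buf_group = bgid;
 *        sqe->off = first_bid;                // id of the first buffer
 */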
3264
3265static int io_epoll_ctl_prep(struct io_kiocb *req,
3266                             const struct io_uring_sqe *sqe)
3267{
3268#if defined(CONFIG_EPOLL)
3269        if (sqe->ioprio || sqe->buf_index)
3270                return -EINVAL;
3271        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3272                return -EINVAL;
3273
3274        req->epoll.epfd = READ_ONCE(sqe->fd);
3275        req->epoll.op = READ_ONCE(sqe->len);
3276        req->epoll.fd = READ_ONCE(sqe->off);
3277
3278        if (ep_op_has_event(req->epoll.op)) {
3279                struct epoll_event __user *ev;
3280
3281                ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
3282                if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
3283                        return -EFAULT;
3284        }
3285
3286        return 0;
3287#else
3288        return -EOPNOTSUPP;
3289#endif
3290}
3291
3292static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
3293{
3294#if defined(CONFIG_EPOLL)
3295        struct io_epoll *ie = &req->epoll;
3296        int ret;
3297
3298        ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
3299        if (force_nonblock && ret == -EAGAIN)
3300                return -EAGAIN;
3301
3302        if (ret < 0)
3303                req_set_fail_links(req);
3304        io_cqring_add_event(req, ret);
3305        io_put_req(req);
3306        return 0;
3307#else
3308        return -EOPNOTSUPP;
3309#endif
3310}
3311
3312static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3313{
3314#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3315        if (sqe->ioprio || sqe->buf_index || sqe->off)
3316                return -EINVAL;
3317        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3318                return -EINVAL;
3319
3320        req->madvise.addr = READ_ONCE(sqe->addr);
3321        req->madvise.len = READ_ONCE(sqe->len);
3322        req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3323        return 0;
3324#else
3325        return -EOPNOTSUPP;
3326#endif
3327}
3328
3329static int io_madvise(struct io_kiocb *req, bool force_nonblock)
3330{
3331#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3332        struct io_madvise *ma = &req->madvise;
3333        int ret;
3334
3335        if (force_nonblock)
3336                return -EAGAIN;
3337
3338        ret = do_madvise(ma->addr, ma->len, ma->advice);
3339        if (ret < 0)
3340                req_set_fail_links(req);
3341        io_cqring_add_event(req, ret);
3342        io_put_req(req);
3343        return 0;
3344#else
3345        return -EOPNOTSUPP;
3346#endif
3347}
3348
3349static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3350{
3351        if (sqe->ioprio || sqe->buf_index || sqe->addr)
3352                return -EINVAL;
3353        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3354                return -EINVAL;
3355
3356        req->fadvise.offset = READ_ONCE(sqe->off);
3357        req->fadvise.len = READ_ONCE(sqe->len);
3358        req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
3359        return 0;
3360}
3361
3362static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
3363{
3364        struct io_fadvise *fa = &req->fadvise;
3365        int ret;
3366
3367        if (force_nonblock) {
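                /* these advice types only tune readahead hints, no IO is started */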
3368                switch (fa->advice) {
3369                case POSIX_FADV_NORMAL:
3370                case POSIX_FADV_RANDOM:
3371                case POSIX_FADV_SEQUENTIAL:
3372                        break;
3373                default:
3374                        return -EAGAIN;
3375                }
3376        }
3377
3378        ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
3379        if (ret < 0)
3380                req_set_fail_links(req);
3381        io_cqring_add_event(req, ret);
3382        io_put_req(req);
3383        return 0;
3384}
3385
3386static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3387{
3388        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3389                return -EINVAL;
3390        if (sqe->ioprio || sqe->buf_index)
3391                return -EINVAL;
3392        if (req->flags & REQ_F_FIXED_FILE)
3393                return -EBADF;
3394
3395        req->statx.dfd = READ_ONCE(sqe->fd);
3396        req->statx.mask = READ_ONCE(sqe->len);
3397        req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
3398        req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3399        req->statx.flags = READ_ONCE(sqe->statx_flags);
3400
3401        return 0;
3402}
3403
3404static int io_statx(struct io_kiocb *req, bool force_nonblock)
3405{
3406        struct io_statx *ctx = &req->statx;
3407        int ret;
3408
3409        if (force_nonblock) {
3410                /* only need file table for an actual valid fd */
3411                if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
3412                        req->flags |= REQ_F_NO_FILE_TABLE;
3413                return -EAGAIN;
3414        }
3415
3416        ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
3417                       ctx->buffer);
3418
3419        if (ret < 0)
3420                req_set_fail_links(req);
3421        io_cqring_add_event(req, ret);
3422        io_put_req(req);
3423        return 0;
3424}
3425
3426static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3427{
3428        /*
3429         * If we queue this for async, it must not be cancellable. That would
3430         * leave the 'file' in an indeterminate state. We also need to modify
3431         * io_wq_work.flags here, so initialize io_wq_work first.
3432         */
3433        io_req_init_async(req);
3434        req->work.flags |= IO_WQ_WORK_NO_CANCEL;
3435
3436        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3437                return -EINVAL;
3438        if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
3439            sqe->rw_flags || sqe->buf_index)
3440                return -EINVAL;
3441        if (req->flags & REQ_F_FIXED_FILE)
3442                return -EBADF;
3443
3444        req->close.fd = READ_ONCE(sqe->fd);
3445        if ((req->file && req->file->f_op == &io_uring_fops) ||
3446            req->close.fd == req->ctx->ring_fd)
3447                return -EBADF;
3448
3449        req->close.put_file = NULL;
3450        return 0;
3451}
3452
3453static int io_close(struct io_kiocb *req, bool force_nonblock)
3454{
3455        struct io_close *close = &req->close;
3456        int ret;
3457
3458        /* might already be done during nonblock submission */
3459        if (!close->put_file) {
3460                ret = __close_fd_get_file(close->fd, &close->put_file);
3461                if (ret < 0)
3462                        return (ret == -ENOENT) ? -EBADF : ret;
3463        }
3464
3465        /* if the file has a flush method, be safe and punt to async */
3466        if (close->put_file->f_op->flush && force_nonblock) {
3467                /* avoid grabbing files - we don't need them */
3468                req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
3469                return -EAGAIN;
3470        }
3471
3472        /* No ->flush() or already async, safely close from here */
3473        ret = filp_close(close->put_file, req->work.files);
3474        if (ret < 0)
3475                req_set_fail_links(req);
3476        io_cqring_add_event(req, ret);
3477        fput(close->put_file);
3478        close->put_file = NULL;
3479        io_put_req(req);
3480        return 0;
3481}
3482
3483static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3484{
3485        struct io_ring_ctx *ctx = req->ctx;
3486
3487        if (!req->file)
3488                return -EBADF;
3489
3490        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3491                return -EINVAL;
3492        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3493                return -EINVAL;
3494
3495        req->sync.off = READ_ONCE(sqe->off);
3496        req->sync.len = READ_ONCE(sqe->len);
3497        req->sync.flags = READ_ONCE(sqe->sync_range_flags);
3498        return 0;
3499}
3500
3501static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
3502{
3503        int ret;
3504
3505        /* sync_file_range always requires a blocking context */
3506        if (force_nonblock)
3507                return -EAGAIN;
3508
3509        ret = sync_file_range(req->file, req->sync.off, req->sync.len,
3510                                req->sync.flags);
3511        if (ret < 0)
3512                req_set_fail_links(req);
3513        io_cqring_add_event(req, ret);
3514        io_put_req(req);
3515        return 0;
3516}
3517
3518#if defined(CONFIG_NET)
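/*
 * Stash the (possibly on-stack) msghdr state into the request's async
 * context so a send/recv that would block can be retried from io-wq.
 * Returns -EAGAIN to trigger the punt, or -ENOMEM if the async context
 * cannot be allocated.
 */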
3519static int io_setup_async_msg(struct io_kiocb *req,
3520                              struct io_async_msghdr *kmsg)
3521{
3522        if (req->io)
3523                return -EAGAIN;
3524        if (io_alloc_async_ctx(req)) {
3525                if (kmsg->iov != kmsg->fast_iov)
3526                        kfree(kmsg->iov);
3527                return -ENOMEM;
3528        }
3529        req->flags |= REQ_F_NEED_CLEANUP;
3530        memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
3531        return -EAGAIN;
3532}
3533
3534static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3535{
3536        struct io_sr_msg *sr = &req->sr_msg;
3537        struct io_async_ctx *io = req->io;
3538        int ret;
3539
3540        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3541                return -EINVAL;
3542
3543        sr->msg_flags = READ_ONCE(sqe->msg_flags);
3544        sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
3545        sr->len = READ_ONCE(sqe->len);
3546
3547#ifdef CONFIG_COMPAT
3548        if (req->ctx->compat)
3549                sr->msg_flags |= MSG_CMSG_COMPAT;
3550#endif
3551
3552        if (!io || req->opcode == IORING_OP_SEND)
3553                return 0;
3554        /* iovec is already imported */
3555        if (req->flags & REQ_F_NEED_CLEANUP)
3556                return 0;
3557
3558        io->msg.msg.msg_name = &io->msg.addr;
3559        io->msg.iov = io->msg.fast_iov;
3560        ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
3561                                        &io->msg.iov);
3562        if (!ret)
3563                req->flags |= REQ_F_NEED_CLEANUP;
3564        return ret;
3565}
3566
3567static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
3568{
3569        struct io_async_msghdr *kmsg = NULL;
3570        struct socket *sock;
3571        int ret;
3572
3573        sock = sock_from_file(req->file, &ret);
3574        if (sock) {
3575                struct io_async_ctx io;
3576                unsigned flags;
3577
3578                if (req->io) {
3579                        kmsg = &req->io->msg;
3580                        kmsg->msg.msg_name = &req->io->msg.addr;
3581                        /* if iov is set, it's allocated already */
3582                        if (!kmsg->iov)
3583                                kmsg->iov = kmsg->fast_iov;
3584                        kmsg->msg.msg_iter.iov = kmsg->iov;
3585                } else {
3586                        struct io_sr_msg *sr = &req->sr_msg;
3587
3588                        kmsg = &io.msg;
3589                        kmsg->msg.msg_name = &io.msg.addr;
3590
3591                        io.msg.iov = io.msg.fast_iov;
3592                        ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
3593                                        sr->msg_flags, &io.msg.iov);
3594                        if (ret)
3595                                return ret;
3596                }
3597
3598                flags = req->sr_msg.msg_flags;
3599                if (flags & MSG_DONTWAIT)
3600                        req->flags |= REQ_F_NOWAIT;
3601                else if (force_nonblock)
3602                        flags |= MSG_DONTWAIT;
3603
3604                ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
3605                if (force_nonblock && ret == -EAGAIN)
3606                        return io_setup_async_msg(req, kmsg);
3607                if (ret == -ERESTARTSYS)
3608                        ret = -EINTR;
3609        }
3610
3611        if (kmsg && kmsg->iov != kmsg->fast_iov)
3612                kfree(kmsg->iov);
3613        req->flags &= ~REQ_F_NEED_CLEANUP;
3614        io_cqring_add_event(req, ret);
3615        if (ret < 0)
3616                req_set_fail_links(req);
3617        io_put_req(req);
3618        return 0;
3619}
3620
3621static int io_send(struct io_kiocb *req, bool force_nonblock)
3622{
3623        struct socket *sock;
3624        int ret;
3625
3626        sock = sock_from_file(req->file, &ret);
3627        if (sock) {
3628                struct io_sr_msg *sr = &req->sr_msg;
3629                struct msghdr msg;
3630                struct iovec iov;
3631                unsigned flags;
3632
3633                ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
3634                                                &msg.msg_iter);
3635                if (ret)
3636                        return ret;
3637
3638                msg.msg_name = NULL;
3639                msg.msg_control = NULL;
3640                msg.msg_controllen = 0;
3641                msg.msg_namelen = 0;
3642
3643                flags = req->sr_msg.msg_flags;
3644                if (flags & MSG_DONTWAIT)
3645                        req->flags |= REQ_F_NOWAIT;
3646                else if (force_nonblock)
3647                        flags |= MSG_DONTWAIT;
3648
3649                msg.msg_flags = flags;
3650                ret = sock_sendmsg(sock, &msg);
3651                if (force_nonblock && ret == -EAGAIN)
3652                        return -EAGAIN;
3653                if (ret == -ERESTARTSYS)
3654                        ret = -EINTR;
3655        }
3656
3657        io_cqring_add_event(req, ret);
3658        if (ret < 0)
3659                req_set_fail_links(req);
3660        io_put_req(req);
3661        return 0;
3662}
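
/*
 * Note the ->addr reuse between the two opcodes: for IORING_OP_SENDMSG
 * it points at a struct msghdr in userspace, while for IORING_OP_SEND it
 * is the data buffer itself, imported via import_single_range() above.
 */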
3663
3664static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3665{
3666        struct io_sr_msg *sr = &req->sr_msg;
3667        struct iovec __user *uiov;
3668        size_t iov_len;
3669        int ret;
3670
3671        ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr,
3672                                        &uiov, &iov_len);
3673        if (ret)
3674                return ret;
3675
3676        if (req->flags & REQ_F_BUFFER_SELECT) {
3677                if (iov_len > 1)
3678                        return -EINVAL;
3679                if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov)))
3680                        return -EFAULT;
3681                sr->len = io->msg.iov[0].iov_len;
3682                iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1,
3683                                sr->len);
3684                io->msg.iov = NULL;
3685        } else {
3686                ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
3687                                        &io->msg.iov, &io->msg.msg.msg_iter);
3688                if (ret > 0)
3689                        ret = 0;
3690        }
3691
3692        return ret;
3693}
3694
3695#ifdef CONFIG_COMPAT
3696static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
3697                                        struct io_async_ctx *io)
3698{
3699        struct compat_msghdr __user *msg_compat;
3700        struct io_sr_msg *sr = &req->sr_msg;
3701        struct compat_iovec __user *uiov;
3702        compat_uptr_t ptr;
3703        compat_size_t len;
3704        int ret;
3705
3706        msg_compat = (struct compat_msghdr __user *) sr->msg;
3707        ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr,
3708                                        &ptr, &len);
3709        if (ret)
3710                return ret;
3711
3712        uiov = compat_ptr(ptr);
3713        if (req->flags & REQ_F_BUFFER_SELECT) {
3714                compat_ssize_t clen;
3715
3716                if (len > 1)
3717                        return -EINVAL;
3718                if (!access_ok(uiov, sizeof(*uiov)))
3719                        return -EFAULT;
3720                if (__get_user(clen, &uiov->iov_len))
3721                        return -EFAULT;
3722                if (clen < 0)
3723                        return -EINVAL;
3724                sr->len = clen;
3725                io->msg.iov = NULL;
3726        } else {
3727                ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
3728                                                &io->msg.iov,
3729                                                &io->msg.msg.msg_iter);
3730                if (ret < 0)
3731                        return ret;
3732        }
3733
3734        return 0;
3735}
3736#endif
3737
3738static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3739{
3740        io->msg.msg.msg_name = &io->msg.addr;
3741        io->msg.iov = io->msg.fast_iov;
3742
3743#ifdef CONFIG_COMPAT
3744        if (req->ctx->compat)
3745                return __io_compat_recvmsg_copy_hdr(req, io);
3746#endif
3747
3748        return __io_recvmsg_copy_hdr(req, io);
3749}
3750
3751static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
3752                                               int *cflags, bool needs_lock)
3753{
3754        struct io_sr_msg *sr = &req->sr_msg;
3755        struct io_buffer *kbuf;
3756
3757        if (!(req->flags & REQ_F_BUFFER_SELECT))
3758                return NULL;
3759
3760        kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
3761        if (IS_ERR(kbuf))
3762                return kbuf;
3763
3764        sr->kbuf = kbuf;
3765        req->flags |= REQ_F_BUFFER_SELECTED;
3766
3767        *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
3768        *cflags |= IORING_CQE_F_BUFFER;
3769        return kbuf;
3770}
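
/*
 * Userspace side of the above (illustrative, not kernel code): when
 * IORING_CQE_F_BUFFER is set, the upper bits of cqe->flags identify
 * which provided buffer holds the data:
 *
 *        if (cqe->flags & IORING_CQE_F_BUFFER)
 *                bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */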
3771
3772static int io_recvmsg_prep(struct io_kiocb *req,
3773                           const struct io_uring_sqe *sqe)
3774{
3775        struct io_sr_msg *sr = &req->sr_msg;
3776        struct io_async_ctx *io = req->io;
3777        int ret;
3778
3779        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3780                return -EINVAL;
3781
3782        sr->msg_flags = READ_ONCE(sqe->msg_flags);
3783        sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
3784        sr->len = READ_ONCE(sqe->len);
3785        sr->bgid = READ_ONCE(sqe->buf_group);
3786
3787#ifdef CONFIG_COMPAT
3788        if (req->ctx->compat)
3789                sr->msg_flags |= MSG_CMSG_COMPAT;
3790#endif
3791
3792        if (!io || req->opcode == IORING_OP_RECV)
3793                return 0;
3794        /* iovec is already imported */
3795        if (req->flags & REQ_F_NEED_CLEANUP)
3796                return 0;
3797
3798        ret = io_recvmsg_copy_hdr(req, io);
3799        if (!ret)
3800                req->flags |= REQ_F_NEED_CLEANUP;
3801        return ret;
3802}
3803
3804static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
3805{
3806        struct io_async_msghdr *kmsg = NULL;
3807        struct socket *sock;
3808        int ret, cflags = 0;
3809
3810        sock = sock_from_file(req->file, &ret);
3811        if (sock) {
3812                struct io_buffer *kbuf;
3813                struct io_async_ctx io;
3814                unsigned flags;
3815
3816                if (req->io) {
3817                        kmsg = &req->io->msg;
3818                        kmsg->msg.msg_name = &req->io->msg.addr;
3819                        /* if iov is set, it's allocated already */
3820                        if (!kmsg->iov)
3821                                kmsg->iov = kmsg->fast_iov;
3822                        kmsg->msg.msg_iter.iov = kmsg->iov;
3823                } else {
3824                        kmsg = &io.msg;
3825                        kmsg->msg.msg_name = &io.msg.addr;
3826
3827                        ret = io_recvmsg_copy_hdr(req, &io);
3828                        if (ret)
3829                                return ret;
3830                }
3831
3832                kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3833                if (IS_ERR(kbuf)) {
3834                        return PTR_ERR(kbuf);
3835                } else if (kbuf) {
3836                        kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3837                        iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
3838                                        1, req->sr_msg.len);
3839                }
3840
3841                flags = req->sr_msg.msg_flags;
3842                if (flags & MSG_DONTWAIT)
3843                        req->flags |= REQ_F_NOWAIT;
3844                else if (force_nonblock)
3845                        flags |= MSG_DONTWAIT;
3846
3847                ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
3848                                                kmsg->uaddr, flags);
3849                if (force_nonblock && ret == -EAGAIN) {
3850                        ret = io_setup_async_msg(req, kmsg);
3851                        if (ret != -EAGAIN)
3852                                kfree(kbuf);
3853                        return ret;
3854                }
3855                if (ret == -ERESTARTSYS)
3856                        ret = -EINTR;
3857                if (kbuf)
3858                        kfree(kbuf);
3859        }
3860
3861        if (kmsg && kmsg->iov != kmsg->fast_iov)
3862                kfree(kmsg->iov);
3863        req->flags &= ~REQ_F_NEED_CLEANUP;
3864        __io_cqring_add_event(req, ret, cflags);
3865        if (ret < 0)
3866                req_set_fail_links(req);
3867        io_put_req(req);
3868        return 0;
3869}
3870
3871static int io_recv(struct io_kiocb *req, bool force_nonblock)
3872{
3873        struct io_buffer *kbuf = NULL;
3874        struct socket *sock;
3875        int ret, cflags = 0;
3876
3877        sock = sock_from_file(req->file, &ret);
3878        if (sock) {
3879                struct io_sr_msg *sr = &req->sr_msg;
3880                void __user *buf = sr->buf;
3881                struct msghdr msg;
3882                struct iovec iov;
3883                unsigned flags;
3884
3885                kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3886                if (IS_ERR(kbuf))
3887                        return PTR_ERR(kbuf);
3888                else if (kbuf)
3889                        buf = u64_to_user_ptr(kbuf->addr);
3890
3891                ret = import_single_range(READ, buf, sr->len, &iov,
3892                                                &msg.msg_iter);
3893                if (ret) {
3894                        kfree(kbuf);
3895                        return ret;
3896                }
3897
3898                req->flags |= REQ_F_NEED_CLEANUP;
3899                msg.msg_name = NULL;
3900                msg.msg_control = NULL;
3901                msg.msg_controllen = 0;
3902                msg.msg_namelen = 0;
3903                msg.msg_iocb = NULL;
3904                msg.msg_flags = 0;
3905
3906                flags = req->sr_msg.msg_flags;
3907                if (flags & MSG_DONTWAIT)
3908                        req->flags |= REQ_F_NOWAIT;
3909                else if (force_nonblock)
3910                        flags |= MSG_DONTWAIT;
3911
3912                ret = sock_recvmsg(sock, &msg, flags);
3913                if (force_nonblock && ret == -EAGAIN)
3914                        return -EAGAIN;
3915                if (ret == -ERESTARTSYS)
3916                        ret = -EINTR;
3917        }
3918
3919        kfree(kbuf);
3920        req->flags &= ~REQ_F_NEED_CLEANUP;
3921        __io_cqring_add_event(req, ret, cflags);
3922        if (ret < 0)
3923                req_set_fail_links(req);
3924        io_put_req(req);
3925        return 0;
3926}
3927
3928static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3929{
3930        struct io_accept *accept = &req->accept;
3931
3932        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3933                return -EINVAL;
3934        if (sqe->ioprio || sqe->len || sqe->buf_index)
3935                return -EINVAL;
3936
3937        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3938        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3939        accept->flags = READ_ONCE(sqe->accept_flags);
3940        accept->nofile = rlimit(RLIMIT_NOFILE);
3941        return 0;
3942}
3943
3944static int io_accept(struct io_kiocb *req, bool force_nonblock)
3945{
3946        struct io_accept *accept = &req->accept;
3947        unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
3948        int ret;
3949
3950        if (req->file->f_flags & O_NONBLOCK)
3951                req->flags |= REQ_F_NOWAIT;
3952
3953        ret = __sys_accept4_file(req->file, file_flags, accept->addr,
3954                                        accept->addr_len, accept->flags,
3955                                        accept->nofile);
3956        if (ret == -EAGAIN && force_nonblock)
3957                return -EAGAIN;
3958        if (ret < 0) {
3959                if (ret == -ERESTARTSYS)
3960                        ret = -EINTR;
3961                req_set_fail_links(req);
3962        }
3963        io_cqring_add_event(req, ret);
3964        io_put_req(req);
3965        return 0;
3966}
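
/*
 * Illustrative SQE setup (not kernel code; all names are placeholders),
 * mirroring io_accept_prep() above:
 *
 *        sqe->opcode = IORING_OP_ACCEPT;
 *        sqe->fd = listen_fd;
 *        sqe->addr = (unsigned long) &peer;       // may be 0 if not wanted
 *        sqe->addr2 = (unsigned long) &peer_len;
 *        sqe->accept_flags = SOCK_CLOEXEC;
 */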
3967
3968static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3969{
3970        struct io_connect *conn = &req->connect;
3971        struct io_async_ctx *io = req->io;
3972
3973        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3974                return -EINVAL;
3975        if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
3976                return -EINVAL;
3977
3978        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3979        conn->addr_len = READ_ONCE(sqe->addr2);
3980
3981        if (!io)
3982                return 0;
3983
3984        return move_addr_to_kernel(conn->addr, conn->addr_len,
3985                                        &io->connect.address);
3986}
3987
3988static int io_connect(struct io_kiocb *req, bool force_nonblock)
3989{
3990        struct io_async_ctx __io, *io;
3991        unsigned file_flags;
3992        int ret;
3993
3994        if (req->io) {
3995                io = req->io;
3996        } else {
3997                ret = move_addr_to_kernel(req->connect.addr,
3998                                                req->connect.addr_len,
3999                                                &__io.connect.address);
4000                if (ret)
4001                        goto out;
4002                io = &__io;
4003        }
4004
4005        file_flags = force_nonblock ? O_NONBLOCK : 0;
4006
4007        ret = __sys_connect_file(req->file, &io->connect.address,
4008                                        req->connect.addr_len, file_flags);
4009        if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
4010                if (req->io)
4011                        return -EAGAIN;
4012                if (io_alloc_async_ctx(req)) {
4013                        ret = -ENOMEM;
4014                        goto out;
4015                }
4016                memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
4017                return -EAGAIN;
4018        }
4019        if (ret == -ERESTARTSYS)
4020                ret = -EINTR;
4021out:
4022        if (ret < 0)
4023                req_set_fail_links(req);
4024        io_cqring_add_event(req, ret);
4025        io_put_req(req);
4026        return 0;
4027}
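
/*
 * The sockaddr is copied into kernel memory at prep time (and again here
 * before punting) because an async retry runs after submission returns,
 * when the original userspace sockaddr may no longer be stable.
 */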
4028#else /* !CONFIG_NET */
4029static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4030{
4031        return -EOPNOTSUPP;
4032}
4033
4034static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
4035{
4036        return -EOPNOTSUPP;
4037}
4038
4039static int io_send(struct io_kiocb *req, bool force_nonblock)
4040{
4041        return -EOPNOTSUPP;
4042}
4043
4044static int io_recvmsg_prep(struct io_kiocb *req,
4045                           const struct io_uring_sqe *sqe)
4046{
4047        return -EOPNOTSUPP;
4048}
4049
4050static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
4051{
4052        return -EOPNOTSUPP;
4053}
4054
4055static int io_recv(struct io_kiocb *req, bool force_nonblock)
4056{
4057        return -EOPNOTSUPP;
4058}
4059
4060static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4061{
4062        return -EOPNOTSUPP;
4063}
4064
4065static int io_accept(struct io_kiocb *req, bool force_nonblock)
4066{
4067        return -EOPNOTSUPP;
4068}
4069
4070static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4071{
4072        return -EOPNOTSUPP;
4073}
4074
4075static int io_connect(struct io_kiocb *req, bool force_nonblock)
4076{
4077        return -EOPNOTSUPP;
4078}
4079#endif /* CONFIG_NET */
4080
4081struct io_poll_table {
4082        struct poll_table_struct pt;
4083        struct io_kiocb *req;
4084        int error;
4085};
4086
4087static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
4088{
4089        struct task_struct *tsk = req->task;
4090        struct io_ring_ctx *ctx = req->ctx;
4091        int ret, notify = TWA_RESUME;
4092
4093        /*
4094         * SQPOLL kernel thread doesn't need notification, just a wakeup.
4095         * If we're not using an eventfd, then TWA_RESUME is always fine,
4096         * as we won't have dependencies between request completions for
4097         * other kernel wait conditions.
4098         */
4099        if (ctx->flags & IORING_SETUP_SQPOLL)
4100                notify = 0;
4101        else if (ctx->cq_ev_fd)
4102                notify = TWA_SIGNAL;
4103
4104        ret = task_work_add(tsk, cb, notify);
4105        if (!ret)
4106                wake_up_process(tsk);
4107        return ret;
4108}
4109
4110static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4111                           __poll_t mask, task_work_func_t func)
4112{
4113        struct task_struct *tsk;
4114        int ret;
4115
4116        /* for instances that support it, check for an event match first: */
4117        if (mask && !(mask & poll->events))
4118                return 0;
4119
4120        trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4121
4122        list_del_init(&poll->wait.entry);
4123
4124        tsk = req->task;
4125        req->result = mask;
4126        init_task_work(&req->task_work, func);
4127        /*
4128         * If this fails, then the task is exiting. When a task exits, the
4129         * work gets canceled, so just cancel this request as well instead
4130         * of executing it. We can't safely execute it anyway, as we may
4131         * not have the state it needs.
4132         */
4133        ret = io_req_task_work_add(req, &req->task_work);
4134        if (unlikely(ret)) {
4135                WRITE_ONCE(poll->canceled, true);
4136                tsk = io_wq_get_task(req->ctx->io_wq);
4137                task_work_add(tsk, &req->task_work, 0);
4138                wake_up_process(tsk);
4139        }
4140        return 1;
4141}
4142
4143static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4144        __acquires(&req->ctx->completion_lock)
4145{
4146        struct io_ring_ctx *ctx = req->ctx;
4147
4148        if (!req->result && !READ_ONCE(poll->canceled)) {
4149                struct poll_table_struct pt = { ._key = poll->events };
4150
4151                req->result = vfs_poll(req->file, &pt) & poll->events;
4152        }
4153
4154        spin_lock_irq(&ctx->completion_lock);
4155        if (!req->result && !READ_ONCE(poll->canceled)) {
4156                add_wait_queue(poll->head, &poll->wait);
4157                return true;
4158        }
4159
4160        return false;
4161}
4162
4163static void io_poll_remove_double(struct io_kiocb *req, void *data)
4164{
4165        struct io_poll_iocb *poll = data;
4166
4167        lockdep_assert_held(&req->ctx->completion_lock);
4168
4169        if (poll && poll->head) {
4170                struct wait_queue_head *head = poll->head;
4171
4172                spin_lock(&head->lock);
4173                list_del_init(&poll->wait.entry);
4174                if (poll->wait.private)
4175                        refcount_dec(&req->refs);
4176                poll->head = NULL;
4177                spin_unlock(&head->lock);
4178        }
4179}
4180
4181static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4182{
4183        struct io_ring_ctx *ctx = req->ctx;
4184
4185        io_poll_remove_double(req, req->io);
4186        req->poll.done = true;
4187        io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4188        io_commit_cqring(ctx);
4189}
4190
4191static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4192{
4193        struct io_ring_ctx *ctx = req->ctx;
4194
4195        if (io_poll_rewait(req, &req->poll)) {
4196                spin_unlock_irq(&ctx->completion_lock);
4197                return;
4198        }
4199
4200        hash_del(&req->hash_node);
4201        io_poll_complete(req, req->result, 0);
4202        spin_unlock_irq(&ctx->completion_lock);
4203
4204        io_put_req_find_next(req, nxt);
4205        io_cqring_ev_posted(ctx);
4206}
4207
4208static void io_poll_task_func(struct callback_head *cb)
4209{
4210        struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4211        struct io_kiocb *nxt = NULL;
4212
4213        io_poll_task_handler(req, &nxt);
4214        if (nxt) {
4215                struct io_ring_ctx *ctx = nxt->ctx;
4216
4217                mutex_lock(&ctx->uring_lock);
4218                __io_queue_sqe(nxt, NULL);
4219                mutex_unlock(&ctx->uring_lock);
4220        }
4221}
4222
4223static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4224                               int sync, void *key)
4225{
4226        struct io_kiocb *req = wait->private;
4227        struct io_poll_iocb *poll = req->apoll->double_poll;
4228        __poll_t mask = key_to_poll(key);
4229
4230        /* for instances that support it, check for an event match first: */
4231        if (mask && !(mask & poll->events))
4232                return 0;
4233
4234        if (poll && poll->head) {
4235                bool done;
4236
4237                spin_lock(&poll->head->lock);
4238                done = list_empty(&poll->wait.entry);
4239                if (!done)
4240                        list_del_init(&poll->wait.entry);
4241                spin_unlock(&poll->head->lock);
4242                if (!done)
4243                        __io_async_wake(req, poll, mask, io_poll_task_func);
4244        }
4245        refcount_dec(&req->refs);
4246        return 1;
4247}
4248
4249static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4250                              wait_queue_func_t wake_func)
4251{
4252        poll->head = NULL;
4253        poll->done = false;
4254        poll->canceled = false;
4255        poll->events = events;
4256        INIT_LIST_HEAD(&poll->wait.entry);
4257        init_waitqueue_func_entry(&poll->wait, wake_func);
4258}
4259
4260static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
4261                            struct wait_queue_head *head,
4262                            struct io_poll_iocb **poll_ptr)
4263{
4264        struct io_kiocb *req = pt->req;
4265
4266        /*
4267         * If poll->head is already set, it's because the file being polled
4268         * uses multiple waitqueues for poll handling (e.g. one for read, one
4269         * for write). Set up a separate io_poll_iocb if this happens.
4270         */
4271        if (unlikely(poll->head)) {
4272                /* already have a 2nd entry, fail a third attempt */
4273                if (*poll_ptr) {
4274                        pt->error = -EINVAL;
4275                        return;
4276                }
4277                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4278                if (!poll) {
4279                        pt->error = -ENOMEM;
4280                        return;
4281                }
4282                io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4283                refcount_inc(&req->refs);
4284                poll->wait.private = req;
4285                *poll_ptr = poll;
4286        }
4287
4288        pt->error = 0;
4289        poll->head = head;
4290        add_wait_queue(head, &poll->wait);
4291}
4292
4293static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4294                               struct poll_table_struct *p)
4295{
4296        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4297        struct async_poll *apoll = pt->req->apoll;
4298
4299        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
4300}
4301
4302static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
4303{
4304        struct mm_struct *mm = current->mm;
4305
4306        if (mm) {
4307                kthread_unuse_mm(mm);
4308                mmput(mm);
4309        }
4310}
4311
4312static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
4313                                   struct io_kiocb *req)
4314{
4315        if (io_op_defs[req->opcode].needs_mm && !current->mm) {
4316                if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
4317                        return -EFAULT;
4318                kthread_use_mm(ctx->sqo_mm);
4319        }
4320
4321        return 0;
4322}
4323
4324static void io_async_task_func(struct callback_head *cb)
4325{
4326        struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4327        struct async_poll *apoll = req->apoll;
4328        struct io_ring_ctx *ctx = req->ctx;
4329        bool canceled = false;
4330
4331        trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4332
4333        if (io_poll_rewait(req, &apoll->poll)) {
4334                spin_unlock_irq(&ctx->completion_lock);
4335                return;
4336        }
4337
4338        /* If req is still hashed, it cannot have been canceled. Don't check. */
4339        if (hash_hashed(&req->hash_node)) {
4340                hash_del(&req->hash_node);
4341        } else {
4342                canceled = READ_ONCE(apoll->poll.canceled);
4343                if (canceled) {
4344                        io_cqring_fill_event(req, -ECANCELED);
4345                        io_commit_cqring(ctx);
4346                }
4347        }
4348
4349        io_poll_remove_double(req, apoll->double_poll);
4350        spin_unlock_irq(&ctx->completion_lock);
4351
4352        /* restore ->work in case we need to retry */
4353        if (req->flags & REQ_F_WORK_INITIALIZED)
4354                memcpy(&req->work, &apoll->work, sizeof(req->work));
4355        kfree(apoll->double_poll);
4356        kfree(apoll);
4357
4358        if (!canceled) {
4359                __set_current_state(TASK_RUNNING);
4360                if (io_sq_thread_acquire_mm(ctx, req)) {
4361                        io_cqring_add_event(req, -EFAULT);
4362                        goto end_req;
4363                }
4364                mutex_lock(&ctx->uring_lock);
4365                __io_queue_sqe(req, NULL);
4366                mutex_unlock(&ctx->uring_lock);
4367        } else {
4368                io_cqring_ev_posted(ctx);
4369end_req:
4370                req_set_fail_links(req);
4371                io_double_put_req(req);
4372        }
4373}
4374
4375static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4376                        void *key)
4377{
4378        struct io_kiocb *req = wait->private;
4379        struct io_poll_iocb *poll = &req->apoll->poll;
4380
4381        trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4382                                        key_to_poll(key));
4383
4384        return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4385}
4386
4387static void io_poll_req_insert(struct io_kiocb *req)
4388{
4389        struct io_ring_ctx *ctx = req->ctx;
4390        struct hlist_head *list;
4391
4392        list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4393        hlist_add_head(&req->hash_node, list);
4394}
4395
4396static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4397                                      struct io_poll_iocb *poll,
4398                                      struct io_poll_table *ipt, __poll_t mask,
4399                                      wait_queue_func_t wake_func)
4400        __acquires(&ctx->completion_lock)
4401{
4402        struct io_ring_ctx *ctx = req->ctx;
4403        bool cancel = false;
4404
4405        poll->file = req->file;
4406        io_init_poll_iocb(poll, mask, wake_func);
4407        poll->wait.private = req;
4408
4409        ipt->pt._key = mask;
4410        ipt->req = req;
4411        ipt->error = -EINVAL;
4412
4413        mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4414
4415        spin_lock_irq(&ctx->completion_lock);
4416        if (likely(poll->head)) {
4417                spin_lock(&poll->head->lock);
4418                if (unlikely(list_empty(&poll->wait.entry))) {
4419                        if (ipt->error)
4420                                cancel = true;
4421                        ipt->error = 0;
4422                        mask = 0;
4423                }
4424                if (mask || ipt->error)
4425                        list_del_init(&poll->wait.entry);
4426                else if (cancel)
4427                        WRITE_ONCE(poll->canceled, true);
4428                else if (!poll->done) /* actually waiting for an event */
4429                        io_poll_req_insert(req);
4430                spin_unlock(&poll->head->lock);
4431        }
4432
4433        return mask;
4434}
4435
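/*
 * Arm poll-driven retry ("fast poll"): instead of punting a request that
 * would block straight to io-wq, register for readiness on the file and
 * re-issue the request from task_work once it is ready.
 */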
4436static bool io_arm_poll_handler(struct io_kiocb *req)
4437{
4438        const struct io_op_def *def = &io_op_defs[req->opcode];
4439        struct io_ring_ctx *ctx = req->ctx;
4440        struct async_poll *apoll;
4441        struct io_poll_table ipt;
4442        __poll_t mask, ret;
4443
4444        if (!req->file || !file_can_poll(req->file))
4445                return false;
4446        if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
4447                return false;
4448        if (!def->pollin && !def->pollout)
4449                return false;
4450
4451        apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4452        if (unlikely(!apoll))
4453                return false;
4454        apoll->double_poll = NULL;
4455
4456        req->flags |= REQ_F_POLLED;
4457        if (req->flags & REQ_F_WORK_INITIALIZED)
4458                memcpy(&apoll->work, &req->work, sizeof(req->work));
4459
4460        io_get_req_task(req);
4461        req->apoll = apoll;
4462        INIT_HLIST_NODE(&req->hash_node);
4463
4464        mask = 0;
4465        if (def->pollin)
4466                mask |= POLLIN | POLLRDNORM;
4467        if (def->pollout)
4468                mask |= POLLOUT | POLLWRNORM;
4469        mask |= POLLERR | POLLPRI;
4470
4471        ipt.pt._qproc = io_async_queue_proc;
4472
4473        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4474                                        io_async_wake);
4475        if (ret) {
4476                io_poll_remove_double(req, apoll->double_poll);
4477                spin_unlock_irq(&ctx->completion_lock);
4478                if (req->flags & REQ_F_WORK_INITIALIZED)
4479                        memcpy(&req->work, &apoll->work, sizeof(req->work));
4480                kfree(apoll->double_poll);
4481                kfree(apoll);
4482                return false;
4483        }
4484        spin_unlock_irq(&ctx->completion_lock);
4485        trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4486                                        apoll->poll.events);
4487        return true;
4488}
4489
4490static bool __io_poll_remove_one(struct io_kiocb *req,
4491                                 struct io_poll_iocb *poll)
4492{
4493        bool do_complete = false;
4494
4495        spin_lock(&poll->head->lock);
4496        WRITE_ONCE(poll->canceled, true);
4497        if (!list_empty(&poll->wait.entry)) {
4498                list_del_init(&poll->wait.entry);
4499                do_complete = true;
4500        }
4501        spin_unlock(&poll->head->lock);
4502        hash_del(&req->hash_node);
4503        return do_complete;
4504}
4505
4506static bool io_poll_remove_one(struct io_kiocb *req)
4507{
4508        bool do_complete;
4509
4510        if (req->opcode == IORING_OP_POLL_ADD) {
4511                io_poll_remove_double(req, req->io);
4512                do_complete = __io_poll_remove_one(req, &req->poll);
4513        } else {
4514                struct async_poll *apoll = req->apoll;
4515
4516                io_poll_remove_double(req, apoll->double_poll);
4517
4518                /* non-poll requests have submit ref still */
4519                do_complete = __io_poll_remove_one(req, &apoll->poll);
4520                if (do_complete) {
4521                        io_put_req(req);
4522                        /*
4523                         * restore ->work because we will call
4524                         * io_req_work_drop_env below when dropping the
4525                         * final reference.
4526                         */
4527                        if (req->flags & REQ_F_WORK_INITIALIZED)
4528                                memcpy(&req->work, &apoll->work,
4529                                       sizeof(req->work));
4530                        kfree(apoll->double_poll);
4531                        kfree(apoll);
4532                }
4533        }
4534
4535        if (do_complete) {
4536                io_cqring_fill_event(req, -ECANCELED);
4537                io_commit_cqring(req->ctx);
4538                req->flags |= REQ_F_COMP_LOCKED;
4539                io_put_req(req);
4540        }
4541
4542        return do_complete;
4543}
4544
4545static void io_poll_remove_all(struct io_ring_ctx *ctx)
4546{
4547        struct hlist_node *tmp;
4548        struct io_kiocb *req;
4549        int posted = 0, i;
4550
4551        spin_lock_irq(&ctx->completion_lock);
4552        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4553                struct hlist_head *list;
4554
4555                list = &ctx->cancel_hash[i];
4556                hlist_for_each_entry_safe(req, tmp, list, hash_node)
4557                        posted += io_poll_remove_one(req);
4558        }
4559        spin_unlock_irq(&ctx->completion_lock);
4560
4561        if (posted)
4562                io_cqring_ev_posted(ctx);
4563}
4564
4565static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4566{
4567        struct hlist_head *list;
4568        struct io_kiocb *req;
4569
4570        list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4571        hlist_for_each_entry(req, list, hash_node) {
4572                if (sqe_addr != req->user_data)
4573                        continue;
4574                if (io_poll_remove_one(req))
4575                        return 0;
4576                return -EALREADY;
4577        }
4578
4579        return -ENOENT;
4580}
4581
4582static int io_poll_remove_prep(struct io_kiocb *req,
4583                               const struct io_uring_sqe *sqe)
4584{
4585        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4586                return -EINVAL;
4587        if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4588            sqe->poll_events)
4589                return -EINVAL;
4590
4591        req->poll.addr = READ_ONCE(sqe->addr);
4592        return 0;
4593}
4594
4595/*
4596 * Find a running poll command that matches one specified in sqe->addr,
4597 * and remove it if found.
4598 */
4599static int io_poll_remove(struct io_kiocb *req)
4600{
4601        struct io_ring_ctx *ctx = req->ctx;
4602        u64 addr;
4603        int ret;
4604
4605        addr = req->poll.addr;
4606        spin_lock_irq(&ctx->completion_lock);
4607        ret = io_poll_cancel(ctx, addr);
4608        spin_unlock_irq(&ctx->completion_lock);
4609
4610        io_cqring_add_event(req, ret);
4611        if (ret < 0)
4612                req_set_fail_links(req);
4613        io_put_req(req);
4614        return 0;
4615}
4616
4617static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4618                        void *key)
4619{
4620        struct io_kiocb *req = wait->private;
4621        struct io_poll_iocb *poll = &req->poll;
4622
4623        return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
4624}
4625
4626static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4627                               struct poll_table_struct *p)
4628{
4629        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4630
4631        __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
4632}
4633
4634static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4635{
4636        struct io_poll_iocb *poll = &req->poll;
4637        u16 events;
4638
4639        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4640                return -EINVAL;
4641        if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4642                return -EINVAL;
4643        if (!poll->file)
4644                return -EBADF;
4645
4646        events = READ_ONCE(sqe->poll_events);
4647        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
4648
4649        io_get_req_task(req);
4650        return 0;
4651}
4652
4653static int io_poll_add(struct io_kiocb *req)
4654{
4655        struct io_poll_iocb *poll = &req->poll;
4656        struct io_ring_ctx *ctx = req->ctx;
4657        struct io_poll_table ipt;
4658        __poll_t mask;
4659
4660        /* ->work is in union with hash_node and others */
4661        io_req_work_drop_env(req);
4662        req->flags &= ~REQ_F_WORK_INITIALIZED;
4663
4664        INIT_HLIST_NODE(&req->hash_node);
4665        INIT_LIST_HEAD(&req->list);
4666        ipt.pt._qproc = io_poll_queue_proc;
4667
4668        mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4669                                        io_poll_wake);
4670
4671        if (mask) { /* no async needed, we completed the poll inline */
4672                ipt.error = 0;
4673                io_poll_complete(req, mask, 0);
4674        }
4675        spin_unlock_irq(&ctx->completion_lock);
4676
4677        if (mask) {
4678                io_cqring_ev_posted(ctx);
4679                io_put_req(req);
4680        }
4681        return ipt.error;
4682}
4683
4684static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4685{
4686        struct io_timeout_data *data = container_of(timer,
4687                                                struct io_timeout_data, timer);
4688        struct io_kiocb *req = data->req;
4689        struct io_ring_ctx *ctx = req->ctx;
4690        unsigned long flags;
4691
4692        atomic_inc(&ctx->cq_timeouts);
4693
4694        spin_lock_irqsave(&ctx->completion_lock, flags);
4695        /*
4696         * We could be racing with timeout deletion. If the list is empty,
4697         * then timeout lookup already found it and will be handling it.
4698         */
4699        if (!list_empty(&req->list))
4700                list_del_init(&req->list);
4701
4702        io_cqring_fill_event(req, -ETIME);
4703        io_commit_cqring(ctx);
4704        spin_unlock_irqrestore(&ctx->completion_lock, flags);
4705
4706        io_cqring_ev_posted(ctx);
4707        req_set_fail_links(req);
4708        io_put_req(req);
4709        return HRTIMER_NORESTART;
4710}
4711
4712static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4713{
4714        struct io_kiocb *req;
4715        int ret = -ENOENT;
4716
4717        list_for_each_entry(req, &ctx->timeout_list, list) {
4718                if (user_data == req->user_data) {
4719                        list_del_init(&req->list);
4720                        ret = 0;
4721                        break;
4722                }
4723        }
4724
4725        if (ret == -ENOENT)
4726                return ret;
4727
4728        ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
4729        if (ret == -1)
4730                return -EALREADY;
4731
4732        req_set_fail_links(req);
4733        io_cqring_fill_event(req, -ECANCELED);
4734        io_put_req(req);
4735        return 0;
4736}
4737
4738static int io_timeout_remove_prep(struct io_kiocb *req,
4739                                  const struct io_uring_sqe *sqe)
4740{
4741        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4742                return -EINVAL;
4743        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
4744                return -EINVAL;
4745        if (sqe->ioprio || sqe->buf_index || sqe->len)
4746                return -EINVAL;
4747
4748        req->timeout.addr = READ_ONCE(sqe->addr);
4749        req->timeout.flags = READ_ONCE(sqe->timeout_flags);
4750        if (req->timeout.flags)
4751                return -EINVAL;
4752
4753        return 0;
4754}
4755
4756/*
4757 * Remove an existing timeout command
4758 */
4759static int io_timeout_remove(struct io_kiocb *req)
4760{
4761        struct io_ring_ctx *ctx = req->ctx;
4762        int ret;
4763
4764        spin_lock_irq(&ctx->completion_lock);
4765        ret = io_timeout_cancel(ctx, req->timeout.addr);
4766
4767        io_cqring_fill_event(req, ret);
4768        io_commit_cqring(ctx);
4769        spin_unlock_irq(&ctx->completion_lock);
4770        io_cqring_ev_posted(ctx);
4771        if (ret < 0)
4772                req_set_fail_links(req);
4773        io_put_req(req);
4774        return 0;
4775}
4776
4777static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
4778                           bool is_timeout_link)
4779{
4780        struct io_timeout_data *data;
4781        unsigned flags;
4782        u32 off = READ_ONCE(sqe->off);
4783
4784        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4785                return -EINVAL;
4786        if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
4787                return -EINVAL;
4788        if (off && is_timeout_link)
4789                return -EINVAL;
4790        flags = READ_ONCE(sqe->timeout_flags);
4791        if (flags & ~IORING_TIMEOUT_ABS)
4792                return -EINVAL;
4793
4794        req->timeout.off = off;
4795
4796        if (!req->io && io_alloc_async_ctx(req))
4797                return -ENOMEM;
4798
4799        data = &req->io->timeout;
4800        data->req = req;
4801        req->flags |= REQ_F_TIMEOUT;
4802
4803        if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
4804                return -EFAULT;
4805
4806        if (flags & IORING_TIMEOUT_ABS)
4807                data->mode = HRTIMER_MODE_ABS;
4808        else
4809                data->mode = HRTIMER_MODE_REL;
4810
4811        hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
4812        return 0;
4813}
4814
4815static int io_timeout(struct io_kiocb *req)
4816{
4817        struct io_ring_ctx *ctx = req->ctx;
4818        struct io_timeout_data *data = &req->io->timeout;
4819        struct list_head *entry;
4820        u32 tail, off = req->timeout.off;
4821
4822        spin_lock_irq(&ctx->completion_lock);
4823
4824        /*
4825         * sqe->off holds how many events need to occur before this
4826         * timeout fires. If it isn't set, then this is a pure timeout
4827         * request and sequencing isn't used.
4828         */
4829        if (!off) {
4830                req->flags |= REQ_F_TIMEOUT_NOSEQ;
4831                entry = ctx->timeout_list.prev;
4832                goto add;
4833        }
4834
4835        tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
4836        req->timeout.target_seq = tail + off;
4837
4838        /*
4839         * Insertion sort, ensuring the first entry in the list is always
4840         * the one we need first.
4841         */
4842        list_for_each_prev(entry, &ctx->timeout_list) {
4843                struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
4844
4845                if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
4846                        continue;
4847                /* nxt.seq is behind @tail, otherwise would've been completed */
4848                if (off >= nxt->timeout.target_seq - tail)
4849                        break;
4850        }
4851add:
4852        list_add(&req->list, entry);
4853        data->timer.function = io_timeout_fn;
4854        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
4855        spin_unlock_irq(&ctx->completion_lock);
4856        return 0;
4857}
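
            /*
             * Illustrative userspace sketch (not part of this file): a timeout
             * that fires after 2 seconds unless 8 completions are posted
             * first, per the sqe->off semantics above. get_sqe() is a
             * hypothetical helper:
             *
             *        struct __kernel_timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
             *        struct io_uring_sqe *sqe = get_sqe();
             *
             *        memset(sqe, 0, sizeof(*sqe));
             *        sqe->opcode = IORING_OP_TIMEOUT;
             *        sqe->addr   = (unsigned long) &ts;
             *        sqe->len    = 1;   // exactly one timespec, enforced in prep
             *        sqe->off    = 8;   // 0 would make this a pure timeout
             *
             * If the timer expires first, io_timeout_fn() above posts -ETIME.
             */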
4858
4859static bool io_cancel_cb(struct io_wq_work *work, void *data)
4860{
4861        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4862
4863        return req->user_data == (unsigned long) data;
4864}
4865
4866static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
4867{
4868        enum io_wq_cancel cancel_ret;
4869        int ret = 0;
4870
4871        cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
4872        switch (cancel_ret) {
4873        case IO_WQ_CANCEL_OK:
4874                ret = 0;
4875                break;
4876        case IO_WQ_CANCEL_RUNNING:
4877                ret = -EALREADY;
4878                break;
4879        case IO_WQ_CANCEL_NOTFOUND:
4880                ret = -ENOENT;
4881                break;
4882        }
4883
4884        return ret;
4885}
4886
4887static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
4888                                     struct io_kiocb *req, __u64 sqe_addr,
4889                                     int success_ret)
4890{
4891        unsigned long flags;
4892        int ret;
4893
4894        ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
4895        if (ret != -ENOENT) {
4896                spin_lock_irqsave(&ctx->completion_lock, flags);
4897                goto done;
4898        }
4899
4900        spin_lock_irqsave(&ctx->completion_lock, flags);
4901        ret = io_timeout_cancel(ctx, sqe_addr);
4902        if (ret != -ENOENT)
4903                goto done;
4904        ret = io_poll_cancel(ctx, sqe_addr);
4905done:
4906        if (!ret)
4907                ret = success_ret;
4908        io_cqring_fill_event(req, ret);
4909        io_commit_cqring(ctx);
4910        spin_unlock_irqrestore(&ctx->completion_lock, flags);
4911        io_cqring_ev_posted(ctx);
4912
4913        if (ret < 0)
4914                req_set_fail_links(req);
4915        io_put_req(req);
4916}
4917
4918static int io_async_cancel_prep(struct io_kiocb *req,
4919                                const struct io_uring_sqe *sqe)
4920{
4921        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4922                return -EINVAL;
4923        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
4924                return -EINVAL;
4925        if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
4926                return -EINVAL;
4927
4928        req->cancel.addr = READ_ONCE(sqe->addr);
4929        return 0;
4930}
4931
4932static int io_async_cancel(struct io_kiocb *req)
4933{
4934        struct io_ring_ctx *ctx = req->ctx;
4935
4936        io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
4937        return 0;
4938}
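
            /*
             * Illustrative userspace sketch (not part of this file): cancel a
             * previously submitted request by its user_data. As implemented in
             * io_async_find_and_cancel() above, the kernel tries io-wq first,
             * then timeouts, then poll requests. get_sqe() is hypothetical:
             *
             *        struct io_uring_sqe *sqe = get_sqe();
             *
             *        memset(sqe, 0, sizeof(*sqe));
             *        sqe->opcode = IORING_OP_ASYNC_CANCEL;
             *        sqe->addr   = target_user_data;
             *
             * Completes with 0 on success, -EALREADY if the target is already
             * executing, or -ENOENT if nothing matched.
             */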
4939
4940static int io_files_update_prep(struct io_kiocb *req,
4941                                const struct io_uring_sqe *sqe)
4942{
4943        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
4944                return -EINVAL;
4945        if (sqe->ioprio || sqe->rw_flags)
4946                return -EINVAL;
4947
4948        req->files_update.offset = READ_ONCE(sqe->off);
4949        req->files_update.nr_args = READ_ONCE(sqe->len);
4950        if (!req->files_update.nr_args)
4951                return -EINVAL;
4952        req->files_update.arg = READ_ONCE(sqe->addr);
4953        return 0;
4954}
4955
4956static int io_files_update(struct io_kiocb *req, bool force_nonblock)
4957{
4958        struct io_ring_ctx *ctx = req->ctx;
4959        struct io_uring_files_update up;
4960        int ret;
4961
4962        if (force_nonblock)
4963                return -EAGAIN;
4964
4965        up.offset = req->files_update.offset;
4966        up.fds = req->files_update.arg;
4967
4968        mutex_lock(&ctx->uring_lock);
4969        ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4970        mutex_unlock(&ctx->uring_lock);
4971
4972        if (ret < 0)
4973                req_set_fail_links(req);
4974        io_cqring_add_event(req, ret);
4975        io_put_req(req);
4976        return 0;
4977}
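
            /*
             * Illustrative userspace sketch (not part of this file): update
             * two slots of the registered fixed-file table starting at index
             * 4. An fd of -1 clears a slot. get_sqe() is hypothetical:
             *
             *        int fds[2] = { new_fd, -1 };
             *        struct io_uring_sqe *sqe = get_sqe();
             *
             *        memset(sqe, 0, sizeof(*sqe));
             *        sqe->opcode = IORING_OP_FILES_UPDATE;
             *        sqe->addr   = (unsigned long) fds;
             *        sqe->len    = 2;   // number of slots to update
             *        sqe->off    = 4;   // starting index in the table
             */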
4978
4979static int io_req_defer_prep(struct io_kiocb *req,
4980                             const struct io_uring_sqe *sqe)
4981{
4982        ssize_t ret = 0;
4983
4984        if (!sqe)
4985                return 0;
4986
4987        io_req_init_async(req);
4988
4989        if (io_op_defs[req->opcode].file_table) {
4990                ret = io_grab_files(req);
4991                if (unlikely(ret))
4992                        return ret;
4993        }
4994
4995        io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4996
4997        switch (req->opcode) {
4998        case IORING_OP_NOP:
4999                break;
5000        case IORING_OP_READV:
5001        case IORING_OP_READ_FIXED:
5002        case IORING_OP_READ:
5003                ret = io_read_prep(req, sqe, true);
5004                break;
5005        case IORING_OP_WRITEV:
5006        case IORING_OP_WRITE_FIXED:
5007        case IORING_OP_WRITE:
5008                ret = io_write_prep(req, sqe, true);
5009                break;
5010        case IORING_OP_POLL_ADD:
5011                ret = io_poll_add_prep(req, sqe);
5012                break;
5013        case IORING_OP_POLL_REMOVE:
5014                ret = io_poll_remove_prep(req, sqe);
5015                break;
5016        case IORING_OP_FSYNC:
5017                ret = io_prep_fsync(req, sqe);
5018                break;
5019        case IORING_OP_SYNC_FILE_RANGE:
5020                ret = io_prep_sfr(req, sqe);
5021                break;
5022        case IORING_OP_SENDMSG:
5023        case IORING_OP_SEND:
5024                ret = io_sendmsg_prep(req, sqe);
5025                break;
5026        case IORING_OP_RECVMSG:
5027        case IORING_OP_RECV:
5028                ret = io_recvmsg_prep(req, sqe);
5029                break;
5030        case IORING_OP_CONNECT:
5031                ret = io_connect_prep(req, sqe);
5032                break;
5033        case IORING_OP_TIMEOUT:
5034                ret = io_timeout_prep(req, sqe, false);
5035                break;
5036        case IORING_OP_TIMEOUT_REMOVE:
5037                ret = io_timeout_remove_prep(req, sqe);
5038                break;
5039        case IORING_OP_ASYNC_CANCEL:
5040                ret = io_async_cancel_prep(req, sqe);
5041                break;
5042        case IORING_OP_LINK_TIMEOUT:
5043                ret = io_timeout_prep(req, sqe, true);
5044                break;
5045        case IORING_OP_ACCEPT:
5046                ret = io_accept_prep(req, sqe);
5047                break;
5048        case IORING_OP_FALLOCATE:
5049                ret = io_fallocate_prep(req, sqe);
5050                break;
5051        case IORING_OP_OPENAT:
5052                ret = io_openat_prep(req, sqe);
5053                break;
5054        case IORING_OP_CLOSE:
5055                ret = io_close_prep(req, sqe);
5056                break;
5057        case IORING_OP_FILES_UPDATE:
5058                ret = io_files_update_prep(req, sqe);
5059                break;
5060        case IORING_OP_STATX:
5061                ret = io_statx_prep(req, sqe);
5062                break;
5063        case IORING_OP_FADVISE:
5064                ret = io_fadvise_prep(req, sqe);
5065                break;
5066        case IORING_OP_MADVISE:
5067                ret = io_madvise_prep(req, sqe);
5068                break;
5069        case IORING_OP_OPENAT2:
5070                ret = io_openat2_prep(req, sqe);
5071                break;
5072        case IORING_OP_EPOLL_CTL:
5073                ret = io_epoll_ctl_prep(req, sqe);
5074                break;
5075        case IORING_OP_SPLICE:
5076                ret = io_splice_prep(req, sqe);
5077                break;
5078        case IORING_OP_PROVIDE_BUFFERS:
5079                ret = io_provide_buffers_prep(req, sqe);
5080                break;
5081        case IORING_OP_REMOVE_BUFFERS:
5082                ret = io_remove_buffers_prep(req, sqe);
5083                break;
5084        case IORING_OP_TEE:
5085                ret = io_tee_prep(req, sqe);
5086                break;
5087        default:
5088                printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5089                                req->opcode);
5090                ret = -EINVAL;
5091                break;
5092        }
5093
5094        return ret;
5095}
5096
5097static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5098{
5099        struct io_ring_ctx *ctx = req->ctx;
5100        int ret;
5101
5102        /* Still need to defer if there are pending reqs in the defer list. */
5103        if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
5104                return 0;
5105
5106        if (!req->io) {
5107                if (io_alloc_async_ctx(req))
5108                        return -EAGAIN;
5109                ret = io_req_defer_prep(req, sqe);
5110                if (ret < 0)
5111                        return ret;
5112        }
5113
5114        spin_lock_irq(&ctx->completion_lock);
5115        if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
5116                spin_unlock_irq(&ctx->completion_lock);
5117                return 0;
5118        }
5119
5120        trace_io_uring_defer(ctx, req, req->user_data);
5121        list_add_tail(&req->list, &ctx->defer_list);
5122        spin_unlock_irq(&ctx->completion_lock);
5123        return -EIOCBQUEUED;
5124}
5125
5126static void io_cleanup_req(struct io_kiocb *req)
5127{
5128        struct io_async_ctx *io = req->io;
5129
5130        switch (req->opcode) {
5131        case IORING_OP_READV:
5132        case IORING_OP_READ_FIXED:
5133        case IORING_OP_READ:
5134                if (req->flags & REQ_F_BUFFER_SELECTED)
5135                        kfree((void *)(unsigned long)req->rw.addr);
5136                /* fallthrough */
5137        case IORING_OP_WRITEV:
5138        case IORING_OP_WRITE_FIXED:
5139        case IORING_OP_WRITE:
5140                if (io->rw.iov != io->rw.fast_iov)
5141                        kfree(io->rw.iov);
5142                break;
5143        case IORING_OP_RECVMSG:
5144                if (req->flags & REQ_F_BUFFER_SELECTED)
5145                        kfree(req->sr_msg.kbuf);
5146                /* fallthrough */
5147        case IORING_OP_SENDMSG:
5148                if (io->msg.iov != io->msg.fast_iov)
5149                        kfree(io->msg.iov);
5150                break;
5151        case IORING_OP_RECV:
5152                if (req->flags & REQ_F_BUFFER_SELECTED)
5153                        kfree(req->sr_msg.kbuf);
5154                break;
5155        case IORING_OP_OPENAT:
5156        case IORING_OP_OPENAT2:
5157                break;
5158        case IORING_OP_SPLICE:
5159        case IORING_OP_TEE:
5160                io_put_file(req, req->splice.file_in,
5161                            (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5162                break;
5163        }
5164
5165        req->flags &= ~REQ_F_NEED_CLEANUP;
5166}
5167
5168static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5169                        bool force_nonblock)
5170{
5171        struct io_ring_ctx *ctx = req->ctx;
5172        int ret;
5173
5174        switch (req->opcode) {
5175        case IORING_OP_NOP:
5176                ret = io_nop(req);
5177                break;
5178        case IORING_OP_READV:
5179        case IORING_OP_READ_FIXED:
5180        case IORING_OP_READ:
5181                if (sqe) {
5182                        ret = io_read_prep(req, sqe, force_nonblock);
5183                        if (ret < 0)
5184                                break;
5185                }
5186                ret = io_read(req, force_nonblock);
5187                break;
5188        case IORING_OP_WRITEV:
5189        case IORING_OP_WRITE_FIXED:
5190        case IORING_OP_WRITE:
5191                if (sqe) {
5192                        ret = io_write_prep(req, sqe, force_nonblock);
5193                        if (ret < 0)
5194                                break;
5195                }
5196                ret = io_write(req, force_nonblock);
5197                break;
5198        case IORING_OP_FSYNC:
5199                if (sqe) {
5200                        ret = io_prep_fsync(req, sqe);
5201                        if (ret < 0)
5202                                break;
5203                }
5204                ret = io_fsync(req, force_nonblock);
5205                break;
5206        case IORING_OP_POLL_ADD:
5207                if (sqe) {
5208                        ret = io_poll_add_prep(req, sqe);
5209                        if (ret)
5210                                break;
5211                }
5212                ret = io_poll_add(req);
5213                break;
5214        case IORING_OP_POLL_REMOVE:
5215                if (sqe) {
5216                        ret = io_poll_remove_prep(req, sqe);
5217                        if (ret < 0)
5218                                break;
5219                }
5220                ret = io_poll_remove(req);
5221                break;
5222        case IORING_OP_SYNC_FILE_RANGE:
5223                if (sqe) {
5224                        ret = io_prep_sfr(req, sqe);
5225                        if (ret < 0)
5226                                break;
5227                }
5228                ret = io_sync_file_range(req, force_nonblock);
5229                break;
5230        case IORING_OP_SENDMSG:
5231        case IORING_OP_SEND:
5232                if (sqe) {
5233                        ret = io_sendmsg_prep(req, sqe);
5234                        if (ret < 0)
5235                                break;
5236                }
5237                if (req->opcode == IORING_OP_SENDMSG)
5238                        ret = io_sendmsg(req, force_nonblock);
5239                else
5240                        ret = io_send(req, force_nonblock);
5241                break;
5242        case IORING_OP_RECVMSG:
5243        case IORING_OP_RECV:
5244                if (sqe) {
5245                        ret = io_recvmsg_prep(req, sqe);
5246                        if (ret)
5247                                break;
5248                }
5249                if (req->opcode == IORING_OP_RECVMSG)
5250                        ret = io_recvmsg(req, force_nonblock);
5251                else
5252                        ret = io_recv(req, force_nonblock);
5253                break;
5254        case IORING_OP_TIMEOUT:
5255                if (sqe) {
5256                        ret = io_timeout_prep(req, sqe, false);
5257                        if (ret)
5258                                break;
5259                }
5260                ret = io_timeout(req);
5261                break;
5262        case IORING_OP_TIMEOUT_REMOVE:
5263                if (sqe) {
5264                        ret = io_timeout_remove_prep(req, sqe);
5265                        if (ret)
5266                                break;
5267                }
5268                ret = io_timeout_remove(req);
5269                break;
5270        case IORING_OP_ACCEPT:
5271                if (sqe) {
5272                        ret = io_accept_prep(req, sqe);
5273                        if (ret)
5274                                break;
5275                }
5276                ret = io_accept(req, force_nonblock);
5277                break;
5278        case IORING_OP_CONNECT:
5279                if (sqe) {
5280                        ret = io_connect_prep(req, sqe);
5281                        if (ret)
5282                                break;
5283                }
5284                ret = io_connect(req, force_nonblock);
5285                break;
5286        case IORING_OP_ASYNC_CANCEL:
5287                if (sqe) {
5288                        ret = io_async_cancel_prep(req, sqe);
5289                        if (ret)
5290                                break;
5291                }
5292                ret = io_async_cancel(req);
5293                break;
5294        case IORING_OP_FALLOCATE:
5295                if (sqe) {
5296                        ret = io_fallocate_prep(req, sqe);
5297                        if (ret)
5298                                break;
5299                }
5300                ret = io_fallocate(req, force_nonblock);
5301                break;
5302        case IORING_OP_OPENAT:
5303                if (sqe) {
5304                        ret = io_openat_prep(req, sqe);
5305                        if (ret)
5306                                break;
5307                }
5308                ret = io_openat(req, force_nonblock);
5309                break;
5310        case IORING_OP_CLOSE:
5311                if (sqe) {
5312                        ret = io_close_prep(req, sqe);
5313                        if (ret)
5314                                break;
5315                }
5316                ret = io_close(req, force_nonblock);
5317                break;
5318        case IORING_OP_FILES_UPDATE:
5319                if (sqe) {
5320                        ret = io_files_update_prep(req, sqe);
5321                        if (ret)
5322                                break;
5323                }
5324                ret = io_files_update(req, force_nonblock);
5325                break;
5326        case IORING_OP_STATX:
5327                if (sqe) {
5328                        ret = io_statx_prep(req, sqe);
5329                        if (ret)
5330                                break;
5331                }
5332                ret = io_statx(req, force_nonblock);
5333                break;
5334        case IORING_OP_FADVISE:
5335                if (sqe) {
5336                        ret = io_fadvise_prep(req, sqe);
5337                        if (ret)
5338                                break;
5339                }
5340                ret = io_fadvise(req, force_nonblock);
5341                break;
5342        case IORING_OP_MADVISE:
5343                if (sqe) {
5344                        ret = io_madvise_prep(req, sqe);
5345                        if (ret)
5346                                break;
5347                }
5348                ret = io_madvise(req, force_nonblock);
5349                break;
5350        case IORING_OP_OPENAT2:
5351                if (sqe) {
5352                        ret = io_openat2_prep(req, sqe);
5353                        if (ret)
5354                                break;
5355                }
5356                ret = io_openat2(req, force_nonblock);
5357                break;
5358        case IORING_OP_EPOLL_CTL:
5359                if (sqe) {
5360                        ret = io_epoll_ctl_prep(req, sqe);
5361                        if (ret)
5362                                break;
5363                }
5364                ret = io_epoll_ctl(req, force_nonblock);
5365                break;
5366        case IORING_OP_SPLICE:
5367                if (sqe) {
5368                        ret = io_splice_prep(req, sqe);
5369                        if (ret < 0)
5370                                break;
5371                }
5372                ret = io_splice(req, force_nonblock);
5373                break;
5374        case IORING_OP_PROVIDE_BUFFERS:
5375                if (sqe) {
5376                        ret = io_provide_buffers_prep(req, sqe);
5377                        if (ret)
5378                                break;
5379                }
5380                ret = io_provide_buffers(req, force_nonblock);
5381                break;
5382        case IORING_OP_REMOVE_BUFFERS:
5383                if (sqe) {
5384                        ret = io_remove_buffers_prep(req, sqe);
5385                        if (ret)
5386                                break;
5387                }
5388                ret = io_remove_buffers(req, force_nonblock);
5389                break;
5390        case IORING_OP_TEE:
5391                if (sqe) {
5392                        ret = io_tee_prep(req, sqe);
5393                        if (ret < 0)
5394                                break;
5395                }
5396                ret = io_tee(req, force_nonblock);
5397                break;
5398        default:
5399                ret = -EINVAL;
5400                break;
5401        }
5402
5403        if (ret)
5404                return ret;
5405
5406        /* If the op doesn't have a file, we're not polling for it */
5407        if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
5408                const bool in_async = io_wq_current_is_worker();
5409
5410                /* workqueue context doesn't hold uring_lock, grab it now */
5411                if (in_async)
5412                        mutex_lock(&ctx->uring_lock);
5413
5414                io_iopoll_req_issued(req);
5415
5416                if (in_async)
5417                        mutex_unlock(&ctx->uring_lock);
5418        }
5419
5420        return 0;
5421}
5422
5423static void io_arm_async_linked_timeout(struct io_kiocb *req)
5424{
5425        struct io_kiocb *link;
5426
5427        /* link head's timeout is queued in io_queue_async_work() */
5428        if (!(req->flags & REQ_F_QUEUE_TIMEOUT))
5429                return;
5430
5431        link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
5432        io_queue_linked_timeout(link);
5433}
5434
5435static void io_wq_submit_work(struct io_wq_work **workptr)
5436{
5437        struct io_wq_work *work = *workptr;
5438        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5439        int ret = 0;
5440
5441        io_arm_async_linked_timeout(req);
5442
5443        /* if NO_CANCEL is set, we must still run the work */
5444        if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5445                                IO_WQ_WORK_CANCEL) {
5446                ret = -ECANCELED;
5447        }
5448
5449        if (!ret) {
5450                do {
5451                        ret = io_issue_sqe(req, NULL, false);
5452                        /*
5453                         * We can get EAGAIN for polled IO even though we're
5454                         * forcing a sync submission from here, since we can't
5455                         * wait for request slots on the block side.
5456                         */
5457                        if (ret != -EAGAIN)
5458                                break;
5459                        cond_resched();
5460                } while (1);
5461        }
5462
5463        if (ret) {
5464                req_set_fail_links(req);
5465                io_cqring_add_event(req, ret);
5466                io_put_req(req);
5467        }
5468
5469        io_steal_work(req, workptr);
5470}
5471
5472static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5473                                              int index)
5474{
5475        struct fixed_file_table *table;
5476
5477        table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
5478        return table->files[index & IORING_FILE_TABLE_MASK];
5479}
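
            /*
             * Worked example: with IORING_FILE_TABLE_SHIFT == 9 each table
             * holds 512 entries, so fixed-file index 1000 resolves to
             * table[1000 >> 9] == table[1], slot 1000 & 511 == 488.
             */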
5480
5481static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5482                        int fd, struct file **out_file, bool fixed)
5483{
5484        struct io_ring_ctx *ctx = req->ctx;
5485        struct file *file;
5486
5487        if (fixed) {
5488                if (unlikely(!ctx->file_data ||
5489                    (unsigned) fd >= ctx->nr_user_files))
5490                        return -EBADF;
5491                fd = array_index_nospec(fd, ctx->nr_user_files);
5492                file = io_file_from_index(ctx, fd);
5493                if (file) {
5494                        req->fixed_file_refs = ctx->file_data->cur_refs;
5495                        percpu_ref_get(req->fixed_file_refs);
5496                }
5497        } else {
5498                trace_io_uring_file_get(ctx, fd);
5499                file = __io_file_get(state, fd);
5500        }
5501
5502        if (file || io_op_defs[req->opcode].needs_file_no_error) {
5503                *out_file = file;
5504                return 0;
5505        }
5506        return -EBADF;
5507}
5508
5509static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
5510                           int fd)
5511{
5512        bool fixed;
5513
5514        fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
5515        if (unlikely(!fixed && io_async_submit(req->ctx)))
5516                return -EBADF;
5517
5518        return io_file_get(state, req, fd, &req->file, fixed);
5519}
5520
5521static int io_grab_files(struct io_kiocb *req)
5522{
5523        int ret = -EBADF;
5524        struct io_ring_ctx *ctx = req->ctx;
5525
5526        if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
5527                return 0;
5528        if (!ctx->ring_file)
5529                return -EBADF;
5530
5531        rcu_read_lock();
5532        spin_lock_irq(&ctx->inflight_lock);
5533        /*
5534         * We use the f_ops->flush() handler to ensure that we can flush
5535         * out work accessing these files if the fd is closed. Check if
5536         * the fd has changed since we started down this path, and disallow
5537         * this operation if it has.
5538         */
5539        if (fcheck(ctx->ring_fd) == ctx->ring_file) {
5540                list_add(&req->inflight_entry, &ctx->inflight_list);
5541                req->flags |= REQ_F_INFLIGHT;
5542                req->work.files = current->files;
5543                ret = 0;
5544        }
5545        spin_unlock_irq(&ctx->inflight_lock);
5546        rcu_read_unlock();
5547
5548        return ret;
5549}
5550
5551static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
5552{
5553        struct io_timeout_data *data = container_of(timer,
5554                                                struct io_timeout_data, timer);
5555        struct io_kiocb *req = data->req;
5556        struct io_ring_ctx *ctx = req->ctx;
5557        struct io_kiocb *prev = NULL;
5558        unsigned long flags;
5559
5560        spin_lock_irqsave(&ctx->completion_lock, flags);
5561
5562        /*
5563         * We don't expect the list to be empty; that will only happen if we
5564         * race with the completion of the linked work.
5565         */
5566        if (!list_empty(&req->link_list)) {
5567                prev = list_entry(req->link_list.prev, struct io_kiocb,
5568                                  link_list);
5569                if (refcount_inc_not_zero(&prev->refs)) {
5570                        list_del_init(&req->link_list);
5571                        prev->flags &= ~REQ_F_LINK_TIMEOUT;
5572                } else
5573                        prev = NULL;
5574        }
5575
5576        spin_unlock_irqrestore(&ctx->completion_lock, flags);
5577
5578        if (prev) {
5579                req_set_fail_links(prev);
5580                io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
5581                io_put_req(prev);
5582        } else {
5583                io_cqring_add_event(req, -ETIME);
5584                io_put_req(req);
5585        }
5586        return HRTIMER_NORESTART;
5587}
5588
5589static void io_queue_linked_timeout(struct io_kiocb *req)
5590{
5591        struct io_ring_ctx *ctx = req->ctx;
5592
5593        /*
5594         * If the list is now empty, then our linked request finished before
5595         * we got a chance to set up the timer.
5596         */
5597        spin_lock_irq(&ctx->completion_lock);
5598        if (!list_empty(&req->link_list)) {
5599                struct io_timeout_data *data = &req->io->timeout;
5600
5601                data->timer.function = io_link_timeout_fn;
5602                hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5603                                data->mode);
5604        }
5605        spin_unlock_irq(&ctx->completion_lock);
5606
5607        /* drop submission reference */
5608        io_put_req(req);
5609}
5610
5611static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
5612{
5613        struct io_kiocb *nxt;
5614
5615        if (!(req->flags & REQ_F_LINK_HEAD))
5616                return NULL;
5617        /* for polled retry, if flag is set, we already went through here */
5618        if (req->flags & REQ_F_POLLED)
5619                return NULL;
5620
5621        nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5622                                        link_list);
5623        if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
5624                return NULL;
5625
5626        req->flags |= REQ_F_LINK_TIMEOUT;
5627        return nxt;
5628}
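
            /*
             * Illustrative userspace sketch (not part of this file): bound a
             * read with a per-request timeout. The LINK_TIMEOUT sqe must be
             * the next entry in the same link chain, matching the
             * list_first_entry_or_null() lookup above. get_sqe() is a
             * hypothetical helper:
             *
             *        struct __kernel_timespec ts = { .tv_sec = 1 };
             *        struct io_uring_sqe *rd = get_sqe(), *lt = get_sqe();
             *
             *        // the guarded request heads the link
             *        rd->opcode = IORING_OP_READ;
             *        rd->flags |= IOSQE_IO_LINK;
             *
             *        // the timeout; if it expires, io_link_timeout_fn() above
             *        // posts -ETIME for it and the read completes -ECANCELED
             *        lt->opcode = IORING_OP_LINK_TIMEOUT;
             *        lt->addr   = (unsigned long) &ts;
             *        lt->len    = 1;
             */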
5629
5630static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5631{
5632        struct io_kiocb *linked_timeout;
5633        struct io_kiocb *nxt;
5634        const struct cred *old_creds = NULL;
5635        int ret;
5636
5637again:
5638        linked_timeout = io_prep_linked_timeout(req);
5639
5640        if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
5641            req->work.creds != current_cred()) {
5642                if (old_creds)
5643                        revert_creds(old_creds);
5644                if (old_creds == req->work.creds)
5645                        old_creds = NULL; /* restored original creds */
5646                else
5647                        old_creds = override_creds(req->work.creds);
5648        }
5649
5650        ret = io_issue_sqe(req, sqe, true);
5651
5652        /*
5653         * We async punt it if the file wasn't marked NOWAIT, or if the file
5654         * doesn't support non-blocking read/write attempts
5655         */
5656        if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
5657            (req->flags & REQ_F_MUST_PUNT))) {
5658                if (io_arm_poll_handler(req)) {
5659                        if (linked_timeout)
5660                                io_queue_linked_timeout(linked_timeout);
5661                        goto exit;
5662                }
5663punt:
5664                io_req_init_async(req);
5665
5666                if (io_op_defs[req->opcode].file_table) {
5667                        ret = io_grab_files(req);
5668                        if (ret)
5669                                goto err;
5670                }
5671
5672                /*
5673                 * Queued up for async execution, worker will release
5674                 * submit reference when the iocb is actually submitted.
5675                 */
5676                io_queue_async_work(req);
5677                goto exit;
5678        }
5679
5680err:
5681        nxt = NULL;
5682        /* drop submission reference */
5683        io_put_req_find_next(req, &nxt);
5684
5685        if (linked_timeout) {
5686                if (!ret)
5687                        io_queue_linked_timeout(linked_timeout);
5688                else
5689                        io_put_req(linked_timeout);
5690        }
5691
5692        /* and drop final reference, if we failed */
5693        if (ret) {
5694                io_cqring_add_event(req, ret);
5695                req_set_fail_links(req);
5696                io_put_req(req);
5697        }
5698        if (nxt) {
5699                req = nxt;
5700
5701                if (req->flags & REQ_F_FORCE_ASYNC)
5702                        goto punt;
5703                goto again;
5704        }
5705exit:
5706        if (old_creds)
5707                revert_creds(old_creds);
5708}
5709
5710static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5711{
5712        int ret;
5713
5714        ret = io_req_defer(req, sqe);
5715        if (ret) {
5716                if (ret != -EIOCBQUEUED) {
5717fail_req:
5718                        io_cqring_add_event(req, ret);
5719                        req_set_fail_links(req);
5720                        io_double_put_req(req);
5721                }
5722        } else if (req->flags & REQ_F_FORCE_ASYNC) {
5723                if (!req->io) {
5724                        ret = -EAGAIN;
5725                        if (io_alloc_async_ctx(req))
5726                                goto fail_req;
5727                        ret = io_req_defer_prep(req, sqe);
5728                        if (unlikely(ret < 0))
5729                                goto fail_req;
5730                }
5731
5732                /*
5733                 * Never try inline submit if IOSQE_ASYNC is set, go straight
5734                 * to async execution.
5735                 */
5736                io_req_init_async(req);
5737                req->work.flags |= IO_WQ_WORK_CONCURRENT;
5738                io_queue_async_work(req);
5739        } else {
5740                __io_queue_sqe(req, sqe);
5741        }
5742}
5743
5744static inline void io_queue_link_head(struct io_kiocb *req)
5745{
5746        if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
5747                io_cqring_add_event(req, -ECANCELED);
5748                io_double_put_req(req);
5749        } else
5750                io_queue_sqe(req, NULL);
5751}
5752
5753static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5754                         struct io_kiocb **link)
5755{
5756        struct io_ring_ctx *ctx = req->ctx;
5757        int ret;
5758
5759        /*
5760         * If we already have a head request, queue this one for async
5761         * submittal once the head completes. If we don't have a head but
5762         * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
5763         * submitted sync once the chain is complete. If none of those
5764         * conditions are true (normal request), then just queue it.
5765         */
5766        if (*link) {
5767                struct io_kiocb *head = *link;
5768
5769                /*
5770                 * Because a link executes sequentially, draining both
5771                 * sides of the link also fulfils IOSQE_IO_DRAIN semantics
5772                 * for all requests in the link. So it drains the head,
5773                 * and the request following the link is drained via the
5774                 * drain_next flag, which persists the effect across calls.
5775                 */
5776                if (req->flags & REQ_F_IO_DRAIN) {
5777                        head->flags |= REQ_F_IO_DRAIN;
5778                        ctx->drain_next = 1;
5779                }
5780                if (io_alloc_async_ctx(req))
5781                        return -EAGAIN;
5782
5783                ret = io_req_defer_prep(req, sqe);
5784                if (ret) {
5785                        /* fail even hard links since we don't submit */
5786                        head->flags |= REQ_F_FAIL_LINK;
5787                        return ret;
5788                }
5789                trace_io_uring_link(ctx, req, head);
5790                list_add_tail(&req->link_list, &head->link_list);
5791
5792                /* last request of a link, enqueue the link */
5793                if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
5794                        io_queue_link_head(head);
5795                        *link = NULL;
5796                }
5797        } else {
5798                if (unlikely(ctx->drain_next)) {
5799                        req->flags |= REQ_F_IO_DRAIN;
5800                        ctx->drain_next = 0;
5801                }
5802                if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
5803                        req->flags |= REQ_F_LINK_HEAD;
5804                        INIT_LIST_HEAD(&req->link_list);
5805
5806                        if (io_alloc_async_ctx(req))
5807                                return -EAGAIN;
5808
5809                        ret = io_req_defer_prep(req, sqe);
5810                        if (ret)
5811                                req->flags |= REQ_F_FAIL_LINK;
5812                        *link = req;
5813                } else {
5814                        io_queue_sqe(req, sqe);
5815                }
5816        }
5817
5818        return 0;
5819}
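
            /*
             * Illustrative userspace sketch (not part of this file): a chain
             * of three requests executed strictly in order. IOSQE_IO_LINK on
             * an sqe links it to the *next* sqe; the last member carries no
             * link flag and enqueues the whole chain:
             *
             *        sqe1->flags |= IOSQE_IO_LINK;      // e.g. a read
             *        sqe2->flags |= IOSQE_IO_LINK;      // e.g. a dependent write
             *        sqe3->flags &= ~IOSQE_IO_LINK;     // e.g. fsync, ends the chain
             *
             * If a member fails, later members complete with -ECANCELED
             * unless they were attached with IOSQE_IO_HARDLINK.
             */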
5820
5821/*
5822 * Batched submission is done; ensure local IO is flushed out.
5823 */
5824static void io_submit_state_end(struct io_submit_state *state)
5825{
5826        blk_finish_plug(&state->plug);
5827        io_state_file_put(state);
5828        if (state->free_reqs)
5829                kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
5830}
5831
5832/*
5833 * Start submission side cache.
5834 */
5835static void io_submit_state_start(struct io_submit_state *state,
5836                                  unsigned int max_ios)
5837{
5838        blk_start_plug(&state->plug);
5839        state->free_reqs = 0;
5840        state->file = NULL;
5841        state->ios_left = max_ios;
5842}
5843
5844static void io_commit_sqring(struct io_ring_ctx *ctx)
5845{
5846        struct io_rings *rings = ctx->rings;
5847
5848        /*
5849         * Ensure any loads from the SQEs are done at this point,
5850         * since once we write the new head, the application could
5851         * write new data to them.
5852         */
5853        smp_store_release(&rings->sq.head, ctx->cached_sq_head);
5854}
5855
5856/*
5857 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
5858 * that is mapped by userspace. This means that care needs to be taken to
5859 * ensure that reads are stable, as we cannot rely on userspace always
5860 * being a good citizen. If members of the sqe are validated and then later
5861 * used, it's important that those reads are done through READ_ONCE() to
5862 * prevent a re-load down the line.
5863 */
5864static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
5865{
5866        u32 *sq_array = ctx->sq_array;
5867        unsigned head;
5868
5869        /*
5870         * The cached sq head (or cq tail) serves two purposes:
5871         *
5872         * 1) allows us to batch the cost of updating the user visible
5873         *    head.
5874         * 2) allows the kernel side to track the head on its own, even
5875         *    though the application is the one updating it.
5876         */
5877        head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
5878        if (likely(head < ctx->sq_entries))
5879                return &ctx->sq_sqes[head];
5880
5881        /* drop invalid entries */
5882        ctx->cached_sq_dropped++;
5883        WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
5884        return NULL;
5885}
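
            /*
             * Worked example of the indirection above: with 128 sq entries
             * (sq_mask == 127) and cached_sq_head == 130, the kernel loads
             * sq_array[130 & 127] == sq_array[2]; if the application stored 7
             * there, the fetched sqe is ctx->sq_sqes[7]. The extra level lets
             * the application manage sqes in any order while the ring itself
             * stays a plain power-of-2 circular buffer of indices.
             */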
5886
5887static inline void io_consume_sqe(struct io_ring_ctx *ctx)
5888{
5889        ctx->cached_sq_head++;
5890}
5891
5892#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5893                                IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5894                                IOSQE_BUFFER_SELECT)
5895
5896static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5897                       const struct io_uring_sqe *sqe,
5898                       struct io_submit_state *state)
5899{
5900        unsigned int sqe_flags;
5901        int id;
5902
5903        /*
5904         * Every request records its submission sequence; when LINK and
5905         * DRAIN interact, this is used to mark the position of the
5906         * first IO in the link list.
5907         */
5908        req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
5909        req->opcode = READ_ONCE(sqe->opcode);
5910        req->user_data = READ_ONCE(sqe->user_data);
5911        req->io = NULL;
5912        req->file = NULL;
5913        req->ctx = ctx;
5914        req->flags = 0;
5915        /* one is dropped after submission, the other at completion */
5916        refcount_set(&req->refs, 2);
5917        req->task = current;
5918        req->result = 0;
5919
5920        if (unlikely(req->opcode >= IORING_OP_LAST))
5921                return -EINVAL;
5922
5923        if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
5924                return -EFAULT;
5925
5926        sqe_flags = READ_ONCE(sqe->flags);
5927        /* enforce forwards compatibility on users */
5928        if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
5929                return -EINVAL;
5930
5931        if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5932            !io_op_defs[req->opcode].buffer_select)
5933                return -EOPNOTSUPP;
5934
5935        id = READ_ONCE(sqe->personality);
5936        if (id) {
5937                io_req_init_async(req);
5938                req->work.creds = idr_find(&ctx->personality_idr, id);
5939                if (unlikely(!req->work.creds))
5940                        return -EINVAL;
5941                get_cred(req->work.creds);
5942        }
5943
5944        /* same numerical values as the corresponding REQ_F_* flags, safe to copy */
5945        req->flags |= sqe_flags;
5946
5947        if (!io_op_defs[req->opcode].needs_file)
5948                return 0;
5949
5950        return io_req_set_file(state, req, READ_ONCE(sqe->fd));
5951}
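
            /*
             * Illustrative userspace sketch (not part of this file): run one
             * request under previously registered credentials. The id comes
             * from io_uring_register(ring_fd, IORING_REGISTER_PERSONALITY,
             * NULL, 0), issued while running with the desired creds, and is
             * looked up via the idr_find() above at submit time:
             *
             *        sqe->personality = personality_id;
             */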
5952
5953static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
5954                          struct file *ring_file, int ring_fd)
5955{
5956        struct io_submit_state state, *statep = NULL;
5957        struct io_kiocb *link = NULL;
5958        int i, submitted = 0;
5959
5960        /* if we have a backlog and couldn't flush it all, return BUSY */
5961        if (test_bit(0, &ctx->sq_check_overflow)) {
5962                if (!list_empty(&ctx->cq_overflow_list) &&
5963                    !io_cqring_overflow_flush(ctx, false))
5964                        return -EBUSY;
5965        }
5966
5967        /* make sure SQ entry isn't read before tail */
5968        nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
5969
5970        if (!percpu_ref_tryget_many(&ctx->refs, nr))
5971                return -EAGAIN;
5972
5973        if (nr > IO_PLUG_THRESHOLD) {
5974                io_submit_state_start(&state, nr);
5975                statep = &state;
5976        }
5977
5978        ctx->ring_fd = ring_fd;
5979        ctx->ring_file = ring_file;
5980
5981        for (i = 0; i < nr; i++) {
5982                const struct io_uring_sqe *sqe;
5983                struct io_kiocb *req;
5984                int err;
5985
5986                sqe = io_get_sqe(ctx);
5987                if (unlikely(!sqe)) {
5988                        io_consume_sqe(ctx);
5989                        break;
5990                }
5991                req = io_alloc_req(ctx, statep);
5992                if (unlikely(!req)) {
5993                        if (!submitted)
5994                                submitted = -EAGAIN;
5995                        break;
5996                }
5997
5998                err = io_init_req(ctx, req, sqe, statep);
5999                io_consume_sqe(ctx);
6000                /* will complete beyond this point, count as submitted */
6001                submitted++;
6002
6003                if (unlikely(err)) {
6004fail_req:
6005                        io_cqring_add_event(req, err);
6006                        io_double_put_req(req);
6007                        break;
6008                }
6009
6010                trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6011                                                true, io_async_submit(ctx));
6012                err = io_submit_sqe(req, sqe, &link);
6013                if (err)
6014                        goto fail_req;
6015        }
6016
6017        if (unlikely(submitted != nr)) {
6018                int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6019
6020                percpu_ref_put_many(&ctx->refs, nr - ref_used);
6021        }
6022        if (link)
6023                io_queue_link_head(link);
6024        if (statep)
6025                io_submit_state_end(&state);
6026
6027        /* Commit SQ ring head once we've consumed and submitted all SQEs */
6028        io_commit_sqring(ctx);
6029
6030        return submitted;
6031}
6032
6033static int io_sq_thread(void *data)
6034{
6035        struct io_ring_ctx *ctx = data;
6036        const struct cred *old_cred;
6037        DEFINE_WAIT(wait);
6038        unsigned long timeout;
6039        int ret = 0;
6040
6041        complete(&ctx->sq_thread_comp);
6042
6043        old_cred = override_creds(ctx->creds);
6044
6045        timeout = jiffies + ctx->sq_thread_idle;
6046        while (!kthread_should_park()) {
6047                unsigned int to_submit;
6048
6049                if (!list_empty(&ctx->poll_list)) {
6050                        unsigned nr_events = 0;
6051
6052                        mutex_lock(&ctx->uring_lock);
6053                        if (!list_empty(&ctx->poll_list))
6054                                io_iopoll_getevents(ctx, &nr_events, 0);
6055                        else
6056                                timeout = jiffies + ctx->sq_thread_idle;
6057                        mutex_unlock(&ctx->uring_lock);
6058                }
6059
6060                to_submit = io_sqring_entries(ctx);
6061
6062                /*
6063                 * If submit got -EBUSY, flag us as needing the application
6064                 * to enter the kernel to reap and flush events.
6065                 */
6066                if (!to_submit || ret == -EBUSY || need_resched()) {
6067                        /*
6068                         * Drop cur_mm before scheduling; we can't hold it for
6069                         * long periods (or over schedule()). Do this before
6070                         * adding ourselves to the waitqueue, as the unuse/drop
6071                         * may sleep.
6072                         */
6073                        io_sq_thread_drop_mm(ctx);
6074
6075                        /*
6076                         * We're polling. If we're within the defined idle
6077                         * period, then let us spin without work before going
6078                         * to sleep. The exception is if we got -EBUSY while
6079                         * doing more IO; in that case, we should wait for
6080                         * the application to reap events and wake us up.
6081                         */
6082                        if (!list_empty(&ctx->poll_list) || need_resched() ||
6083                            (!time_after(jiffies, timeout) && ret != -EBUSY &&
6084                            !percpu_ref_is_dying(&ctx->refs))) {
6085                                if (current->task_works)
6086                                        task_work_run();
6087                                cond_resched();
6088                                continue;
6089                        }
6090
6091                        prepare_to_wait(&ctx->sqo_wait, &wait,
6092                                                TASK_INTERRUPTIBLE);
6093
6094                        /*
6095                         * While doing polled IO, we need to recheck
6096                         * poll_list before going to sleep: requests may
6097                         * have been punted to an io worker and only added
6098                         * to poll_list after our earlier check, so look
6099                         * once more.
6100                         */
6101                        if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6102                            !list_empty_careful(&ctx->poll_list)) {
6103                                finish_wait(&ctx->sqo_wait, &wait);
6104                                continue;
6105                        }
6106
6107                        /* Tell userspace we may need a wakeup call */
6108                        spin_lock_irq(&ctx->completion_lock);
6109                        ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6110                        spin_unlock_irq(&ctx->completion_lock);
6111
6112                        to_submit = io_sqring_entries(ctx);
6113                        if (!to_submit || ret == -EBUSY) {
6114                                if (kthread_should_park()) {
6115                                        finish_wait(&ctx->sqo_wait, &wait);
6116                                        break;
6117                                }
6118                                if (current->task_works) {
6119                                        task_work_run();
6120                                        finish_wait(&ctx->sqo_wait, &wait);
6121                                        continue;
6122                                }
6123                                if (signal_pending(current))
6124                                        flush_signals(current);
6125                                schedule();
6126                                finish_wait(&ctx->sqo_wait, &wait);
6127
6128                                spin_lock_irq(&ctx->completion_lock);
6129                                ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6130                                spin_unlock_irq(&ctx->completion_lock);
6131                                ret = 0;
6132                                continue;
6133                        }
6134                        finish_wait(&ctx->sqo_wait, &wait);
6135
6136                        spin_lock_irq(&ctx->completion_lock);
6137                        ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6138                        spin_unlock_irq(&ctx->completion_lock);
6139                }
6140
6141                mutex_lock(&ctx->uring_lock);
6142                if (likely(!percpu_ref_is_dying(&ctx->refs)))
6143                        ret = io_submit_sqes(ctx, to_submit, NULL, -1);
6144                mutex_unlock(&ctx->uring_lock);
6145                timeout = jiffies + ctx->sq_thread_idle;
6146        }
6147
6148        if (current->task_works)
6149                task_work_run();
6150
6151        io_sq_thread_drop_mm(ctx);
6152        revert_creds(old_cred);
6153
6154        kthread_parkme();
6155
6156        return 0;
6157}
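/*
 * For reference, the userspace side of the IORING_SQ_NEED_WAKEUP dance
 * above: after publishing a new SQ tail, the application needs a full
 * barrier before reading sq_flags, then wakes the thread if asked to.
 * A minimal sketch (illustrative only, not compiled here), assuming the
 * SQ ring was mmap'ed at @sq_ptr with @p from io_uring_setup():
 */
#if 0
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static void sq_wakeup_if_needed(int ring_fd, void *sq_ptr,
				struct io_uring_params *p)
{
	unsigned *kflags = sq_ptr + p->sq_off.flags;

	/* order the SQ tail store above against the flags load below */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	if (__atomic_load_n(kflags, __ATOMIC_RELAXED) & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
#endif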
6158
6159struct io_wait_queue {
6160        struct wait_queue_entry wq;
6161        struct io_ring_ctx *ctx;
6162        unsigned to_wait;
6163        unsigned nr_timeouts;
6164};
6165
6166static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
6167{
6168        struct io_ring_ctx *ctx = iowq->ctx;
6169
6170        /*
6171         * Wake up if we have enough events, or if a timeout occurred since we
6172         * started waiting. For timeouts, we always want to return to userspace,
6173         * regardless of event count.
6174         */
6175        return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
6176                        atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6177}
6178
6179static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6180                            int wake_flags, void *key)
6181{
6182        struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6183                                                        wq);
6184
6185        /* use noflush == true, as we can't safely rely on locking context */
6186        if (!io_should_wake(iowq, true))
6187                return -1;
6188
6189        return autoremove_wake_function(curr, mode, wake_flags, key);
6190}
6191
6192/*
6193 * Wait until events become available, if we don't already have some. The
6194 * application must reap them itself, as they reside on the shared cq ring.
6195 */
6196static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6197                          const sigset_t __user *sig, size_t sigsz)
6198{
6199        struct io_wait_queue iowq = {
6200                .wq = {
6201                        .private        = current,
6202                        .func           = io_wake_function,
6203                        .entry          = LIST_HEAD_INIT(iowq.wq.entry),
6204                },
6205                .ctx            = ctx,
6206                .to_wait        = min_events,
6207        };
6208        struct io_rings *rings = ctx->rings;
6209        int ret = 0;
6210
6211        do {
6212                if (io_cqring_events(ctx, false) >= min_events)
6213                        return 0;
6214                if (!current->task_works)
6215                        break;
6216                task_work_run();
6217        } while (1);
6218
6219        if (sig) {
6220#ifdef CONFIG_COMPAT
6221                if (in_compat_syscall())
6222                        ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
6223                                                      sigsz);
6224                else
6225#endif
6226                        ret = set_user_sigmask(sig, sigsz);
6227
6228                if (ret)
6229                        return ret;
6230        }
6231
6232        iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
6233        trace_io_uring_cqring_wait(ctx, min_events);
6234        do {
6235                prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6236                                                TASK_INTERRUPTIBLE);
6237                /* make sure we run task_work before checking for signals */
6238                if (current->task_works)
6239                        task_work_run();
6240                if (signal_pending(current)) {
6241                        if (current->jobctl & JOBCTL_TASK_WORK) {
6242                                spin_lock_irq(&current->sighand->siglock);
6243                                current->jobctl &= ~JOBCTL_TASK_WORK;
6244                                recalc_sigpending();
6245                                spin_unlock_irq(&current->sighand->siglock);
6246                                continue;
6247                        }
6248                        ret = -EINTR;
6249                        break;
6250                }
6251                if (io_should_wake(&iowq, false))
6252                        break;
6253                schedule();
6254        } while (1);
6255        finish_wait(&ctx->wait, &iowq.wq);
6256
6257        restore_saved_sigmask_unless(ret == -EINTR);
6258
6259        return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
6260}
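/*
 * The matching userspace call for the wait above is io_uring_enter() with
 * IORING_ENTER_GETEVENTS; min_complete becomes iowq.to_wait. A minimal
 * sketch (illustrative only, not compiled here), assuming @ring_fd came
 * from io_uring_setup():
 */
#if 0
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_cqes(int ring_fd, unsigned min_complete)
{
	/* returns once min_complete CQEs are available, or -1/EINTR */
	return syscall(__NR_io_uring_enter, ring_fd, 0, min_complete,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}
#endif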
6261
6262static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6263{
6264#if defined(CONFIG_UNIX)
6265        if (ctx->ring_sock) {
6266                struct sock *sock = ctx->ring_sock->sk;
6267                struct sk_buff *skb;
6268
6269                while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6270                        kfree_skb(skb);
6271        }
6272#else
6273        int i;
6274
6275        for (i = 0; i < ctx->nr_user_files; i++) {
6276                struct file *file;
6277
6278                file = io_file_from_index(ctx, i);
6279                if (file)
6280                        fput(file);
6281        }
6282#endif
6283}
6284
6285static void io_file_ref_kill(struct percpu_ref *ref)
6286{
6287        struct fixed_file_data *data;
6288
6289        data = container_of(ref, struct fixed_file_data, refs);
6290        complete(&data->done);
6291}
6292
6293static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6294{
6295        struct fixed_file_data *data = ctx->file_data;
6296        struct fixed_file_ref_node *ref_node = NULL;
6297        unsigned nr_tables, i;
6298
6299        if (!data)
6300                return -ENXIO;
6301
6302        spin_lock(&data->lock);
6303        if (!list_empty(&data->ref_list))
6304                ref_node = list_first_entry(&data->ref_list,
6305                                struct fixed_file_ref_node, node);
6306        spin_unlock(&data->lock);
6307        if (ref_node)
6308                percpu_ref_kill(&ref_node->refs);
6309
6310        percpu_ref_kill(&data->refs);
6311
6312        /* wait for all ref nodes to complete */
6313        flush_delayed_work(&ctx->file_put_work);
6314        wait_for_completion(&data->done);
6315
6316        __io_sqe_files_unregister(ctx);
6317        nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6318        for (i = 0; i < nr_tables; i++)
6319                kfree(data->table[i].files);
6320        kfree(data->table);
6321        percpu_ref_exit(&data->refs);
6322        kfree(data);
6323        ctx->file_data = NULL;
6324        ctx->nr_user_files = 0;
6325        return 0;
6326}
6327
6328static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6329{
6330        if (ctx->sqo_thread) {
6331                wait_for_completion(&ctx->sq_thread_comp);
6332                /*
6333                 * The park is a bit of a work-around; without it we get
6334                 * warning spews on shutdown with SQPOLL set and affinity
6335                 * set to a single CPU.
6336                 */
6337                kthread_park(ctx->sqo_thread);
6338                kthread_stop(ctx->sqo_thread);
6339                ctx->sqo_thread = NULL;
6340        }
6341}
6342
6343static void io_finish_async(struct io_ring_ctx *ctx)
6344{
6345        io_sq_thread_stop(ctx);
6346
6347        if (ctx->io_wq) {
6348                io_wq_destroy(ctx->io_wq);
6349                ctx->io_wq = NULL;
6350        }
6351}
6352
6353#if defined(CONFIG_UNIX)
6354/*
6355 * Ensure the UNIX gc is aware of our file set, so we are certain that
6356 * the io_uring can be safely unregistered on process exit, even if we have
6357 * loops in the file referencing.
6358 */
6359static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6360{
6361        struct sock *sk = ctx->ring_sock->sk;
6362        struct scm_fp_list *fpl;
6363        struct sk_buff *skb;
6364        int i, nr_files;
6365
6366        fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6367        if (!fpl)
6368                return -ENOMEM;
6369
6370        skb = alloc_skb(0, GFP_KERNEL);
6371        if (!skb) {
6372                kfree(fpl);
6373                return -ENOMEM;
6374        }
6375
6376        skb->sk = sk;
6377
6378        nr_files = 0;
6379        fpl->user = get_uid(ctx->user);
6380        for (i = 0; i < nr; i++) {
6381                struct file *file = io_file_from_index(ctx, i + offset);
6382
6383                if (!file)
6384                        continue;
6385                fpl->fp[nr_files] = get_file(file);
6386                unix_inflight(fpl->user, fpl->fp[nr_files]);
6387                nr_files++;
6388        }
6389
6390        if (nr_files) {
6391                fpl->max = SCM_MAX_FD;
6392                fpl->count = nr_files;
6393                UNIXCB(skb).fp = fpl;
6394                skb->destructor = unix_destruct_scm;
6395                refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6396                skb_queue_head(&sk->sk_receive_queue, skb);
6397
6398                for (i = 0; i < nr_files; i++)
6399                        fput(fpl->fp[i]);
6400        } else {
6401                kfree_skb(skb);
6402                kfree(fpl);
6403        }
6404
6405        return 0;
6406}
6407
6408/*
6409 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6410 * causes regular reference counting to break down. We rely on the UNIX
6411 * garbage collection to take care of this problem for us.
6412 */
6413static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6414{
6415        unsigned left, total;
6416        int ret = 0;
6417
6418        total = 0;
6419        left = ctx->nr_user_files;
6420        while (left) {
6421                unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6422
6423                ret = __io_sqe_files_scm(ctx, this_files, total);
6424                if (ret)
6425                        break;
6426                left -= this_files;
6427                total += this_files;
6428        }
6429
6430        if (!ret)
6431                return 0;
6432
6433        while (total < ctx->nr_user_files) {
6434                struct file *file = io_file_from_index(ctx, total);
6435
6436                if (file)
6437                        fput(file);
6438                total++;
6439        }
6440
6441        return ret;
6442}
6443#else
6444static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6445{
6446        return 0;
6447}
6448#endif
6449
6450static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6451                                    unsigned nr_files)
6452{
6453        int i;
6454
6455        for (i = 0; i < nr_tables; i++) {
6456                struct fixed_file_table *table = &ctx->file_data->table[i];
6457                unsigned this_files;
6458
6459                this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6460                table->files = kcalloc(this_files, sizeof(struct file *),
6461                                        GFP_KERNEL);
6462                if (!table->files)
6463                        break;
6464                nr_files -= this_files;
6465        }
6466
6467        if (i == nr_tables)
6468                return 0;
6469
6470        for (i = 0; i < nr_tables; i++) {
6471                struct fixed_file_table *table = &ctx->file_data->table[i];
6472                kfree(table->files);
6473        }
6474        return 1;
6475}
6476
6477static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6478{
6479#if defined(CONFIG_UNIX)
6480        struct sock *sock = ctx->ring_sock->sk;
6481        struct sk_buff_head list, *head = &sock->sk_receive_queue;
6482        struct sk_buff *skb;
6483        int i;
6484
6485        __skb_queue_head_init(&list);
6486
6487        /*
6488         * Find the skb that holds this file in its SCM_RIGHTS. When found,
6489         * remove this entry and rearrange the file array.
6490         */
6491        skb = skb_dequeue(head);
6492        while (skb) {
6493                struct scm_fp_list *fp;
6494
6495                fp = UNIXCB(skb).fp;
6496                for (i = 0; i < fp->count; i++) {
6497                        int left;
6498
6499                        if (fp->fp[i] != file)
6500                                continue;
6501
6502                        unix_notinflight(fp->user, fp->fp[i]);
6503                        left = fp->count - 1 - i;
6504                        if (left) {
6505                                memmove(&fp->fp[i], &fp->fp[i + 1],
6506                                                left * sizeof(struct file *));
6507                        }
6508                        fp->count--;
6509                        if (!fp->count) {
6510                                kfree_skb(skb);
6511                                skb = NULL;
6512                        } else {
6513                                __skb_queue_tail(&list, skb);
6514                        }
6515                        fput(file);
6516                        file = NULL;
6517                        break;
6518                }
6519
6520                if (!file)
6521                        break;
6522
6523                __skb_queue_tail(&list, skb);
6524
6525                skb = skb_dequeue(head);
6526        }
6527
6528        if (skb_peek(&list)) {
6529                spin_lock_irq(&head->lock);
6530                while ((skb = __skb_dequeue(&list)) != NULL)
6531                        __skb_queue_tail(head, skb);
6532                spin_unlock_irq(&head->lock);
6533        }
6534#else
6535        fput(file);
6536#endif
6537}
6538
6539struct io_file_put {
6540        struct list_head list;
6541        struct file *file;
6542};
6543
6544static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
6545{
6546        struct fixed_file_data *file_data = ref_node->file_data;
6547        struct io_ring_ctx *ctx = file_data->ctx;
6548        struct io_file_put *pfile, *tmp;
6549
6550        list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6551                list_del(&pfile->list);
6552                io_ring_file_put(ctx, pfile->file);
6553                kfree(pfile);
6554        }
6555
6556        spin_lock(&file_data->lock);
6557        list_del(&ref_node->node);
6558        spin_unlock(&file_data->lock);
6559
6560        percpu_ref_exit(&ref_node->refs);
6561        kfree(ref_node);
6562        percpu_ref_put(&file_data->refs);
6563}
6564
6565static void io_file_put_work(struct work_struct *work)
6566{
6567        struct io_ring_ctx *ctx;
6568        struct llist_node *node;
6569
6570        ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
6571        node = llist_del_all(&ctx->file_put_llist);
6572
6573        while (node) {
6574                struct fixed_file_ref_node *ref_node;
6575                struct llist_node *next = node->next;
6576
6577                ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
6578                __io_file_put_work(ref_node);
6579                node = next;
6580        }
6581}
6582
6583static void io_file_data_ref_zero(struct percpu_ref *ref)
6584{
6585        struct fixed_file_ref_node *ref_node;
6586        struct io_ring_ctx *ctx;
6587        bool first_add;
6588        int delay = HZ;
6589
6590        ref_node = container_of(ref, struct fixed_file_ref_node, refs);
6591        ctx = ref_node->file_data->ctx;
6592
6593        if (percpu_ref_is_dying(&ctx->file_data->refs))
6594                delay = 0;
6595
6596        first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6597        if (!delay)
6598                mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6599        else if (first_add)
6600                queue_delayed_work(system_wq, &ctx->file_put_work, delay);
6601}
6602
6603static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
6604                        struct io_ring_ctx *ctx)
6605{
6606        struct fixed_file_ref_node *ref_node;
6607
6608        ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
6609        if (!ref_node)
6610                return ERR_PTR(-ENOMEM);
6611
6612        if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
6613                            0, GFP_KERNEL)) {
6614                kfree(ref_node);
6615                return ERR_PTR(-ENOMEM);
6616        }
6617        INIT_LIST_HEAD(&ref_node->node);
6618        INIT_LIST_HEAD(&ref_node->file_list);
6619        ref_node->file_data = ctx->file_data;
6620        return ref_node;
6621}
6622
6623static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
6624{
6625        percpu_ref_exit(&ref_node->refs);
6626        kfree(ref_node);
6627}
6628
6629static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6630                                 unsigned nr_args)
6631{
6632        __s32 __user *fds = (__s32 __user *) arg;
6633        unsigned nr_tables;
6634        struct file *file;
6635        int fd, ret = 0;
6636        unsigned i;
6637        struct fixed_file_ref_node *ref_node;
6638
6639        if (ctx->file_data)
6640                return -EBUSY;
6641        if (!nr_args)
6642                return -EINVAL;
6643        if (nr_args > IORING_MAX_FIXED_FILES)
6644                return -EMFILE;
6645
6646        ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6647        if (!ctx->file_data)
6648                return -ENOMEM;
6649        ctx->file_data->ctx = ctx;
6650        init_completion(&ctx->file_data->done);
6651        INIT_LIST_HEAD(&ctx->file_data->ref_list);
6652        spin_lock_init(&ctx->file_data->lock);
6653
6654        nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
6655        ctx->file_data->table = kcalloc(nr_tables,
6656                                        sizeof(struct fixed_file_table),
6657                                        GFP_KERNEL);
6658        if (!ctx->file_data->table) {
6659                kfree(ctx->file_data);
6660                ctx->file_data = NULL;
6661                return -ENOMEM;
6662        }
6663
6664        if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
6665                                PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6666                kfree(ctx->file_data->table);
6667                kfree(ctx->file_data);
6668                ctx->file_data = NULL;
6669                return -ENOMEM;
6670        }
6671
6672        if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
6673                percpu_ref_exit(&ctx->file_data->refs);
6674                kfree(ctx->file_data->table);
6675                kfree(ctx->file_data);
6676                ctx->file_data = NULL;
6677                return -ENOMEM;
6678        }
6679
6680        for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
6681                struct fixed_file_table *table;
6682                unsigned index;
6683
6684                ret = -EFAULT;
6685                if (copy_from_user(&fd, &fds[i], sizeof(fd)))
6686                        break;
6687                /* allow sparse sets */
6688                if (fd == -1) {
6689                        ret = 0;
6690                        continue;
6691                }
6692
6693                table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
6694                index = i & IORING_FILE_TABLE_MASK;
6695                file = fget(fd);
6696
6697                ret = -EBADF;
6698                if (!file)
6699                        break;
6700
6701                /*
6702                 * Don't allow io_uring instances to be registered. If UNIX
6703                 * isn't enabled, then this causes a reference cycle and this
6704                 * instance can never get freed. If UNIX is enabled we'll
6705                 * handle it just fine, but there's still no point in allowing
6706                 * a ring fd as it doesn't support regular read/write anyway.
6707                 */
6708                if (file->f_op == &io_uring_fops) {
6709                        fput(file);
6710                        break;
6711                }
6712                ret = 0;
6713                table->files[index] = file;
6714        }
6715
6716        if (ret) {
6717                for (i = 0; i < ctx->nr_user_files; i++) {
6718                        file = io_file_from_index(ctx, i);
6719                        if (file)
6720                                fput(file);
6721                }
6722                for (i = 0; i < nr_tables; i++)
6723                        kfree(ctx->file_data->table[i].files);
6724
6725                percpu_ref_exit(&ctx->file_data->refs);
6726                kfree(ctx->file_data->table);
6727                kfree(ctx->file_data);
6728                ctx->file_data = NULL;
6729                ctx->nr_user_files = 0;
6730                return ret;
6731        }
6732
6733        ret = io_sqe_files_scm(ctx);
6734        if (ret) {
6735                io_sqe_files_unregister(ctx);
6736                return ret;
6737        }
6738
6739        ref_node = alloc_fixed_file_ref_node(ctx);
6740        if (IS_ERR(ref_node)) {
6741                io_sqe_files_unregister(ctx);
6742                return PTR_ERR(ref_node);
6743        }
6744
6745        ctx->file_data->cur_refs = &ref_node->refs;
6746        spin_lock(&ctx->file_data->lock);
6747        list_add(&ref_node->node, &ctx->file_data->ref_list);
6748        spin_unlock(&ctx->file_data->lock);
6749        percpu_ref_get(&ctx->file_data->refs);
6750        return ret;
6751}
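/*
 * Userspace view of the registration above: an array of fds handed to
 * io_uring_register(), with -1 marking sparse slots that can be filled
 * in later via IORING_REGISTER_FILES_UPDATE. A minimal sketch
 * (illustrative only, not compiled here):
 */
#if 0
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_files(int ring_fd, int fd0, int fd1)
{
	__s32 fds[3] = { fd0, -1, fd1 };	/* slot 1 left sparse */

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES, fds, 3);
}
#endif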
6752
6753static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
6754                                int index)
6755{
6756#if defined(CONFIG_UNIX)
6757        struct sock *sock = ctx->ring_sock->sk;
6758        struct sk_buff_head *head = &sock->sk_receive_queue;
6759        struct sk_buff *skb;
6760
6761        /*
6762         * See if we can merge this file into an existing skb SCM_RIGHTS
6763         * file set. If there's no room, fall back to allocating a new skb
6764         * and filling it in.
6765         */
6766        spin_lock_irq(&head->lock);
6767        skb = skb_peek(head);
6768        if (skb) {
6769                struct scm_fp_list *fpl = UNIXCB(skb).fp;
6770
6771                if (fpl->count < SCM_MAX_FD) {
6772                        __skb_unlink(skb, head);
6773                        spin_unlock_irq(&head->lock);
6774                        fpl->fp[fpl->count] = get_file(file);
6775                        unix_inflight(fpl->user, fpl->fp[fpl->count]);
6776                        fpl->count++;
6777                        spin_lock_irq(&head->lock);
6778                        __skb_queue_head(head, skb);
6779                } else {
6780                        skb = NULL;
6781                }
6782        }
6783        spin_unlock_irq(&head->lock);
6784
6785        if (skb) {
6786                fput(file);
6787                return 0;
6788        }
6789
6790        return __io_sqe_files_scm(ctx, 1, index);
6791#else
6792        return 0;
6793#endif
6794}
6795
6796static int io_queue_file_removal(struct fixed_file_data *data,
6797                                 struct file *file)
6798{
6799        struct io_file_put *pfile;
6800        struct percpu_ref *refs = data->cur_refs;
6801        struct fixed_file_ref_node *ref_node;
6802
6803        pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
6804        if (!pfile)
6805                return -ENOMEM;
6806
6807        ref_node = container_of(refs, struct fixed_file_ref_node, refs);
6808        pfile->file = file;
6809        list_add(&pfile->list, &ref_node->file_list);
6810
6811        return 0;
6812}
6813
6814static int __io_sqe_files_update(struct io_ring_ctx *ctx,
6815                                 struct io_uring_files_update *up,
6816                                 unsigned nr_args)
6817{
6818        struct fixed_file_data *data = ctx->file_data;
6819        struct fixed_file_ref_node *ref_node;
6820        struct file *file;
6821        __s32 __user *fds;
6822        int fd, i, err;
6823        __u32 done;
6824        bool needs_switch = false;
6825
6826        if (check_add_overflow(up->offset, nr_args, &done))
6827                return -EOVERFLOW;
6828        if (done > ctx->nr_user_files)
6829                return -EINVAL;
6830
6831        ref_node = alloc_fixed_file_ref_node(ctx);
6832        if (IS_ERR(ref_node))
6833                return PTR_ERR(ref_node);
6834
6835        done = 0;
6836        fds = u64_to_user_ptr(up->fds);
6837        while (nr_args) {
6838                struct fixed_file_table *table;
6839                unsigned index;
6840
6841                err = 0;
6842                if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
6843                        err = -EFAULT;
6844                        break;
6845                }
6846                i = array_index_nospec(up->offset, ctx->nr_user_files);
6847                table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
6848                index = i & IORING_FILE_TABLE_MASK;
6849                if (table->files[index]) {
6850                        file = table->files[index];
6851                        err = io_queue_file_removal(data, file);
6852                        if (err)
6853                                break;
6854                        table->files[index] = NULL;
6855                        needs_switch = true;
6856                }
6857                if (fd != -1) {
6858                        file = fget(fd);
6859                        if (!file) {
6860                                err = -EBADF;
6861                                break;
6862                        }
6863                        /*
6864                         * Don't allow io_uring instances to be registered. If
6865                         * UNIX isn't enabled, then this causes a reference
6866                         * cycle and this instance can never get freed. If UNIX
6867                         * is enabled we'll handle it just fine, but there's
6868                         * still no point in allowing a ring fd as it doesn't
6869                         * support regular read/write anyway.
6870                         */
6871                        if (file->f_op == &io_uring_fops) {
6872                                fput(file);
6873                                err = -EBADF;
6874                                break;
6875                        }
6876                        table->files[index] = file;
6877                        err = io_sqe_file_register(ctx, file, i);
6878                        if (err) {
6879                                fput(file);
6880                                break;
6881                        }
6882                }
6883                nr_args--;
6884                done++;
6885                up->offset++;
6886        }
6887
6888        if (needs_switch) {
6889                percpu_ref_kill(data->cur_refs);
6890                spin_lock(&data->lock);
6891                list_add(&ref_node->node, &data->ref_list);
6892                data->cur_refs = &ref_node->refs;
6893                spin_unlock(&data->lock);
6894                percpu_ref_get(&ctx->file_data->refs);
6895        } else
6896                destroy_fixed_file_ref_node(ref_node);
6897
6898        return done ? done : err;
6899}
6900
6901static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
6902                               unsigned nr_args)
6903{
6904        struct io_uring_files_update up;
6905
6906        if (!ctx->file_data)
6907                return -ENXIO;
6908        if (!nr_args)
6909                return -EINVAL;
6910        if (copy_from_user(&up, arg, sizeof(up)))
6911                return -EFAULT;
6912        if (up.resv)
6913                return -EINVAL;
6914
6915        return __io_sqe_files_update(ctx, &up, nr_args);
6916}
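/*
 * The update path above is driven from userspace with a struct
 * io_uring_files_update; an fd of -1 removes the slot, anything else
 * replaces it. A minimal sketch (illustrative only, not compiled here):
 */
#if 0
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int update_file(int ring_fd, unsigned slot, int new_fd)
{
	__s32 fd = new_fd;			/* -1 just clears the slot */
	struct io_uring_files_update up = {
		.offset	= slot,
		.fds	= (unsigned long) &fd,
	};

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE, &up, 1);
}
#endif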
6917
6918static void io_free_work(struct io_wq_work *work)
6919{
6920        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6921
6922        /* Consider that io_steal_work() relies on this ref */
6923        io_put_req(req);
6924}
6925
6926static int io_init_wq_offload(struct io_ring_ctx *ctx,
6927                              struct io_uring_params *p)
6928{
6929        struct io_wq_data data;
6930        struct fd f;
6931        struct io_ring_ctx *ctx_attach;
6932        unsigned int concurrency;
6933        int ret = 0;
6934
6935        data.user = ctx->user;
6936        data.free_work = io_free_work;
6937        data.do_work = io_wq_submit_work;
6938
6939        if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
6940                /* Do QD, or 4 * CPUS, whichever is smaller */
6941                concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
6942
6943                ctx->io_wq = io_wq_create(concurrency, &data);
6944                if (IS_ERR(ctx->io_wq)) {
6945                        ret = PTR_ERR(ctx->io_wq);
6946                        ctx->io_wq = NULL;
6947                }
6948                return ret;
6949        }
6950
6951        f = fdget(p->wq_fd);
6952        if (!f.file)
6953                return -EBADF;
6954
6955        if (f.file->f_op != &io_uring_fops) {
6956                ret = -EINVAL;
6957                goto out_fput;
6958        }
6959
6960        ctx_attach = f.file->private_data;
6961        /* @io_wq is protected by holding the fd */
6962        if (!io_wq_get(ctx_attach->io_wq, &data)) {
6963                ret = -EINVAL;
6964                goto out_fput;
6965        }
6966
6967        ctx->io_wq = ctx_attach->io_wq;
6968out_fput:
6969        fdput(f);
6970        return ret;
6971}
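/*
 * The IORING_SETUP_ATTACH_WQ branch above corresponds to userspace
 * passing the fd of an existing ring in params->wq_fd, so both rings
 * share one io-wq. A minimal sketch (illustrative only, not compiled
 * here):
 */
#if 0
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_attached_ring(int existing_ring_fd, unsigned entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_ATTACH_WQ;
	p.wq_fd = existing_ring_fd;	/* share the io-wq of this ring */
	return syscall(__NR_io_uring_setup, entries, &p);
}
#endif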
6972
6973static int io_sq_offload_start(struct io_ring_ctx *ctx,
6974                               struct io_uring_params *p)
6975{
6976        int ret;
6977
6978        mmgrab(current->mm);
6979        ctx->sqo_mm = current->mm;
6980
6981        if (ctx->flags & IORING_SETUP_SQPOLL) {
6982                ret = -EPERM;
6983                if (!capable(CAP_SYS_ADMIN))
6984                        goto err;
6985
6986                ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
6987                if (!ctx->sq_thread_idle)
6988                        ctx->sq_thread_idle = HZ;
6989
6990                if (p->flags & IORING_SETUP_SQ_AFF) {
6991                        int cpu = p->sq_thread_cpu;
6992
6993                        ret = -EINVAL;
6994                        if (cpu >= nr_cpu_ids)
6995                                goto err;
6996                        if (!cpu_online(cpu))
6997                                goto err;
6998
6999                        ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
7000                                                        ctx, cpu,
7001                                                        "io_uring-sq");
7002                } else {
7003                        ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
7004                                                        "io_uring-sq");
7005                }
7006                if (IS_ERR(ctx->sqo_thread)) {
7007                        ret = PTR_ERR(ctx->sqo_thread);
7008                        ctx->sqo_thread = NULL;
7009                        goto err;
7010                }
7011                wake_up_process(ctx->sqo_thread);
7012        } else if (p->flags & IORING_SETUP_SQ_AFF) {
7013                /* Can't have SQ_AFF without SQPOLL */
7014                ret = -EINVAL;
7015                goto err;
7016        }
7017
7018        ret = io_init_wq_offload(ctx, p);
7019        if (ret)
7020                goto err;
7021
7022        return 0;
7023err:
7024        io_finish_async(ctx);
7025        mmdrop(ctx->sqo_mm);
7026        ctx->sqo_mm = NULL;
7027        return ret;
7028}
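/*
 * From userspace, the SQPOLL setup above is requested via the params
 * flags and fields checked in this function. A minimal sketch
 * (illustrative only, not compiled here); note that on this kernel
 * SQPOLL requires CAP_SYS_ADMIN:
 */
#if 0
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_sqpoll_ring(unsigned entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
	p.sq_thread_cpu = 0;		/* pin io_uring-sq to CPU 0 */
	p.sq_thread_idle = 2000;	/* spin for up to 2s before sleeping */
	return syscall(__NR_io_uring_setup, entries, &p);
}
#endif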
7029
7030static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
7031{
7032        atomic_long_sub(nr_pages, &user->locked_vm);
7033}
7034
7035static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
7036{
7037        unsigned long page_limit, cur_pages, new_pages;
7038
7039        /* Don't allow more pages than we can safely lock */
7040        page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7041
7042        do {
7043                cur_pages = atomic_long_read(&user->locked_vm);
7044                new_pages = cur_pages + nr_pages;
7045                if (new_pages > page_limit)
7046                        return -ENOMEM;
7047        } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7048                                        new_pages) != cur_pages);
7049
7050        return 0;
7051}
7052
7053static void io_mem_free(void *ptr)
7054{
7055        struct page *page;
7056
7057        if (!ptr)
7058                return;
7059
7060        page = virt_to_head_page(ptr);
7061        if (put_page_testzero(page))
7062                free_compound_page(page);
7063}
7064
7065static void *io_mem_alloc(size_t size)
7066{
7067        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
7068                                __GFP_NORETRY;
7069
7070        return (void *) __get_free_pages(gfp_flags, get_order(size));
7071}
7072
7073static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
7074                                size_t *sq_offset)
7075{
7076        struct io_rings *rings;
7077        size_t off, sq_array_size;
7078
7079        off = struct_size(rings, cqes, cq_entries);
7080        if (off == SIZE_MAX)
7081                return SIZE_MAX;
7082
7083#ifdef CONFIG_SMP
7084        off = ALIGN(off, SMP_CACHE_BYTES);
7085        if (off == 0)
7086                return SIZE_MAX;
7087#endif
7088
7089        sq_array_size = array_size(sizeof(u32), sq_entries);
7090        if (sq_array_size == SIZE_MAX)
7091                return SIZE_MAX;
7092
7093        if (check_add_overflow(off, sq_array_size, &off))
7094                return SIZE_MAX;
7095
7096        if (sq_offset)
7097                *sq_offset = off;
7098
7099        return off;
7100}
7101
7102static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
7103{
7104        size_t pages;
7105
7106        pages = (size_t)1 << get_order(
7107                rings_size(sq_entries, cq_entries, NULL));
7108        pages += (size_t)1 << get_order(
7109                array_size(sizeof(struct io_uring_sqe), sq_entries));
7110
7111        return pages;
7112}
7113
7114static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
7115{
7116        int i, j;
7117
7118        if (!ctx->user_bufs)
7119                return -ENXIO;
7120
7121        for (i = 0; i < ctx->nr_user_bufs; i++) {
7122                struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7123
7124                for (j = 0; j < imu->nr_bvecs; j++)
7125                        unpin_user_page(imu->bvec[j].bv_page);
7126
7127                if (ctx->account_mem)
7128                        io_unaccount_mem(ctx->user, imu->nr_bvecs);
7129                kvfree(imu->bvec);
7130                imu->nr_bvecs = 0;
7131        }
7132
7133        kfree(ctx->user_bufs);
7134        ctx->user_bufs = NULL;
7135        ctx->nr_user_bufs = 0;
7136        return 0;
7137}
7138
7139static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
7140                       void __user *arg, unsigned index)
7141{
7142        struct iovec __user *src;
7143
7144#ifdef CONFIG_COMPAT
7145        if (ctx->compat) {
7146                struct compat_iovec __user *ciovs;
7147                struct compat_iovec ciov;
7148
7149                ciovs = (struct compat_iovec __user *) arg;
7150                if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
7151                        return -EFAULT;
7152
7153                dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
7154                dst->iov_len = ciov.iov_len;
7155                return 0;
7156        }
7157#endif
7158        src = (struct iovec __user *) arg;
7159        if (copy_from_user(dst, &src[index], sizeof(*dst)))
7160                return -EFAULT;
7161        return 0;
7162}
7163
7164static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
7165                                  unsigned nr_args)
7166{
7167        struct vm_area_struct **vmas = NULL;
7168        struct page **pages = NULL;
7169        int i, j, got_pages = 0;
7170        int ret = -EINVAL;
7171
7172        if (ctx->user_bufs)
7173                return -EBUSY;
7174        if (!nr_args || nr_args > UIO_MAXIOV)
7175                return -EINVAL;
7176
7177        ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
7178                                        GFP_KERNEL);
7179        if (!ctx->user_bufs)
7180                return -ENOMEM;
7181
7182        for (i = 0; i < nr_args; i++) {
7183                struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7184                unsigned long off, start, end, ubuf;
7185                int pret, nr_pages;
7186                struct iovec iov;
7187                size_t size;
7188
7189                ret = io_copy_iov(ctx, &iov, arg, i);
7190                if (ret)
7191                        goto err;
7192
7193                /*
7194                 * Don't impose further limits on the size and buffer
7195                 * constraints here; we'll -EINVAL later when IO is
7196                 * submitted if they are wrong.
7197                 */
7198                ret = -EFAULT;
7199                if (!iov.iov_base || !iov.iov_len)
7200                        goto err;
7201
7202                /* arbitrary limit, but we need something */
7203                if (iov.iov_len > SZ_1G)
7204                        goto err;
7205
7206                ubuf = (unsigned long) iov.iov_base;
7207                end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
7208                start = ubuf >> PAGE_SHIFT;
7209                nr_pages = end - start;
7210
7211                if (ctx->account_mem) {
7212                        ret = io_account_mem(ctx->user, nr_pages);
7213                        if (ret)
7214                                goto err;
7215                }
7216
7217                ret = 0;
7218                if (!pages || nr_pages > got_pages) {
7219                        kvfree(vmas);
7220                        kvfree(pages);
7221                        pages = kvmalloc_array(nr_pages, sizeof(struct page *),
7222                                                GFP_KERNEL);
7223                        vmas = kvmalloc_array(nr_pages,
7224                                        sizeof(struct vm_area_struct *),
7225                                        GFP_KERNEL);
7226                        if (!pages || !vmas) {
7227                                ret = -ENOMEM;
7228                                if (ctx->account_mem)
7229                                        io_unaccount_mem(ctx->user, nr_pages);
7230                                goto err;
7231                        }
7232                        got_pages = nr_pages;
7233                }
7234
7235                imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
7236                                                GFP_KERNEL);
7237                ret = -ENOMEM;
7238                if (!imu->bvec) {
7239                        if (ctx->account_mem)
7240                                io_unaccount_mem(ctx->user, nr_pages);
7241                        goto err;
7242                }
7243
7244                ret = 0;
7245                mmap_read_lock(current->mm);
7246                pret = pin_user_pages(ubuf, nr_pages,
7247                                      FOLL_WRITE | FOLL_LONGTERM,
7248                                      pages, vmas);
7249                if (pret == nr_pages) {
7250                        /* we don't support file-backed memory */
7251                        for (j = 0; j < nr_pages; j++) {
7252                                struct vm_area_struct *vma = vmas[j];
7253
7254                                if (vma->vm_file &&
7255                                    !is_file_hugepages(vma->vm_file)) {
7256                                        ret = -EOPNOTSUPP;
7257                                        break;
7258                                }
7259                        }
7260                } else {
7261                        ret = pret < 0 ? pret : -EFAULT;
7262                }
7263                mmap_read_unlock(current->mm);
7264                if (ret) {
7265                        /*
7266                         * If we did a partial map, or found file-backed vmas,
7267                         * release any pages we did get.
7268                         */
7269                        if (pret > 0)
7270                                unpin_user_pages(pages, pret);
7271                        if (ctx->account_mem)
7272                                io_unaccount_mem(ctx->user, nr_pages);
7273                        kvfree(imu->bvec);
7274                        goto err;
7275                }
7276
7277                off = ubuf & ~PAGE_MASK;
7278                size = iov.iov_len;
7279                for (j = 0; j < nr_pages; j++) {
7280                        size_t vec_len;
7281
7282                        vec_len = min_t(size_t, size, PAGE_SIZE - off);
7283                        imu->bvec[j].bv_page = pages[j];
7284                        imu->bvec[j].bv_len = vec_len;
7285                        imu->bvec[j].bv_offset = off;
7286                        off = 0;
7287                        size -= vec_len;
7288                }
7289                /* store original address for later verification */
7290                imu->ubuf = ubuf;
7291                imu->len = iov.iov_len;
7292                imu->nr_bvecs = nr_pages;
7293
7294                ctx->nr_user_bufs++;
7295        }
7296        kvfree(pages);
7297        kvfree(vmas);
7298        return 0;
7299err:
7300        kvfree(pages);
7301        kvfree(vmas);
7302        io_sqe_buffer_unregister(ctx);
7303        return ret;
7304}
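/*
 * The registration above is fed from userspace as an array of iovecs;
 * the memory must be anonymous (or hugetlb) and counts against
 * RLIMIT_MEMLOCK when accounting is enabled. A minimal sketch
 * (illustrative only, not compiled here):
 */
#if 0
#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

static int register_one_buffer(int ring_fd, size_t len)
{
	struct iovec iov;

	iov.iov_base = malloc(len);	/* anonymous, not file-backed */
	iov.iov_len = len;
	if (!iov.iov_base)
		return -1;
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, &iov, 1);
}
#endif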
7305
7306static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
7307{
7308        __s32 __user *fds = arg;
7309        int fd;
7310
7311        if (ctx->cq_ev_fd)
7312                return -EBUSY;
7313
7314        if (copy_from_user(&fd, fds, sizeof(*fds)))
7315                return -EFAULT;
7316
7317        ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
7318        if (IS_ERR(ctx->cq_ev_fd)) {
7319                int ret = PTR_ERR(ctx->cq_ev_fd);
7320                ctx->cq_ev_fd = NULL;
7321                return ret;
7322        }
7323
7324        return 0;
7325}
7326
7327static int io_eventfd_unregister(struct io_ring_ctx *ctx)
7328{
7329        if (ctx->cq_ev_fd) {
7330                eventfd_ctx_put(ctx->cq_ev_fd);
7331                ctx->cq_ev_fd = NULL;
7332                return 0;
7333        }
7334
7335        return -ENXIO;
7336}
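/*
 * Userspace pairs the above with an eventfd that gets signalled whenever
 * CQEs are posted, making the ring easy to integrate into an existing
 * event loop. A minimal sketch (illustrative only, not compiled here):
 */
#if 0
#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_cq_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0)
		return -1;
	return efd;	/* becomes readable whenever completions arrive */
}
#endif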
7337
7338static int __io_destroy_buffers(int id, void *p, void *data)
7339{
7340        struct io_ring_ctx *ctx = data;
7341        struct io_buffer *buf = p;
7342
7343        __io_remove_buffers(ctx, buf, id, -1U);
7344        return 0;
7345}
7346
7347static void io_destroy_buffers(struct io_ring_ctx *ctx)
7348{
7349        idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
7350        idr_destroy(&ctx->io_buffer_idr);
7351}
7352
7353static void io_ring_ctx_free(struct io_ring_ctx *ctx)
7354{
7355        io_finish_async(ctx);
7356        if (ctx->sqo_mm)
7357                mmdrop(ctx->sqo_mm);
7358
7359        io_iopoll_reap_events(ctx);
7360        io_sqe_buffer_unregister(ctx);
7361        io_sqe_files_unregister(ctx);
7362        io_eventfd_unregister(ctx);
7363        io_destroy_buffers(ctx);
7364        idr_destroy(&ctx->personality_idr);
7365
7366#if defined(CONFIG_UNIX)
7367        if (ctx->ring_sock) {
7368                ctx->ring_sock->file = NULL; /* so that iput() is called */
7369                sock_release(ctx->ring_sock);
7370        }
7371#endif
7372
7373        io_mem_free(ctx->rings);
7374        io_mem_free(ctx->sq_sqes);
7375
7376        percpu_ref_exit(&ctx->refs);
7377        free_uid(ctx->user);
7378        put_cred(ctx->creds);
7379        kfree(ctx->cancel_hash);
7380        kmem_cache_free(req_cachep, ctx->fallback_req);
7381        kfree(ctx);
7382}
7383
7384static __poll_t io_uring_poll(struct file *file, poll_table *wait)
7385{
7386        struct io_ring_ctx *ctx = file->private_data;
7387        __poll_t mask = 0;
7388
7389        poll_wait(file, &ctx->cq_wait, wait);
7390        /*
7391         * synchronizes with barrier from wq_has_sleeper call in
7392         * io_commit_cqring
7393         */
7394        smp_rmb();
7395        if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
7396            ctx->rings->sq_ring_entries)
7397                mask |= EPOLLOUT | EPOLLWRNORM;
7398        if (io_cqring_events(ctx, false))
7399                mask |= EPOLLIN | EPOLLRDNORM;
7400
7401        return mask;
7402}
7403
7404static int io_uring_fasync(int fd, struct file *file, int on)
7405{
7406        struct io_ring_ctx *ctx = file->private_data;
7407
7408        return fasync_helper(fd, file, on, &ctx->cq_fasync);
7409}
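/*
 * Thanks to the poll/fasync hooks above, the ring fd itself can sit in
 * an epoll set: EPOLLIN means CQEs are ready to reap, EPOLLOUT means SQ
 * space is available. A minimal sketch (illustrative only, not compiled
 * here):
 */
#if 0
#include <sys/epoll.h>

static int watch_ring(int epfd, int ring_fd)
{
	struct epoll_event ev = {
		.events	 = EPOLLIN | EPOLLOUT,
		.data.fd = ring_fd,
	};

	return epoll_ctl(epfd, EPOLL_CTL_ADD, ring_fd, &ev);
}
#endif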
7410
7411static int io_remove_personalities(int id, void *p, void *data)
7412{
7413        struct io_ring_ctx *ctx = data;
7414        const struct cred *cred;
7415
7416        cred = idr_remove(&ctx->personality_idr, id);
7417        if (cred)
7418                put_cred(cred);
7419        return 0;
7420}
7421
7422static void io_ring_exit_work(struct work_struct *work)
7423{
7424        struct io_ring_ctx *ctx;
7425
7426        ctx = container_of(work, struct io_ring_ctx, exit_work);
7427        if (ctx->rings)
7428                io_cqring_overflow_flush(ctx, true);
7429
7430        /*
7431         * If we're doing polled IO and end up having requests being
7432         * submitted async (out-of-line), then completions can come in while
7433         * we're waiting for refs to drop. We need to reap these manually,
7434         * as nobody else will be looking for them.
7435         */
7436        while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)) {
7437                io_iopoll_reap_events(ctx);
7438                if (ctx->rings)
7439                        io_cqring_overflow_flush(ctx, true);
7440        }
7441        io_ring_ctx_free(ctx);
7442}
7443
7444static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
7445{
7446        mutex_lock(&ctx->uring_lock);
7447        percpu_ref_kill(&ctx->refs);
7448        mutex_unlock(&ctx->uring_lock);
7449
7450        io_kill_timeouts(ctx);
7451        io_poll_remove_all(ctx);
7452
7453        if (ctx->io_wq)
7454                io_wq_cancel_all(ctx->io_wq);
7455
7456        io_iopoll_reap_events(ctx);
7457        /* if we failed setting up the ctx, we might not have any rings */
7458        if (ctx->rings)
7459                io_cqring_overflow_flush(ctx, true);
7460        idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
7461
7462        /*
7463         * Do this upfront, so we won't have a grace period where the ring
7464         * is closed but resources aren't reaped yet. This can cause
7465         * spurious failure in setting up a new ring.
7466         */
7467        if (ctx->account_mem)
7468                io_unaccount_mem(ctx->user,
7469                                ring_pages(ctx->sq_entries, ctx->cq_entries));
7470
7471        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
7472        queue_work(system_wq, &ctx->exit_work);
7473}
7474
7475static int io_uring_release(struct inode *inode, struct file *file)
7476{
7477        struct io_ring_ctx *ctx = file->private_data;
7478
7479        file->private_data = NULL;
7480        io_ring_ctx_wait_and_kill(ctx);
7481        return 0;
7482}
7483
7484static bool io_wq_files_match(struct io_wq_work *work, void *data)
7485{
7486        struct files_struct *files = data;
7487
7488        return work->files == files;
7489}
7490
7491static void io_uring_cancel_files(struct io_ring_ctx *ctx,
7492                                  struct files_struct *files)
7493{
7494        if (list_empty_careful(&ctx->inflight_list))
7495                return;
7496
7497        /* cancel all at once, should be faster than doing it one by one */
7498        io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
7499
7500        while (!list_empty_careful(&ctx->inflight_list)) {
7501                struct io_kiocb *cancel_req = NULL, *req;
7502                DEFINE_WAIT(wait);
7503
7504                spin_lock_irq(&ctx->inflight_lock);
7505                list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
7506                        if (req->work.files != files)
7507                                continue;
7508                        /* req is being completed, ignore */
7509                        if (!refcount_inc_not_zero(&req->refs))
7510                                continue;
7511                        cancel_req = req;
7512                        break;
7513                }
7514                if (cancel_req)
7515                        prepare_to_wait(&ctx->inflight_wait, &wait,
7516                                                TASK_UNINTERRUPTIBLE);
7517                spin_unlock_irq(&ctx->inflight_lock);
7518
7519                /* We need to keep going until we don't find a matching req */
7520                if (!cancel_req)
7521                        break;
7522
7523                if (cancel_req->flags & REQ_F_OVERFLOW) {
7524                        spin_lock_irq(&ctx->completion_lock);
7525                        list_del(&cancel_req->list);
7526                        cancel_req->flags &= ~REQ_F_OVERFLOW;
7527                        if (list_empty(&ctx->cq_overflow_list)) {
7528                                clear_bit(0, &ctx->sq_check_overflow);
7529                                clear_bit(0, &ctx->cq_check_overflow);
7530                                ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
7531                        }
7532                        spin_unlock_irq(&ctx->completion_lock);
7533
7534                        WRITE_ONCE(ctx->rings->cq_overflow,
7535                                atomic_inc_return(&ctx->cached_cq_overflow));
7536
7537                        /*
7538                         * Put inflight ref and overflow ref. If that's
7539                         * all we had, then we're done with this request.
7540                         */
7541                        if (refcount_sub_and_test(2, &cancel_req->refs)) {
7542                                io_free_req(cancel_req);
7543                                finish_wait(&ctx->inflight_wait, &wait);
7544                                continue;
7545                        }
7546                } else {
7547                        io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
7548                        io_put_req(cancel_req);
7549                }
7550
7551                schedule();
7552                finish_wait(&ctx->inflight_wait, &wait);
7553        }
7554}
7555
7556static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
7557{
7558        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7559        struct task_struct *task = data;
7560
7561        return req->task == task;
7562}
7563
7564static int io_uring_flush(struct file *file, void *data)
7565{
7566        struct io_ring_ctx *ctx = file->private_data;
7567
7568        io_uring_cancel_files(ctx, data);
7569
7570        /*
7571         * If the task is going away, cancel work it may have pending
7572         */
7573        if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
7574                io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true);
7575
7576        return 0;
7577}
7578
7579static void *io_uring_validate_mmap_request(struct file *file,
7580                                            loff_t pgoff, size_t sz)
7581{
7582        struct io_ring_ctx *ctx = file->private_data;
7583        loff_t offset = pgoff << PAGE_SHIFT;
7584        struct page *page;
7585        void *ptr;
7586
7587        switch (offset) {
7588        case IORING_OFF_SQ_RING:
7589        case IORING_OFF_CQ_RING:
7590                ptr = ctx->rings;
7591                break;
7592        case IORING_OFF_SQES:
7593                ptr = ctx->sq_sqes;
7594                break;
7595        default:
7596                return ERR_PTR(-EINVAL);
7597        }
7598
7599        page = virt_to_head_page(ptr);
7600        if (sz > page_size(page))
7601                return ERR_PTR(-EINVAL);
7602
7603        return ptr;
7604}
7605
7606#ifdef CONFIG_MMU
7607
7608static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7609{
7610        size_t sz = vma->vm_end - vma->vm_start;
7611        unsigned long pfn;
7612        void *ptr;
7613
7614        ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
7615        if (IS_ERR(ptr))
7616                return PTR_ERR(ptr);
7617
7618        pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
7619        return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
7620}
7621
7622#else /* !CONFIG_MMU */
7623
7624static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7625{
7626        return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
7627}
7628
7629static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
7630{
7631        return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
7632}
7633
7634static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
7635        unsigned long addr, unsigned long len,
7636        unsigned long pgoff, unsigned long flags)
7637{
7638        void *ptr;
7639
7640        ptr = io_uring_validate_mmap_request(file, pgoff, len);
7641        if (IS_ERR(ptr))
7642                return PTR_ERR(ptr);
7643
7644        return (unsigned long) ptr;
7645}
7646
7647#endif /* !CONFIG_MMU */
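/*
 * The offsets validated above are what userspace passes to mmap() to map
 * the rings and the SQE array. A minimal sketch (illustrative only, not
 * compiled here), mapping just the SQ ring with @p from io_uring_setup():
 */
#if 0
#include <linux/io_uring.h>
#include <sys/mman.h>

static void *map_sq_ring(int ring_fd, struct io_uring_params *p)
{
	size_t sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);

	return mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}
#endif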
7648
7649SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
7650                u32, min_complete, u32, flags, const sigset_t __user *, sig,
7651                size_t, sigsz)
7652{
7653        struct io_ring_ctx *ctx;
7654        long ret = -EBADF;
7655        int submitted = 0;
7656        struct fd f;
7657
7658        if (current->task_works)
7659                task_work_run();
7660
7661        if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
7662                return -EINVAL;
7663
7664        f = fdget(fd);
7665        if (!f.file)
7666                return -EBADF;
7667
7668        ret = -EOPNOTSUPP;
7669        if (f.file->f_op != &io_uring_fops)
7670                goto out_fput;
7671
7672        ret = -ENXIO;
7673        ctx = f.file->private_data;
7674        if (!percpu_ref_tryget(&ctx->refs))
7675                goto out_fput;
7676
7677        /*
7678         * For SQ polling, the thread will do all submissions and completions.
7679         * Just return the requested submit count, and wake the thread if
7680         * we were asked to.
7681         */
7682        ret = 0;
7683        if (ctx->flags & IORING_SETUP_SQPOLL) {
7684                if (!list_empty_careful(&ctx->cq_overflow_list))
7685                        io_cqring_overflow_flush(ctx, false);
7686                if (flags & IORING_ENTER_SQ_WAKEUP)
7687                        wake_up(&ctx->sqo_wait);
7688                submitted = to_submit;
7689        } else if (to_submit) {
7690                mutex_lock(&ctx->uring_lock);
7691                submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
7692                mutex_unlock(&ctx->uring_lock);
7693
7694                if (submitted != to_submit)
7695                        goto out;
7696        }
7697        if (flags & IORING_ENTER_GETEVENTS) {
7698                unsigned nr_events = 0;
7699
7700                min_complete = min(min_complete, ctx->cq_entries);
7701
7702                /*
7703                 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
7704                 * space applications don't need to poll for completion events
7705                 * again; they can rely on io_sq_thread to do the polling
7706                 * work, which reduces cpu usage and uring_lock contention.
7707                 */
7708                if (ctx->flags & IORING_SETUP_IOPOLL &&
7709                    !(ctx->flags & IORING_SETUP_SQPOLL)) {
7710                        ret = io_iopoll_check(ctx, &nr_events, min_complete);
7711                } else {
7712                        ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
7713                }
7714        }
7715
7716out:
7717        percpu_ref_put(&ctx->refs);
7718out_fput:
7719        fdput(f);
7720        return submitted ? submitted : ret;
7721}
7722
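/*
 * Illustrative userspace sketch (not kernel code; 'ring_fd' and 'to_submit'
 * are placeholders, error handling omitted, raw syscall(2) used since libc
 * may not provide a wrapper): submit the sqes queued in the SQ ring and wait
 * for at least one completion.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * On success the return value is the number of sqes consumed, which may be
 * smaller than to_submit. With IORING_SETUP_SQPOLL the kernel thread does
 * the actual submission, so the call mainly serves to wake that thread
 * (IORING_ENTER_SQ_WAKEUP) or to wait for completions.
 */
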
7723#ifdef CONFIG_PROC_FS
7724static int io_uring_show_cred(int id, void *p, void *data)
7725{
7726        const struct cred *cred = p;
7727        struct seq_file *m = data;
7728        struct user_namespace *uns = seq_user_ns(m);
7729        struct group_info *gi;
7730        kernel_cap_t cap;
7731        unsigned __capi;
7732        int g;
7733
7734        seq_printf(m, "%5d\n", id);
7735        seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
7736        seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
7737        seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
7738        seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
7739        seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
7740        seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
7741        seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
7742        seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
7743        seq_puts(m, "\n\tGroups:\t");
7744        gi = cred->group_info;
7745        for (g = 0; g < gi->ngroups; g++) {
7746                seq_put_decimal_ull(m, g ? " " : "",
7747                                        from_kgid_munged(uns, gi->gid[g]));
7748        }
7749        seq_puts(m, "\n\tCapEff:\t");
7750        cap = cred->cap_effective;
7751        CAP_FOR_EACH_U32(__capi)
7752                seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
7753        seq_putc(m, '\n');
7754        return 0;
7755}
7756
7757static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
7758{
7759        int i;
7760
7761        mutex_lock(&ctx->uring_lock);
7762        seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
7763        for (i = 0; i < ctx->nr_user_files; i++) {
7764                struct fixed_file_table *table;
7765                struct file *f;
7766
7767                table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7768                f = table->files[i & IORING_FILE_TABLE_MASK];
7769                if (f)
7770                        seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
7771                else
7772                        seq_printf(m, "%5u: <none>\n", i);
7773        }
7774        seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
7775        for (i = 0; i < ctx->nr_user_bufs; i++) {
7776                struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
7777
7778                seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
7779                                                (unsigned int) buf->len);
7780        }
7781        if (!idr_is_empty(&ctx->personality_idr)) {
7782                seq_printf(m, "Personalities:\n");
7783                idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
7784        }
7785        seq_printf(m, "PollList:\n");
7786        spin_lock_irq(&ctx->completion_lock);
7787        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7788                struct hlist_head *list = &ctx->cancel_hash[i];
7789                struct io_kiocb *req;
7790
7791                hlist_for_each_entry(req, list, hash_node)
7792                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
7793                                        req->task->task_works != NULL);
7794        }
7795        spin_unlock_irq(&ctx->completion_lock);
7796        mutex_unlock(&ctx->uring_lock);
7797}
7798
7799static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
7800{
7801        struct io_ring_ctx *ctx = f->private_data;
7802
7803        if (percpu_ref_tryget(&ctx->refs)) {
7804                __io_uring_show_fdinfo(ctx, m);
7805                percpu_ref_put(&ctx->refs);
7806        }
7807}
7808#endif
7809
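/*
 * The information above is exposed through /proc/<pid>/fdinfo/<ring fd>.
 * A rough sketch of the resulting layout (the values shown are invented for
 * illustration only):
 *
 *	UserFiles:	2
 *	    0: file_a
 *	    1: <none>
 *	UserBufs:	1
 *	    0: 0x7f1000000000/4096
 *	PollList:
 *	  op=6, task_works=0
 */
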
7810static const struct file_operations io_uring_fops = {
7811        .release        = io_uring_release,
7812        .flush          = io_uring_flush,
7813        .mmap           = io_uring_mmap,
7814#ifndef CONFIG_MMU
7815        .get_unmapped_area = io_uring_nommu_get_unmapped_area,
7816        .mmap_capabilities = io_uring_nommu_mmap_capabilities,
7817#endif
7818        .poll           = io_uring_poll,
7819        .fasync         = io_uring_fasync,
7820#ifdef CONFIG_PROC_FS
7821        .show_fdinfo    = io_uring_show_fdinfo,
7822#endif
7823};
7824
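/*
 * Allocate the shared SQ/CQ ring memory and the sqe array for this context,
 * and mirror the ring sizes and masks into the ctx for fast access. The
 * entry counts in @p must already be rounded up to a power of two.
 */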
7825static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
7826                                  struct io_uring_params *p)
7827{
7828        struct io_rings *rings;
7829        size_t size, sq_array_offset;
7830
7831        size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
7832        if (size == SIZE_MAX)
7833                return -EOVERFLOW;
7834
7835        rings = io_mem_alloc(size);
7836        if (!rings)
7837                return -ENOMEM;
7838
7839        ctx->rings = rings;
7840        ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
7841        rings->sq_ring_mask = p->sq_entries - 1;
7842        rings->cq_ring_mask = p->cq_entries - 1;
7843        rings->sq_ring_entries = p->sq_entries;
7844        rings->cq_ring_entries = p->cq_entries;
7845        ctx->sq_mask = rings->sq_ring_mask;
7846        ctx->cq_mask = rings->cq_ring_mask;
7847        ctx->sq_entries = rings->sq_ring_entries;
7848        ctx->cq_entries = rings->cq_ring_entries;
7849
7850        size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
7851        if (size == SIZE_MAX) {
7852                io_mem_free(ctx->rings);
7853                ctx->rings = NULL;
7854                return -EOVERFLOW;
7855        }
7856
7857        ctx->sq_sqes = io_mem_alloc(size);
7858        if (!ctx->sq_sqes) {
7859                io_mem_free(ctx->rings);
7860                ctx->rings = NULL;
7861                return -ENOMEM;
7862        }
7863
7864        return 0;
7865}
7866
7867/*
7868 * Allocate an anonymous fd; this is what constitutes the application-
7869 * visible backing of an io_uring instance. The application mmaps this
7870 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
7871 * we have to tie this fd to a socket for file garbage collection purposes.
7872 */
7873static int io_uring_get_fd(struct io_ring_ctx *ctx)
7874{
7875        struct file *file;
7876        int ret;
7877
7878#if defined(CONFIG_UNIX)
7879        ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
7880                                &ctx->ring_sock);
7881        if (ret)
7882                return ret;
7883#endif
7884
7885        ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
7886        if (ret < 0)
7887                goto err;
7888
7889        file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
7890                                        O_RDWR | O_CLOEXEC);
7891        if (IS_ERR(file)) {
7892                put_unused_fd(ret);
7893                ret = PTR_ERR(file);
7894                goto err;
7895        }
7896
7897#if defined(CONFIG_UNIX)
7898        ctx->ring_sock->file = file;
7899#endif
7900        fd_install(ret, file);
7901        return ret;
7902err:
7903#if defined(CONFIG_UNIX)
7904        sock_release(ctx->ring_sock);
7905        ctx->ring_sock = NULL;
7906#endif
7907        return ret;
7908}
7909
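/*
 * Core of io_uring_setup(): size and allocate the rings, set up the ctx and
 * start the sq offload/io-wq machinery, report the ring offsets and
 * supported features back in @p, and finally install the ring fd.
 */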
7910static int io_uring_create(unsigned entries, struct io_uring_params *p,
7911                           struct io_uring_params __user *params)
7912{
7913        struct user_struct *user = NULL;
7914        struct io_ring_ctx *ctx;
7915        bool account_mem;
7916        int ret;
7917
7918        if (!entries)
7919                return -EINVAL;
7920        if (entries > IORING_MAX_ENTRIES) {
7921                if (!(p->flags & IORING_SETUP_CLAMP))
7922                        return -EINVAL;
7923                entries = IORING_MAX_ENTRIES;
7924        }
7925
7926        /*
7927         * Use twice as many entries for the CQ ring. It's possible for the
7928         * application to drive a higher depth than the size of the SQ ring,
7929         * since the sqes are only used at submission time. This allows for
7930         * some flexibility in overcommitting. If the application has
7931         * set IORING_SETUP_CQSIZE, it will have passed in the desired number
7932         * of CQ ring entries manually.
7933         */
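        /*
         * For example: a request for 100 entries yields sq_entries == 128
         * (rounded up to a power of two) and, without IORING_SETUP_CQSIZE,
         * cq_entries == 256.
         */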
7934        p->sq_entries = roundup_pow_of_two(entries);
7935        if (p->flags & IORING_SETUP_CQSIZE) {
7936                /*
7937                 * If IORING_SETUP_CQSIZE is set, we do the same roundup
7938                 * to a power-of-two if it isn't one already. Beyond requiring
7939                 * cq_entries >= sq_entries, we do NOT impose any cq vs sq sizing.
7940                 */
7941                if (p->cq_entries < p->sq_entries)
7942                        return -EINVAL;
7943                if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
7944                        if (!(p->flags & IORING_SETUP_CLAMP))
7945                                return -EINVAL;
7946                        p->cq_entries = IORING_MAX_CQ_ENTRIES;
7947                }
7948                p->cq_entries = roundup_pow_of_two(p->cq_entries);
7949        } else {
7950                p->cq_entries = 2 * p->sq_entries;
7951        }
7952
7953        user = get_uid(current_user());
7954        account_mem = !capable(CAP_IPC_LOCK);
7955
7956        if (account_mem) {
7957                ret = io_account_mem(user,
7958                                ring_pages(p->sq_entries, p->cq_entries));
7959                if (ret) {
7960                        free_uid(user);
7961                        return ret;
7962                }
7963        }
7964
7965        ctx = io_ring_ctx_alloc(p);
7966        if (!ctx) {
7967                if (account_mem)
7968                        io_unaccount_mem(user, ring_pages(p->sq_entries,
7969                                                                p->cq_entries));
7970                free_uid(user);
7971                return -ENOMEM;
7972        }
7973        ctx->compat = in_compat_syscall();
7974        ctx->account_mem = account_mem;
7975        ctx->user = user;
7976        ctx->creds = get_current_cred();
7977
7978        ret = io_allocate_scq_urings(ctx, p);
7979        if (ret)
7980                goto err;
7981
7982        ret = io_sq_offload_start(ctx, p);
7983        if (ret)
7984                goto err;
7985
7986        memset(&p->sq_off, 0, sizeof(p->sq_off));
7987        p->sq_off.head = offsetof(struct io_rings, sq.head);
7988        p->sq_off.tail = offsetof(struct io_rings, sq.tail);
7989        p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
7990        p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
7991        p->sq_off.flags = offsetof(struct io_rings, sq_flags);
7992        p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
7993        p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
7994
7995        memset(&p->cq_off, 0, sizeof(p->cq_off));
7996        p->cq_off.head = offsetof(struct io_rings, cq.head);
7997        p->cq_off.tail = offsetof(struct io_rings, cq.tail);
7998        p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
7999        p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
8000        p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
8001        p->cq_off.cqes = offsetof(struct io_rings, cqes);
8002        p->cq_off.flags = offsetof(struct io_rings, cq_flags);
8003
8004        p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
8005                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
8006                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
8007
8008        if (copy_to_user(params, p, sizeof(*p))) {
8009                ret = -EFAULT;
8010                goto err;
8011        }
8012        /*
8013         * Install ring fd as the very last thing, so we don't risk someone
8014         * having closed it before we finish setup
8015         */
8016        ret = io_uring_get_fd(ctx);
8017        if (ret < 0)
8018                goto err;
8019
8020        trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
8021        return ret;
8022err:
8023        io_ring_ctx_wait_and_kill(ctx);
8024        return ret;
8025}
8026
8027/*
8028 * Sets up an io_uring context, and returns the fd. The application asks
8029 * for a ring size; we return the actual sq/cq ring sizes (among other
8030 * things) in the params structure passed in.
8031 */
8032static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
8033{
8034        struct io_uring_params p;
8035        int i;
8036
8037        if (copy_from_user(&p, params, sizeof(p)))
8038                return -EFAULT;
8039        for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
8040                if (p.resv[i])
8041                        return -EINVAL;
8042        }
8043
8044        if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8045                        IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
8046                        IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
8047                return -EINVAL;
8048
8049        return io_uring_create(entries, &p, params);
8050}
8051
8052SYSCALL_DEFINE2(io_uring_setup, u32, entries,
8053                struct io_uring_params __user *, params)
8054{
8055        return io_uring_setup(entries, params);
8056}
8057
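/*
 * Illustrative userspace sketch (not kernel code, error handling omitted):
 * ask for an 8-entry ring and read back the actual sizes and mmap offsets.
 * The params structure must be zeroed first, since any set reserved field
 * makes io_uring_setup() return -EINVAL.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	struct io_uring_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 *
 * On return, p.sq_entries, p.cq_entries, p.sq_off and p.cq_off describe the
 * rings to be mmap'ed (see io_uring_mmap() and the mmap sketch above), and
 * p.features reports the IORING_FEAT_* flags this kernel supports.
 */
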
8058static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
8059{
8060        struct io_uring_probe *p;
8061        size_t size;
8062        int i, ret;
8063
8064        size = struct_size(p, ops, nr_args);
8065        if (size == SIZE_MAX)
8066                return -EOVERFLOW;
8067        p = kzalloc(size, GFP_KERNEL);
8068        if (!p)
8069                return -ENOMEM;
8070
8071        ret = -EFAULT;
8072        if (copy_from_user(p, arg, size))
8073                goto out;
8074        ret = -EINVAL;
8075        if (memchr_inv(p, 0, size))
8076                goto out;
8077
8078        p->last_op = IORING_OP_LAST - 1;
8079        if (nr_args > IORING_OP_LAST)
8080                nr_args = IORING_OP_LAST;
8081
8082        for (i = 0; i < nr_args; i++) {
8083                p->ops[i].op = i;
8084                if (!io_op_defs[i].not_supported)
8085                        p->ops[i].flags = IO_URING_OP_SUPPORTED;
8086        }
8087        p->ops_len = i;
8088
8089        ret = 0;
8090        if (copy_to_user(arg, p, size))
8091                ret = -EFAULT;
8092out:
8093        kfree(p);
8094        return ret;
8095}
8096
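/*
 * Stash the credentials of the calling task under a new personality id.
 * Later sqes can reference that id via sqe->personality to have the request
 * issued with those credentials.
 */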
8097static int io_register_personality(struct io_ring_ctx *ctx)
8098{
8099        const struct cred *creds = get_current_cred();
8100        int id;
8101
8102        id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
8103                                USHRT_MAX, GFP_KERNEL);
8104        if (id < 0)
8105                put_cred(creds);
8106        return id;
8107}
8108
8109static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
8110{
8111        const struct cred *old_creds;
8112
8113        old_creds = idr_remove(&ctx->personality_idr, id);
8114        if (old_creds) {
8115                put_cred(old_creds);
8116                return 0;
8117        }
8118
8119        return -EINVAL;
8120}
8121
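/*
 * Most registration opcodes require the ring to be fully quiesced (the
 * percpu ref killed and all in-flight references drained) before they can
 * run; the opcodes listed here are safe to execute against a live ring.
 */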
8122static bool io_register_op_must_quiesce(int op)
8123{
8124        switch (op) {
8125        case IORING_UNREGISTER_FILES:
8126        case IORING_REGISTER_FILES_UPDATE:
8127        case IORING_REGISTER_PROBE:
8128        case IORING_REGISTER_PERSONALITY:
8129        case IORING_UNREGISTER_PERSONALITY:
8130                return false;
8131        default:
8132                return true;
8133        }
8134}
8135
8136static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
8137                               void __user *arg, unsigned nr_args)
8138        __releases(ctx->uring_lock)
8139        __acquires(ctx->uring_lock)
8140{
8141        int ret;
8142
8143        /*
8144         * We're inside the ring mutex; if the ref is already dying, then
8145         * someone else killed the ctx or is already going through
8146         * io_uring_register().
8147         */
8148        if (percpu_ref_is_dying(&ctx->refs))
8149                return -ENXIO;
8150
8151        if (io_register_op_must_quiesce(opcode)) {
8152                percpu_ref_kill(&ctx->refs);
8153
8154                /*
8155                 * Drop uring mutex before waiting for references to exit. If
8156                 * another thread is currently inside io_uring_enter() it might
8157                 * need to grab the uring_lock to make progress. If we hold it
8158                 * here across the drain wait, then we can deadlock. It's safe
8159                 * to drop the mutex here, since no new references will come in
8160                 * after we've killed the percpu ref.
8161                 */
8162                mutex_unlock(&ctx->uring_lock);
8163                ret = wait_for_completion_interruptible(&ctx->ref_comp);
8164                mutex_lock(&ctx->uring_lock);
8165                if (ret) {
8166                        percpu_ref_resurrect(&ctx->refs);
8167                        ret = -EINTR;
8168                        goto out;
8169                }
8170        }
8171
8172        switch (opcode) {
8173        case IORING_REGISTER_BUFFERS:
8174                ret = io_sqe_buffer_register(ctx, arg, nr_args);
8175                break;
8176        case IORING_UNREGISTER_BUFFERS:
8177                ret = -EINVAL;
8178                if (arg || nr_args)
8179                        break;
8180                ret = io_sqe_buffer_unregister(ctx);
8181                break;
8182        case IORING_REGISTER_FILES:
8183                ret = io_sqe_files_register(ctx, arg, nr_args);
8184                break;
8185        case IORING_UNREGISTER_FILES:
8186                ret = -EINVAL;
8187                if (arg || nr_args)
8188                        break;
8189                ret = io_sqe_files_unregister(ctx);
8190                break;
8191        case IORING_REGISTER_FILES_UPDATE:
8192                ret = io_sqe_files_update(ctx, arg, nr_args);
8193                break;
8194        case IORING_REGISTER_EVENTFD:
8195        case IORING_REGISTER_EVENTFD_ASYNC:
8196                ret = -EINVAL;
8197                if (nr_args != 1)
8198                        break;
8199                ret = io_eventfd_register(ctx, arg);
8200                if (ret)
8201                        break;
8202                if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
8203                        ctx->eventfd_async = 1;
8204                else
8205                        ctx->eventfd_async = 0;
8206                break;
8207        case IORING_UNREGISTER_EVENTFD:
8208                ret = -EINVAL;
8209                if (arg || nr_args)
8210                        break;
8211                ret = io_eventfd_unregister(ctx);
8212                break;
8213        case IORING_REGISTER_PROBE:
8214                ret = -EINVAL;
8215                if (!arg || nr_args > 256)
8216                        break;
8217                ret = io_probe(ctx, arg, nr_args);
8218                break;
8219        case IORING_REGISTER_PERSONALITY:
8220                ret = -EINVAL;
8221                if (arg || nr_args)
8222                        break;
8223                ret = io_register_personality(ctx);
8224                break;
8225        case IORING_UNREGISTER_PERSONALITY:
8226                ret = -EINVAL;
8227                if (arg)
8228                        break;
8229                ret = io_unregister_personality(ctx, nr_args);
8230                break;
8231        default:
8232                ret = -EINVAL;
8233                break;
8234        }
8235
8236        if (io_register_op_must_quiesce(opcode)) {
8237                /* bring the ctx back to life */
8238                percpu_ref_reinit(&ctx->refs);
8239out:
8240                reinit_completion(&ctx->ref_comp);
8241        }
8242        return ret;
8243}
8244
8245SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
8246                void __user *, arg, unsigned int, nr_args)
8247{
8248        struct io_ring_ctx *ctx;
8249        long ret = -EBADF;
8250        struct fd f;
8251
8252        f = fdget(fd);
8253        if (!f.file)
8254                return -EBADF;
8255
8256        ret = -EOPNOTSUPP;
8257        if (f.file->f_op != &io_uring_fops)
8258                goto out_fput;
8259
8260        ctx = f.file->private_data;
8261
8262        mutex_lock(&ctx->uring_lock);
8263        ret = __io_uring_register(ctx, opcode, arg, nr_args);
8264        mutex_unlock(&ctx->uring_lock);
8265        trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
8266                                                        ctx->cq_ev_fd != NULL, ret);
8267out_fput:
8268        fdput(f);
8269        return ret;
8270}
8271
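/*
 * Illustrative userspace sketch (not kernel code; 'ring_fd' is a
 * placeholder, error handling omitted): use IORING_REGISTER_PROBE to ask
 * which opcodes this kernel implements. The probe buffer must be zeroed,
 * as io_probe() rejects anything else.
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, len);
 *
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_PROBE, probe, IORING_OP_LAST);
 *
 * On success, (probe->ops[i].flags & IO_URING_OP_SUPPORTED) tells whether
 * opcode i is implemented; probe->last_op is the highest opcode the kernel
 * knows about.
 */
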
8272static int __init io_uring_init(void)
8273{
8274#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
8275        BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
8276        BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
8277} while (0)
8278
8279#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
8280        __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
8281        BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
8282        BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
8283        BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
8284        BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
8285        BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
8286        BUILD_BUG_SQE_ELEM(8,  __u64,  off);
8287        BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
8288        BUILD_BUG_SQE_ELEM(16, __u64,  addr);
8289        BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
8290        BUILD_BUG_SQE_ELEM(24, __u32,  len);
8291        BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
8292        BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
8293        BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
8294        BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
8295        BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
8296        BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
8297        BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
8298        BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
8299        BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
8300        BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
8301        BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
8302        BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
8303        BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
8304        BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
8305        BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
8306        BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
8307        BUILD_BUG_SQE_ELEM(42, __u16,  personality);
8308        BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
8309
8310        BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
8311        BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
8312        req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
8313        return 0;
8314}
8315__initcall(io_uring_init);
8316