/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *	Copyright 2018 Christoph Hellwig.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "internal.h"

#define KIOCB_KEY		0

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head		rcu;
	unsigned		nr;
	struct kioctx __rcu	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion	comp;
	atomic_t		count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_work		free_rwork;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

struct fsync_iocb {
	struct work_struct	work;
	struct file		*file;
	bool			datasync;
};

struct aio_kiocb {
	union {
		struct kiocb		rw;
		struct fsync_iocb	fsync;
	};

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
	__u64			ki_user_data;	/* user's data */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
				O_RDWR, &aio_ring_fops);
	if (IS_ERR(file))
		iput(inode);
	return file;
}

static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
					   AIO_RING_MAGIC);

	if (!IS_ERR(root))
		root->d_sb->s_iflags |= SB_I_NOEXEC;
	return root;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	return 0;
}
__initcall(aio_setup);
253
254static void put_aio_ring_file(struct kioctx *ctx)
255{
256 struct file *aio_ring_file = ctx->aio_ring_file;
257 struct address_space *i_mapping;
258
259 if (aio_ring_file) {
260 truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
263 i_mapping = aio_ring_file->f_mapping;
264 spin_lock(&i_mapping->private_lock);
265 i_mapping->private_data = NULL;
266 ctx->aio_ring_file = NULL;
267 spin_unlock(&i_mapping->private_lock);
268
269 fput(aio_ring_file);
270 }
271}
272
273static void aio_free_ring(struct kioctx *ctx)
274{
275 int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
280 put_aio_ring_file(ctx);
281
282 for (i = 0; i < ctx->nr_pages; i++) {
283 struct page *page;
284 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
285 page_count(ctx->ring_pages[i]));
286 page = ctx->ring_pages[i];
287 if (!page)
288 continue;
289 ctx->ring_pages[i] = NULL;
290 put_page(page);
291 }
292
293 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
294 kfree(ctx->ring_pages);
295 ctx->ring_pages = NULL;
296 }
297}
298
299static int aio_ring_mremap(struct vm_area_struct *vma)
300{
301 struct file *file = vma->vm_file;
302 struct mm_struct *mm = vma->vm_mm;
303 struct kioctx_table *table;
304 int i, res = -EINVAL;
305
306 spin_lock(&mm->ioctx_lock);
307 rcu_read_lock();
308 table = rcu_dereference(mm->ioctx_table);
309 for (i = 0; i < table->nr; i++) {
310 struct kioctx *ctx;
311
312 ctx = rcu_dereference(table->table[i]);
313 if (ctx && ctx->aio_ring_file == file) {
314 if (!atomic_read(&ctx->dead)) {
315 ctx->user_id = ctx->mmap_base = vma->vm_start;
316 res = 0;
317 }
318 break;
319 }
320 }
321
322 rcu_read_unlock();
323 spin_unlock(&mm->ioctx_lock);
324 return res;
325}
326
327static const struct vm_operations_struct aio_ring_vm_ops = {
328 .mremap = aio_ring_mremap,
329#if IS_ENABLED(CONFIG_MMU)
330 .fault = filemap_fault,
331 .map_pages = filemap_map_pages,
332 .page_mkwrite = filemap_page_mkwrite,
333#endif
334};
335
336static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
337{
338 vma->vm_flags |= VM_DONTEXPAND;
339 vma->vm_ops = &aio_ring_vm_ops;
340 return 0;
341}
342
343static const struct file_operations aio_ring_fops = {
344 .mmap = aio_ring_mmap,
345};
346
347#if IS_ENABLED(CONFIG_MIGRATION)
348static int aio_migratepage(struct address_space *mapping, struct page *new,
349 struct page *old, enum migrate_mode mode)
350{
351 struct kioctx *ctx;
352 unsigned long flags;
353 pgoff_t idx;
354 int rc;

	/*
	 * We cannot support the _NO_COPY case here, because the copy needs
	 * to be done under ctx->completion_lock, which does not work with
	 * the migration workflow of MIGRATE_SYNC_NO_COPY.
	 */
361 if (mode == MIGRATE_SYNC_NO_COPY)
362 return -EINVAL;
363
364 rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown. */
367 spin_lock(&mapping->private_lock);
368 ctx = mapping->private_data;
369 if (!ctx) {
370 rc = -EINVAL;
371 goto out;
372 }

	/* The ring_lock mutex:  it prevents aio_read_events_ring() from
	 * writing to the ring's head, and prevents page migration from
	 * mucking with a partially initialized kioctx.
	 */
378 if (!mutex_trylock(&ctx->ring_lock)) {
379 rc = -EAGAIN;
380 goto out;
381 }
382
383 idx = old->index;
384 if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
386 if (ctx->ring_pages[idx] != old)
387 rc = -EAGAIN;
388 } else
389 rc = -EINVAL;
390
391 if (rc != 0)
392 goto out_unlock;

	/* Writeback must be complete */
395 BUG_ON(PageWriteback(old));
396 get_page(new);
397
398 rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
399 if (rc != MIGRATEPAGE_SUCCESS) {
400 put_page(new);
401 goto out_unlock;
402 }

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new.  This prevents new
	 * events from being lost.
	 */
408 spin_lock_irqsave(&ctx->completion_lock, flags);
409 migrate_page_copy(new, old);
410 BUG_ON(ctx->ring_pages[idx] != old);
411 ctx->ring_pages[idx] = new;
412 spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
415 put_page(old);
416
417out_unlock:
418 mutex_unlock(&ctx->ring_lock);
419out:
420 spin_unlock(&mapping->private_lock);
421 return rc;
422}
423#endif
424
425static const struct address_space_operations aio_ctx_aops = {
426 .set_page_dirty = __set_page_dirty_no_writeback,
427#if IS_ENABLED(CONFIG_MIGRATION)
428 .migratepage = aio_migratepage,
429#endif
430};
431
432static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
433{
434 struct aio_ring *ring;
435 struct mm_struct *mm = current->mm;
436 unsigned long size, unused;
437 int nr_pages;
438 int i;
439 struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
442 nr_events += 2;
443
444 size = sizeof(struct aio_ring);
445 size += sizeof(struct io_event) * nr_events;
446
447 nr_pages = PFN_UP(size);
448 if (nr_pages < 0)
449 return -EINVAL;
450
451 file = aio_private_file(ctx, nr_pages);
452 if (IS_ERR(file)) {
453 ctx->aio_ring_file = NULL;
454 return -ENOMEM;
455 }
456
457 ctx->aio_ring_file = file;
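	/* Recompute nr_events so the ring uses all of the allocated pages. */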
458 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
459 / sizeof(struct io_event);
460
461 ctx->ring_pages = ctx->internal_pages;
462 if (nr_pages > AIO_RING_PAGES) {
463 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
464 GFP_KERNEL);
465 if (!ctx->ring_pages) {
466 put_aio_ring_file(ctx);
467 return -ENOMEM;
468 }
469 }
470
471 for (i = 0; i < nr_pages; i++) {
472 struct page *page;
473 page = find_or_create_page(file->f_mapping,
474 i, GFP_HIGHUSER | __GFP_ZERO);
475 if (!page)
476 break;
477 pr_debug("pid(%d) page[%d]->count=%d\n",
478 current->pid, i, page_count(page));
479 SetPageUptodate(page);
480 unlock_page(page);
481
482 ctx->ring_pages[i] = page;
483 }
484 ctx->nr_pages = i;
485
486 if (unlikely(i != nr_pages)) {
487 aio_free_ring(ctx);
488 return -ENOMEM;
489 }
490
491 ctx->mmap_size = nr_pages * PAGE_SIZE;
492 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
493
494 if (down_write_killable(&mm->mmap_sem)) {
495 ctx->mmap_size = 0;
496 aio_free_ring(ctx);
497 return -EINTR;
498 }
499
500 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
501 PROT_READ | PROT_WRITE,
502 MAP_SHARED, 0, &unused, NULL);
503 up_write(&mm->mmap_sem);
504 if (IS_ERR((void *)ctx->mmap_base)) {
505 ctx->mmap_size = 0;
506 aio_free_ring(ctx);
507 return -ENOMEM;
508 }
509
510 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
511
512 ctx->user_id = ctx->mmap_base;
513 ctx->nr_events = nr_events;
514
515 ring = kmap_atomic(ctx->ring_pages[0]);
516 ring->nr = nr_events;
517 ring->id = ~0U;
518 ring->head = ring->tail = 0;
519 ring->magic = AIO_RING_MAGIC;
520 ring->compat_features = AIO_RING_COMPAT_FEATURES;
521 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
522 ring->header_length = sizeof(struct aio_ring);
523 kunmap_atomic(ring);
524 flush_dcache_page(ctx->ring_pages[0]);
525
526 return 0;
527}
528
529#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
530#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
531#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
532
533void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
534{
535 struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
536 struct kioctx *ctx = req->ki_ctx;
537 unsigned long flags;
538
539 if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
540 return;
541
542 spin_lock_irqsave(&ctx->ctx_lock, flags);
543 list_add_tail(&req->ki_list, &ctx->active_reqs);
544 req->ki_cancel = cancel;
545 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
546}
547EXPORT_SYMBOL(kiocb_set_cancel_fn);

/*
 * free_ioctx() is RCU delayed to synchronize against the RCU protected
 * lookup_ioctx(), and it also runs from a workqueue because freeing the
 * ring can sleep; it is scheduled from free_ioctx_reqs().
 */
554static void free_ioctx(struct work_struct *work)
555{
556 struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
557 free_rwork);
558 pr_debug("freeing %p\n", ctx);
559
560 aio_free_ring(ctx);
561 free_percpu(ctx->cpu);
562 percpu_ref_exit(&ctx->reqs);
563 percpu_ref_exit(&ctx->users);
564 kmem_cache_free(kioctx_cachep, ctx);
565}
566
567static void free_ioctx_reqs(struct percpu_ref *ref)
568{
569 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests left. */
572 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
573 complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
576 INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
577 queue_rcu_work(system_wq, &ctx->free_rwork);
578}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
585static void free_ioctx_users(struct percpu_ref *ref)
586{
587 struct kioctx *ctx = container_of(ref, struct kioctx, users);
588 struct aio_kiocb *req;
589
590 spin_lock_irq(&ctx->ctx_lock);
591
592 while (!list_empty(&ctx->active_reqs)) {
593 req = list_first_entry(&ctx->active_reqs,
594 struct aio_kiocb, ki_list);
595 req->ki_cancel(&req->rw);
596 list_del_init(&req->ki_list);
597 }
598
599 spin_unlock_irq(&ctx->ctx_lock);

	/*
	 * Kill ctx->reqs and drop the reference taken in ioctx_alloc();
	 * free_ioctx_reqs() will run once all in-flight requests complete.
	 */
601 percpu_ref_kill(&ctx->reqs);
602 percpu_ref_put(&ctx->reqs);
603}
604
605static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
606{
607 unsigned i, new_nr;
608 struct kioctx_table *table, *old;
609 struct aio_ring *ring;
610
611 spin_lock(&mm->ioctx_lock);
612 table = rcu_dereference_raw(mm->ioctx_table);
613
614 while (1) {
615 if (table)
616 for (i = 0; i < table->nr; i++)
617 if (!rcu_access_pointer(table->table[i])) {
618 ctx->id = i;
619 rcu_assign_pointer(table->table[i], ctx);
620 spin_unlock(&mm->ioctx_lock);

				/* While kioctx setup is in progress,
				 * we are protected from page migration
				 * changing ring_pages by ->ring_lock.
				 */
626 ring = kmap_atomic(ctx->ring_pages[0]);
627 ring->id = ctx->id;
628 kunmap_atomic(ring);
629 return 0;
630 }
631
632 new_nr = (table ? table->nr : 1) * 4;
633 spin_unlock(&mm->ioctx_lock);
634
635 table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
636 new_nr, GFP_KERNEL);
637 if (!table)
638 return -ENOMEM;
639
640 table->nr = new_nr;
641
642 spin_lock(&mm->ioctx_lock);
643 old = rcu_dereference_raw(mm->ioctx_table);
644
645 if (!old) {
646 rcu_assign_pointer(mm->ioctx_table, table);
647 } else if (table->nr > old->nr) {
648 memcpy(table->table, old->table,
649 old->nr * sizeof(struct kioctx *));
650
651 rcu_assign_pointer(mm->ioctx_table, table);
652 kfree_rcu(old, rcu);
653 } else {
654 kfree(table);
655 table = old;
656 }
657 }
658}
659
660static void aio_nr_sub(unsigned nr)
661{
662 spin_lock(&aio_nr_lock);
663 if (WARN_ON(aio_nr - nr > aio_nr))
664 aio_nr = 0;
665 else
666 aio_nr -= nr;
667 spin_unlock(&aio_nr_lock);
668}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
673static struct kioctx *ioctx_alloc(unsigned nr_events)
674{
675 struct mm_struct *mm = current->mm;
676 struct kioctx *ctx;
677 int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
683 unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
694 nr_events = max(nr_events, num_possible_cpus() * 4);
695 nr_events *= 2;

	/* Prevent overflows */
698 if (nr_events > (0x10000000U / sizeof(struct io_event))) {
699 pr_debug("ENOMEM: nr_events too high\n");
700 return ERR_PTR(-EINVAL);
701 }
702
703 if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
704 return ERR_PTR(-EAGAIN);
705
706 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
707 if (!ctx)
708 return ERR_PTR(-ENOMEM);
709
710 ctx->max_reqs = max_reqs;
711
712 spin_lock_init(&ctx->ctx_lock);
713 spin_lock_init(&ctx->completion_lock);
714 mutex_init(&ctx->ring_lock);

	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
717 mutex_lock(&ctx->ring_lock);
718 init_waitqueue_head(&ctx->wait);
719
720 INIT_LIST_HEAD(&ctx->active_reqs);
721
722 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
723 goto err;
724
725 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
726 goto err;
727
728 ctx->cpu = alloc_percpu(struct kioctx_cpu);
729 if (!ctx->cpu)
730 goto err;
731
732 err = aio_setup_ring(ctx, nr_events);
733 if (err < 0)
734 goto err;
735
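	/* One slot is reserved so a full ring can be told apart from an empty one. */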
736 atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
737 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
738 if (ctx->req_batch < 1)
739 ctx->req_batch = 1;

	/* limit the number of system wide aios */
742 spin_lock(&aio_nr_lock);
743 if (aio_nr + ctx->max_reqs > aio_max_nr ||
744 aio_nr + ctx->max_reqs < aio_nr) {
745 spin_unlock(&aio_nr_lock);
746 err = -EAGAIN;
747 goto err_ctx;
748 }
749 aio_nr += ctx->max_reqs;
750 spin_unlock(&aio_nr_lock);
751
752 percpu_ref_get(&ctx->users);
753 percpu_ref_get(&ctx->reqs);
754
755 err = ioctx_add_table(ctx, mm);
756 if (err)
757 goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
760 mutex_unlock(&ctx->ring_lock);
761
762 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
763 ctx, ctx->user_id, mm, ctx->nr_events);
764 return ctx;
765
766err_cleanup:
767 aio_nr_sub(ctx->max_reqs);
768err_ctx:
769 atomic_set(&ctx->dead, 1);
770 if (ctx->mmap_size)
771 vm_munmap(ctx->mmap_base, ctx->mmap_size);
772 aio_free_ring(ctx);
773err:
774 mutex_unlock(&ctx->ring_lock);
775 free_percpu(ctx->cpu);
776 percpu_ref_exit(&ctx->reqs);
777 percpu_ref_exit(&ctx->users);
778 kmem_cache_free(kioctx_cachep, ctx);
779 pr_debug("error allocating ioctx %d\n", err);
780 return ERR_PTR(err);
781}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
788static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
789 struct ctx_rq_wait *wait)
790{
791 struct kioctx_table *table;
792
793 spin_lock(&mm->ioctx_lock);
794 if (atomic_xchg(&ctx->dead, 1)) {
795 spin_unlock(&mm->ioctx_lock);
796 return -EINVAL;
797 }
798
799 table = rcu_dereference_raw(mm->ioctx_table);
800 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
801 RCU_INIT_POINTER(table->table[ctx->id], NULL);
802 spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
805 wake_up_all(&ctx->wait);

	/*
	 * Release this context's contribution to the global aio_nr limit
	 * now, rather than waiting for all outstanding kiocbs to drain in
	 * free_ioctx(), so a replacement context can be created right away.
	 */
814 aio_nr_sub(ctx->max_reqs);
815
816 if (ctx->mmap_size)
817 vm_munmap(ctx->mmap_base, ctx->mmap_size);
818
819 ctx->rq_wait = wait;
820 percpu_ref_kill(&ctx->users);
821 return 0;
822}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
832void exit_aio(struct mm_struct *mm)
833{
834 struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
835 struct ctx_rq_wait wait;
836 int i, skipped;
837
838 if (!table)
839 return;
840
841 atomic_set(&wait.count, table->nr);
842 init_completion(&wait.comp);
843
844 skipped = 0;
845 for (i = 0; i < table->nr; ++i) {
846 struct kioctx *ctx =
847 rcu_dereference_protected(table->table[i], true);
848
849 if (!ctx) {
850 skipped++;
851 continue;
852 }

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything.  And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
861 ctx->mmap_size = 0;
862 kill_ioctx(mm, ctx, &wait);
863 }
864
865 if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
867 wait_for_completion(&wait.comp);
868 }
869
870 RCU_INIT_POINTER(mm->ioctx_table, NULL);
871 kfree(table);
872}
873
874static void put_reqs_available(struct kioctx *ctx, unsigned nr)
875{
876 struct kioctx_cpu *kcpu;
877 unsigned long flags;
878
879 local_irq_save(flags);
880 kcpu = this_cpu_ptr(ctx->cpu);
881 kcpu->reqs_available += nr;

	/*
	 * Return surplus completions to the global counter a batch at a
	 * time, keeping at most 2 * req_batch - 1 slots cached on this cpu.
	 */
883 while (kcpu->reqs_available >= ctx->req_batch * 2) {
884 kcpu->reqs_available -= ctx->req_batch;
885 atomic_add(ctx->req_batch, &ctx->reqs_available);
886 }
887
888 local_irq_restore(flags);
889}
890
891static bool get_reqs_available(struct kioctx *ctx)
892{
893 struct kioctx_cpu *kcpu;
894 bool ret = false;
895 unsigned long flags;
896
897 local_irq_save(flags);
898 kcpu = this_cpu_ptr(ctx->cpu);
899 if (!kcpu->reqs_available) {
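		/*
		 * The per-cpu cache is empty: try to move req_batch slots
		 * from the global reqs_available counter onto this cpu.
		 */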
900 int old, avail = atomic_read(&ctx->reqs_available);
901
902 do {
903 if (avail < ctx->req_batch)
904 goto out;
905
906 old = avail;
907 avail = atomic_cmpxchg(&ctx->reqs_available,
908 avail, avail - ctx->req_batch);
909 } while (avail != old);
910
911 kcpu->reqs_available += ctx->req_batch;
912 }
913
914 ret = true;
915 kcpu->reqs_available--;
916out:
917 local_irq_restore(flags);
918 return ret;
919}

/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  Called from
 *	aio_complete() (to optimistically update reqs_available) and from
 *	user_refill_reqs_available() when aio_get_req() has run out of
 *	slots.  Must be called holding ctx->completion_lock.
 */
928static void refill_reqs_available(struct kioctx *ctx, unsigned head,
929 unsigned tail)
930{
931 unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
934 head %= ctx->nr_events;
935 if (head <= tail)
936 events_in_ring = tail - head;
937 else
938 events_in_ring = ctx->nr_events - (head - tail);
939
940 completed = ctx->completed_events;
941 if (events_in_ring < completed)
942 completed -= events_in_ring;
943 else
944 completed = 0;
945
946 if (!completed)
947 return;
948
949 ctx->completed_events -= completed;
950 put_reqs_available(ctx, completed);
951}

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() finds that the
 *	completion ring has run out of available slots.
 */
957static void user_refill_reqs_available(struct kioctx *ctx)
958{
959 spin_lock_irq(&ctx->completion_lock);
960 if (ctx->completed_events) {
961 struct aio_ring *ring;
962 unsigned head;

		/*
		 * ring->head may be advanced concurrently by userspace or by
		 * aio_read_events_ring(), but under completion_lock no new
		 * completions can be appended.  A head that has moved further
		 * forward only means more events were consumed, which
		 * refill_reqs_available() accounts for safely.
		 */
973 ring = kmap_atomic(ctx->ring_pages[0]);
974 head = ring->head;
975 kunmap_atomic(ring);
976
977 refill_reqs_available(ctx, head, ctx->tail);
978 }
979
980 spin_unlock_irq(&ctx->completion_lock);
981}

/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 */
987static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
988{
989 struct aio_kiocb *req;
990
991 if (!get_reqs_available(ctx)) {
992 user_refill_reqs_available(ctx);
993 if (!get_reqs_available(ctx))
994 return NULL;
995 }
996
997 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
998 if (unlikely(!req))
999 goto out_put;
1000
1001 percpu_ref_get(&ctx->reqs);
1002 INIT_LIST_HEAD(&req->ki_list);
1003 req->ki_ctx = ctx;
1004 return req;
1005out_put:
1006 put_reqs_available(ctx, 1);
1007 return NULL;
1008}
1009
1010static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1011{
1012 struct aio_ring __user *ring = (void __user *)ctx_id;
1013 struct mm_struct *mm = current->mm;
1014 struct kioctx *ctx, *ret = NULL;
1015 struct kioctx_table *table;
1016 unsigned id;
1017
1018 if (get_user(id, &ring->id))
1019 return NULL;
1020
1021 rcu_read_lock();
1022 table = rcu_dereference(mm->ioctx_table);
1023
1024 if (!table || id >= table->nr)
1025 goto out;

	/* Guard against speculative out-of-bounds indexing (Spectre v1). */
1027 id = array_index_nospec(id, table->nr);
1028 ctx = rcu_dereference(table->table[id]);
1029 if (ctx && ctx->user_id == ctx_id) {
1030 if (percpu_ref_tryget_live(&ctx->users))
1031 ret = ctx;
1032 }
1033out:
1034 rcu_read_unlock();
1035 return ret;
1036}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
1041static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1042{
1043 struct kioctx *ctx = iocb->ki_ctx;
1044 struct aio_ring *ring;
1045 struct io_event *ev_page, *event;
1046 unsigned tail, pos, head;
1047 unsigned long flags;

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
1054 spin_lock_irqsave(&ctx->completion_lock, flags);
1055
1056 tail = ctx->tail;
1057 pos = tail + AIO_EVENTS_OFFSET;
1058
1059 if (++tail >= ctx->nr_events)
1060 tail = 0;
1061
1062 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1063 event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1064
1065 event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
1066 event->data = iocb->ki_user_data;
1067 event->res = res;
1068 event->res2 = res2;
1069
1070 kunmap_atomic(ev_page);
1071 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1072
1073 pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
1074 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
1075 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */
1081
1082 ctx->tail = tail;
1083
1084 ring = kmap_atomic(ctx->ring_pages[0]);
1085 head = ring->head;
1086 ring->tail = tail;
1087 kunmap_atomic(ring);
1088 flush_dcache_page(ctx->ring_pages[0]);
1089
1090 ctx->completed_events++;
1091 if (ctx->completed_events > 1)
1092 refill_reqs_available(ctx, head, tail);
1093 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1094
1095 pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
1102 if (iocb->ki_eventfd) {
1103 eventfd_signal(iocb->ki_eventfd, 1);
1104 eventfd_ctx_put(iocb->ki_eventfd);
1105 }
1106
1107 kmem_cache_free(kiocb_cachep, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
1115 smp_mb();
1116
1117 if (waitqueue_active(&ctx->wait))
1118 wake_up(&ctx->wait);
1119
1120 percpu_ref_put(&ctx->reqs);
1121}

/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched.
 */
1127static long aio_read_events_ring(struct kioctx *ctx,
1128 struct io_event __user *event, long nr)
1129{
1130 struct aio_ring *ring;
1131 unsigned head, tail, pos;
1132 long ret = 0;
1133 int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
1141 sched_annotate_sleep();
1142 mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
1145 ring = kmap_atomic(ctx->ring_pages[0]);
1146 head = ring->head;
1147 tail = ring->tail;
1148 kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
1154 smp_rmb();
1155
1156 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1157
1158 if (head == tail)
1159 goto out;
1160
1161 head %= ctx->nr_events;
1162 tail %= ctx->nr_events;
1163
1164 while (ret < nr) {
1165 long avail;
1166 struct io_event *ev;
1167 struct page *page;
1168
1169 avail = (head <= tail ? tail : ctx->nr_events) - head;
1170 if (head == tail)
1171 break;
1172
1173 pos = head + AIO_EVENTS_OFFSET;
1174 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1175 pos %= AIO_EVENTS_PER_PAGE;
1176
1177 avail = min(avail, nr - ret);
1178 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1179
1180 ev = kmap(page);
1181 copy_ret = copy_to_user(event + ret, ev + pos,
1182 sizeof(*ev) * avail);
1183 kunmap(page);
1184
1185 if (unlikely(copy_ret)) {
1186 ret = -EFAULT;
1187 goto out;
1188 }
1189
1190 ret += avail;
1191 head += avail;
1192 head %= ctx->nr_events;
1193 }
1194
1195 ring = kmap_atomic(ctx->ring_pages[0]);
1196 ring->head = head;
1197 kunmap_atomic(ring);
1198 flush_dcache_page(ctx->ring_pages[0]);
1199
1200 pr_debug("%li h%u t%u\n", ret, head, tail);
1201out:
1202 mutex_unlock(&ctx->ring_lock);
1203
1204 return ret;
1205}
1206
1207static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1208 struct io_event __user *event, long *i)
1209{
1210 long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1211
1212 if (ret > 0)
1213 *i += ret;
1214
1215 if (unlikely(atomic_read(&ctx->dead)))
1216 ret = -EINVAL;
1217
1218 if (!*i)
1219 *i = ret;
1220
1221 return ret < 0 || *i >= min_nr;
1222}
1223
1224static long read_events(struct kioctx *ctx, long min_nr, long nr,
1225 struct io_event __user *event,
1226 ktime_t until)
1227{
1228 long ret = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex is contended, which we don't expect to
	 * happen often enough that it's a problem.
	 */
1244 if (until == 0)
1245 aio_read_events(ctx, min_nr, nr, event, &ret);
1246 else
1247 wait_event_interruptible_hrtimeout(ctx->wait,
1248 aio_read_events(ctx, min_nr, nr, event, &ret),
1249 until);
1250 return ret;
1251}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
1266SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1267{
1268 struct kioctx *ioctx = NULL;
1269 unsigned long ctx;
1270 long ret;
1271
1272 ret = get_user(ctx, ctxp);
1273 if (unlikely(ret))
1274 goto out;
1275
1276 ret = -EINVAL;
1277 if (unlikely(ctx || nr_events == 0)) {
1278 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1279 ctx, nr_events);
1280 goto out;
1281 }
1282
1283 ioctx = ioctx_alloc(nr_events);
1284 ret = PTR_ERR(ioctx);
1285 if (!IS_ERR(ioctx)) {
1286 ret = put_user(ioctx->user_id, ctxp);
1287 if (ret)
1288 kill_ioctx(current->mm, ioctx, NULL);
1289 percpu_ref_put(&ioctx->users);
1290 }
1291
1292out:
1293 return ret;
1294}
1295
1296#ifdef CONFIG_COMPAT
1297COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1298{
1299 struct kioctx *ioctx = NULL;
1300 unsigned long ctx;
1301 long ret;
1302
1303 ret = get_user(ctx, ctx32p);
1304 if (unlikely(ret))
1305 goto out;
1306
1307 ret = -EINVAL;
1308 if (unlikely(ctx || nr_events == 0)) {
1309 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1310 ctx, nr_events);
1311 goto out;
1312 }
1313
1314 ioctx = ioctx_alloc(nr_events);
1315 ret = PTR_ERR(ioctx);
1316 if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
1318 ret = put_user((u32)ioctx->user_id, ctx32p);
1319 if (ret)
1320 kill_ioctx(current->mm, ioctx, NULL);
1321 percpu_ref_put(&ioctx->users);
1322 }
1323
1324out:
1325 return ret;
1326}
1327#endif

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
1335SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1336{
1337 struct kioctx *ioctx = lookup_ioctx(ctx);
1338 if (likely(NULL != ioctx)) {
1339 struct ctx_rq_wait wait;
1340 int ret;
1341
1342 init_completion(&wait.comp);
1343 atomic_set(&wait.count, 1);

		/* Pass the wait descriptor to kill_ioctx(), which attaches it
		 * under the proper locks; setting ctx->rq_wait here could
		 * race with a concurrent io_destroy() on the same context.
		 */
1349 ret = kill_ioctx(current->mm, ioctx, &wait);
1350 percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise kernel
		 * keep using user-space buffers even if user thinks the context
		 * is destroyed.
		 */
1356 if (!ret)
1357 wait_for_completion(&wait.comp);
1358
1359 return ret;
1360 }
1361 pr_debug("EINVAL: invalid context id\n");
1362 return -EINVAL;
1363}
1364
1365static void aio_remove_iocb(struct aio_kiocb *iocb)
1366{
1367 struct kioctx *ctx = iocb->ki_ctx;
1368 unsigned long flags;
1369
1370 spin_lock_irqsave(&ctx->ctx_lock, flags);
1371 list_del(&iocb->ki_list);
1372 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1373}
1374
1375static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
1376{
1377 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1378
1379 if (!list_empty_careful(&iocb->ki_list))
1380 aio_remove_iocb(iocb);
1381
1382 if (kiocb->ki_flags & IOCB_WRITE) {
1383 struct inode *inode = file_inode(kiocb->ki_filp);

		/*
		 * Tell lockdep we inherited freeze protection from submission
		 * thread.
		 */
1389 if (S_ISREG(inode->i_mode))
1390 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
1391 file_end_write(kiocb->ki_filp);
1392 }
1393
1394 fput(kiocb->ki_filp);
1395 aio_complete(iocb, res, res2);
1396}
1397
1398static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
1399{
1400 int ret;
1401
1402 req->ki_filp = fget(iocb->aio_fildes);
1403 if (unlikely(!req->ki_filp))
1404 return -EBADF;
1405 req->ki_complete = aio_complete_rw;
1406 req->ki_pos = iocb->aio_offset;
1407 req->ki_flags = iocb_flags(req->ki_filp);
1408 if (iocb->aio_flags & IOCB_FLAG_RESFD)
1409 req->ki_flags |= IOCB_EVENTFD;
1410 req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
1411 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
		/*
		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
		 * aio_reqprio is interpreted as an I/O scheduling
		 * class and priority.
		 */
1417 ret = ioprio_check_cap(iocb->aio_reqprio);
1418 if (ret) {
1419 pr_debug("aio ioprio check cap error: %d\n", ret);
1420 goto out_fput;
1421 }
1422
1423 req->ki_ioprio = iocb->aio_reqprio;
1424 } else
1425 req->ki_ioprio = get_current_ioprio();
1426
1427 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1428 if (unlikely(ret))
1429 goto out_fput;
1430
	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1432 return 0;
1433
1434out_fput:
1435 fput(req->ki_filp);
1436 return ret;
1437}
1438
1439static ssize_t aio_setup_rw(int rw, struct iocb *iocb,
1440 struct iovec **iovec, bool vectored, bool compat,
1441 struct iov_iter *iter)
1442{
1443 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1444 size_t len = iocb->aio_nbytes;
1445
1446 if (!vectored) {
1447 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1448 *iovec = NULL;
1449 return ret;
1450 }
1451#ifdef CONFIG_COMPAT
1452 if (compat)
1453 return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
1454 iter);
1455#endif
1456 return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
1457}
1458
1459static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1460{
1461 switch (ret) {
1462 case -EIOCBQUEUED:
1463 break;
1464 case -ERESTARTSYS:
1465 case -ERESTARTNOINTR:
1466 case -ERESTARTNOHAND:
1467 case -ERESTART_RESTARTBLOCK:
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
1472 ret = -EINTR;
		/*FALLTHRU*/
1474 default:
1475 aio_complete_rw(req, ret, 0);
1476 }
1477}
1478
1479static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
1480 bool compat)
1481{
1482 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1483 struct iov_iter iter;
1484 struct file *file;
1485 ssize_t ret;
1486
1487 ret = aio_prep_rw(req, iocb);
1488 if (ret)
1489 return ret;
1490 file = req->ki_filp;
1491
1492 ret = -EBADF;
1493 if (unlikely(!(file->f_mode & FMODE_READ)))
1494 goto out_fput;
1495 ret = -EINVAL;
1496 if (unlikely(!file->f_op->read_iter))
1497 goto out_fput;
1498
1499 ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1500 if (ret < 0)
1501 goto out_fput;
1502 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1503 if (!ret)
1504 aio_rw_done(req, call_read_iter(file, req, &iter));
1505 kfree(iovec);
1506out_fput:
1507 if (unlikely(ret))
1508 fput(file);
1509 return ret;
1510}
1511
1512static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
1513 bool compat)
1514{
1515 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1516 struct iov_iter iter;
1517 struct file *file;
1518 ssize_t ret;
1519
1520 ret = aio_prep_rw(req, iocb);
1521 if (ret)
1522 return ret;
1523 file = req->ki_filp;
1524
1525 ret = -EBADF;
1526 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1527 goto out_fput;
1528 ret = -EINVAL;
1529 if (unlikely(!file->f_op->write_iter))
1530 goto out_fput;
1531
1532 ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1533 if (ret < 0)
1534 goto out_fput;
1535 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1536 if (!ret) {
		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * aio_complete_rw().  Fool lockdep by telling it the lock
		 * got released so that it doesn't complain about the held
		 * lock when we return to userspace.
		 */
1544 if (S_ISREG(file_inode(file)->i_mode)) {
1545 __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
1546 __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1547 }
1548 req->ki_flags |= IOCB_WRITE;
1549 aio_rw_done(req, call_write_iter(file, req, &iter));
1550 }
1551 kfree(iovec);
1552out_fput:
1553 if (unlikely(ret))
1554 fput(file);
1555 return ret;
1556}
1557
1558static void aio_fsync_work(struct work_struct *work)
1559{
1560 struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
1561 int ret;
1562
1563 ret = vfs_fsync(req->file, req->datasync);
1564 fput(req->file);
1565 aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
1566}
1567
1568static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
1569{
1570 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1571 iocb->aio_rw_flags))
1572 return -EINVAL;
1573
1574 req->file = fget(iocb->aio_fildes);
1575 if (unlikely(!req->file))
1576 return -EBADF;
1577 if (unlikely(!req->file->f_op->fsync)) {
1578 fput(req->file);
1579 return -EINVAL;
1580 }
1581
1582 req->datasync = datasync;
1583 INIT_WORK(&req->work, aio_fsync_work);
1584 schedule_work(&req->work);
1585 return 0;
1586}
1587
1588static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1589 bool compat)
1590{
1591 struct aio_kiocb *req;
1592 struct iocb iocb;
1593 ssize_t ret;
1594
1595 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
1596 return -EFAULT;

	/* enforce forwards compatibility on users */
1599 if (unlikely(iocb.aio_reserved2)) {
1600 pr_debug("EINVAL: reserve field set\n");
1601 return -EINVAL;
1602 }

	/* prevent overflows */
1605 if (unlikely(
1606 (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
1607 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
1608 ((ssize_t)iocb.aio_nbytes < 0)
1609 )) {
1610 pr_debug("EINVAL: overflow check\n");
1611 return -EINVAL;
1612 }
1613
1614 req = aio_get_req(ctx);
1615 if (unlikely(!req))
1616 return -EAGAIN;
1617
1618 if (iocb.aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
1625 req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd);
1626 if (IS_ERR(req->ki_eventfd)) {
1627 ret = PTR_ERR(req->ki_eventfd);
1628 req->ki_eventfd = NULL;
1629 goto out_put_req;
1630 }
1631 }
1632
1633 ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
1634 if (unlikely(ret)) {
1635 pr_debug("EFAULT: aio_key\n");
1636 goto out_put_req;
1637 }
1638
1639 req->ki_user_iocb = user_iocb;
1640 req->ki_user_data = iocb.aio_data;
1641
1642 switch (iocb.aio_lio_opcode) {
1643 case IOCB_CMD_PREAD:
1644 ret = aio_read(&req->rw, &iocb, false, compat);
1645 break;
1646 case IOCB_CMD_PWRITE:
1647 ret = aio_write(&req->rw, &iocb, false, compat);
1648 break;
1649 case IOCB_CMD_PREADV:
1650 ret = aio_read(&req->rw, &iocb, true, compat);
1651 break;
1652 case IOCB_CMD_PWRITEV:
1653 ret = aio_write(&req->rw, &iocb, true, compat);
1654 break;
1655 case IOCB_CMD_FSYNC:
1656 ret = aio_fsync(&req->fsync, &iocb, false);
1657 break;
1658 case IOCB_CMD_FDSYNC:
1659 ret = aio_fsync(&req->fsync, &iocb, true);
1660 break;
1661 default:
1662 pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
1663 ret = -EINVAL;
1664 break;
1665 }

	/*
	 * If ret is 0, we'd either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously.  Anything non-zero
	 * means that we need to destroy req ourselves.
	 */
1672 if (ret)
1673 goto out_put_req;
1674 return 0;
1675out_put_req:
1676 put_reqs_available(ctx, 1);
1677 percpu_ref_put(&ctx->reqs);
1678 if (req->ki_eventfd)
1679 eventfd_ctx_put(req->ki_eventfd);
1680 kmem_cache_free(kiocb_cachep, req);
1681 return ret;
1682}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
1696SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1697 struct iocb __user * __user *, iocbpp)
1698{
1699 struct kioctx *ctx;
1700 long ret = 0;
1701 int i = 0;
1702 struct blk_plug plug;
1703
1704 if (unlikely(nr < 0))
1705 return -EINVAL;
1706
1707 ctx = lookup_ioctx(ctx_id);
1708 if (unlikely(!ctx)) {
1709 pr_debug("EINVAL: invalid context id\n");
1710 return -EINVAL;
1711 }

	/* Don't let a single call submit more requests than the ring can hold. */
1713 if (nr > ctx->nr_events)
1714 nr = ctx->nr_events;
1715
1716 blk_start_plug(&plug);
1717 for (i = 0; i < nr; i++) {
1718 struct iocb __user *user_iocb;
1719
1720 if (unlikely(get_user(user_iocb, iocbpp + i))) {
1721 ret = -EFAULT;
1722 break;
1723 }
1724
1725 ret = io_submit_one(ctx, user_iocb, false);
1726 if (ret)
1727 break;
1728 }
1729 blk_finish_plug(&plug);
1730
1731 percpu_ref_put(&ctx->users);
1732 return i ? i : ret;
1733}
1734
1735#ifdef CONFIG_COMPAT
1736COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
1737 int, nr, compat_uptr_t __user *, iocbpp)
1738{
1739 struct kioctx *ctx;
1740 long ret = 0;
1741 int i = 0;
1742 struct blk_plug plug;
1743
1744 if (unlikely(nr < 0))
1745 return -EINVAL;
1746
1747 ctx = lookup_ioctx(ctx_id);
1748 if (unlikely(!ctx)) {
1749 pr_debug("EINVAL: invalid context id\n");
1750 return -EINVAL;
1751 }

	/* Don't let a single call submit more requests than the ring can hold. */
1753 if (nr > ctx->nr_events)
1754 nr = ctx->nr_events;
1755
1756 blk_start_plug(&plug);
1757 for (i = 0; i < nr; i++) {
1758 compat_uptr_t user_iocb;
1759
1760 if (unlikely(get_user(user_iocb, iocbpp + i))) {
1761 ret = -EFAULT;
1762 break;
1763 }
1764
1765 ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
1766 if (ret)
1767 break;
1768 }
1769 blk_finish_plug(&plug);
1770
1771 percpu_ref_put(&ctx->users);
1772 return i ? i : ret;
1773}
1774#endif

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
1779static struct aio_kiocb *
1780lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
1781{
1782 struct aio_kiocb *kiocb;
1783
1784 assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
1787 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
1788 if (kiocb->ki_user_iocb == iocb)
1789 return kiocb;
1790 }
1791 return NULL;
1792}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit().  If
 *	the operation is successfully cancelled, the completion event is
 *	still delivered through the completion queue; the result argument
 *	is currently left untouched.  May fail with -EFAULT if any of the
 *	data structures pointed to are invalid.  May fail with -EINVAL if
 *	the aio_context specified by ctx_id is invalid.  May fail with
 *	-EAGAIN if the iocb specified was not cancelled.  Will fail with
 *	-ENOSYS if not implemented.
 */
1804SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1805 struct io_event __user *, result)
1806{
1807 struct kioctx *ctx;
1808 struct aio_kiocb *kiocb;
1809 int ret = -EINVAL;
1810 u32 key;
1811
1812 if (unlikely(get_user(key, &iocb->aio_key)))
1813 return -EFAULT;
1814 if (unlikely(key != KIOCB_KEY))
1815 return -EINVAL;
1816
1817 ctx = lookup_ioctx(ctx_id);
1818 if (unlikely(!ctx))
1819 return -EINVAL;
1820
1821 spin_lock_irq(&ctx->ctx_lock);
1822 kiocb = lookup_kiocb(ctx, iocb);
1823 if (kiocb) {
1824 ret = kiocb->ki_cancel(&kiocb->rw);
1825 list_del_init(&kiocb->ki_list);
1826 }
1827 spin_unlock_irq(&ctx->ctx_lock);
1828
1829 if (!ret) {
		/*
		 * The result argument is no longer sent to userspace. A
		 * cancelled request still delivers its completion event
		 * through the ring, so report that cancellation is in
		 * progress instead of copying an event here.
		 */
1835 ret = -EINPROGRESS;
1836 }
1837
1838 percpu_ref_put(&ctx->users);
1839
1840 return ret;
1841}
1842
1843static long do_io_getevents(aio_context_t ctx_id,
1844 long min_nr,
1845 long nr,
1846 struct io_event __user *events,
1847 struct timespec64 *ts)
1848{
1849 ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
1850 struct kioctx *ioctx = lookup_ioctx(ctx_id);
1851 long ret = -EINVAL;
1852
1853 if (likely(ioctx)) {
1854 if (likely(min_nr <= nr && min_nr >= 0))
1855 ret = read_events(ioctx, min_nr, nr, events, until);
1856 percpu_ref_put(&ioctx->users);
1857 }
1858
1859 return ret;
1860}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
1874SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
1875 long, min_nr,
1876 long, nr,
1877 struct io_event __user *, events,
1878 struct timespec __user *, timeout)
1879{
1880 struct timespec64 ts;
1881 int ret;
1882
1883 if (timeout && unlikely(get_timespec64(&ts, timeout)))
1884 return -EFAULT;
1885
1886 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
1887 if (!ret && signal_pending(current))
1888 ret = -EINTR;
1889 return ret;
1890}
1891
1892struct __aio_sigset {
1893 const sigset_t __user *sigmask;
1894 size_t sigsetsize;
1895};
1896
1897SYSCALL_DEFINE6(io_pgetevents,
1898 aio_context_t, ctx_id,
1899 long, min_nr,
1900 long, nr,
1901 struct io_event __user *, events,
1902 struct timespec __user *, timeout,
1903 const struct __aio_sigset __user *, usig)
1904{
1905 struct __aio_sigset ksig = { NULL, };
1906 sigset_t ksigmask, sigsaved;
1907 struct timespec64 ts;
1908 bool interrupted;
1909 int ret;
1910
1911 if (timeout && unlikely(get_timespec64(&ts, timeout)))
1912 return -EFAULT;
1913
1914 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
1915 return -EFAULT;
1916
1917
1918 ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
1919 if (ret)
1920 return ret;
1921
1922 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
1923
1924 interrupted = signal_pending(current);
1925 restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
1926 if (interrupted && !ret)
1927 ret = -ERESTARTNOHAND;
1928
1929 return ret;
1930}
1931
1932#ifdef CONFIG_COMPAT
1933COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
1934 compat_long_t, min_nr,
1935 compat_long_t, nr,
1936 struct io_event __user *, events,
1937 struct compat_timespec __user *, timeout)
1938{
1939 struct timespec64 t;
1940 int ret;
1941
1942 if (timeout && compat_get_timespec64(&t, timeout))
1943 return -EFAULT;
1944
1945 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
1946 if (!ret && signal_pending(current))
1947 ret = -EINTR;
1948 return ret;
1949}
1950
1951
1952struct __compat_aio_sigset {
1953 compat_sigset_t __user *sigmask;
1954 compat_size_t sigsetsize;
1955};
1956
1957COMPAT_SYSCALL_DEFINE6(io_pgetevents,
1958 compat_aio_context_t, ctx_id,
1959 compat_long_t, min_nr,
1960 compat_long_t, nr,
1961 struct io_event __user *, events,
1962 struct compat_timespec __user *, timeout,
1963 const struct __compat_aio_sigset __user *, usig)
1964{
1965 struct __compat_aio_sigset ksig = { NULL, };
1966 sigset_t ksigmask, sigsaved;
1967 struct timespec64 t;
1968 bool interrupted;
1969 int ret;
1970
1971 if (timeout && compat_get_timespec64(&t, timeout))
1972 return -EFAULT;
1973
1974 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
1975 return -EFAULT;
1976
1977 ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
1978 if (ret)
1979 return ret;
1980
1981 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
1982
1983 interrupted = signal_pending(current);
1984 restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
1985 if (interrupted && !ret)
1986 ret = -ERESTARTNOHAND;
1987
1988 return ret;
1989}
1990#endif
1991