/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
	BUG_ON(!aio_wq);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i = 0; i < info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;	/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
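
/*
 * A worked example of the indexing above, assuming 4096-byte pages, a
 * 32-byte struct aio_ring header and a 32-byte struct io_event (common
 * values; the macros themselves do not depend on these exact numbers):
 *
 *	AIO_EVENTS_PER_PAGE	= 4096 / 32        = 128
 *	AIO_EVENTS_FIRST_PAGE	= (4096 - 32) / 32 = 127
 *	AIO_EVENTS_OFFSET	= 128 - 127        = 1
 *
 * Event 0 then maps to pos = 1, i.e. page 0, slot 1 -- the slot
 * immediately after the ring header -- and event 127 maps to pos = 128,
 * i.e. slot 0 of page 1.  The offset simply skips over the header that
 * occupies the start of the first ring page.
 */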

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	unsigned nr_events = ctx->max_reqs;

	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	cancel_work_sync(&ctx->wq.work);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

static inline int try_get_ioctx(struct kioctx *kioctx)
{
	return atomic_inc_not_zero(&kioctx->users);
}

static inline void put_ioctx(struct kioctx *kioctx)
{
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	if (unlikely(atomic_dec_and_test(&kioctx->users)))
		__put_ioctx(kioctx);
}
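
/*
 * Reference counting note (describing the helpers above and
 * ioctx_alloc() below): ioctx_alloc() starts ctx->users at 2 -- one
 * reference held by the mm's ioctx_list and one returned to the
 * caller.  try_get_ioctx() is used by lookup_ioctx() under RCU and
 * refuses to resurrect a context whose count has already hit zero.
 */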

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int did_sync = 0;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 2);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	do {
		spin_lock_bh(&aio_nr_lock);
		if (aio_nr + nr_events > aio_max_nr ||
		    aio_nr + nr_events < aio_nr)
			ctx->max_reqs = 0;
		else
			aio_nr += ctx->max_reqs;
		spin_unlock_bh(&aio_nr_lock);
		if (ctx->max_reqs || did_sync)
			break;

		/* wait for rcu callbacks to have completed before giving up */
		synchronize_rcu();
		did_sync = 1;
		ctx->max_reqs = nr_events;
	} while (1);

	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);

		/*
		 * Ensure we don't leave the ctx on the aio_wq
		 */
		cancel_work_sync(&ctx->wq.work);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	return req;
}

/*
 * struct kiocb's are allocated in batches to reduce the number of
 * times the ctx lock is acquired and released.
 */
#define KIOCB_BATCH_SIZE	32L
struct kiocb_batch {
	struct list_head head;
	long count; /* number of requests left to allocate */
};
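
/*
 * Typical use, as in do_io_submit() below: kiocb_batch_init(&batch, nr)
 * before the submission loop, aio_get_req(ctx, &batch) for each iocb
 * (which refills the batch from the slab when it runs dry), and
 * kiocb_batch_free(ctx, &batch) afterwards to return any kiocbs that
 * were allocated but never handed out.
 */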

static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
	INIT_LIST_HEAD(&batch->head);
	batch->count = total;
}

static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
	struct kiocb *req, *n;

	if (list_empty(&batch->head))
		return;

	spin_lock_irq(&ctx->ctx_lock);
	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
		list_del(&req->ki_batch);
		list_del(&req->ki_list);
		kmem_cache_free(kiocb_cachep, req);
		ctx->reqs_active--;
	}
	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 * context lock a lot during setup.
 */
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
	unsigned short allocated, to_alloc;
	long avail;
	bool called_fput = false;
	struct kiocb *req, *n;
	struct aio_ring *ring;

	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
	for (allocated = 0; allocated < to_alloc; allocated++) {
		req = __aio_get_req(ctx);
		if (!req)
			/* allocation failed, go with what we've got */
			break;
		list_add(&req->ki_batch, &batch->head);
	}

	if (allocated == 0)
		goto out;

retry:
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);

	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
	BUG_ON(avail < 0);
	if (avail == 0 && !called_fput) {
		/*
		 * Handle a potential starvation case.  It is possible that
		 * we hold the last reference on a struct file, causing us
		 * to delay the final fput to non-irq context.  In this case,
		 * ctx->reqs_active is artificially high.  Calling the fput
		 * routine here may free up a slot in the event completion
		 * ring, allowing this allocation to succeed.
		 */
		kunmap_atomic(ring);
		spin_unlock_irq(&ctx->ctx_lock);
		aio_fput_routine(NULL);
		called_fput = true;
		goto retry;
	}

	if (avail < allocated) {
		/* Trim back the number of requests. */
		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
			list_del(&req->ki_batch);
			kmem_cache_free(kiocb_cachep, req);
			if (--allocated <= avail)
				break;
		}
	}

	batch->count -= allocated;
	list_for_each_entry(req, &batch->head, ki_batch) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
	}

	kunmap_atomic(ring);
	spin_unlock_irq(&ctx->ctx_lock);

out:
	return allocated;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx,
					struct kiocb_batch *batch)
{
	struct kiocb *req;

	if (list_empty(&batch->head))
		if (kiocb_batch_refill(ctx, batch) == 0)
			return NULL;
	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
	list_del(&req->ki_batch);
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput(s) */
		if (req->ki_filp != NULL)
			fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		rcu_read_lock();
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		/*
		 * really_put_req() may allow the context to be torn down
		 * by another thread once reqs_active drops to zero; the
		 * RCU read section ensures the kioctx itself (freed via
		 * call_rcu) stays valid until we have released ctx_lock.
		 */
		spin_unlock_irq(&ctx->ctx_lock);
		rcu_read_unlock();

		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/*
	 * Try to optimize the aio and eventfd file* puts, by avoiding to
	 * schedule work in case it is not final fput() time. In normal cases,
	 * we would not be holding the last reference to the file*, so
	 * this function will be executed w/out any aio kthread wakeup.
	 */
	if (unlikely(!fput_atomic(req->ki_filp))) {
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		schedule_work(&fput_work);
	} else {
		req->ki_filp = NULL;
		really_put_req(ctx, req);
	}
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		/*
		 * RCU protects us against accessing freed memory but
		 * we have to be careful not to get a reference when the
		 * reference count already dropped to 0 (ctx->dead test
		 * is unreliable because of races).
		 */
		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */
	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 * 	Process all pending retries queued on the ioctx
 * 	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx *ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work in right away, if
	 * the timeout isn't pending it gets called right away
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

/*
 * aio_run_all_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list, and keep running them until the list
 *	stays empty.
 * Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 * 	Work queue handler triggered to process pending
 * 	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already; no point using non-zero delay
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}

/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *      Called typically from a wait queue callback context
 *      to trigger a retry of the iocb.
 *      The retry is usually executed by aio workqueue
 *      threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	second result argument, res2, carries additional, driver-specific
 *	completion information back to userspace.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx *ctx = iocb->ki_ctx;
	struct aio_ring_info *info;
	struct aio_ring *ring;
	struct io_event *event;
	unsigned long flags;
	unsigned long tail;
	int ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}

		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list; only one caller gets to drop
	 * the list reference, decided by who flips ->dead first */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up_all(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret) {
			put_ioctx(ioctx);
			return 0;
		}
		io_destroy(ioctx);
	}

out:
	return ret;
}
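
/*
 * Illustrative (userspace) use of the syscall above -- a minimal
 * sketch, not part of the kernel build.  The context handle must be
 * zero-initialized before the call, as enforced by the ctx check in
 * sys_io_setup():
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */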

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as its a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec, 1);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec, 1);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct kiocb_batch *batch,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx, batch);	/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	/*
	 * We could have raced with io_destroy() and are currently holding a
	 * reference to ctx which should be destroyed. We cannot submit IO
	 * since ctx gets freed as soon as io_destroy() returns, so check
	 * ctx->dead before running the iocb: a request submitted after
	 * io_destroy()'s aio_cancel_all() pass would never be cancelled
	 * or waited for.
	 */
	if (ctx->dead) {
		spin_unlock_irq(&ctx->ctx_lock);
		ret = -EINVAL;
		goto out_put_req;
	}
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;
	struct kiocb_batch batch;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	kiocb_batch_init(&batch, nr);

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted, but others failed?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	kiocb_batch_free(ctx, &batch);
	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
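
/*
 * Illustrative (userspace) submission sketch -- again not part of the
 * kernel build.  The iocb must be zeroed first, since io_submit_one()
 * rejects iocbs with reserved fields set:
 *
 *	struct iocb cb = {0}, *cbs[1] = { &cb };
 *	cb.aio_fildes = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	cb.aio_offset = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */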

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}
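
/*
 * Illustrative (userspace) completion loop -- a sketch only, reaping
 * the event submitted in the sketch above with a one-second timeout:
 *
 *	struct io_event ev;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int n = syscall(__NR_io_getevents, ctx, 1, 1, &ev, &ts);
 *	if (n == 1)
 *		printf("res=%lld\n", (long long)ev.res);
 *	syscall(__NR_io_destroy, ctx);
 */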