/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 *  Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 *  permitted under the GNU General Public License.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>

#include "loop.h"

#include <linux/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
static DEFINE_MUTEX(loop_validate_mutex);

/*
 * loop_global_lock_killable() - take locks for safe loop_validate_file() test
 *
 * @lo: struct loop_device
 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
 *
 * Returns 0 on success, -EINTR otherwise.
 *
 * Since loop_validate_file() traverses on other "struct loop_device" if
 * is_loop_device() is true, we need a global lock for serializing concurrent
 * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
 */
static int loop_global_lock_killable(struct loop_device *lo, bool global)
{
	int err;

	if (global) {
		err = mutex_lock_killable(&loop_validate_mutex);
		if (err)
			return err;
	}
	err = mutex_lock_killable(&lo->lo_mutex);
	if (err && global)
		mutex_unlock(&loop_validate_mutex);
	return err;
}

/*
 * loop_global_unlock() - release locks taken by loop_global_lock_killable()
 *
 * @lo: struct loop_device
 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
 */
static void loop_global_unlock(struct loop_device *lo, bool global)
{
	mutex_unlock(&lo->lo_mutex);
	if (global)
		mutex_unlock(&loop_validate_mutex);
}

static int max_part;
static int part_shift;

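/*
 * Simple XOR "transfer" transformation. Note that this is obfuscation at
 * best, not real encryption: each byte is XORed with a repeating key, so
 * it must not be relied upon for confidentiality.
 */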
static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};

static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit in a sector_t.
	 */
	return loopsize >> 9;
}

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}

static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of backing device, and the logical block
	 * size of loop is bigger than the backing device's and the loop
	 * needn't transform transfer.
	 *
	 * TODO: the above condition may be loosed in the future, and
	 * direct I/O may be switched runtime at that time because most
	 * of requests in sane applications should be PAGE_SIZE aligned
	 */
	if (dio) {
		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
		    !(lo->lo_offset & dio_align) &&
		    mapping->a_ops->direct_IO &&
		    !lo->transfer)
			use_dio = true;
		else
			use_dio = false;
	} else {
		use_dio = false;
	}

	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
	 * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
	 * will get updated by ioctl(LOOP_GET_STATUS)
	 */
	if (lo->lo_state == Lo_bound)
		blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio) {
		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	} else {
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	}
	if (lo->lo_state == Lo_bound)
		blk_mq_unfreeze_queue(lo->lo_queue);
}

/**
 * loop_validate_block_size() - validates the passed in block size
 * @bsize: size to validate
 */
static int
loop_validate_block_size(unsigned short bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

/**
 * loop_set_size() - sets device size and notifies userspace
 * @lo: struct loop_device to set the size for
 * @size: new size of the loop device
 *
 * Callers must validate that the size passed into this function fits into
 * a sector_t, eg using loop_validate_size()
 */
static void loop_set_size(struct loop_device *lo, loff_t size)
{
	if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, true))
		kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}

static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	int ret;

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
	if (likely(!ret))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
	return ret;
}

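/*
 * Wrapper around iov_iter_bvec(): the bvec pages here belong to the request
 * and stay pinned for its lifetime, so mark the iterator as not needing to
 * take page references (ITER_BVEC_FLAG_NO_REF).
 */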
static inline void loop_iov_iter_bvec(struct iov_iter *i,
		unsigned int direction, const struct bio_vec *bvec,
		unsigned long nr_segs, size_t count)
{
	iov_iter_bvec(i, direction, bvec, nr_segs, count);
	i->type |= ITER_BVEC_FLAG_NO_REF;
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	loop_iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos, 0);
	file_end_write(file);

	if (likely(bw == bvec->bv_len))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

static int lo_write_simple(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int ret = 0;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
		if (ret < 0)
			break;
		cond_resched();
	}

	return ret;
}

/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct page *page;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
			bvec.bv_offset, bvec.bv_len, pos >> 9);
		if (unlikely(ret))
			break;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;
		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
		if (ret < 0)
			break;
	}

	__free_page(page);
	return ret;
}

static int lo_read_simple(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	struct iov_iter i;
	ssize_t len;

	rq_for_each_segment(bvec, rq, iter) {
		loop_iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0)
			return len;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
		cond_resched();
	}

	return 0;
}

static int lo_read_transfer(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct iov_iter i;
	struct page *page;
	ssize_t len;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		loff_t offset = pos;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;

		loop_iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0) {
			ret = len;
			goto out_free_page;
		}

		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
			bvec.bv_offset, len, offset >> 9);
		if (ret)
			goto out_free_page;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
	}

	ret = 0;
out_free_page:
	__free_page(page);
	return ret;
}

static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
			int mode)
{
	/*
	 * We use fallocate to manipulate the space mappings used by the image
	 * a.k.a. discard. However we do not support discard if encryption is
	 * enabled, because it may give an attacker useful information.
	 */
	struct file *file = lo->lo_backing_file;
	struct request_queue *q = lo->lo_queue;
	int ret;

	mode |= FALLOC_FL_KEEP_SIZE;

	if (!blk_queue_discard(q)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		ret = -EIO;
 out:
	return ret;
}

static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}

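/*
 * Completion handler for requests. An AIO read that came back short but
 * returned some data is advanced and requeued so the remainder is retried;
 * a short read that returned no data is failed with an I/O error after the
 * remaining bio space is zero-filled.
 */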
static void lo_complete_rq(struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	blk_status_t ret = BLK_STS_OK;

	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
	    req_op(rq) != REQ_OP_READ) {
		if (cmd->ret < 0)
			ret = errno_to_blk_status(cmd->ret);
		goto end_io;
	}

	/*
	 * Short READ - if we got some data, advance our request and
	 * retry it. If we got no data, end the rest with EIO.
	 */
	if (cmd->ret) {
		blk_update_request(rq, BLK_STS_OK, cmd->ret);
		cmd->ret = 0;
		blk_mq_requeue_request(rq, true);
	} else {
		if (cmd->use_aio) {
			struct bio *bio = rq->bio;

			while (bio) {
				zero_fill_bio(bio);
				bio = bio->bi_next;
			}
		}
		ret = BLK_STS_IOERR;
end_io:
		blk_mq_end_request(rq, ret);
	}
}

static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	if (!atomic_dec_and_test(&cmd->ref))
		return;
	kfree(cmd->bvec);
	cmd->bvec = NULL;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	if (cmd->css)
		css_put(cmd->css);
	cmd->ret = ret;
	lo_rw_aio_do_completion(cmd);
}

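/*
 * Submit the request as asynchronous direct I/O against the backing file.
 * cmd->ref starts at 2: one reference is dropped here after submission and
 * one in lo_rw_aio_complete(), so the request is only completed once both
 * the submission path and the AIO completion callback are done with it.
 */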
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
		     loff_t pos, bool rw)
{
	struct iov_iter iter;
	struct bio_vec *bvec;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	struct bio *bio = rq->bio;
	struct file *file = lo->lo_backing_file;
	unsigned int offset;
	int segments = 0;
	int ret;

	if (rq->bio != rq->biotail) {
		struct req_iterator iter;
		struct bio_vec tmp;

		__rq_for_each_bio(bio, rq)
			segments += bio_segments(bio);
		bvec = kmalloc_array(segments, sizeof(struct bio_vec),
				     GFP_NOIO);
		if (!bvec)
			return -EIO;
		cmd->bvec = bvec;

		/*
		 * The bios of the request may be started from the middle of
		 * the 'bvec' because of bio splitting, so we can't directly
		 * copy bio->bi_iov_vec to new bvec. The rq_for_each_segment
		 * API will take care of all details for us.
		 */
		rq_for_each_segment(tmp, rq, iter) {
			*bvec = tmp;
			bvec++;
		}
		bvec = cmd->bvec;
		offset = 0;
	} else {
		/*
		 * Same here, this bio may be started from the middle of the
		 * 'bvec' because of bio splitting, so offset from the bvec
		 * must be passed to iov iterator
		 */
		offset = bio->bi_iter.bi_bvec_done;
		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		segments = bio_segments(bio);
	}
	atomic_set(&cmd->ref, 2);

	loop_iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq));
	iter.iov_offset = offset;

	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_complete = lo_rw_aio_complete;
	cmd->iocb.ki_flags = IOCB_DIRECT;
	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	if (cmd->css)
		kthread_associate_blkcg(cmd->css);

	if (rw == WRITE)
		ret = call_write_iter(file, &cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &cmd->iocb, &iter);

	lo_rw_aio_do_completion(cmd);
	kthread_associate_blkcg(NULL);

	if (ret != -EIOCBQUEUED)
		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
	return 0;
}

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	/*
	 * lo_write_simple and lo_read_simple should have been covered
	 * by io submit style function like lo_rw_aio(), one blocker
	 * is that lo_read_simple() need to call flush_dcache_page after
	 * the page is written from kernel, and it isn't easy to handle
	 * by blk-mq request based io submit style function. So keep the
	 * simple buffered paths here and use lo_rw_aio() only for
	 * direct I/O.
	 */
	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_WRITE_ZEROES:
		/*
		 * If the caller doesn't want deallocation, call zeroout to
		 * write zeroes the range.  Otherwise, punch them out.
		 */
		return lo_fallocate(lo, rq, pos,
			(rq->cmd_flags & REQ_NOUNMAP) ?
				FALLOC_FL_ZERO_RANGE :
				FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_DISCARD:
		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_WRITE:
		if (lo->transfer)
			return lo_write_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, WRITE);
		else
			return lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		if (lo->transfer)
			return lo_read_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, READ);
		else
			return lo_read_simple(lo, rq, pos);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

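/*
 * Keep direct I/O enabled if it is already in use, or enable it when the
 * backing file itself was opened with O_DIRECT; __loop_update_dio() still
 * verifies alignment and rejects dio when a transfer function is set.
 */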
static inline void loop_update_dio(struct loop_device *lo)
{
	__loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
			lo->use_dio);
}

static void loop_reread_partitions(struct loop_device *lo,
				   struct block_device *bdev)
{
	int rc;

	mutex_lock(&bdev->bd_mutex);
	rc = bdev_disk_changed(bdev, false);
	mutex_unlock(&bdev->bd_mutex);
	if (rc)
		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
}

static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
}

static int loop_validate_file(struct file *file, struct block_device *bdev)
{
	struct inode *inode = file->f_mapping->host;
	struct file *f = file;

	/* Avoid recursion */
	while (is_loop_device(f)) {
		struct loop_device *l;

		lockdep_assert_held(&loop_validate_mutex);
		if (f->f_mapping->host->i_bdev == bdev)
			return -EBADF;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state != Lo_bound)
			return -EINVAL;

		/* Order wrt setting lo->lo_backing_file in loop_configure(). */
		rmb();
		f = l->lo_backing_file;
	}
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		return -EINVAL;
	return 0;
}

/*
 * loop_change_fd switched the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file = fget(arg);
	struct file *old_file;
	int error;
	bool partscan;
	bool is_loop;

	if (!file)
		return -EBADF;
	is_loop = is_loop_device(file);
	error = loop_global_lock_killable(lo, is_loop);
	if (error)
		goto out_putf;
	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out_err;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out_err;

	error = loop_validate_file(file, bdev);
	if (error)
		goto out_err;

	old_file = lo->lo_backing_file;

	error = -EINVAL;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_err;

	/* and ... switch */
	blk_mq_freeze_queue(lo->lo_queue);
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
	mapping_set_gfp_mask(file->f_mapping,
			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
	loop_update_dio(lo);
	blk_mq_unfreeze_queue(lo->lo_queue);
	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
	loop_global_unlock(lo, is_loop);

	/*
	 * Flush loop_validate_file() before fput(), for l->lo_backing_file
	 * might be pointing at old_file which might be the last reference.
	 */
	if (!is_loop) {
		mutex_lock(&loop_validate_mutex);
		mutex_unlock(&loop_validate_mutex);
	}

	/*
	 * We must drop file reference outside of lo_mutex as dropping
	 * the file ref can take bd_mutex which creates circular locking
	 * dependency if userspace disables autoclear (fput triggers
	 * automatic loop_clr_fd).
	 */
	fput(old_file);
	if (partscan)
		loop_reread_partitions(lo, bdev);
	return 0;

out_err:
	loop_global_unlock(lo, is_loop);
out_putf:
	fput(file);
	return error;
}

/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
	ssize_t ret;
	char *p = NULL;

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))
		ret = PTR_ERR(p);
	else {
		ret = strlen(p);
		memmove(buf, p, ret);
		buf[ret++] = '\n';
		buf[ret] = 0;
	}

	return ret;
}

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
}

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

	return sprintf(buf, "%s\n", partscan ? "1" : "0");
}

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sprintf(buf, "%s\n", dio ? "1" : "0");
}

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);

static struct attribute *loop_attrs[] = {
	&loop_attr_backing_file.attr,
	&loop_attr_offset.attr,
	&loop_attr_sizelimit.attr,
	&loop_attr_autoclear.attr,
	&loop_attr_partscan.attr,
	&loop_attr_dio.attr,
	NULL,
};

static struct attribute_group loop_attribute_group = {
	.name = "loop",
	.attrs = loop_attrs,
};

static void loop_sysfs_init(struct loop_device *lo)
{
	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
						&loop_attribute_group);
}

static void loop_sysfs_exit(struct loop_device *lo)
{
	if (lo->sysfs_inited)
		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
				   &loop_attribute_group);
}

static void loop_config_discard(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *q = lo->lo_queue;
	u32 granularity, max_discard_sectors;

	/*
	 * If the backing device is a block device, mirror its zeroing
	 * capability. Set the discard sectors to the block device's zeroing
	 * capabilities because loop discards result in blkdev_issue_zeroout(),
	 * not blkdev_issue_discard(). This maintains consistent behavior with
	 * file-backed loop devices: discard means zero.
	 */
	if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
		struct request_queue *backingq;

		backingq = bdev_get_queue(inode->i_bdev);

		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
		granularity = backingq->limits.discard_granularity ?:
			queue_physical_block_size(backingq);

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
		max_discard_sectors = 0;
		granularity = 0;

	} else {
		max_discard_sectors = UINT_MAX >> 9;
		granularity = inode->i_sb->s_blocksize;
	}

	if (max_discard_sectors) {
		q->limits.discard_granularity = granularity;
		blk_queue_max_discard_sectors(q, max_discard_sectors);
		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	} else {
		q->limits.discard_granularity = 0;
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_max_write_zeroes_sectors(q, 0);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
	}
	q->limits.discard_alignment = 0;
}

static void loop_unprepare_queue(struct loop_device *lo)
{
	kthread_flush_worker(&lo->worker);
	kthread_stop(lo->worker_task);
}

static int loop_kthread_worker_fn(void *worker_ptr)
{
	current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
	return kthread_worker_fn(worker_ptr);
}

static int loop_prepare_queue(struct loop_device *lo)
{
	kthread_init_worker(&lo->worker);
	lo->worker_task = kthread_run(loop_kthread_worker_fn,
			&lo->worker, "loop%d", lo->lo_number);
	if (IS_ERR(lo->worker_task))
		return -ENOMEM;
	set_user_nice(lo->worker_task, MIN_NICE);
	return 0;
}

static void loop_update_rotational(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *file_inode = file->f_mapping->host;
	struct block_device *file_bdev = file_inode->i_sb->s_bdev;
	struct request_queue *q = lo->lo_queue;
	bool nonrot = true;

	/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
	if (file_bdev)
		nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));

	if (nonrot)
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}

static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}

/**
 * loop_set_status_from_info - configure device from loop_info
 * @lo: struct loop_device to configure
 * @info: struct loop_info64 to configure the device with
 *
 * Configures the loop device parameters according to the passed
 * in loop_info64 configuration.
 */
static int
loop_set_status_from_info(struct loop_device *lo,
			  const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	kuid_t uid = current_uid();

	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	err = loop_release_xfer(lo);
	if (err)
		return err;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		return err;

	lo->lo_offset = info->lo_offset;
	lo->lo_sizelimit = info->lo_sizelimit;
	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	lo->lo_flags = info->lo_flags;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	return 0;
}

static int loop_configure(struct loop_device *lo, fmode_t mode,
			  struct block_device *bdev,
			  const struct loop_config *config)
{
	struct file *file = fget(config->fd);
	struct inode *inode;
	struct address_space *mapping;
	struct block_device *claimed_bdev = NULL;
	int error;
	loff_t size;
	bool partscan;
	unsigned short bsize;
	bool is_loop;

	if (!file)
		return -EBADF;
	is_loop = is_loop_device(file);

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * here to avoid changing device under exclusive owner.
	 */
	if (!(mode & FMODE_EXCL)) {
		claimed_bdev = bdev->bd_contains;
		error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure);
		if (error)
			goto out_putf;
	}

	error = loop_global_lock_killable(lo, is_loop);
	if (error)
		goto out_bdev;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_unlock;

	error = loop_validate_file(file, bdev);
	if (error)
		goto out_unlock;

	mapping = file->f_mapping;
	inode = mapping->host;

	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
		error = -EINVAL;
		goto out_unlock;
	}

	if (config->block_size) {
		error = loop_validate_block_size(config->block_size);
		if (error)
			goto out_unlock;
	}

	error = loop_set_status_from_info(lo, &config->info);
	if (error)
		goto out_unlock;

	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
	    !file->f_op->write_iter)
		lo->lo_flags |= LO_FLAGS_READ_ONLY;

	error = loop_prepare_queue(lo);
	if (error)
		goto out_unlock;

	error = 0;

	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
	lo->lo_device = bdev;
	lo->lo_backing_file = file;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_write_cache(lo->lo_queue, true, false);

	if (config->block_size)
		bsize = config->block_size;
	else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
		/* In case of direct I/O, match underlying block size */
		bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
	else
		bsize = 512;

	blk_queue_logical_block_size(lo->lo_queue, bsize);
	blk_queue_physical_block_size(lo->lo_queue, bsize);
	blk_queue_io_min(lo->lo_queue, bsize);

	loop_config_discard(lo);
	loop_update_rotational(lo);
	loop_update_dio(lo);
	loop_sysfs_init(lo);

	size = get_loop_size(lo, file);
	loop_set_size(lo, size);

	/* Order wrt reading lo_state in loop_validate_file(). */
	wmb();

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
	if (partscan)
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;

	/*
	 * Grab the block_device to prevent its destruction after we
	 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
	 */
	bdgrab(bdev);
	loop_global_unlock(lo, is_loop);
	if (partscan)
		loop_reread_partitions(lo, bdev);
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
	return 0;

out_unlock:
	loop_global_unlock(lo, is_loop);
out_bdev:
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
out_putf:
	fput(file);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}

static int __loop_clr_fd(struct loop_device *lo, bool release)
{
	struct file *filp = NULL;
	gfp_t gfp = lo->old_gfp_mask;
	struct block_device *bdev = lo->lo_device;
	int err = 0;
	bool partscan = false;
	int lo_number;

	/*
	 * Flush loop_configure() and loop_change_fd(). It is acceptable for
	 * loop_validate_file() to succeed, for actual clear operation has not
	 * started yet.
	 */
	mutex_lock(&loop_validate_mutex);
	mutex_unlock(&loop_validate_mutex);

	/*
	 * loop_validate_file() now fails because l->lo_state != Lo_bound
	 * became visible.
	 */
	mutex_lock(&lo->lo_mutex);
	if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
		err = -ENXIO;
		goto out_unlock;
	}

	filp = lo->lo_backing_file;
	if (filp == NULL) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
		blk_queue_write_cache(lo->lo_queue, false, false);

	/* freeze request queue during the transition */
	blk_mq_freeze_queue(lo->lo_queue);

	spin_lock_irq(&lo->lo_lock);
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	blk_queue_logical_block_size(lo->lo_queue, 512);
	blk_queue_physical_block_size(lo->lo_queue, 512);
	blk_queue_io_min(lo->lo_queue, 512);
	if (bdev) {
		bdput(bdev);
		invalidate_bdev(bdev);
		bdev->bd_inode->i_mapping->wb_err = 0;
	}
	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	if (bdev) {
		bd_set_nr_sectors(bdev, 0);
		/* let user-space know about this change */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	blk_mq_unfreeze_queue(lo->lo_queue);

	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
	lo_number = lo->lo_number;
	loop_unprepare_queue(lo);
out_unlock:
	mutex_unlock(&lo->lo_mutex);
	if (partscan) {
		/*
		 * bd_mutex has been held already in release path, so don't
		 * acquire it if this function is called in such case.
		 *
		 * If the reread partition isn't from release path, lo_refcnt
		 * must be at least one and it can only become zero when the
		 * current holder is released.
		 */
		if (!release)
			mutex_lock(&bdev->bd_mutex);
		err = bdev_disk_changed(bdev, false);
		if (!release)
			mutex_unlock(&bdev->bd_mutex);
		if (err)
			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
				__func__, lo_number, err);
		/* Device is gone, no point in returning error */
		err = 0;
	}

	/*
	 * lo->lo_state is set to Lo_unbound here after above partscan has
	 * finished.
	 *
	 * There cannot be anybody else entering __loop_clr_fd() as
	 * lo->lo_backing_file is already cleared and Lo_rundown state
	 * protects us from all the other places trying to change the 'lo'
	 * device.
	 */
	mutex_lock(&lo->lo_mutex);
	lo->lo_flags = 0;
	if (!part_shift)
		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
	lo->lo_state = Lo_unbound;
	mutex_unlock(&lo->lo_mutex);

	/*
	 * Need not hold lo_mutex to fput backing file. Calling fput holding
	 * lo_mutex triggers a circular lock dependency possibility warning as
	 * fput can take bd_mutex which is usually taken before lo_mutex.
	 */
	if (filp)
		fput(filp);
	return err;
}

static int loop_clr_fd(struct loop_device *lo)
{
	int err;

	err = mutex_lock_killable(&lo->lo_mutex);
	if (err)
		return err;
	if (lo->lo_state != Lo_bound) {
		mutex_unlock(&lo->lo_mutex);
		return -ENXIO;
	}
	/*
	 * If we've explicitly asked to tear down the loop device,
	 * and it has an elevated reference count, set it for auto-teardown
	 * when the last reference goes away. This stops udev from preventing
	 * teardown because it decided that it needs to run blkid on the
	 * loopback device whenever it appears. xfstests is notorious for
	 * failing tests because blkid via udev races with a losetup
	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
	 * command to fail with EBUSY.
	 */
	if (atomic_read(&lo->lo_refcnt) > 1) {
		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
		mutex_unlock(&lo->lo_mutex);
		return 0;
	}
	lo->lo_state = Lo_rundown;
	mutex_unlock(&lo->lo_mutex);

	return __loop_clr_fd(lo, false);
}

static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct block_device *bdev;
	kuid_t uid = current_uid();
	int prev_lo_flags;
	bool partscan = false;
	bool size_changed = false;

	err = mutex_lock_killable(&lo->lo_mutex);
	if (err)
		return err;
	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
	    !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out_unlock;
	}
	if (lo->lo_state != Lo_bound) {
		err = -ENXIO;
		goto out_unlock;
	}

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit) {
		size_changed = true;
		sync_blockdev(lo->lo_device);
		invalidate_bdev(lo->lo_device);
	}

	/* I/O need to be drained during transfer transition */
	blk_mq_freeze_queue(lo->lo_queue);

	if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
		/* If any pages were dirtied after invalidate_bdev(), try again */
		err = -EAGAIN;
		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
			__func__, lo->lo_number, lo->lo_file_name,
			lo->lo_device->bd_inode->i_mapping->nrpages);
		goto out_unfreeze;
	}

	prev_lo_flags = lo->lo_flags;

	err = loop_set_status_from_info(lo, info);
	if (err)
		goto out_unfreeze;

	/* Mask out flags that can't be set using LOOP_SET_STATUS. */
	lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
	/* For those flags, use the previous values instead */
	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
	/* For flags that can't be cleared, use previous values too */
	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;

	if (size_changed) {
		loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
					   lo->lo_backing_file);
		loop_set_size(lo, new_size);
	}

	loop_config_discard(lo);

	/* update dio if lo_offset or transfer is changed */
	__loop_update_dio(lo, lo->use_dio);

out_unfreeze:
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
	    !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
		bdev = lo->lo_device;
		partscan = true;
	}
out_unlock:
	mutex_unlock(&lo->lo_mutex);
	if (partscan)
		loop_reread_partitions(lo, bdev);

	return err;
}

static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct path path;
	struct kstat stat;
	int ret;

	ret = mutex_lock_killable(&lo->lo_mutex);
	if (ret)
		return ret;
	if (lo->lo_state != Lo_bound) {
		mutex_unlock(&lo->lo_mutex);
		return -ENXIO;
	}

	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}

	/* Drop lo_mutex while we call into the filesystem. */
	path = lo->lo_backing_file->f_path;
	path_get(&path);
	mutex_unlock(&lo->lo_mutex);
	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
	if (!ret) {
		info->lo_device = huge_encode_dev(stat.dev);
		info->lo_inode = stat.ino;
		info->lo_rdevice = huge_encode_dev(stat.rdev);
	}
	path_put(&path);
	return ret;
}

static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}

static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}

static int loop_set_capacity(struct loop_device *lo)
{
	loff_t size;

	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	size = get_loop_size(lo, lo->lo_backing_file);
	loop_set_size(lo, size);

	return 0;
}

static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
	int error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	__loop_update_dio(lo, !!arg);
	if (lo->use_dio == !!arg)
		return 0;
	error = -EINVAL;
 out:
	return error;
}

static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
	int err = 0;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	err = loop_validate_block_size(arg);
	if (err)
		return err;

	if (lo->lo_queue->limits.logical_block_size == arg)
		return 0;

	sync_blockdev(lo->lo_device);
	invalidate_bdev(lo->lo_device);

	blk_mq_freeze_queue(lo->lo_queue);

	/* invalidate_bdev should have truncated all the pages */
	if (lo->lo_device->bd_inode->i_mapping->nrpages) {
		err = -EAGAIN;
		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
			__func__, lo->lo_number, lo->lo_file_name,
			lo->lo_device->bd_inode->i_mapping->nrpages);
		goto out_unfreeze;
	}

	blk_queue_logical_block_size(lo->lo_queue, arg);
	blk_queue_physical_block_size(lo->lo_queue, arg);
	blk_queue_io_min(lo->lo_queue, arg);
	loop_update_dio(lo);
out_unfreeze:
	blk_mq_unfreeze_queue(lo->lo_queue);

	return err;
}

static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
			   unsigned long arg)
{
	int err;

	err = mutex_lock_killable(&lo->lo_mutex);
	if (err)
		return err;
	switch (cmd) {
	case LOOP_SET_CAPACITY:
		err = loop_set_capacity(lo);
		break;
	case LOOP_SET_DIRECT_IO:
		err = loop_set_dio(lo, arg);
		break;
	case LOOP_SET_BLOCK_SIZE:
		err = loop_set_block_size(lo, arg);
		break;
	default:
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_mutex);
	return err;
}

static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	void __user *argp = (void __user *) arg;
	int err;

	switch (cmd) {
	case LOOP_SET_FD: {
		/*
		 * Legacy case - pass in a zeroed out struct loop_config with
		 * only the file descriptor set, which corresponds with the
		 * default parameters we'd have used otherwise.
		 */
		struct loop_config config;

		memset(&config, 0, sizeof(config));
		config.fd = arg;

		return loop_configure(lo, mode, bdev, &config);
	}
	case LOOP_CONFIGURE: {
		struct loop_config config;

		if (copy_from_user(&config, argp, sizeof(config)))
			return -EFAULT;

		return loop_configure(lo, mode, bdev, &config);
	}
	case LOOP_CHANGE_FD:
		return loop_change_fd(lo, bdev, arg);
	case LOOP_CLR_FD:
		return loop_clr_fd(lo);
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
			err = loop_set_status_old(lo, argp);
		}
		break;
	case LOOP_GET_STATUS:
		return loop_get_status_old(lo, argp);
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
			err = loop_set_status64(lo, argp);
		}
		break;
	case LOOP_GET_STATUS64:
		return loop_get_status64(lo, argp);
	case LOOP_SET_CAPACITY:
	case LOOP_SET_DIRECT_IO:
	case LOOP_SET_BLOCK_SIZE:
		if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
			return -EPERM;
		fallthrough;
	default:
		err = lo_simple_ioctl(lo, cmd, arg);
		break;
	}

	return err;
}

#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};

/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * structure in kernel.
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}

/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in
 * userspace.
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}

static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		err = loop_set_status_compat(lo,
			     (const struct compat_loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_compat(lo,
			     (struct compat_loop_info __user *)arg);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
	case LOOP_CONFIGURE:
		arg = (unsigned long) compat_ptr(arg);
		fallthrough;
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
	case LOOP_SET_BLOCK_SIZE:
	case LOOP_SET_DIRECT_IO:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif

static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	err = mutex_lock_killable(&lo->lo_mutex);
	if (err)
		return err;
	if (lo->lo_state == Lo_deleting)
		err = -ENXIO;
	else
		atomic_inc(&lo->lo_refcnt);
	mutex_unlock(&lo->lo_mutex);
	return err;
}

static void lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;

	mutex_lock(&lo->lo_mutex);
	if (atomic_dec_return(&lo->lo_refcnt))
		goto out_unlock;

	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		if (lo->lo_state != Lo_bound)
			goto out_unlock;
		lo->lo_state = Lo_rundown;
		mutex_unlock(&lo->lo_mutex);
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		__loop_clr_fd(lo, true);
		return;
	} else if (lo->lo_state == Lo_bound) {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		blk_mq_freeze_queue(lo->lo_queue);
		blk_mq_unfreeze_queue(lo->lo_queue);
	}

out_unlock:
	mutex_unlock(&lo->lo_mutex);
}

static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);

int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);

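/*
 * blk-mq ->queue_rq handler. Runs in block-layer dispatch context, so it
 * never touches the backing file directly; it only classifies the request
 * (AIO vs. the simple worker paths) and hands it to the per-device kthread
 * worker, which performs the actual file I/O in process context.
 */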
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct loop_device *lo = rq->q->queuedata;

	blk_mq_start_request(rq);

	if (lo->lo_state != Lo_bound)
		return BLK_STS_IOERR;

	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		cmd->use_aio = false;
		break;
	default:
		cmd->use_aio = lo->use_dio;
		break;
	}

	/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
	if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
		cmd->css = &bio_blkcg(rq->bio)->css;
		css_get(cmd->css);
	} else
#endif
		cmd->css = NULL;
	kthread_queue_work(&lo->worker, &cmd->work);

	return BLK_STS_OK;
}

static void loop_handle_cmd(struct loop_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	const bool write = op_is_write(req_op(rq));
	struct loop_device *lo = rq->q->queuedata;
	int ret = 0;

	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
		ret = -EIO;
		goto failed;
	}

	ret = do_req_filebacked(lo, rq);
 failed:
	/* complete non-aio request */
	if (!cmd->use_aio || ret) {
		if (ret == -EOPNOTSUPP)
			cmd->ret = ret;
		else
			cmd->ret = ret ? -EIO : 0;
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}
}

static void loop_queue_work(struct kthread_work *work)
{
	struct loop_cmd *cmd =
		container_of(work, struct loop_cmd, work);

	loop_handle_cmd(cmd);
}

static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kthread_init_work(&cmd->work, loop_queue_work);
	return 0;
}

static const struct blk_mq_ops loop_mq_ops = {
	.queue_rq       = loop_queue_rq,
	.init_request	= loop_init_request,
	.complete	= lo_complete_rq,
};

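/*
 * Allocate and register loop device number @i (or the first free number if
 * @i is negative). Returns the device number on success, or a negative
 * errno; -EEXIST if the requested number is already in use.
 */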
static int loop_add(int i)
{
	struct loop_device *lo;
	struct gendisk *disk;
	int err;

	err = -ENOMEM;
	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo)
		goto out;
	lo->lo_state = Lo_unbound;

	err = mutex_lock_killable(&loop_ctl_mutex);
	if (err)
		goto out_free_dev;

	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	}
	if (err < 0)
		goto out_unlock;
	i = err;

	err = -ENOMEM;
	lo->tag_set.ops = &loop_mq_ops;
	lo->tag_set.nr_hw_queues = 1;
	lo->tag_set.queue_depth = 128;
	lo->tag_set.numa_node = NUMA_NO_NODE;
	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	lo->tag_set.driver_data = lo;

	err = blk_mq_alloc_tag_set(&lo->tag_set);
	if (err)
		goto out_free_idr;

	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
	if (IS_ERR(lo->lo_queue)) {
		err = PTR_ERR(lo->lo_queue);
		goto out_cleanup_tags;
	}
	lo->lo_queue->queuedata = lo;

	blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);

	/*
	 * By default, we do buffer IO, so it doesn't make sense to enable
	 * merge because the I/O submitted to backing file is handled page by
	 * page. For directio mode, merge does help to dispatch bigger request
	 * to underlayer disk. We will enable merge once directio is enabled.
	 */
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);

	err = -ENOMEM;
	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	/*
	 * Disable partition scanning by default. The in-kernel partition
	 * scanning can be requested individually per-device during its
	 * setup. Userspace can always add and remove partitions from all
	 * devices. The needed partition minors are allocated from the
	 * extended minors space, the main loop device numbers will continue
	 * to match the loop minors, regardless of the number of partitions
	 * used.
	 *
	 * If max_part is given, partition scanning is globally enabled for
	 * all loop devices. The minors for the main loop devices will be
	 * multiples of max_part.
	 *
	 * Note: Global-for-all-devices, set-only-at-init, read-only module
	 * parameters like 'max_loop' and 'max_part' make things needlessly
	 * complicated, are too static, inflexible and may surprise
	 * userspace tools. Parameters like this in general should be avoided.
	 */
	if (!part_shift)
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	disk->flags |= GENHD_FL_EXT_DEVT;
	atomic_set(&lo->lo_refcnt, 0);
	mutex_init(&lo->lo_mutex);
	lo->lo_number = i;
	spin_lock_init(&lo->lo_lock);
	disk->major = LOOP_MAJOR;
	disk->first_minor = i << part_shift;
	disk->fops = &lo_fops;
	disk->private_data = lo;
	disk->queue = lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	add_disk(disk);
	mutex_unlock(&loop_ctl_mutex);
	return i;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_cleanup_tags:
	blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
	idr_remove(&loop_index_idr, i);
out_unlock:
	mutex_unlock(&loop_ctl_mutex);
out_free_dev:
	kfree(lo);
out:
	return err;
}

static void loop_remove(struct loop_device *lo)
{
	del_gendisk(lo->lo_disk);
	blk_cleanup_queue(lo->lo_queue);
	blk_mq_free_tag_set(&lo->tag_set);
	put_disk(lo->lo_disk);
	mutex_destroy(&lo->lo_mutex);
	kfree(lo);
}

static void loop_probe(dev_t dev)
{
	int idx = MINOR(dev) >> part_shift;

	if (max_loop && idx >= max_loop)
		return;
	loop_add(idx);
}

static int loop_control_remove(int idx)
{
	struct loop_device *lo;
	int ret;

	if (idx < 0) {
		pr_warn("deleting an unspecified loop device is not supported.\n");
		return -EINVAL;
	}

	ret = mutex_lock_killable(&loop_ctl_mutex);
	if (ret)
		return ret;

	lo = idr_find(&loop_index_idr, idx);
	if (!lo) {
		ret = -ENODEV;
		goto out_unlock_ctrl;
	}

	ret = mutex_lock_killable(&lo->lo_mutex);
	if (ret)
		goto out_unlock_ctrl;
	if (lo->lo_state != Lo_unbound ||
	    atomic_read(&lo->lo_refcnt) > 0) {
		mutex_unlock(&lo->lo_mutex);
		ret = -EBUSY;
		goto out_unlock_ctrl;
	}
	lo->lo_state = Lo_deleting;
	mutex_unlock(&lo->lo_mutex);

	idr_remove(&loop_index_idr, lo->lo_number);
	loop_remove(lo);
out_unlock_ctrl:
	mutex_unlock(&loop_ctl_mutex);
	return ret;
}

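/*
 * LOOP_CTL_GET_FREE: return the number of the first unbound device, reusing
 * an existing one when possible and allocating a new device otherwise. The
 * @idx argument from the ioctl is unused here.
 */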
static int loop_control_get_free(int idx)
{
	struct loop_device *lo;
	int id, ret;

	ret = mutex_lock_killable(&loop_ctl_mutex);
	if (ret)
		return ret;
	idr_for_each_entry(&loop_index_idr, lo, id) {
		if (lo->lo_state == Lo_unbound)
			goto found;
	}
	mutex_unlock(&loop_ctl_mutex);
	return loop_add(-1);
found:
	mutex_unlock(&loop_ctl_mutex);
	return id;
}

static long loop_control_ioctl(struct file *file, unsigned int cmd,
			       unsigned long parm)
{
	switch (cmd) {
	case LOOP_CTL_ADD:
		return loop_add(parm);
	case LOOP_CTL_REMOVE:
		return loop_control_remove(parm);
	case LOOP_CTL_GET_FREE:
		return loop_control_get_free(parm);
	default:
		return -ENOSYS;
	}
}

static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");

static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	int err;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can decide correct value of
		 * max_part avoiding overflow of MINORBITS
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS) {
		err = -EINVAL;
		goto err_out;
	}

	if (max_loop > 1UL << (MINORBITS - part_shift)) {
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop) {
		nr = max_loop;
		range = max_loop << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;
	}

	err = misc_register(&loop_misc);
	if (err < 0)
		goto err_out;

	if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
		err = -EIO;
		goto misc_out;
	}

	/* pre-create number of devices given by config or max_loop */
	for (i = 0; i < nr; i++)
		loop_add(i);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

misc_out:
	misc_deregister(&loop_misc);
err_out:
	return err;
}

static void __exit loop_exit(void)
{
	struct loop_device *lo;
	int id;

	unregister_blkdev(LOOP_MAJOR, "loop");
	misc_deregister(&loop_misc);

	mutex_lock(&loop_ctl_mutex);
	idr_for_each_entry(&loop_index_idr, lo, id)
		loop_remove(lo);
	mutex_unlock(&loop_ctl_mutex);

	idr_destroy(&loop_index_idr);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif