/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non-blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operation write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>

#include "loop.h"

#include <linux/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);

static int max_part;
static int part_shift;

static int transfer_xor(struct loop_device *lo, int cmd,
                        struct page *raw_page, unsigned raw_off,
                        struct page *loop_page, unsigned loop_off,
                        int size, sector_t real_block)
{
        char *raw_buf = kmap_atomic(raw_page) + raw_off;
        char *loop_buf = kmap_atomic(loop_page) + loop_off;
        char *in, *out, *key;
        int i, keysize;

        if (cmd == READ) {
                in = raw_buf;
                out = loop_buf;
        } else {
                in = loop_buf;
                out = raw_buf;
        }

        key = lo->lo_encrypt_key;
        keysize = lo->lo_encrypt_key_size;
        for (i = 0; i < size; i++)
                *out++ = *in++ ^ key[(i & 511) % keysize];

        kunmap_atomic(loop_buf);
        kunmap_atomic(raw_buf);
        cond_resched();
        return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
        if (unlikely(info->lo_encrypt_key_size <= 0))
                return -EINVAL;
        return 0;
}

static struct loop_func_table none_funcs = {
        .number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
        .number = LO_CRYPT_XOR,
        .transfer = transfer_xor,
        .init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
        &none_funcs,
        &xor_funcs
};
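
/*
 * Illustrative sketch, not part of this driver: user space selects the XOR
 * transfer above through LOOP_SET_STATUS64 on an already-bound device. The
 * device path and key below are made-up values for illustration only.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int set_xor_key(const char *loopdev)            /* e.g. "/dev/loop0" */
{
        struct loop_info64 info;
        int ret, fd = open(loopdev, O_RDWR);

        if (fd < 0)
                return -1;
        memset(&info, 0, sizeof(info));
        info.lo_encrypt_type = LO_CRYPT_XOR;
        info.lo_encrypt_key_size = 8;           /* xor_init() rejects size <= 0 */
        memcpy(info.lo_encrypt_key, "examples", 8);
        ret = ioctl(fd, LOOP_SET_STATUS64, &info);
        close(fd);
        return ret;
}
#endif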

static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
        loff_t loopsize;

        /* Compute loopsize in bytes */
        loopsize = i_size_read(file->f_mapping->host);
        if (offset > 0)
                loopsize -= offset;
        /* offset is beyond i_size, weird but possible */
        if (loopsize < 0)
                return 0;

        if (sizelimit > 0 && sizelimit < loopsize)
                loopsize = sizelimit;
        /*
         * Unfortunately, if we want to do I/O on the device,
         * the number of 512-byte sectors has to fit into a sector_t.
         */
        return loopsize >> 9;
}

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
        return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}
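
/*
 * Worked example: a 1 GiB (1073741824-byte) backing file with offset 4096
 * and no sizelimit yields (1073741824 - 4096) >> 9 = 2097144 sectors of
 * 512 bytes.
 */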

static void __loop_update_dio(struct loop_device *lo, bool dio)
{
        struct file *file = lo->lo_backing_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned short sb_bsize = 0;
        unsigned dio_align = 0;
        bool use_dio;

        if (inode->i_sb->s_bdev) {
                sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
                dio_align = sb_bsize - 1;
        }

        /*
         * We support direct I/O only if lo_offset is aligned with the
         * logical I/O size of the backing device, the logical block size
         * of the loop device is no smaller than that of the backing
         * device, and the loop device needs no transfer transformation.
         *
         * TODO: these conditions may be loosened in the future, and direct
         * I/O may then be switched at runtime, since most requests in sane
         * applications should be PAGE_SIZE aligned.
         */
        if (dio) {
                if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
                                !(lo->lo_offset & dio_align) &&
                                mapping->a_ops->direct_IO &&
                                !lo->transfer)
                        use_dio = true;
                else
                        use_dio = false;
        } else {
                use_dio = false;
        }

        if (lo->use_dio == use_dio)
                return;

        /* flush dirty pages before changing direct I/O */
        vfs_fsync(file, 0);

        /*
         * LO_FLAGS_DIRECT_IO is handled like LO_FLAGS_READ_ONLY: both are
         * set by the kernel, and losetup picks up the change via
         * ioctl(LOOP_GET_STATUS).
         */
        blk_mq_freeze_queue(lo->lo_queue);
        lo->use_dio = use_dio;
        if (use_dio) {
                blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
                lo->lo_flags |= LO_FLAGS_DIRECT_IO;
        } else {
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
                lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
        }
        blk_mq_unfreeze_queue(lo->lo_queue);
}
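
/*
 * Illustrative sketch, not part of this driver: toggling direct I/O from
 * user space with LOOP_SET_DIRECT_IO. Per loop_set_dio() further below,
 * the ioctl fails with -EINVAL when the conditions checked above do not
 * hold. The device path is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int enable_dio(const char *loopdev)             /* e.g. "/dev/loop0" */
{
        int ret, fd = open(loopdev, O_RDWR);

        if (fd < 0)
                return -1;
        ret = ioctl(fd, LOOP_SET_DIRECT_IO, 1);
        if (ret)
                perror("LOOP_SET_DIRECT_IO");   /* e.g. misaligned lo_offset */
        close(fd);
        return ret;
}
#endif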

static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
        loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
        sector_t x = (sector_t)size;
        struct block_device *bdev = lo->lo_device;

        if (unlikely((loff_t)x != size))
                return -EFBIG;
        if (lo->lo_offset != offset)
                lo->lo_offset = offset;
        if (lo->lo_sizelimit != sizelimit)
                lo->lo_sizelimit = sizelimit;
        set_capacity(lo->lo_disk, x);
        bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
        /* let user-space know about the new size */
        kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
        return 0;
}

static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
               struct page *rpage, unsigned roffs,
               struct page *lpage, unsigned loffs,
               int size, sector_t rblock)
{
        int ret;

        ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
        if (likely(!ret))
                return 0;

        printk_ratelimited(KERN_ERR
                "loop: Transfer error at byte offset %llu, length %i.\n",
                (unsigned long long)rblock << 9, size);
        return ret;
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
        struct iov_iter i;
        ssize_t bw;

        iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);

        file_start_write(file);
        bw = vfs_iter_write(file, &i, ppos, 0);
        file_end_write(file);

        if (likely(bw == bvec->bv_len))
                return 0;

        printk_ratelimited(KERN_ERR
                "loop: Write error at byte offset %llu, length %i.\n",
                (unsigned long long)*ppos, bvec->bv_len);
        if (bw >= 0)
                bw = -EIO;
        return bw;
}

static int lo_write_simple(struct loop_device *lo, struct request *rq,
                loff_t pos)
{
        struct bio_vec bvec;
        struct req_iterator iter;
        int ret = 0;

        rq_for_each_segment(bvec, rq, iter) {
                ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
                if (ret < 0)
                        break;
                cond_resched();
        }

        return ret;
}

/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
                loff_t pos)
{
        struct bio_vec bvec, b;
        struct req_iterator iter;
        struct page *page;
        int ret = 0;

        page = alloc_page(GFP_NOIO);
        if (unlikely(!page))
                return -ENOMEM;

        rq_for_each_segment(bvec, rq, iter) {
                ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
                        bvec.bv_offset, bvec.bv_len, pos >> 9);
                if (unlikely(ret))
                        break;

                b.bv_page = page;
                b.bv_offset = 0;
                b.bv_len = bvec.bv_len;
                ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
                if (ret < 0)
                        break;
        }

        __free_page(page);
        return ret;
}

static int lo_read_simple(struct loop_device *lo, struct request *rq,
                loff_t pos)
{
        struct bio_vec bvec;
        struct req_iterator iter;
        struct iov_iter i;
        ssize_t len;

        rq_for_each_segment(bvec, rq, iter) {
                iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
                len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
                if (len < 0)
                        return len;

                flush_dcache_page(bvec.bv_page);

                if (len != bvec.bv_len) {
                        struct bio *bio;

                        __rq_for_each_bio(bio, rq)
                                zero_fill_bio(bio);
                        break;
                }
                cond_resched();
        }

        return 0;
}

static int lo_read_transfer(struct loop_device *lo, struct request *rq,
                loff_t pos)
{
        struct bio_vec bvec, b;
        struct req_iterator iter;
        struct iov_iter i;
        struct page *page;
        ssize_t len;
        int ret = 0;

        page = alloc_page(GFP_NOIO);
        if (unlikely(!page))
                return -ENOMEM;

        rq_for_each_segment(bvec, rq, iter) {
                loff_t offset = pos;

                b.bv_page = page;
                b.bv_offset = 0;
                b.bv_len = bvec.bv_len;

                iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
                len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
                if (len < 0) {
                        ret = len;
                        goto out_free_page;
                }

                ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
                        bvec.bv_offset, len, offset >> 9);
                if (ret)
                        goto out_free_page;

                flush_dcache_page(bvec.bv_page);

                if (len != bvec.bv_len) {
                        struct bio *bio;

                        __rq_for_each_bio(bio, rq)
                                zero_fill_bio(bio);
                        break;
                }
        }

        ret = 0;
out_free_page:
        __free_page(page);
        return ret;
}

static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
                        int mode)
{
        /*
         * We use fallocate to manipulate the space mappings used by the
         * image, a.k.a. discard/zerorange. However, we do not support this
         * if encryption is enabled, because it may give an attacker useful
         * information.
         */
        struct file *file = lo->lo_backing_file;
        int ret;

        mode |= FALLOC_FL_KEEP_SIZE;

        if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
        if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
                ret = -EIO;
out:
        return ret;
}

static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
        struct file *file = lo->lo_backing_file;
        int ret = vfs_fsync(file, 0);

        if (unlikely(ret && ret != -EINVAL))
                ret = -EIO;

        return ret;
}

static void lo_complete_rq(struct request *rq)
{
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
        blk_status_t ret = BLK_STS_OK;

        if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
            req_op(rq) != REQ_OP_READ) {
                if (cmd->ret < 0)
                        ret = BLK_STS_IOERR;
                goto end_io;
        }

        /*
         * Short READ - if we got some data, advance our request and
         * retry it. If we got no data, end the rest with EIO.
         */
        if (cmd->ret) {
                blk_update_request(rq, BLK_STS_OK, cmd->ret);
                cmd->ret = 0;
                blk_mq_requeue_request(rq, true);
        } else {
                if (cmd->use_aio) {
                        struct bio *bio = rq->bio;

                        while (bio) {
                                zero_fill_bio(bio);
                                bio = bio->bi_next;
                        }
                }
                ret = BLK_STS_IOERR;
end_io:
                blk_mq_end_request(rq, ret);
        }
}

static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
        struct request *rq = blk_mq_rq_from_pdu(cmd);

        if (!atomic_dec_and_test(&cmd->ref))
                return;
        kfree(cmd->bvec);
        cmd->bvec = NULL;
        blk_mq_complete_request(rq);
}

static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
        struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

        if (cmd->css)
                css_put(cmd->css);
        cmd->ret = ret;
        lo_rw_aio_do_completion(cmd);
}

static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
                     loff_t pos, bool rw)
{
        struct iov_iter iter;
        struct req_iterator rq_iter;
        struct bio_vec *bvec;
        struct request *rq = blk_mq_rq_from_pdu(cmd);
        struct bio *bio = rq->bio;
        struct file *file = lo->lo_backing_file;
        struct bio_vec tmp;
        unsigned int offset;
        int nr_bvec = 0;
        int ret;

        rq_for_each_bvec(tmp, rq, rq_iter)
                nr_bvec++;

        if (rq->bio != rq->biotail) {
                bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
                                     GFP_NOIO);
                if (!bvec)
                        return -EIO;
                cmd->bvec = bvec;

                /*
                 * The bios of the request may start from the middle of the
                 * 'bvec' because of bio splitting, so we can't directly
                 * copy bio->bi_io_vec to the new bvec. The rq_for_each_bvec
                 * API will take care of all details for us.
                 */
                rq_for_each_bvec(tmp, rq, rq_iter) {
                        *bvec = tmp;
                        bvec++;
                }
                bvec = cmd->bvec;
                offset = 0;
        } else {
                /*
                 * Same here: this bio may start from the middle of the
                 * 'bvec' because of bio splitting, so the offset within the
                 * bvec must be passed to the iov iterator.
                 */
                offset = bio->bi_iter.bi_bvec_done;
                bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
        }
        atomic_set(&cmd->ref, 2);

        iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
        iter.iov_offset = offset;

        cmd->iocb.ki_pos = pos;
        cmd->iocb.ki_filp = file;
        cmd->iocb.ki_complete = lo_rw_aio_complete;
        cmd->iocb.ki_flags = IOCB_DIRECT;
        cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
        if (cmd->css)
                kthread_associate_blkcg(cmd->css);

        if (rw == WRITE)
                ret = call_write_iter(file, &cmd->iocb, &iter);
        else
                ret = call_read_iter(file, &cmd->iocb, &iter);

        lo_rw_aio_do_completion(cmd);
        kthread_associate_blkcg(NULL);

        if (ret != -EIOCBQUEUED)
                cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
        return 0;
}

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
        loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

        /*
         * lo_write_simple and lo_read_simple should ideally be handled by
         * an io-submit-style function like lo_rw_aio(). One blocker is
         * that lo_read_simple() needs to call flush_dcache_page() after a
         * page is written from the kernel, which isn't easy to do in an
         * io-submit-style function that submits all segments of the
         * request at once. Direct read I/O doesn't need to run
         * flush_dcache_page().
         */
        switch (req_op(rq)) {
        case REQ_OP_FLUSH:
                return lo_req_flush(lo, rq);
        case REQ_OP_WRITE_ZEROES:
                /*
                 * If the caller doesn't want deallocation, zero out the
                 * range. Otherwise, punch it out.
                 */
                return lo_fallocate(lo, rq, pos,
                        (rq->cmd_flags & REQ_NOUNMAP) ?
                                FALLOC_FL_ZERO_RANGE :
                                FALLOC_FL_PUNCH_HOLE);
        case REQ_OP_DISCARD:
                return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
        case REQ_OP_WRITE:
                if (lo->transfer)
                        return lo_write_transfer(lo, rq, pos);
                else if (cmd->use_aio)
                        return lo_rw_aio(lo, cmd, pos, WRITE);
                else
                        return lo_write_simple(lo, rq, pos);
        case REQ_OP_READ:
                if (lo->transfer)
                        return lo_read_transfer(lo, rq, pos);
                else if (cmd->use_aio)
                        return lo_rw_aio(lo, cmd, pos, READ);
                else
                        return lo_read_simple(lo, rq, pos);
        default:
                WARN_ON_ONCE(1);
                return -EIO;
        }
}

static inline void loop_update_dio(struct loop_device *lo)
{
        __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
                        lo->use_dio);
}

static void loop_reread_partitions(struct loop_device *lo,
                                   struct block_device *bdev)
{
        int rc;

        mutex_lock(&bdev->bd_mutex);
        rc = bdev_disk_changed(bdev, false);
        mutex_unlock(&bdev->bd_mutex);
        if (rc)
                pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
                        __func__, lo->lo_number, lo->lo_file_name, rc);
}

static inline int is_loop_device(struct file *file)
{
        struct inode *i = file->f_mapping->host;

        return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}

static int loop_validate_file(struct file *file, struct block_device *bdev)
{
        struct inode    *inode = file->f_mapping->host;
        struct file     *f = file;

        /* Avoid recursion */
        while (is_loop_device(f)) {
                struct loop_device *l;

                if (f->f_mapping->host->i_bdev == bdev)
                        return -EBADF;

                l = f->f_mapping->host->i_bdev->bd_disk->private_data;
                if (l->lo_state != Lo_bound)
                        return -EINVAL;
                f = l->lo_backing_file;
        }
        if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
                return -EINVAL;
        return 0;
}

/*
 * loop_change_fd switches the backing store of a loopback device to a new
 * file. This is useful for operating system installers to free up the
 * original file, and in High Availability environments to switch to an
 * alternative location for the content in case of server meltdown. This
 * can only work if the loop device is used read-only, and if the new
 * backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
                          unsigned int arg)
{
        struct file     *file = NULL, *old_file;
        int             error;
        bool            partscan;

        error = mutex_lock_killable(&loop_ctl_mutex);
        if (error)
                return error;
        error = -ENXIO;
        if (lo->lo_state != Lo_bound)
                goto out_err;

        /* the loop device has to be read-only */
        error = -EINVAL;
        if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
                goto out_err;

        error = -EBADF;
        file = fget(arg);
        if (!file)
                goto out_err;

        error = loop_validate_file(file, bdev);
        if (error)
                goto out_err;

        old_file = lo->lo_backing_file;

        error = -EINVAL;

        /* size of the new backing store needs to be the same */
        if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
                goto out_err;

        /* and ... switch */
        blk_mq_freeze_queue(lo->lo_queue);
        mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
        lo->lo_backing_file = file;
        lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
        mapping_set_gfp_mask(file->f_mapping,
                             lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
        loop_update_dio(lo);
        blk_mq_unfreeze_queue(lo->lo_queue);
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
        mutex_unlock(&loop_ctl_mutex);
        /*
         * We must drop the file reference outside of loop_ctl_mutex, as
         * dropping the file ref can take bd_mutex, which creates a
         * circular locking dependency.
         */
        fput(old_file);
        if (partscan)
                loop_reread_partitions(lo, bdev);
        return 0;

out_err:
        mutex_unlock(&loop_ctl_mutex);
        if (file)
                fput(file);
        return error;
}
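
/*
 * Illustrative sketch, not part of this driver: swapping the backing file
 * of a read-only loop device with LOOP_CHANGE_FD, as described in the
 * comment above. Both paths are hypothetical; the new file must match the
 * old one in size.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int change_backing_file(const char *loopdev, const char *new_path)
{
        int ret = -1;
        int loop_fd = open(loopdev, O_RDWR);
        int new_fd = open(new_path, O_RDONLY);

        if (loop_fd >= 0 && new_fd >= 0)
                ret = ioctl(loop_fd, LOOP_CHANGE_FD, new_fd);
        if (new_fd >= 0)
                close(new_fd);  /* the loop device holds its own reference */
        if (loop_fd >= 0)
                close(loop_fd);
        return ret;
}
#endif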

/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
                              ssize_t (*callback)(struct loop_device *, char *))
{
        struct gendisk *disk = dev_to_disk(dev);
        struct loop_device *lo = disk->private_data;

        return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)                                             \
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);  \
static ssize_t loop_attr_do_show_##_name(struct device *d,              \
                                struct device_attribute *attr, char *b) \
{                                                                       \
        return loop_attr_show(d, b, loop_attr_##_name##_show);          \
}                                                                       \
static struct device_attribute loop_attr_##_name =                      \
        __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
        ssize_t ret;
        char *p = NULL;

        spin_lock_irq(&lo->lo_lock);
        if (lo->lo_backing_file)
                p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
        spin_unlock_irq(&lo->lo_lock);

        if (IS_ERR_OR_NULL(p))
                ret = PTR_ERR(p);
        else {
                ret = strlen(p);
                memmove(buf, p, ret);
                buf[ret++] = '\n';
                buf[ret] = 0;
        }

        return ret;
}

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
        int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

        return sprintf(buf, "%s\n", autoclear ? "1" : "0");
}

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
        int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

        return sprintf(buf, "%s\n", partscan ? "1" : "0");
}

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
        int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

        return sprintf(buf, "%s\n", dio ? "1" : "0");
}

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);

static struct attribute *loop_attrs[] = {
        &loop_attr_backing_file.attr,
        &loop_attr_offset.attr,
        &loop_attr_sizelimit.attr,
        &loop_attr_autoclear.attr,
        &loop_attr_partscan.attr,
        &loop_attr_dio.attr,
        NULL,
};

static struct attribute_group loop_attribute_group = {
        .name = "loop",
        .attrs = loop_attrs,
};

static void loop_sysfs_init(struct loop_device *lo)
{
        lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
                                                &loop_attribute_group);
}

static void loop_sysfs_exit(struct loop_device *lo)
{
        if (lo->sysfs_inited)
                sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
                                   &loop_attribute_group);
}
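
/*
 * Illustrative sketch, not part of this driver: because the attribute
 * group above is named "loop" and attached to the disk's kobject, the
 * read-only attributes appear under /sys/block/loopN/loop/. The device
 * number below is hypothetical.
 */
#if 0
#include <stdio.h>

int print_backing_file(void)
{
        char buf[256];
        FILE *f = fopen("/sys/block/loop0/loop/backing_file", "r");

        if (!f)
                return -1;
        if (fgets(buf, sizeof(buf), f))
                printf("backing file: %s", buf);
        fclose(f);
        return 0;
}
#endif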

static void loop_config_discard(struct loop_device *lo)
{
        struct file *file = lo->lo_backing_file;
        struct inode *inode = file->f_mapping->host;
        struct request_queue *q = lo->lo_queue;

        /*
         * We use punch hole to reclaim the free space used by the
         * image a.k.a. discard. However we do not support discard if
         * encryption is enabled, because it may give an attacker
         * useful information.
         */
        if ((!file->f_op->fallocate) ||
            lo->lo_encrypt_key_size) {
                q->limits.discard_granularity = 0;
                q->limits.discard_alignment = 0;
                blk_queue_max_discard_sectors(q, 0);
                blk_queue_max_write_zeroes_sectors(q, 0);
                blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                return;
        }

        q->limits.discard_granularity = inode->i_sb->s_blocksize;
        q->limits.discard_alignment = 0;

        blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
        blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}

static void loop_unprepare_queue(struct loop_device *lo)
{
        kthread_flush_worker(&lo->worker);
        kthread_stop(lo->worker_task);
}

static int loop_kthread_worker_fn(void *worker_ptr)
{
        current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
        return kthread_worker_fn(worker_ptr);
}

static int loop_prepare_queue(struct loop_device *lo)
{
        kthread_init_worker(&lo->worker);
        lo->worker_task = kthread_run(loop_kthread_worker_fn,
                        &lo->worker, "loop%d", lo->lo_number);
        if (IS_ERR(lo->worker_task))
                return -ENOMEM;
        set_user_nice(lo->worker_task, MIN_NICE);
        return 0;
}

static void loop_update_rotational(struct loop_device *lo)
{
        struct file *file = lo->lo_backing_file;
        struct inode *file_inode = file->f_mapping->host;
        struct block_device *file_bdev = file_inode->i_sb->s_bdev;
        struct request_queue *q = lo->lo_queue;
        bool nonrot = true;

        /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
        if (file_bdev)
                nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));

        if (nonrot)
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}

static int loop_set_fd(struct loop_device *lo, fmode_t mode,
                       struct block_device *bdev, unsigned int arg)
{
        struct file     *file;
        struct inode    *inode;
        struct address_space *mapping;
        struct block_device *claimed_bdev = NULL;
        int             lo_flags = 0;
        int             error;
        loff_t          size;
        bool            partscan;

        /* This is safe, since we have a reference from open(). */
        __module_get(THIS_MODULE);

        error = -EBADF;
        file = fget(arg);
        if (!file)
                goto out;

        /*
         * If we don't hold an exclusive handle for the device, upgrade to
         * it here to avoid changing the device under its exclusive owner.
         */
        if (!(mode & FMODE_EXCL)) {
                claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
                if (IS_ERR(claimed_bdev)) {
                        error = PTR_ERR(claimed_bdev);
                        goto out_putf;
                }
        }

        error = mutex_lock_killable(&loop_ctl_mutex);
        if (error)
                goto out_bdev;

        error = -EBUSY;
        if (lo->lo_state != Lo_unbound)
                goto out_unlock;

        error = loop_validate_file(file, bdev);
        if (error)
                goto out_unlock;

        mapping = file->f_mapping;
        inode = mapping->host;

        if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
            !file->f_op->write_iter)
                lo_flags |= LO_FLAGS_READ_ONLY;

        error = -EFBIG;
        size = get_loop_size(lo, file);
        if ((loff_t)(sector_t)size != size)
                goto out_unlock;
        error = loop_prepare_queue(lo);
        if (error)
                goto out_unlock;

        error = 0;

        set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

        lo->use_dio = false;
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
        lo->transfer = NULL;
        lo->ioctl = NULL;
        lo->lo_sizelimit = 0;
        lo->old_gfp_mask = mapping_gfp_mask(mapping);
        mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

        if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
                blk_queue_write_cache(lo->lo_queue, true, false);

        if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
                /* In case of direct I/O, match underlying block size */
                unsigned short bsize = bdev_logical_block_size(
                        inode->i_sb->s_bdev);

                blk_queue_logical_block_size(lo->lo_queue, bsize);
                blk_queue_physical_block_size(lo->lo_queue, bsize);
                blk_queue_io_min(lo->lo_queue, bsize);
        }

        loop_update_rotational(lo);
        loop_update_dio(lo);
        set_capacity(lo->lo_disk, size);
        bd_set_size(bdev, size << 9);
        loop_sysfs_init(lo);
        /* let user-space know about the new size */
        kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

        set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
                      block_size(inode->i_bdev) : PAGE_SIZE);

        lo->lo_state = Lo_bound;
        if (part_shift)
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;

        /*
         * Grab the block_device to prevent its destruction after we put
         * the /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
         */
        bdgrab(bdev);
        mutex_unlock(&loop_ctl_mutex);
        if (partscan)
                loop_reread_partitions(lo, bdev);
        if (claimed_bdev)
                bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
        return 0;

out_unlock:
        mutex_unlock(&loop_ctl_mutex);
out_bdev:
        if (claimed_bdev)
                bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
out_putf:
        fput(file);
out:
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
        return error;
}
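
/*
 * Illustrative sketch, not part of this driver: the losetup-style sequence
 * for binding a backing file with LOOP_SET_FD and unbinding it again with
 * LOOP_CLR_FD. Both paths are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int bind_loop(const char *loopdev, const char *image)
{
        int loop_fd = open(loopdev, O_RDWR);    /* e.g. "/dev/loop0" */
        int img_fd = open(image, O_RDWR);       /* e.g. "disk.img" */
        int ret = -1;

        if (loop_fd >= 0 && img_fd >= 0)
                ret = ioctl(loop_fd, LOOP_SET_FD, img_fd);
        if (img_fd >= 0)
                close(img_fd);  /* the loop device keeps its own reference */
        if (loop_fd >= 0)
                close(loop_fd);
        return ret;
}

int unbind_loop(const char *loopdev)
{
        int ret, loop_fd = open(loopdev, O_RDWR);

        if (loop_fd < 0)
                return -1;
        ret = ioctl(loop_fd, LOOP_CLR_FD, 0);
        close(loop_fd);
        return ret;
}
#endif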

static int
loop_release_xfer(struct loop_device *lo)
{
        int err = 0;
        struct loop_func_table *xfer = lo->lo_encryption;

        if (xfer) {
                if (xfer->release)
                        err = xfer->release(lo);
                lo->transfer = NULL;
                lo->lo_encryption = NULL;
                module_put(xfer->owner);
        }
        return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
               const struct loop_info64 *i)
{
        int err = 0;

        if (xfer) {
                struct module *owner = xfer->owner;

                if (!try_module_get(owner))
                        return -EINVAL;
                if (xfer->init)
                        err = xfer->init(lo, i);
                if (err)
                        module_put(owner);
                else
                        lo->lo_encryption = xfer;
        }
        return err;
}

static int __loop_clr_fd(struct loop_device *lo, bool release)
{
        struct file *filp = NULL;
        gfp_t gfp = lo->old_gfp_mask;
        struct block_device *bdev = lo->lo_device;
        int err = 0;
        bool partscan = false;
        int lo_number;

        mutex_lock(&loop_ctl_mutex);
        if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
                err = -ENXIO;
                goto out_unlock;
        }

        filp = lo->lo_backing_file;
        if (filp == NULL) {
                err = -EINVAL;
                goto out_unlock;
        }

        /* freeze request queue during the transition */
        blk_mq_freeze_queue(lo->lo_queue);

        spin_lock_irq(&lo->lo_lock);
        lo->lo_backing_file = NULL;
        spin_unlock_irq(&lo->lo_lock);

        loop_release_xfer(lo);
        lo->transfer = NULL;
        lo->ioctl = NULL;
        lo->lo_device = NULL;
        lo->lo_encryption = NULL;
        lo->lo_offset = 0;
        lo->lo_sizelimit = 0;
        lo->lo_encrypt_key_size = 0;
        memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
        memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
        memset(lo->lo_file_name, 0, LO_NAME_SIZE);
        blk_queue_logical_block_size(lo->lo_queue, 512);
        blk_queue_physical_block_size(lo->lo_queue, 512);
        blk_queue_io_min(lo->lo_queue, 512);
        if (bdev) {
                bdput(bdev);
                invalidate_bdev(bdev);
                bdev->bd_inode->i_mapping->wb_err = 0;
        }
        set_capacity(lo->lo_disk, 0);
        loop_sysfs_exit(lo);
        if (bdev) {
                bd_set_size(bdev, 0);
                /* let user-space know about this change */
                kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
        }
        mapping_set_gfp_mask(filp->f_mapping, gfp);
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
        blk_mq_unfreeze_queue(lo->lo_queue);

        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
        lo_number = lo->lo_number;
        loop_unprepare_queue(lo);
out_unlock:
        mutex_unlock(&loop_ctl_mutex);
        if (partscan) {
                /*
                 * bd_mutex is already held in the release path, so don't
                 * acquire it again if this function is called from there.
                 *
                 * If the partition reread is not from the release path,
                 * lo_refcnt must be at least one, and it can only become
                 * zero once the current holder is released.
                 */
                if (!release)
                        mutex_lock(&bdev->bd_mutex);
                err = bdev_disk_changed(bdev, false);
                if (!release)
                        mutex_unlock(&bdev->bd_mutex);
                if (err)
                        pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
                                __func__, lo_number, err);
                /* Device is gone, no point in returning error */
                err = 0;
        }

        /*
         * lo->lo_state is set to Lo_unbound here only after the above
         * partscan has finished.
         *
         * Nobody else can enter __loop_clr_fd(), since lo->lo_backing_file
         * is already cleared and the Lo_rundown state protects us from all
         * the other places trying to change the 'lo' device.
         */
        mutex_lock(&loop_ctl_mutex);
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
        lo->lo_state = Lo_unbound;
        mutex_unlock(&loop_ctl_mutex);

        /*
         * We need not hold loop_ctl_mutex to fput the backing file.
         * Calling fput while holding loop_ctl_mutex triggers a circular
         * lock dependency possibility warning, as fput can take bd_mutex,
         * which is usually taken before loop_ctl_mutex.
         */
        if (filp)
                fput(filp);
        return err;
}

static int loop_clr_fd(struct loop_device *lo)
{
        int err;

        err = mutex_lock_killable(&loop_ctl_mutex);
        if (err)
                return err;
        if (lo->lo_state != Lo_bound) {
                mutex_unlock(&loop_ctl_mutex);
                return -ENXIO;
        }
        /*
         * If we've explicitly asked to tear down the loop device,
         * and it has an elevated reference count, set it for auto-teardown
         * when the last reference goes away. This stops $!~#$@ udev from
         * preventing teardown because it decided that it needs to run blkid
         * on the loopback device whenever one appears. xfstests is notorious
         * for failing tests because blkid via udev races with a
         * 'losetup <dev>; mkfs; losetup -d <dev>' sequence, causing the
         * losetup -d command to fail with EBUSY.
         */
        if (atomic_read(&lo->lo_refcnt) > 1) {
                lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
                mutex_unlock(&loop_ctl_mutex);
                return 0;
        }
        lo->lo_state = Lo_rundown;
        mutex_unlock(&loop_ctl_mutex);

        return __loop_clr_fd(lo, false);
}

static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
        int err;
        struct loop_func_table *xfer;
        kuid_t uid = current_uid();
        struct block_device *bdev;
        bool partscan = false;

        err = mutex_lock_killable(&loop_ctl_mutex);
        if (err)
                return err;
        if (lo->lo_encrypt_key_size &&
            !uid_eq(lo->lo_key_owner, uid) &&
            !capable(CAP_SYS_ADMIN)) {
                err = -EPERM;
                goto out_unlock;
        }
        if (lo->lo_state != Lo_bound) {
                err = -ENXIO;
                goto out_unlock;
        }
        if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
                err = -EINVAL;
                goto out_unlock;
        }

        if (lo->lo_offset != info->lo_offset ||
            lo->lo_sizelimit != info->lo_sizelimit) {
                sync_blockdev(lo->lo_device);
                kill_bdev(lo->lo_device);
        }

        /* I/O needs to be drained during the transfer transition */
        blk_mq_freeze_queue(lo->lo_queue);

        err = loop_release_xfer(lo);
        if (err)
                goto out_unfreeze;

        if (info->lo_encrypt_type) {
                unsigned int type = info->lo_encrypt_type;

                if (type >= MAX_LO_CRYPT) {
                        err = -EINVAL;
                        goto out_unfreeze;
                }
                xfer = xfer_funcs[type];
                if (xfer == NULL) {
                        err = -EINVAL;
                        goto out_unfreeze;
                }
        } else
                xfer = NULL;

        err = loop_init_xfer(lo, xfer, info);
        if (err)
                goto out_unfreeze;

        if (lo->lo_offset != info->lo_offset ||
            lo->lo_sizelimit != info->lo_sizelimit) {
                /* kill_bdev should have truncated all the pages */
                if (lo->lo_device->bd_inode->i_mapping->nrpages) {
                        err = -EAGAIN;
                        pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
                                __func__, lo->lo_number, lo->lo_file_name,
                                lo->lo_device->bd_inode->i_mapping->nrpages);
                        goto out_unfreeze;
                }
                if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
                        err = -EFBIG;
                        goto out_unfreeze;
                }
        }

        loop_config_discard(lo);

        memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
        memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
        lo->lo_file_name[LO_NAME_SIZE-1] = 0;
        lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

        if (!xfer)
                xfer = &none_funcs;
        lo->transfer = xfer->transfer;
        lo->ioctl = xfer->ioctl;

        if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
             (info->lo_flags & LO_FLAGS_AUTOCLEAR))
                lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

        lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
        lo->lo_init[0] = info->lo_init[0];
        lo->lo_init[1] = info->lo_init[1];
        if (info->lo_encrypt_key_size) {
                memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
                       info->lo_encrypt_key_size);
                lo->lo_key_owner = uid;
        }

        /* update dio if lo_offset or the transfer function has changed */
        __loop_update_dio(lo, lo->use_dio);

out_unfreeze:
        blk_mq_unfreeze_queue(lo->lo_queue);

        if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
             !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
                lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
                bdev = lo->lo_device;
                partscan = true;
        }
out_unlock:
        mutex_unlock(&loop_ctl_mutex);
        if (partscan)
                loop_reread_partitions(lo, bdev);

        return err;
}
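
/*
 * Illustrative sketch, not part of this driver: using LOOP_SET_STATUS64
 * to move the data start with lo_offset and request a partition scan via
 * LO_FLAGS_PARTSCAN, both handled by loop_set_status() above. The device
 * path and offset are hypothetical; note that the zeroed fields (names,
 * key) are applied as well.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int set_offset_and_partscan(const char *loopdev, unsigned long long offset)
{
        struct loop_info64 info;
        int ret, fd = open(loopdev, O_RDWR);

        if (fd < 0)
                return -1;
        memset(&info, 0, sizeof(info));
        info.lo_offset = offset;                /* e.g. 512 to skip a header */
        info.lo_flags = LO_FLAGS_PARTSCAN;
        ret = ioctl(fd, LOOP_SET_STATUS64, &info);
        close(fd);
        return ret;
}
#endif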

static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
        struct path path;
        struct kstat stat;
        int ret;

        ret = mutex_lock_killable(&loop_ctl_mutex);
        if (ret)
                return ret;
        if (lo->lo_state != Lo_bound) {
                mutex_unlock(&loop_ctl_mutex);
                return -ENXIO;
        }

        memset(info, 0, sizeof(*info));
        info->lo_number = lo->lo_number;
        info->lo_offset = lo->lo_offset;
        info->lo_sizelimit = lo->lo_sizelimit;
        info->lo_flags = lo->lo_flags;
        memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
        memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
        info->lo_encrypt_type =
                lo->lo_encryption ? lo->lo_encryption->number : 0;
        if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
                info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
                memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
                       lo->lo_encrypt_key_size);
        }

        /* Drop loop_ctl_mutex while we call into the filesystem. */
        path = lo->lo_backing_file->f_path;
        path_get(&path);
        mutex_unlock(&loop_ctl_mutex);
        ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
        if (!ret) {
                info->lo_device = huge_encode_dev(stat.dev);
                info->lo_inode = stat.ino;
                info->lo_rdevice = huge_encode_dev(stat.rdev);
        }
        path_put(&path);
        return ret;
}

static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
        memset(info64, 0, sizeof(*info64));
        info64->lo_number = info->lo_number;
        info64->lo_device = info->lo_device;
        info64->lo_inode = info->lo_inode;
        info64->lo_rdevice = info->lo_rdevice;
        info64->lo_offset = info->lo_offset;
        info64->lo_sizelimit = 0;
        info64->lo_encrypt_type = info->lo_encrypt_type;
        info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
        info64->lo_flags = info->lo_flags;
        info64->lo_init[0] = info->lo_init[0];
        info64->lo_init[1] = info->lo_init[1];
        if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
                memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
        else
                memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
        memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
        memset(info, 0, sizeof(*info));
        info->lo_number = info64->lo_number;
        info->lo_device = info64->lo_device;
        info->lo_inode = info64->lo_inode;
        info->lo_rdevice = info64->lo_rdevice;
        info->lo_offset = info64->lo_offset;
        info->lo_encrypt_type = info64->lo_encrypt_type;
        info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
        info->lo_flags = info64->lo_flags;
        info->lo_init[0] = info64->lo_init[0];
        info->lo_init[1] = info64->lo_init[1];
        if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
                memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
        else
                memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
        memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

        /* error in case values were truncated */
        if (info->lo_device != info64->lo_device ||
            info->lo_rdevice != info64->lo_rdevice ||
            info->lo_inode != info64->lo_inode ||
            info->lo_offset != info64->lo_offset)
                return -EOVERFLOW;

        return 0;
}

static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
        struct loop_info info;
        struct loop_info64 info64;

        if (copy_from_user(&info, arg, sizeof (struct loop_info)))
                return -EFAULT;
        loop_info64_from_old(&info, &info64);
        return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
        struct loop_info64 info64;

        if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
                return -EFAULT;
        return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg)
{
        struct loop_info info;
        struct loop_info64 info64;
        int err;

        if (!arg)
                return -EINVAL;
        err = loop_get_status(lo, &info64);
        if (!err)
                err = loop_info64_to_old(&info64, &info);
        if (!err && copy_to_user(arg, &info, sizeof(info)))
                err = -EFAULT;

        return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg)
{
        struct loop_info64 info64;
        int err;

        if (!arg)
                return -EINVAL;
        err = loop_get_status(lo, &info64);
        if (!err && copy_to_user(arg, &info64, sizeof(info64)))
                err = -EFAULT;

        return err;
}
1509
1510static int loop_set_capacity(struct loop_device *lo)
1511{
1512        if (unlikely(lo->lo_state != Lo_bound))
1513                return -ENXIO;
1514
1515        return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
1516}
1517
1518static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1519{
1520        int error = -ENXIO;
1521        if (lo->lo_state != Lo_bound)
1522                goto out;
1523
1524        __loop_update_dio(lo, !!arg);
1525        if (lo->use_dio == !!arg)
1526                return 0;
1527        error = -EINVAL;
1528 out:
1529        return error;
1530}
1531
1532static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1533{
1534        int err = 0;
1535
1536        if (lo->lo_state != Lo_bound)
1537                return -ENXIO;
1538
1539        if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
1540                return -EINVAL;
1541
1542        if (lo->lo_queue->limits.logical_block_size != arg) {
1543                sync_blockdev(lo->lo_device);
1544                kill_bdev(lo->lo_device);
1545        }
1546
1547        blk_mq_freeze_queue(lo->lo_queue);
1548
1549        /* kill_bdev should have truncated all the pages */
1550        if (lo->lo_queue->limits.logical_block_size != arg &&
1551                        lo->lo_device->bd_inode->i_mapping->nrpages) {
1552                err = -EAGAIN;
1553                pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1554                        __func__, lo->lo_number, lo->lo_file_name,
1555                        lo->lo_device->bd_inode->i_mapping->nrpages);
1556                goto out_unfreeze;
1557        }
1558
1559        blk_queue_logical_block_size(lo->lo_queue, arg);
1560        blk_queue_physical_block_size(lo->lo_queue, arg);
1561        blk_queue_io_min(lo->lo_queue, arg);
1562        loop_update_dio(lo);
1563out_unfreeze:
1564        blk_mq_unfreeze_queue(lo->lo_queue);
1565
1566        return err;
1567}
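
/*
 * Illustrative userspace sketch: switching the device to 4 KiB logical
 * blocks.  Per the checks above, the argument must be a power of two
 * between 512 and PAGE_SIZE, and -EAGAIN is returned when dirty pages
 * on the block device could not be flushed away first.
 *
 *	ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096);
 */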
1568
1569static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1570                           unsigned long arg)
1571{
1572        int err;
1573
1574        err = mutex_lock_killable(&loop_ctl_mutex);
1575        if (err)
1576                return err;
1577        switch (cmd) {
1578        case LOOP_SET_CAPACITY:
1579                err = loop_set_capacity(lo);
1580                break;
1581        case LOOP_SET_DIRECT_IO:
1582                err = loop_set_dio(lo, arg);
1583                break;
1584        case LOOP_SET_BLOCK_SIZE:
1585                err = loop_set_block_size(lo, arg);
1586                break;
1587        default:
1588                err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1589        }
1590        mutex_unlock(&loop_ctl_mutex);
1591        return err;
1592}
1593
1594static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1595        unsigned int cmd, unsigned long arg)
1596{
1597        struct loop_device *lo = bdev->bd_disk->private_data;
1598        int err;
1599
1600        switch (cmd) {
1601        case LOOP_SET_FD:
1602                return loop_set_fd(lo, mode, bdev, arg);
1603        case LOOP_CHANGE_FD:
1604                return loop_change_fd(lo, bdev, arg);
1605        case LOOP_CLR_FD:
1606                return loop_clr_fd(lo);
1607        case LOOP_SET_STATUS:
1608                err = -EPERM;
1609                if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1610                        err = loop_set_status_old(lo,
1611                                        (struct loop_info __user *)arg);
1612                }
1613                break;
1614        case LOOP_GET_STATUS:
1615                return loop_get_status_old(lo, (struct loop_info __user *) arg);
1616        case LOOP_SET_STATUS64:
1617                err = -EPERM;
1618                if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1619                        err = loop_set_status64(lo,
1620                                        (struct loop_info64 __user *) arg);
1621                }
1622                break;
1623        case LOOP_GET_STATUS64:
1624                return loop_get_status64(lo, (struct loop_info64 __user *) arg);
1625        case LOOP_SET_CAPACITY:
1626        case LOOP_SET_DIRECT_IO:
1627        case LOOP_SET_BLOCK_SIZE:
1628                if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
1629                        return -EPERM;
1630                /* Fall through */
1631        default:
1632                err = lo_simple_ioctl(lo, cmd, arg);
1633                break;
1634        }
1635
1636        return err;
1637}
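
/*
 * Illustrative userspace sketch of the attach/detach sequence dispatched
 * by lo_ioctl() above -- essentially what losetup does.  Error handling
 * is omitted and "disk.img" is only an example name.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int loopfd = open("/dev/loop0", O_RDWR);
 *	int filefd = open("disk.img", O_RDWR);
 *
 *	ioctl(loopfd, LOOP_SET_FD, filefd);	// bind the file to the device
 *	close(filefd);				// the loop device keeps its own reference
 *	// ... use /dev/loop0 as an ordinary block device ...
 *	ioctl(loopfd, LOOP_CLR_FD, 0);		// detach again
 */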
1638
1639#ifdef CONFIG_COMPAT
1640struct compat_loop_info {
1641        compat_int_t    lo_number;      /* ioctl r/o */
1642        compat_dev_t    lo_device;      /* ioctl r/o */
1643        compat_ulong_t  lo_inode;       /* ioctl r/o */
1644        compat_dev_t    lo_rdevice;     /* ioctl r/o */
1645        compat_int_t    lo_offset;
1646        compat_int_t    lo_encrypt_type;
1647        compat_int_t    lo_encrypt_key_size;    /* ioctl w/o */
1648        compat_int_t    lo_flags;       /* ioctl r/o */
1649        char            lo_name[LO_NAME_SIZE];
1650        unsigned char   lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1651        compat_ulong_t  lo_init[2];
1652        char            reserved[4];
1653};
1654
1655/*
1656 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1657 * - noinlined to reduce stack space usage in main part of driver
1658 */
1659static noinline int
1660loop_info64_from_compat(const struct compat_loop_info __user *arg,
1661                        struct loop_info64 *info64)
1662{
1663        struct compat_loop_info info;
1664
1665        if (copy_from_user(&info, arg, sizeof(info)))
1666                return -EFAULT;
1667
1668        memset(info64, 0, sizeof(*info64));
1669        info64->lo_number = info.lo_number;
1670        info64->lo_device = info.lo_device;
1671        info64->lo_inode = info.lo_inode;
1672        info64->lo_rdevice = info.lo_rdevice;
1673        info64->lo_offset = info.lo_offset;
1674        info64->lo_sizelimit = 0;
1675        info64->lo_encrypt_type = info.lo_encrypt_type;
1676        info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
1677        info64->lo_flags = info.lo_flags;
1678        info64->lo_init[0] = info.lo_init[0];
1679        info64->lo_init[1] = info.lo_init[1];
1680        if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1681                memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
1682        else
1683                memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1684        memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
1685        return 0;
1686}
1687
1688/*
1689 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1690 * - noinlined to reduce stack space usage in main part of driver
1691 */
1692static noinline int
1693loop_info64_to_compat(const struct loop_info64 *info64,
1694                      struct compat_loop_info __user *arg)
1695{
1696        struct compat_loop_info info;
1697
1698        memset(&info, 0, sizeof(info));
1699        info.lo_number = info64->lo_number;
1700        info.lo_device = info64->lo_device;
1701        info.lo_inode = info64->lo_inode;
1702        info.lo_rdevice = info64->lo_rdevice;
1703        info.lo_offset = info64->lo_offset;
1704        info.lo_encrypt_type = info64->lo_encrypt_type;
1705        info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
1706        info.lo_flags = info64->lo_flags;
1707        info.lo_init[0] = info64->lo_init[0];
1708        info.lo_init[1] = info64->lo_init[1];
1709        if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1710                memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1711        else
1712                memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1713        memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1714
1715        /* error in case values were truncated */
1716        if (info.lo_device != info64->lo_device ||
1717            info.lo_rdevice != info64->lo_rdevice ||
1718            info.lo_inode != info64->lo_inode ||
1719            info.lo_offset != info64->lo_offset ||
1720            info.lo_init[0] != info64->lo_init[0] ||
1721            info.lo_init[1] != info64->lo_init[1])
1722                return -EOVERFLOW;
1723
1724        if (copy_to_user(arg, &info, sizeof(info)))
1725                return -EFAULT;
1726        return 0;
1727}
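
/*
 * Worked example of the truncation check above: a 64-bit lo_offset of
 * 3 GiB (0xc0000000) does not fit into the 32-bit signed compat_int_t,
 * so info.lo_offset wraps to a negative value, the comparison with
 * info64->lo_offset fails, and 32-bit userspace gets -EOVERFLOW instead
 * of a silently wrong offset.
 */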
1728
1729static int
1730loop_set_status_compat(struct loop_device *lo,
1731                       const struct compat_loop_info __user *arg)
1732{
1733        struct loop_info64 info64;
1734        int ret;
1735
1736        ret = loop_info64_from_compat(arg, &info64);
1737        if (ret < 0)
1738                return ret;
1739        return loop_set_status(lo, &info64);
1740}
1741
1742static int
1743loop_get_status_compat(struct loop_device *lo,
1744                       struct compat_loop_info __user *arg)
1745{
1746        struct loop_info64 info64;
1747        int err;
1748
1749        if (!arg)
1750                return -EINVAL;
1751        err = loop_get_status(lo, &info64);
1752        if (!err)
1753                err = loop_info64_to_compat(&info64, arg);
1754        return err;
1755}
1756
1757static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1758                           unsigned int cmd, unsigned long arg)
1759{
1760        struct loop_device *lo = bdev->bd_disk->private_data;
1761        int err;
1762
1763        switch(cmd) {
1764        case LOOP_SET_STATUS:
1765                err = loop_set_status_compat(lo,
1766                             (const struct compat_loop_info __user *)arg);
1767                break;
1768        case LOOP_GET_STATUS:
1769                err = loop_get_status_compat(lo,
1770                                     (struct compat_loop_info __user *)arg);
1771                break;
1772        case LOOP_SET_CAPACITY:
1773        case LOOP_CLR_FD:
1774        case LOOP_GET_STATUS64:
1775        case LOOP_SET_STATUS64:
1776                arg = (unsigned long) compat_ptr(arg);
1777                /* fall through */
1778        case LOOP_SET_FD:
1779        case LOOP_CHANGE_FD:
1780        case LOOP_SET_BLOCK_SIZE:
1781        case LOOP_SET_DIRECT_IO:
1782                err = lo_ioctl(bdev, mode, cmd, arg);
1783                break;
1784        default:
1785                err = -ENOIOCTLCMD;
1786                break;
1787        }
1788        return err;
1789}
1790#endif
1791
1792static int lo_open(struct block_device *bdev, fmode_t mode)
1793{
1794        struct loop_device *lo;
1795        int err;
1796
1797        err = mutex_lock_killable(&loop_ctl_mutex);
1798        if (err)
1799                return err;
1800        lo = bdev->bd_disk->private_data;
1801        if (!lo) {
1802                err = -ENXIO;
1803                goto out;
1804        }
1805
1806        atomic_inc(&lo->lo_refcnt);
1807out:
1808        mutex_unlock(&loop_ctl_mutex);
1809        return err;
1810}
1811
1812static void lo_release(struct gendisk *disk, fmode_t mode)
1813{
1814        struct loop_device *lo;
1815
1816        mutex_lock(&loop_ctl_mutex);
1817        lo = disk->private_data;
1818        if (atomic_dec_return(&lo->lo_refcnt))
1819                goto out_unlock;
1820
1821        if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1822                if (lo->lo_state != Lo_bound)
1823                        goto out_unlock;
1824                lo->lo_state = Lo_rundown;
1825                mutex_unlock(&loop_ctl_mutex);
1826                /*
1827                 * In autoclear mode, stop the loop thread
1828                 * and remove configuration after last close.
1829                 */
1830                __loop_clr_fd(lo, true);
1831                return;
1832        } else if (lo->lo_state == Lo_bound) {
1833                /*
1834                 * Otherwise keep the thread (if running) and the config,
1835                 * but drain any bios still in flight (freeze/unfreeze).
1836                 */
1837                blk_mq_freeze_queue(lo->lo_queue);
1838                blk_mq_unfreeze_queue(lo->lo_queue);
1839        }
1840
1841out_unlock:
1842        mutex_unlock(&loop_ctl_mutex);
1843}
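
/*
 * Illustrative userspace sketch: arming the autoclear path taken in
 * lo_release() above.  LO_FLAGS_AUTOCLEAR is toggled through the status
 * ioctls; error handling omitted.
 *
 *	struct loop_info64 info;
 *
 *	ioctl(fd, LOOP_GET_STATUS64, &info);
 *	info.lo_flags |= LO_FLAGS_AUTOCLEAR;
 *	ioctl(fd, LOOP_SET_STATUS64, &info);
 *	close(fd);	// last close now tears down the configuration
 */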
1844
1845static const struct block_device_operations lo_fops = {
1846        .owner =        THIS_MODULE,
1847        .open =         lo_open,
1848        .release =      lo_release,
1849        .ioctl =        lo_ioctl,
1850#ifdef CONFIG_COMPAT
1851        .compat_ioctl = lo_compat_ioctl,
1852#endif
1853};
1854
1855/*
1856 * And now the module code and kernel interface.
1857 */
1858static int max_loop;
1859module_param(max_loop, int, 0444);
1860MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1861module_param(max_part, int, 0444);
1862MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1863MODULE_LICENSE("GPL");
1864MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1865
1866int loop_register_transfer(struct loop_func_table *funcs)
1867{
1868        unsigned int n = funcs->number;
1869
1870        if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1871                return -EINVAL;
1872        xfer_funcs[n] = funcs;
1873        return 0;
1874}
1875
1876static int unregister_transfer_cb(int id, void *ptr, void *data)
1877{
1878        struct loop_device *lo = ptr;
1879        struct loop_func_table *xfer = data;
1880
1881        mutex_lock(&loop_ctl_mutex);
1882        if (lo->lo_encryption == xfer)
1883                loop_release_xfer(lo);
1884        mutex_unlock(&loop_ctl_mutex);
1885        return 0;
1886}
1887
1888int loop_unregister_transfer(int number)
1889{
1890        unsigned int n = number;
1891        struct loop_func_table *xfer;
1892
1893        if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1894                return -EINVAL;
1895
1896        xfer_funcs[n] = NULL;
1897        idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1898        return 0;
1899}
1900
1901EXPORT_SYMBOL(loop_register_transfer);
1902EXPORT_SYMBOL(loop_unregister_transfer);
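
/*
 * Minimal sketch of how a transfer module uses the two exports above;
 * cryptoloop registers itself this way with LO_CRYPT_CRYPTOAPI.  The
 * function example_transfer() is hypothetical -- it must have the
 * transfer_xor()-style signature from struct loop_func_table and copy
 * (and en/decrypt) data between the raw and loop pages.
 *
 *	static struct loop_func_table example_funcs = {
 *		.number   = LO_CRYPT_CRYPTOAPI,	// must be an unused slot
 *		.transfer = example_transfer,
 *		.owner    = THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return loop_register_transfer(&example_funcs);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		loop_unregister_transfer(LO_CRYPT_CRYPTOAPI);
 *	}
 */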
1903
1904static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1905                const struct blk_mq_queue_data *bd)
1906{
1907        struct request *rq = bd->rq;
1908        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1909        struct loop_device *lo = rq->q->queuedata;
1910
1911        blk_mq_start_request(rq);
1912
1913        if (lo->lo_state != Lo_bound)
1914                return BLK_STS_IOERR;
1915
1916        switch (req_op(rq)) {
1917        case REQ_OP_FLUSH:
1918        case REQ_OP_DISCARD:
1919        case REQ_OP_WRITE_ZEROES:
1920                cmd->use_aio = false;
1921                break;
1922        default:
1923                cmd->use_aio = lo->use_dio;
1924                break;
1925        }
1926
1927        /* always use the first bio's css */
1928#ifdef CONFIG_BLK_CGROUP
1929        if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
1930                cmd->css = &bio_blkcg(rq->bio)->css;
1931                css_get(cmd->css);
1932        } else
1933#endif
1934                cmd->css = NULL;
1935        kthread_queue_work(&lo->worker, &cmd->work);
1936
1937        return BLK_STS_OK;
1938}
1939
1940static void loop_handle_cmd(struct loop_cmd *cmd)
1941{
1942        struct request *rq = blk_mq_rq_from_pdu(cmd);
1943        const bool write = op_is_write(req_op(rq));
1944        struct loop_device *lo = rq->q->queuedata;
1945        int ret = 0;
1946
1947        if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
1948                ret = -EIO;
1949                goto failed;
1950        }
1951
1952        ret = do_req_filebacked(lo, rq);
1953 failed:
1954        /* complete non-aio request */
1955        if (!cmd->use_aio || ret) {
1956                cmd->ret = ret ? -EIO : 0;
1957                blk_mq_complete_request(rq);
1958        }
1959}
1960
1961static void loop_queue_work(struct kthread_work *work)
1962{
1963        struct loop_cmd *cmd =
1964                container_of(work, struct loop_cmd, work);
1965
1966        loop_handle_cmd(cmd);
1967}
1968
1969static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
1970                unsigned int hctx_idx, unsigned int numa_node)
1971{
1972        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1973
1974        kthread_init_work(&cmd->work, loop_queue_work);
1975        return 0;
1976}
1977
1978static const struct blk_mq_ops loop_mq_ops = {
1979        .queue_rq       = loop_queue_rq,
1980        .init_request   = loop_init_request,
1981        .complete       = lo_complete_rq,
1982};
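
/*
 * Request flow, for reference: blk-mq calls loop_queue_rq(), which picks
 * buffered vs. aio submission and queues the command on the per-device
 * kthread worker.  The worker runs loop_queue_work() -> loop_handle_cmd()
 * -> do_req_filebacked().  Non-aio requests are completed right there via
 * blk_mq_complete_request(); aio requests complete later from the I/O
 * callback.  Both paths end in lo_complete_rq() through .complete above.
 */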
1983
1984static int loop_add(struct loop_device **l, int i)
1985{
1986        struct loop_device *lo;
1987        struct gendisk *disk;
1988        int err;
1989
1990        err = -ENOMEM;
1991        lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1992        if (!lo)
1993                goto out;
1994
1995        lo->lo_state = Lo_unbound;
1996
1997        /* allocate an id; if @i >= 0, we're requesting that specific id */
1998        if (i >= 0) {
1999                err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
2000                if (err == -ENOSPC)
2001                        err = -EEXIST;
2002        } else {
2003                err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
2004        }
2005        if (err < 0)
2006                goto out_free_dev;
2007        i = err;
2008
2009        err = -ENOMEM;
2010        lo->tag_set.ops = &loop_mq_ops;
2011        lo->tag_set.nr_hw_queues = 1;
2012        lo->tag_set.queue_depth = 128;
2013        lo->tag_set.numa_node = NUMA_NO_NODE;
2014        lo->tag_set.cmd_size = sizeof(struct loop_cmd);
2015        lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2016        lo->tag_set.driver_data = lo;
2017
2018        err = blk_mq_alloc_tag_set(&lo->tag_set);
2019        if (err)
2020                goto out_free_idr;
2021
2022        lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
2023        if (IS_ERR(lo->lo_queue)) {
2024                err = PTR_ERR(lo->lo_queue);
2025                goto out_cleanup_tags;
2026        }
2027        lo->lo_queue->queuedata = lo;
2028
2029        blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
2030
2031        /*
2032         * By default, we do buffered I/O, so it doesn't make sense to enable
2033         * merging because I/O to the backing file is submitted page by page.
2034         * For direct I/O mode, merging does help to dispatch bigger requests
2035         * to the underlying disk, so merging is enabled together with direct I/O.
2036         */
2037        blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
2038
2039        err = -ENOMEM;
2040        disk = lo->lo_disk = alloc_disk(1 << part_shift);
2041        if (!disk)
2042                goto out_free_queue;
2043
2044        /*
2045         * Disable partition scanning by default. The in-kernel partition
2046         * scanning can be requested individually per-device during its
2047         * setup. Userspace can always add and remove partitions from all
2048         * devices. The needed partition minors are allocated from the
2049         * extended minor space, the main loop device numbers will continue
2050         * to match the loop minors, regardless of the number of partitions
2051         * used.
2052         *
2053         * If max_part is given, partition scanning is globally enabled for
2054         * all loop devices. The minors for the main loop devices will be
2055         * multiples of max_part.
2056         *
2057         * Note: Global-for-all-devices, set-only-at-init, read-only module
2058         * parameters like 'max_loop' and 'max_part' make things needlessly
2059         * complicated, are too static and inflexible, and may surprise
2060         * userspace tools. Parameters like this in general should be avoided.
2061         */
2062        if (!part_shift)
2063                disk->flags |= GENHD_FL_NO_PART_SCAN;
2064        disk->flags |= GENHD_FL_EXT_DEVT;
2065        atomic_set(&lo->lo_refcnt, 0);
2066        lo->lo_number           = i;
2067        spin_lock_init(&lo->lo_lock);
2068        disk->major             = LOOP_MAJOR;
2069        disk->first_minor       = i << part_shift;
2070        disk->fops              = &lo_fops;
2071        disk->private_data      = lo;
2072        disk->queue             = lo->lo_queue;
2073        sprintf(disk->disk_name, "loop%d", i);
2074        add_disk(disk);
2075        *l = lo;
2076        return lo->lo_number;
2077
2078out_free_queue:
2079        blk_cleanup_queue(lo->lo_queue);
2080out_cleanup_tags:
2081        blk_mq_free_tag_set(&lo->tag_set);
2082out_free_idr:
2083        idr_remove(&loop_index_idr, i);
2084out_free_dev:
2085        kfree(lo);
2086out:
2087        return err;
2088}
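
/*
 * Worked example of the minor numbering set up above: with max_part=15,
 * loop_init() computes part_shift = fls(15) = 4, so device i gets
 * first_minor = i << 4 -- loop0 is minor 0, loop1 is minor 16, and the
 * minors in between serve up to 15 partitions per device.  Without
 * max_part, part_shift stays 0 and partition minors come from the
 * extended dev_t space enabled by GENHD_FL_EXT_DEVT.
 */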
2089
2090static void loop_remove(struct loop_device *lo)
2091{
2092        del_gendisk(lo->lo_disk);
2093        blk_cleanup_queue(lo->lo_queue);
2094        blk_mq_free_tag_set(&lo->tag_set);
2095        put_disk(lo->lo_disk);
2096        kfree(lo);
2097}
2098
2099static int find_free_cb(int id, void *ptr, void *data)
2100{
2101        struct loop_device *lo = ptr;
2102        struct loop_device **l = data;
2103
2104        if (lo->lo_state == Lo_unbound) {
2105                *l = lo;
2106                return 1;
2107        }
2108        return 0;
2109}
2110
2111static int loop_lookup(struct loop_device **l, int i)
2112{
2113        struct loop_device *lo;
2114        int ret = -ENODEV;
2115
2116        if (i < 0) {
2117                int err;
2118
2119                err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
2120                if (err == 1) {
2121                        *l = lo;
2122                        ret = lo->lo_number;
2123                }
2124                goto out;
2125        }
2126
2127        /* look up and return a specific i */
2128        lo = idr_find(&loop_index_idr, i);
2129        if (lo) {
2130                *l = lo;
2131                ret = lo->lo_number;
2132        }
2133out:
2134        return ret;
2135}
2136
2137static struct kobject *loop_probe(dev_t dev, int *part, void *data)
2138{
2139        struct loop_device *lo;
2140        struct kobject *kobj;
2141        int err;
2142
2143        mutex_lock(&loop_ctl_mutex);
2144        err = loop_lookup(&lo, MINOR(dev) >> part_shift);
2145        if (err < 0)
2146                err = loop_add(&lo, MINOR(dev) >> part_shift);
2147        if (err < 0)
2148                kobj = NULL;
2149        else
2150                kobj = get_disk_and_module(lo->lo_disk);
2151        mutex_unlock(&loop_ctl_mutex);
2152
2153        *part = 0;
2154        return kobj;
2155}
2156
2157static long loop_control_ioctl(struct file *file, unsigned int cmd,
2158                               unsigned long parm)
2159{
2160        struct loop_device *lo;
2161        int ret;
2162
2163        ret = mutex_lock_killable(&loop_ctl_mutex);
2164        if (ret)
2165                return ret;
2166
2167        ret = -ENOSYS;
2168        switch (cmd) {
2169        case LOOP_CTL_ADD:
2170                ret = loop_lookup(&lo, parm);
2171                if (ret >= 0) {
2172                        ret = -EEXIST;
2173                        break;
2174                }
2175                ret = loop_add(&lo, parm);
2176                break;
2177        case LOOP_CTL_REMOVE:
2178                ret = loop_lookup(&lo, parm);
2179                if (ret < 0)
2180                        break;
2181                if (lo->lo_state != Lo_unbound) {
2182                        ret = -EBUSY;
2183                        break;
2184                }
2185                if (atomic_read(&lo->lo_refcnt) > 0) {
2186                        ret = -EBUSY;
2187                        break;
2188                }
2189                lo->lo_disk->private_data = NULL;
2190                idr_remove(&loop_index_idr, lo->lo_number);
2191                loop_remove(lo);
2192                break;
2193        case LOOP_CTL_GET_FREE:
2194                ret = loop_lookup(&lo, -1);
2195                if (ret >= 0)
2196                        break;
2197                ret = loop_add(&lo, -1);
2198        }
2199        mutex_unlock(&loop_ctl_mutex);
2200
2201        return ret;
2202}
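
/*
 * Illustrative userspace sketch (error handling omitted): grabbing a free
 * device through /dev/loop-control, the same path "losetup -f" takes.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int nr  = ioctl(ctl, LOOP_CTL_GET_FREE);	// e.g. 3 -> /dev/loop3
 *
 *	char path[32];
 *	snprintf(path, sizeof(path), "/dev/loop%d", nr);
 *	// open(path) and LOOP_SET_FD as usual; LOOP_CTL_REMOVE deletes an
 *	// unbound, unused device again.
 */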
2203
2204static const struct file_operations loop_ctl_fops = {
2205        .open           = nonseekable_open,
2206        .unlocked_ioctl = loop_control_ioctl,
2207        .compat_ioctl   = loop_control_ioctl,
2208        .owner          = THIS_MODULE,
2209        .llseek         = noop_llseek,
2210};
2211
2212static struct miscdevice loop_misc = {
2213        .minor          = LOOP_CTRL_MINOR,
2214        .name           = "loop-control",
2215        .fops           = &loop_ctl_fops,
2216};
2217
2218MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
2219MODULE_ALIAS("devname:loop-control");
2220
2221static int __init loop_init(void)
2222{
2223        int i, nr;
2224        unsigned long range;
2225        struct loop_device *lo;
2226        int err;
2227
2228        part_shift = 0;
2229        if (max_part > 0) {
2230                part_shift = fls(max_part);
2231
2232                /*
2233                 * Adjust max_part according to part_shift as it is exported
2234                 * to user space so that users can compute the correct minor
2235                 * numbers if they want to create more devices.
2236                 *
2237                 * Note that -1 is required because partition 0 is reserved
2238                 * for the whole disk.
2239                 */
2240                max_part = (1UL << part_shift) - 1;
2241        }
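        /*
         * Worked example for the adjustment above: max_part=15 yields
         * part_shift = fls(15) = 4 and max_part stays (1 << 4) - 1 = 15,
         * while max_part=16 yields part_shift = fls(16) = 5 and max_part
         * is rounded up to 31.
         */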
2242
2243        if ((1UL << part_shift) > DISK_MAX_PARTS) {
2244                err = -EINVAL;
2245                goto err_out;
2246        }
2247
2248        if (max_loop > 1UL << (MINORBITS - part_shift)) {
2249                err = -EINVAL;
2250                goto err_out;
2251        }
2252
2253        /*
2254         * If max_loop is specified, create that many devices upfront.
2255         * This also becomes a hard limit. If max_loop is not specified,
2256         * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
2257         * init time. Loop devices can be requested on-demand with the
2258         * /dev/loop-control interface, or be instantiated by accessing
2259         * a 'dead' device node.
2260         */
2261        if (max_loop) {
2262                nr = max_loop;
2263                range = max_loop << part_shift;
2264        } else {
2265                nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
2266                range = 1UL << MINORBITS;
2267        }
2268
2269        err = misc_register(&loop_misc);
2270        if (err < 0)
2271                goto err_out;
2272
2273
2274        if (register_blkdev(LOOP_MAJOR, "loop")) {
2275                err = -EIO;
2276                goto misc_out;
2277        }
2278
2279        blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
2280                                  THIS_MODULE, loop_probe, NULL, NULL);
2281
2282        /* pre-create the number of devices given by config or max_loop */
2283        mutex_lock(&loop_ctl_mutex);
2284        for (i = 0; i < nr; i++)
2285                loop_add(&lo, i);
2286        mutex_unlock(&loop_ctl_mutex);
2287
2288        printk(KERN_INFO "loop: module loaded\n");
2289        return 0;
2290
2291misc_out:
2292        misc_deregister(&loop_misc);
2293err_out:
2294        return err;
2295}
2296
2297static int loop_exit_cb(int id, void *ptr, void *data)
2298{
2299        struct loop_device *lo = ptr;
2300
2301        loop_remove(lo);
2302        return 0;
2303}
2304
2305static void __exit loop_exit(void)
2306{
2307        unsigned long range;
2308
2309        range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
2310
2311        idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
2312        idr_destroy(&loop_index_idr);
2313
2314        blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
2315        unregister_blkdev(LOOP_MAJOR, "loop");
2316
2317        misc_deregister(&loop_misc);
2318}
2319
2320module_init(loop_init);
2321module_exit(loop_exit);
2322
2323#ifndef MODULE
2324static int __init max_loop_setup(char *str)
2325{
2326        max_loop = simple_strtol(str, NULL, 0);
2327        return 1;
2328}
2329
2330__setup("max_loop=", max_loop_setup);
2331#endif
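
/*
 * Usage note: when built in, the device count comes from the kernel
 * command line, e.g. "max_loop=8"; as a module the equivalent is
 * "modprobe loop max_loop=8 max_part=15" via the module_param()
 * declarations above.
 */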
2332