linux/drivers/block/virtio_blk.c
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
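
/*
 * Each virtio_blk_vq above pairs a virtqueue with its own lock and
 * name.  ____cacheline_aligned_in_smp keeps adjacent entries of the
 * vqs[] array on separate cachelines, so per-queue locks taken from
 * different CPUs do not false-share.
 */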

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* What the host tells us, plus 2 for the header and status tail. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
        struct scsi_request sreq;       /* for SCSI passthrough, must be first */
        u8 sense[SCSI_SENSE_BUFFERSIZE];
        struct virtio_scsi_inhdr in_hdr;
#endif
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};
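
/*
 * A virtblk_req lives in the per-request PDU that blk-mq allocates
 * for us; see the blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() conversions
 * below.  The trailing sg[] flexible array is sized at tag-set setup
 * time (tag_set.cmd_size in virtblk_probe()) to hold sg_elems
 * scatterlist entries.
 */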

static inline int virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return 0;
        case VIRTIO_BLK_S_UNSUPP:
                return -ENOTTY;
        default:
                return -EIO;
        }
}

/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;
        sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
        sgs[num_out++] = &cmd;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
        sgs[num_out + num_in++] = &sense;
        sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
        sgs[num_out + num_in++] = &inhdr;
        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_scsi_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        struct virtio_blk *vblk = req->q->queuedata;
        struct scsi_request *sreq = &vbr->sreq;

        sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
        sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
        req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
                struct virtblk_req *vbr, struct scatterlist *data_sg,
                bool have_data)
{
        return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl   NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
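
/*
 * Rough sketch of what virtblk_add_req() above puts on the ring for a
 * normal (non-SCSI) request:
 *
 *   read:   out: [out_hdr]             in: [data sg...][status]
 *   write:  out: [out_hdr][data sg...] in: [status]
 *   flush:  out: [out_hdr]             in: [status]
 *
 * The one-byte status is always the final device-writable buffer.
 */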

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        int error = virtblk_result(vbr);

        switch (req_op(req)) {
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                virtblk_scsi_request_done(req);
                break;
        case REQ_OP_DRV_IN:
                req->errors = (error != 0);
                break;
        }

        blk_mq_end_request(req, error);
}

static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req, req->errors);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
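
/*
 * Completion flow: the device interrupt lands in virtblk_done() above,
 * which hands each finished buffer to blk_mq_complete_request(); blk-mq
 * then invokes our ->complete handler (virtblk_request_done()) to
 * translate the status byte and end the request.  The
 * disable_cb/enable_cb loop re-polls the ring before re-enabling
 * callbacks, so a buffer that arrives in that window is not missed.
 */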

static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                type = VIRTIO_BLK_T_SCSI_CMD;
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_MQ_RQ_QUEUE_ERROR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
                err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        else
                err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_MQ_RQ_QUEUE_BUSY;
                return BLK_MQ_RQ_QUEUE_ERROR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_MQ_RQ_QUEUE_OK;
}
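
/*
 * Note the split above: virtqueue_kick_prepare() runs under the
 * per-queue lock, but the actual virtqueue_notify() happens after the
 * lock is dropped, so other CPUs are not held off the queue for the
 * duration of the (potentially expensive) exit to the host.
 */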

/* Return the id (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
out:
        blk_put_request(req);
        return err;
}
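
/*
 * virtblk_get_id() backs the "serial" sysfs attribute below: the
 * request is driven as REQ_OP_DRV_IN, which virtio_queue_rq() maps to
 * VIRTIO_BLK_T_GET_ID, so id_str must provide at least
 * VIRTIO_BLK_ID_BYTES of space.
 */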

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}
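
/*
 * With PART_BITS = 4 each disk reserves 1 << 4 = 16 minors, e.g.
 * index 0 -> minor 0, index 1 -> minor 16, index 2 -> minor 32,
 * leaving room for 15 partitions per disk.
 */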

static ssize_t virtblk_serial_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        char *envp[] = { "RESIZE=1", NULL };
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        string_get_size(capacity, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(capacity, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                  "new size: %llu %d-byte logical blocks (%s/%s)\n",
                  (unsigned long long)capacity,
                  queue_logical_block_size(q),
                  cap_str_10, cap_str_2);

        set_capacity(vblk->disk, capacity);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration.  */
        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names,
                        &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}
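
/*
 * Examples of the scheme: index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa",
 * 701 -> "vdzz", 702 -> "vdaaa" -- the same bijective base-26 naming
 * that sd uses for its disks.
 */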

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};
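
/*
 * The strings above are indexed by the writeback flag:
 * virtblk_cache_types[0] is "write through" (wce = 0) and
 * virtblk_cache_types[1] is "write back" (wce = 1), matching the value
 * stored to and read from the wce config byte below.
 */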

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
                if (sysfs_streq(buf, virtblk_cache_types[i]))
                        break;

        if (i < 0)
                return -EINVAL;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
        __ATTR(cache_type, S_IRUGO,
               virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
        __ATTR(cache_type, S_IRUGO|S_IWUSR,
               virtblk_cache_type_show, virtblk_cache_type_store);

static int virtblk_init_request(void *data, struct request *rq,
                unsigned int hctx_idx, unsigned int request_idx,
                unsigned int numa_node)
{
        struct virtio_blk *vblk = data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
        vbr->sreq.sense = vbr->sense;
#endif
        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}

static struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
        .map_queues     = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
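
/*
 * queue_depth is read-only at runtime (0444).  Left at its default of
 * 0 it means "size the queue to the ring": virtblk_probe() fills in
 * the first virtqueue's free-descriptor count, halved when indirect
 * descriptors are unavailable (two descriptors per request then).
 */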

static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u64 cap;
        u32 v, blk_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;
        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)cap != cap) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)cap);
                cap = (sector_t)-1;
        }
        set_capacity(vblk->disk, cap);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No need to bounce any requests */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                blk_queue_max_segment_size(q, v);
        else
                blk_queue_max_segment_size(q, -1U);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk);
        err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
        if (err)
                goto out_del_disk;

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_rw);
        else
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_ro);
        if (err)
                goto out_del_disk;
        return 0;

out_del_disk:
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_stop_hw_queues(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        VIRTIO_BLK_F_SCSI,
#endif
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ,
};
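
/*
 * The one difference between the two tables: VIRTIO_BLK_F_SCSI is only
 * offered in features_legacy[].  SCSI passthrough did not make it into
 * the virtio 1.0 spec, so modern devices never negotiate it.
 */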

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");