linux/drivers/nvdimm/nd_virtio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * virtio_pmem.c: Virtio pmem Driver
 *
 * Discovers persistent memory range information
 * from host and provides a virtio based flushing
 * interface.
 */
#include "virtio_pmem.h"
#include "nd.h"

/* The interrupt handler */
void virtio_pmem_host_ack(struct virtqueue *vq)
{
	struct virtio_pmem *vpmem = vq->vdev->priv;
	struct virtio_pmem_request *req_data, *req_buf;
	unsigned long flags;
	unsigned int len;

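	/*
	 * pmem_lock serializes access to the request virtqueue and to the
	 * req_list of callers waiting for a free descriptor.
	 */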
	spin_lock_irqsave(&vpmem->pmem_lock, flags);
	while ((req_data = virtqueue_get_buf(vq, &len)) != NULL) {
		req_data->done = true;
		wake_up(&req_data->host_acked);

		if (!list_empty(&vpmem->req_list)) {
			req_buf = list_first_entry(&vpmem->req_list,
					struct virtio_pmem_request, list);
			req_buf->wq_buf_avail = true;
			wake_up(&req_buf->wq_buf);
			list_del(&req_buf->list);
		}
	}
	spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
}
EXPORT_SYMBOL_GPL(virtio_pmem_host_ack);

/* The request submission function */
static int virtio_pmem_flush(struct nd_region *nd_region)
{
	struct virtio_device *vdev = nd_region->provider_data;
	struct virtio_pmem *vpmem  = vdev->priv;
	struct virtio_pmem_request *req_data;
	struct scatterlist *sgs[2], sg, ret;
	unsigned long flags;
	int err, err1;

	might_sleep();
	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		return -ENOMEM;

	req_data->done = false;
	init_waitqueue_head(&req_data->host_acked);
	init_waitqueue_head(&req_data->wq_buf);
	INIT_LIST_HEAD(&req_data->list);
	req_data->req.type = cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH);
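	/*
	 * Two scatterlist entries: a driver-to-device buffer carrying the
	 * flush request and a device-to-driver buffer receiving the status
	 * word, matching the 1 out / 1 in counts passed to
	 * virtqueue_add_sgs() below.
	 */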
	sg_init_one(&sg, &req_data->req, sizeof(req_data->req));
	sgs[0] = &sg;
	sg_init_one(&ret, &req_data->resp.ret, sizeof(req_data->resp));
	sgs[1] = &ret;

	spin_lock_irqsave(&vpmem->pmem_lock, flags);
	/*
	 * If virtqueue_add_sgs returns -ENOSPC, the req_vq virtual
	 * queue has no free descriptors. Add the request to req_list
	 * and wait for host_ack to wake us up when free slots become
	 * available.
	 */
	while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
					GFP_ATOMIC)) == -ENOSPC) {
		dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
		req_data->wq_buf_avail = false;
		list_add_tail(&req_data->list, &vpmem->req_list);
		spin_unlock_irqrestore(&vpmem->pmem_lock, flags);

		/* host_ack() frees a descriptor and wakes us via wq_buf */
		wait_event(req_data->wq_buf, req_data->wq_buf_avail);
		spin_lock_irqsave(&vpmem->pmem_lock, flags);
	}
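	/* Notify the host that a new request is queued */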
	err1 = virtqueue_kick(vpmem->req_vq);
	spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
	/*
	 * If virtqueue_add_sgs failed with an error other than -ENOSPC, or
	 * the kick failed, we can't do anything about it; report -EIO.
	 */
	if (err || !err1) {
		dev_info(&vdev->dev, "failed to send command to virtio pmem device\n");
		err = -EIO;
	} else {
		/* A host response results in "host_ack" getting called */
		wait_event(req_data->host_acked, req_data->done);
		err = le32_to_cpu(req_data->resp.ret);
	}

	kfree(req_data);
	return err;
}

/* The asynchronous flush callback function */
int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
{
	/*
	 * Create a child bio for the asynchronous flush and chain it to
	 * the parent bio. Otherwise, call the nd_region flush directly.
	 */
	if (bio && bio->bi_iter.bi_sector != -1) {
		struct bio *child = bio_alloc(GFP_ATOMIC, 0);

		if (!child)
			return -ENOMEM;
		bio_copy_dev(child, bio);
		child->bi_opf = REQ_PREFLUSH;
		child->bi_iter.bi_sector = -1;
		bio_chain(child, bio);
		submit_bio(child);
		return 0;
	}
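	/* Flush-only bio (bi_sector == -1) or no bio: flush synchronously */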
	if (virtio_pmem_flush(nd_region))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(async_pmem_flush);
MODULE_LICENSE("GPL");
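
/*
 * Usage sketch (an assumption, not part of this file): a virtio-pmem
 * probe routine is expected to register async_pmem_flush as the region
 * flush callback and the virtio device as provider_data, roughly:
 *
 *	ndr_desc.provider_data = vdev;
 *	ndr_desc.flush = async_pmem_flush;
 *	nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
 *
 * That registration is where nd_region->provider_data, read at the top
 * of virtio_pmem_flush(), comes from.
 */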