linux/drivers/nvme/target/io-cmd.c
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

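/*
 * Completion handler for every bio submitted from this file.  A block
 * layer error is reported to the host as a generic internal error with
 * the Do Not Retry bit set.  The inline bio embedded in the request must
 * not be freed; any separately allocated bio is dropped here.
 */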
static void nvmet_bio_done(struct bio *bio)
{
        struct nvmet_req *req = bio->bi_private;

        nvmet_req_complete(req,
                bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

        if (bio != &req->inline_bio)
                bio_put(bio);
}

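/*
 * Transfer length in bytes.  The NVMe "number of logical blocks" field
 * is zero-based, hence the +1 before converting blocks to bytes.
 */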
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
        return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
                        req->ns->blksize_shift;
}

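/*
 * Read/write handler: map the request's scatterlist onto one or more
 * bios.  The bio embedded in the request covers the common case; when
 * it fills up, further bios are allocated, chained and submitted as we
 * go, so only the final bio needs an explicit submit_bio() at the end.
 */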
static void nvmet_execute_rw(struct nvmet_req *req)
{
        int sg_cnt = req->sg_cnt;
        struct bio *bio = &req->inline_bio;
        struct scatterlist *sg;
        sector_t sector;
        blk_qc_t cookie;
        int op, op_flags = 0, i;

        if (!req->sg_cnt) {
                nvmet_req_complete(req, 0);
                return;
        }

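        /*
         * Writes are marked synchronous, and the host's Force Unit
         * Access bit is translated into REQ_FUA; reads need no flags.
         */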
        if (req->cmd->rw.opcode == nvme_cmd_write) {
                op = REQ_OP_WRITE;
                op_flags = REQ_SYNC | REQ_IDLE;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        op_flags |= REQ_FUA;
        } else {
                op = REQ_OP_READ;
        }

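        /*
         * Convert the namespace-relative LBA to a 512-byte linear
         * sector; this assumes a namespace block size of at least 512B.
         */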
        sector = le64_to_cpu(req->cmd->rw.slba);
        sector <<= (req->ns->blksize_shift - 9);

        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio_set_op_attrs(bio, op, op_flags);

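        /*
         * Map each scatterlist entry into the current bio.  When the
         * bio is full, allocate a successor sized for the remaining
         * entries, chain it to the current one and submit the latter.
         */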
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        struct bio *prev = bio;

                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio_set_dev(bio, req->ns->bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio_set_op_attrs(bio, op, op_flags);

                        bio_chain(bio, prev);
                        submit_bio(prev);
                }

                sector += sg->length >> 9;
                sg_cnt--;
        }

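        /*
         * Submit the last bio in the chain and poll the underlying
         * queue for its completion.
         */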
        cookie = submit_bio(bio);

        blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}

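/*
 * Flush handler: an empty preflush write forces the device's volatile
 * write cache to stable storage; completion is reported through
 * nvmet_bio_done().
 */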
static void nvmet_execute_flush(struct nvmet_req *req)
{
        struct bio *bio = &req->inline_bio;

        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        submit_bio(bio);
}

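/*
 * Queue a discard for one DSM range.  __blkdev_issue_discard() appends
 * to (or allocates) *bio so that all ranges of a command complete
 * through a single chain.  -EOPNOTSUPP is deliberately not treated as
 * an error: a device without discard support simply ignores the hint.
 */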
static u16 nvmet_discard_range(struct nvmet_ns *ns,
                struct nvme_dsm_range *range, struct bio **bio)
{
        int ret;

        ret = __blkdev_issue_discard(ns->bdev,
                        le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                        GFP_KERNEL, 0, bio);
        if (ret && ret != -EOPNOTSUPP)
                return NVME_SC_INTERNAL | NVME_SC_DNR;
        return 0;
}

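/*
 * Walk the command's range list (the NR field is zero-based, hence the
 * "<="), copying each descriptor out of the SGL and queueing it.  If a
 * bio chain was built, the request completes via nvmet_bio_done(); on
 * a mid-list failure the chain is failed rather than submitted.
 */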
static void nvmet_execute_discard(struct nvmet_req *req)
{
        struct nvme_dsm_range range;
        struct bio *bio = NULL;
        int i;
        u16 status;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                sizeof(range));
                if (status)
                        break;

                status = nvmet_discard_range(req->ns, &range, &bio);
                if (status)
                        break;
        }

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else {
                        submit_bio(bio);
                }
        } else {
                nvmet_req_complete(req, status);
        }
}

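/*
 * Dataset Management: only the Deallocate attribute is implemented.
 * Integral Read/Write are advisory, so they (and any unknown
 * attribute) are completed successfully without doing anything.
 */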
static void nvmet_execute_dsm(struct nvmet_req *req)
{
        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

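/*
 * Write Zeroes: translate the zero-based block count into 512-byte
 * sectors and let the block layer zero the range, offloading to the
 * device where possible.  As with discard, a returned bio chain
 * completes through nvmet_bio_done().
 */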
static void nvmet_execute_write_zeroes(struct nvmet_req *req)
{
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        struct bio *bio = NULL;
        u16 status = NVME_SC_SUCCESS;
        sector_t sector;
        sector_t nr_sector;

        sector = le64_to_cpu(write_zeroes->slba) <<
                (req->ns->blksize_shift - 9);
        nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                (req->ns->blksize_shift - 9));

        if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
                                GFP_KERNEL, &bio, 0))
                status = NVME_SC_INTERNAL | NVME_SC_DNR;

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                submit_bio(bio);
        } else {
                nvmet_req_complete(req, status);
        }
}

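/*
 * Entry point for I/O queue commands: validate controller state, look
 * up the target namespace, and set the execute handler plus the number
 * of data bytes the transport must move for this command.
 */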
u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret)) {
                req->ns = NULL;
                return ret;
        }

        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
        if (unlikely(!req->ns))
                return NVME_SC_INVALID_NS | NVME_SC_DNR;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_execute_flush;
                req->data_len = 0;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_execute_dsm;
                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                        sizeof(struct nvme_dsm_range);
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_execute_write_zeroes;
                /* Write Zeroes moves no data; don't leave a stale length. */
                req->data_len = 0;
                return 0;
        default:
                pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
                       req->sq->qid);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}