linux/drivers/nvme/target/io-cmd.c
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

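/*
 * Common bi_end_io callback.  Bios other than the request's inline bio are
 * released with bio_put(); the inline bio instead frees the bio_aux it
 * allocated.  A block-layer error is reported to the host as an internal,
 * do-not-retry status.
 */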
static void nvmet_bio_done(struct bio *bio, int err)
{
        struct nvmet_req *req = bio->bi_private;

        if (bio != &req->inline_bio)
                bio_put(bio);
        else
                kfree(bio->bio_aux);

        if (err)
                nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
        else
                nvmet_req_complete(req, 0);
}

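/*
 * Transfer length in bytes: rw.length is a 0's based count of logical
 * blocks, so add one and scale by the namespace block size.
 */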
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
        return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
                        req->ns->blksize_shift;
}

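/*
 * Initialize the bio embedded in the request, backed by the request's
 * inline bio_vec array, so small transfers need no separate bio allocation.
 */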
static void nvmet_inline_bio_init(struct nvmet_req *req)
{
        struct bio *bio = &req->inline_bio;

        bio_init(bio);
        bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
        bio->bi_io_vec = req->inline_bvec;
}

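/*
 * Handle NVMe read and write commands by mapping the request's scatterlist
 * onto one or more bios and submitting them to the backing block device.
 */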
static void nvmet_execute_rw(struct nvmet_req *req)
{
        int sg_cnt = req->sg_cnt;
        struct scatterlist *sg;
        struct bio *bio;
        sector_t sector;
        int op, op_flags = 0, i;

        if (!req->sg_cnt) {
                nvmet_req_complete(req, 0);
                return;
        }

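        /*
         * Writes are submitted with WRITE_ODIRECT, plus REQ_FUA when the
         * host set the FUA bit; everything else is handled as a read.
         */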
        if (req->cmd->rw.opcode == nvme_cmd_write) {
                op = REQ_OP_WRITE;
                op_flags = WRITE_ODIRECT;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        op_flags |= REQ_FUA;
        } else {
                op = REQ_OP_READ;
        }

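        /* Convert the starting LBA to 512-byte sectors. */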
        sector = le64_to_cpu(req->cmd->rw.slba);
        sector <<= (req->ns->blksize_shift - 9);

        nvmet_inline_bio_init(req);
        bio = &req->inline_bio;
        bio->bi_bdev = req->ns->bdev;
        bio->bi_sector = sector;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio_set_op_attrs(bio, op, op_flags);

        /* The inline bio has to set up its bio_aux explicitly. */
        bio->bio_aux = kmalloc(sizeof(struct bio_aux), GFP_KERNEL);
        if (!bio->bio_aux) {
                nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
                return;
        }
        bio_init_aux(bio, bio->bio_aux);

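        /*
         * Map each scatterlist element into the current bio.  When a bio
         * fills up, allocate a new one, chain it to the previous bio and
         * submit the previous one, so the request only completes once the
         * whole chain has finished.
         */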
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        struct bio *prev = bio;

                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio->bi_bdev = req->ns->bdev;
                        bio->bi_sector = sector;
                        bio_set_op_attrs(bio, op, op_flags);

                        bio_chain(bio, prev);
                        submit_bio(bio_data_dir(prev), prev);
                }

                sector += sg->length >> 9;
                sg_cnt--;
        }

        submit_bio(bio_data_dir(bio), bio);
}

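/*
 * Handle NVMe flush commands by submitting an empty flush bio to the
 * backing block device.
 */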
static void nvmet_execute_flush(struct nvmet_req *req)
{
        struct bio *bio;

        nvmet_inline_bio_init(req);
        bio = &req->inline_bio;

        bio->bi_bdev = req->ns->bdev;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);

        submit_bio(bio_data_dir(bio), bio);
}

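/*
 * Discard a single DSM range, converted from namespace blocks to 512-byte
 * sectors.  blkdev_issue_discard() is synchronous here, so the bio double
 * pointer is never populated; -EOPNOTSUPP from the device is ignored rather
 * than reported as an error.
 */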
static u16 nvmet_discard_range(struct nvmet_ns *ns,
                struct nvme_dsm_range *range, struct bio **bio)
{
        int ret;

        ret = blkdev_issue_discard(ns->bdev,
                        le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                        GFP_KERNEL, 0);
        if (ret && ret != -EOPNOTSUPP)
                return NVME_SC_INTERNAL | NVME_SC_DNR;

        return 0;
}

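/*
 * Walk the DSM range list in the command's data buffer and discard each
 * range in turn, stopping and reporting status at the first failure.
 */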
static void nvmet_execute_discard(struct nvmet_req *req)
{
        struct nvme_dsm_range range;
        struct bio *bio = NULL;
        int i, err;
        u16 status;

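        /* dsm.nr is a 0's based count, so nr + 1 ranges are processed. */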
        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                sizeof(range));
                if (status)
                        break;

                status = nvmet_discard_range(req->ns, &range, &bio);
                if (status)
                        break;
        }

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status) {
                        err = -EIO;
                        bio_endio(bio, err);
                } else {
                        submit_bio(bio_data_dir(bio), bio);
                }
        } else {
                nvmet_req_complete(req, status);
        }
}

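/*
 * Dispatch a Dataset Management command.  Only the deallocate (discard)
 * attribute is implemented; the integral read/write attributes are accepted
 * and completed without any action.
 */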
static void nvmet_execute_dsm(struct nvmet_req *req)
{
        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

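/*
 * Validate an I/O command, look up the target namespace and select the
 * execute handler and expected data length for the opcode.
 */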
u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
        if (unlikely(!req->ns))
                return NVME_SC_INVALID_NS | NVME_SC_DNR;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_execute_flush;
                req->data_len = 0;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_execute_dsm;
                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                        sizeof(struct nvme_dsm_range);
                return 0;
        default:
                pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
                       req->sq->qid);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}