/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

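/*
 * Open the block device backing this namespace and cache its size and
 * logical block size for later LBA-to-sector conversions.  Returns a
 * negative errno on failure; -ENOTBLK in particular lets the caller
 * fall back to a file-backed namespace.
 */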
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
	return 0;
}

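/* Release the backing device; harmless if the namespace has no bdev. */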
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}

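/*
 * Shared bi_end_io handler: complete the nvmet request, mapping any
 * block layer error to a generic NVMe internal error, and drop the bio
 * unless it is the inline one embedded in the request.
 */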
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req,
		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

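/*
 * Read/write fast path: map the request's scatterlist onto one or more
 * bios.  Transfers that fit in NVMET_MAX_INLINE_DATA_LEN reuse the
 * inline bio and bio_vecs embedded in the request, avoiding an
 * allocation in the common case.
 */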
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	sector_t sector;
	blk_qc_t cookie;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

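	/*
	 * Requests using PCI peer-to-peer memory must not be merged
	 * with requests using regular host memory.
	 */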
	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op_flags |= REQ_NOMERGE;

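	/* Convert the namespace LBA to a 512-byte-granularity sector. */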
	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

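	/*
	 * Add each scatterlist element to the current bio.  Whenever the
	 * bio fills up, allocate a fresh one, chain it to the full bio
	 * and submit the latter; bio_chain() guarantees nvmet_bio_done()
	 * runs only once the entire chain has completed.
	 */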
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

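	/*
	 * Submit the last bio of the chain and opportunistically poll
	 * the backing queue for completion if it supports polling.
	 */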
	cookie = submit_bio(bio);

	blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}

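/*
 * An NVMe flush transfers no data, so an empty bio marked REQ_PREFLUSH
 * is enough to force the backing device's volatile write cache to media.
 */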
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

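/*
 * Synchronous flush helper for callers outside the command fast path,
 * e.g. the core flushing a namespace before marking it write-protected.
 */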
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

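/*
 * Queue a discard for a single DSM range, converting namespace blocks
 * to 512-byte sectors.  __blkdev_issue_discard() accumulates work in
 * *bio so that several ranges can be chained and submitted as one unit.
 * -EOPNOTSUPP is deliberately swallowed: deallocate is a hint, so a
 * backend without discard support still reports success.
 */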
static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);
	if (ret && ret != -EOPNOTSUPP)
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

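/*
 * Walk the range list in the command payload (dsm.nr is zero-based,
 * hence the <= bound) and build one bio chain covering every range.
 * On error, any partially built chain is failed through bio_endio().
 */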
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req->ns, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

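/*
 * Dataset Management: only the Deallocate attribute maps to real work;
 * the integral-read/write hints (and any unknown attribute) are
 * advisory and complete successfully without touching the device.
 */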
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

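/*
 * Write Zeroes: the NLB field is a zero-based count, hence the +1.
 * __blkdev_issue_zeroout() may build a chain of bios; the request is
 * completed from nvmet_bio_done() once the final bio finishes.
 */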
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
				GFP_KERNEL, &bio, 0))
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

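/*
 * Command admission: pick the execute handler for the opcode and record
 * the data length the transport should expect for the transfer.
 */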
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		/* Write Zeroes carries no data payload. */
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}