/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/* Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
 * The 16 bit shift is to set IATTR bit to 1, which means offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
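
/*
 * Illustrative use (a sketch modeled on the connect handlers, not a
 * verbatim copy of them): when a connect command carries an invalid
 * field, the offending offset can be reported back in the result, e.g.:
 *
 *	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */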

struct nvmet_ns {
	struct list_head	dev_link;
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}
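
/*
 * Sketch of how such container_of() accessors are used from configfs
 * attribute callbacks (hypothetical handler shown for illustration;
 * the name is an assumption, not a reference to in-tree code):
 *
 *	static ssize_t nvmet_ns_enable_show(struct config_item *item,
 *			char *page)
 *	{
 *		return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
 *	}
 */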

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/*
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		List head for holding a list of these elements.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_cq		**cqs;
	struct nvmet_sq		**sqs;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct list_head	namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
};
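
/*
 * Sketch of a transport wiring itself up (hypothetical names, modeled on
 * the in-tree fabrics transports rather than copied from one): fill in an
 * ops table and register it at module init time.
 *
 *	static const struct nvmet_fabrics_ops my_tgt_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		.queue_response	= my_tgt_queue_response,
 *		.add_port	= my_tgt_add_port,
 *		.remove_port	= my_tgt_remove_port,
 *		.delete_ctrl	= my_tgt_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&my_tgt_ops);
 */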

#define NVMET_MAX_INLINE_BIOVEC	8

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*rsp;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool		mpool_alloc;
			struct kiocb	iocb;
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
	};
	int			sg_cnt;
	/* data length as parsed from the command: */
	size_t			data_len;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;
};

extern struct workqueue_struct *buffered_io_wq;

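/*
 * Note (added for clarity): in an NVMe completion queue entry, bit 0 of
 * the status field is the phase tag and the status code occupies bits
 * 15:1, which is why the helper below shifts the status left by one.
 */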
static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
	req->rsp->status = cpu_to_le16(status << 1);
}

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->rsp->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
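
/*
 * Sketch of the request lifecycle a transport driver follows (an
 * illustration under assumed names, not a verbatim in-tree sequence):
 *
 *	if (!nvmet_req_init(req, cq, sq, &my_tgt_ops))
 *		return;	// core already completed the request with an error
 *	// ...map the capsule's data into req->sg / req->sg_cnt...
 *	nvmet_req_execute(req);
 *	// on completion the core invokes my_tgt_ops.queue_response(req)
 */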

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
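
/*
 * Typical use of the SGL copy helpers (a sketch in the spirit of the
 * admin command handlers; the "id" buffer is an assumption here): copy a
 * response payload into the command's data SGL, then complete:
 *
 *	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 *	nvmet_req_complete(req, status);
 */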

u32 nvmet_get_log_page_len(struct nvme_command *cmd);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO		120

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);

static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
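
/*
 * Worked example (hypothetical values): a Read with a zero-based NLB of 7
 * (rw.length == 7) on a namespace with 4KiB blocks (blksize_shift == 12)
 * yields (7 + 1) << 12 == 32768 bytes.
 */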
#endif /* _NVMET_H */