/*
 * NVMe over Fabrics common host definitions.
 */
6#ifndef _NVME_FABRICS_H
7#define _NVME_FABRICS_H 1
8
9#include <linux/in.h>
10#include <linux/inet.h>
11
/* Accepted range and default for the queue_size connect option. */
#define NVMF_MIN_QUEUE_SIZE 16
#define NVMF_MAX_QUEUE_SIZE 1024
#define NVMF_DEF_QUEUE_SIZE 128
/* Default delay (seconds) between reconnect attempts after a loss. */
#define NVMF_DEF_RECONNECT_DELAY 10

/* Default timeout (seconds) before giving up on a lost controller. */
#define NVMF_DEF_CTRL_LOSS_TMO 600
18
19
20
21
22
23
24
25
26
/*
 * Host identity (host NQN + host ID) used when connecting to fabrics
 * controllers.  Reference-counted so multiple controllers can share a
 * single host entry.
 */
struct nvmf_host {
	struct kref ref;		/* refcount; entry freed on last put */
	struct list_head list;		/* linkage -- presumably a global host list; confirm in fabrics.c */
	char nqn[NVMF_NQN_SIZE];	/* host NVMe Qualified Name string */
	uuid_t id;			/* host identifier (hostid) */
};
33
34
35
36
/*
 * Bitmask of connect-string options.  Each transport declares which of
 * these it requires/allows via nvmf_transport_ops.required_opts and
 * .allowed_opts; the parser records which were supplied in
 * nvmf_ctrl_options.mask.
 */
enum {
	NVMF_OPT_ERR = 0,			/* parse error / no option */
	NVMF_OPT_TRANSPORT = 1 << 0,		/* transport name (e.g. "rdma") */
	NVMF_OPT_NQN = 1 << 1,			/* subsystem NQN */
	NVMF_OPT_TRADDR = 1 << 2,		/* transport address */
	NVMF_OPT_TRSVCID = 1 << 3,		/* transport service id (e.g. port) */
	NVMF_OPT_QUEUE_SIZE = 1 << 4,		/* I/O queue depth */
	NVMF_OPT_NR_IO_QUEUES = 1 << 5,		/* number of I/O queues */
	NVMF_OPT_TL_RETRY_COUNT = 1 << 6,	/* transport-layer retry count */
	NVMF_OPT_KATO = 1 << 7,			/* keep-alive timeout */
	NVMF_OPT_HOSTNQN = 1 << 8,		/* host NQN override */
	NVMF_OPT_RECONNECT_DELAY = 1 << 9,	/* delay between reconnects */
	NVMF_OPT_HOST_TRADDR = 1 << 10,		/* host-side transport address */
	NVMF_OPT_CTRL_LOSS_TMO = 1 << 11,	/* controller loss timeout */
	NVMF_OPT_HOST_ID = 1 << 12,		/* host UUID override */
	NVMF_OPT_DUP_CONNECT = 1 << 13,		/* allow duplicate connections */
	NVMF_OPT_DISABLE_SQFLOW = 1 << 14,	/* disable SQ flow control */
	NVMF_OPT_HDR_DIGEST = 1 << 15,		/* enable header digest (TCP) */
	NVMF_OPT_DATA_DIGEST = 1 << 16,		/* enable data digest (TCP) */
	NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,	/* dedicated write queues */
	NVMF_OPT_NR_POLL_QUEUES = 1 << 18,	/* dedicated poll queues */
};
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
/**
 * struct nvmf_ctrl_options - parsed fabrics connect options
 *
 * @mask:		NVMF_OPT_* bitmask of options actually supplied.
 * @transport:		transport name string (owned; freed by nvmf_free_options).
 * @subsysnqn:		target subsystem NQN string.
 * @traddr:		target transport address string.
 * @trsvcid:		target transport service id string.
 * @host_traddr:	host-side transport address string.
 * @queue_size:		requested I/O queue depth.
 * @nr_io_queues:	requested number of I/O queues.
 * @reconnect_delay:	seconds between reconnect attempts.
 * @discovery_nqn:	true if @subsysnqn names the discovery subsystem.
 * @duplicate_connect:	allow a connection duplicating an existing one.
 * @kato:		keep-alive timeout value.
 * @host:		shared host identity (NQN + ID), refcounted.
 * @max_reconnects:	max reconnect attempts before giving up
 *			(presumably derived from ctrl_loss_tmo /
 *			reconnect_delay -- confirm in fabrics.c).
 * @disable_sqflow:	disable SQ flow control on connect.
 * @hdr_digest:		enable PDU header digest (TCP transport).
 * @data_digest:	enable PDU data digest (TCP transport).
 * @nr_write_queues:	number of dedicated queues for write I/O.
 * @nr_poll_queues:	number of dedicated queues for polled I/O.
 */
struct nvmf_ctrl_options {
	unsigned mask;
	char *transport;
	char *subsysnqn;
	char *traddr;
	char *trsvcid;
	char *host_traddr;
	size_t queue_size;
	unsigned int nr_io_queues;
	unsigned int reconnect_delay;
	bool discovery_nqn;
	bool duplicate_connect;
	unsigned int kato;
	struct nvmf_host *host;
	int max_reconnects;
	bool disable_sqflow;
	bool hdr_digest;
	bool data_digest;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
};
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
/**
 * struct nvmf_transport_ops - per-transport registration descriptor
 *
 * @entry:		linkage on the registered-transports list.
 * @module:		owning module, pinned while the transport is in use.
 * @name:		transport name matched against the "transport" option.
 * @required_opts:	NVMF_OPT_* bits the transport insists on.
 * @allowed_opts:	additional NVMF_OPT_* bits the transport accepts.
 * @create_ctrl:	instantiate a controller from parsed @opts; returns
 *			the new controller or an ERR_PTR-style failure --
 *			presumably; confirm against callers in fabrics.c.
 */
struct nvmf_transport_ops {
	struct list_head entry;
	struct module *module;
	const char *name;
	int required_opts;
	int allowed_opts;
	struct nvme_ctrl *(*create_ctrl)(struct device *dev,
			struct nvmf_ctrl_options *opts);
};
147
148static inline bool
149nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
150 struct nvmf_ctrl_options *opts)
151{
152 if (ctrl->state == NVME_CTRL_DELETING ||
153 ctrl->state == NVME_CTRL_DEAD ||
154 strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
155 strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
156 memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
157 return false;
158
159 return true;
160}
161
/* Property get/set over the fabric (NVMe-oF register access). */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
/* Issue the fabrics Connect command for the admin or an I/O queue. */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
/* Transport (de)registration with the fabrics core. */
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
/* Release a parsed options struct and everything it owns. */
void nvmf_free_options(struct nvmf_ctrl_options *opts);
/* Format the controller's address into @buf; returns bytes written. */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
/* Reconnect policy check after a connection loss. */
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
/* Fail a request that arrived while the controller is not ready. */
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq);
/* Slow-path readiness check backing nvmf_check_ready() below. */
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);
/* Compare IP-transport address options against an existing controller. */
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts);
178
179static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
180 bool queue_live)
181{
182 if (likely(ctrl->state == NVME_CTRL_LIVE ||
183 ctrl->state == NVME_CTRL_ADMIN_ONLY))
184 return true;
185 return __nvmf_check_ready(ctrl, rq, queue_live);
186}
187
188#endif
189