1
2
3
4
5
6#ifndef _NVME_FABRICS_H
7#define _NVME_FABRICS_H 1
8
9#include <linux/in.h>
10#include <linux/inet.h>
11
12#define NVMF_MIN_QUEUE_SIZE 16
13#define NVMF_MAX_QUEUE_SIZE 1024
14#define NVMF_DEF_QUEUE_SIZE 128
15#define NVMF_DEF_RECONNECT_DELAY 10
16
17#define NVMF_DEF_CTRL_LOSS_TMO 600
18
19
20
21
22
23
24
25
26
/**
 * struct nvmf_host - NVMe-over-Fabrics host identity
 * @ref:	reference count; presumably hosts are shared between multiple
 *		controller instances — confirm against the kref users in the
 *		fabrics core
 * @list:	list linkage (NOTE(review): looks like membership in a global
 *		host list — verify in the .c file)
 * @nqn:	host NVMe Qualified Name, fixed NVMF_NQN_SIZE buffer
 * @id:	host UUID
 */
struct nvmf_host {
	struct kref ref;
	struct list_head list;
	char nqn[NVMF_NQN_SIZE];
	uuid_t id;
};
33
34
35
36
/*
 * Single-bit flags identifying connect-string options.  They are combined
 * into bitmasks: struct nvmf_ctrl_options::mask records which options were
 * supplied, and struct nvmf_transport_ops::required_opts/allowed_opts state
 * which ones a transport needs or accepts.
 */
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_TRANSPORT	= 1 << 0,
	NVMF_OPT_NQN		= 1 << 1,
	NVMF_OPT_TRADDR		= 1 << 2,
	NVMF_OPT_TRSVCID	= 1 << 3,
	NVMF_OPT_QUEUE_SIZE	= 1 << 4,
	NVMF_OPT_NR_IO_QUEUES	= 1 << 5,
	NVMF_OPT_TL_RETRY_COUNT	= 1 << 6,
	NVMF_OPT_KATO		= 1 << 7,
	NVMF_OPT_HOSTNQN	= 1 << 8,
	NVMF_OPT_RECONNECT_DELAY = 1 << 9,
	NVMF_OPT_HOST_TRADDR	= 1 << 10,
	NVMF_OPT_CTRL_LOSS_TMO	= 1 << 11,
	NVMF_OPT_HOST_ID	= 1 << 12,
	NVMF_OPT_DUP_CONNECT	= 1 << 13,
	NVMF_OPT_DISABLE_SQFLOW	= 1 << 14,
	NVMF_OPT_HDR_DIGEST	= 1 << 15,
	NVMF_OPT_DATA_DIGEST	= 1 << 16,
	NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
	NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
	NVMF_OPT_TOS		= 1 << 19,
};
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
/**
 * struct nvmf_ctrl_options - parsed fabrics connect options
 * @mask:		bitmask of NVMF_OPT_* flags that were actually supplied
 * @transport:		transport name string (e.g. matched against
 *			nvmf_transport_ops::name)
 * @subsysnqn:		target subsystem NQN to connect to
 * @traddr:		transport address string
 * @trsvcid:		transport service id string
 * @host_traddr:	host-side transport address string
 * @queue_size:	requested queue depth (see NVMF_MIN/MAX/DEF_QUEUE_SIZE)
 * @nr_io_queues:	number of I/O queues to create
 * @reconnect_delay:	delay between reconnect attempts (presumably seconds,
 *			cf. NVMF_DEF_RECONNECT_DELAY — confirm in fabrics.c)
 * @discovery_nqn:	true if @subsysnqn is the well-known discovery NQN
 * @duplicate_connect:	allow connecting to an already-connected controller
 * @kato:		keep-alive timeout value (units not visible here —
 *			verify against the parser)
 * @host:		host identity (NQN + UUID), refcounted
 * @max_reconnects:	reconnect attempt limit (NOTE(review): likely derived
 *			from ctrl_loss_tmo / reconnect_delay — confirm)
 * @disable_sqflow:	disable SQ flow control on connect
 * @hdr_digest:	enable header digest (TCP transport option)
 * @data_digest:	enable data digest (TCP transport option)
 * @nr_write_queues:	number of dedicated write queues
 * @nr_poll_queues:	number of polling queues
 * @tos:		type-of-service value for the connection
 */
struct nvmf_ctrl_options {
	unsigned mask;
	char *transport;
	char *subsysnqn;
	char *traddr;
	char *trsvcid;
	char *host_traddr;
	size_t queue_size;
	unsigned int nr_io_queues;
	unsigned int reconnect_delay;
	bool discovery_nqn;
	bool duplicate_connect;
	unsigned int kato;
	struct nvmf_host *host;
	int max_reconnects;
	bool disable_sqflow;
	bool hdr_digest;
	bool data_digest;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
	int tos;
};
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
/**
 * struct nvmf_transport_ops - fabrics transport registration record
 * @entry:		list linkage used by nvmf_register_transport()/
 *			nvmf_unregister_transport()
 * @module:		owning module, for reference counting
 * @name:		transport name matched against the "transport=" option
 * @required_opts:	NVMF_OPT_* bits that must be present for this transport
 * @allowed_opts:	NVMF_OPT_* bits this transport accepts in addition to
 *			the required ones
 * @create_ctrl:	create and return a controller instance from parsed
 *			@opts (error handling convention not visible here —
 *			presumably ERR_PTR on failure, confirm in callers)
 */
struct nvmf_transport_ops {
	struct list_head entry;
	struct module *module;
	const char *name;
	int required_opts;
	int allowed_opts;
	struct nvme_ctrl *(*create_ctrl)(struct device *dev,
			struct nvmf_ctrl_options *opts);
};
150
151static inline bool
152nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
153 struct nvmf_ctrl_options *opts)
154{
155 if (ctrl->state == NVME_CTRL_DELETING ||
156 ctrl->state == NVME_CTRL_DEAD ||
157 strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
158 strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
159 memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
160 return false;
161
162 return true;
163}
164
/* Fabrics property (register) access over the admin queue. */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
/* Fabrics Connect command for the admin queue and I/O queue @qid. */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
/* Transport (un)registration against the fabrics core. */
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
/* Option-parsing/teardown and misc helpers exported by the fabrics core. */
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
/* Request gating while the controller is not live (see nvmf_check_ready). */
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts);
181
182static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
183 bool queue_live)
184{
185 if (likely(ctrl->state == NVME_CTRL_LIVE))
186 return true;
187 return __nvmf_check_ready(ctrl, rq, queue_live);
188}
189
190#endif
191