// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/atomic.h>
16#include <linux/ctype.h>
17#include <linux/delay.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/nvme.h>
22#include <linux/slab.h>
23#include <linux/string.h>
24#include <linux/wait.h>
25#include <linux/inet.h>
26#include <asm/unaligned.h>
27
28#include <rdma/ib_verbs.h>
29#include <rdma/rdma_cm.h>
30#include <rdma/rw.h>
31
32#include <linux/nvme-rdma.h>
33#include "nvmet.h"
34

/* Assume mpsmin == device_page_size == 4KB */
38#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
39#define NVMET_RDMA_MAX_INLINE_SGE 4
40#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
41
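/*
 * Per-command receive context: sge[0] maps the 64-byte NVMe command
 * capsule, sge[1..NVMET_RDMA_MAX_INLINE_SGE] map the optional in-capsule
 * (inline) data pages.
 */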
42struct nvmet_rdma_cmd {
43 struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
44 struct ib_cqe cqe;
45 struct ib_recv_wr wr;
46 struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
47 struct nvme_command *nvme_cmd;
48 struct nvmet_rdma_queue *queue;
49};
50
51enum {
52 NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
53 NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
54};
55
56struct nvmet_rdma_rsp {
57 struct ib_sge send_sge;
58 struct ib_cqe send_cqe;
59 struct ib_send_wr send_wr;
60
61 struct nvmet_rdma_cmd *cmd;
62 struct nvmet_rdma_queue *queue;
63
64 struct ib_cqe read_cqe;
65 struct rdma_rw_ctx rw;
66
67 struct nvmet_req req;
68
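	/* true when this rsp was kmalloc'ed on demand instead of taken from the pool */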
69 bool allocated;
70 u8 n_rdma;
71 u32 flags;
72 u32 invalidate_rkey;
73
74 struct list_head wait_list;
75 struct list_head free_list;
76};
77
78enum nvmet_rdma_queue_state {
79 NVMET_RDMA_Q_CONNECTING,
80 NVMET_RDMA_Q_LIVE,
81 NVMET_RDMA_Q_DISCONNECTING,
82};
83
84struct nvmet_rdma_queue {
85 struct rdma_cm_id *cm_id;
86 struct nvmet_port *port;
87 struct ib_cq *cq;
88 atomic_t sq_wr_avail;
89 struct nvmet_rdma_device *dev;
90 spinlock_t state_lock;
91 enum nvmet_rdma_queue_state state;
92 struct nvmet_cq nvme_cq;
93 struct nvmet_sq nvme_sq;
94
95 struct nvmet_rdma_rsp *rsps;
96 struct list_head free_rsps;
97 spinlock_t rsps_lock;
98 struct nvmet_rdma_cmd *cmds;
99
100 struct work_struct release_work;
101 struct list_head rsp_wait_list;
102 struct list_head rsp_wr_wait_list;
103 spinlock_t rsp_wr_wait_lock;
104
105 int idx;
106 int host_qid;
107 int recv_queue_size;
108 int send_queue_size;
109
110 struct list_head queue_list;
111};
112
113struct nvmet_rdma_device {
114 struct ib_device *device;
115 struct ib_pd *pd;
116 struct ib_srq *srq;
117 struct nvmet_rdma_cmd *srq_cmds;
118 size_t srq_size;
119 struct kref ref;
120 struct list_head entry;
121 int inline_data_size;
122 int inline_page_count;
123};
124
125static bool nvmet_rdma_use_srq;
126module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
127MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
128
129static DEFINE_IDA(nvmet_rdma_queue_ida);
130static LIST_HEAD(nvmet_rdma_queue_list);
131static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
132
133static LIST_HEAD(device_list);
134static DEFINE_MUTEX(device_list_mutex);
135
136static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
137static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
138static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
139static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
140static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
141static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
142
143static const struct nvmet_fabrics_ops nvmet_rdma_ops;
144
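/* number of PAGE_SIZE pages needed to cover len bytes (len > 0) */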
145static int num_pages(int len)
146{
147 return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
148}
149
150
151static inline u32 get_unaligned_le24(const u8 *p)
152{
153 return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
154}
155
156static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
157{
158 return nvme_is_write(rsp->req.cmd) &&
159 rsp->req.transfer_len &&
160 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
161}
162
163static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
164{
165 return !nvme_is_write(rsp->req.cmd) &&
166 rsp->req.transfer_len &&
167 !rsp->req.rsp->status &&
168 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
169}
170
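/*
 * Take a pre-allocated rsp from the queue's free list.  Under heavy load
 * the list can run empty, in which case we fall back to a dynamic
 * allocation that nvmet_rdma_put_rsp() frees again.
 */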
171static inline struct nvmet_rdma_rsp *
172nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
173{
174 struct nvmet_rdma_rsp *rsp;
175 unsigned long flags;
176
177 spin_lock_irqsave(&queue->rsps_lock, flags);
178 rsp = list_first_entry_or_null(&queue->free_rsps,
179 struct nvmet_rdma_rsp, free_list);
180 if (likely(rsp))
181 list_del(&rsp->free_list);
182 spin_unlock_irqrestore(&queue->rsps_lock, flags);
183
184 if (unlikely(!rsp)) {
185 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
186 if (unlikely(!rsp))
187 return NULL;
188 rsp->allocated = true;
189 }
190
191 return rsp;
192}
193
194static inline void
195nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
196{
197 unsigned long flags;
198
199 if (rsp->allocated) {
200 kfree(rsp);
201 return;
202 }
203
204 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
205 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
206 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
207}
208
209static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
210 struct nvmet_rdma_cmd *c)
211{
212 struct scatterlist *sg;
213 struct ib_sge *sge;
214 int i;
215
216 if (!ndev->inline_data_size)
217 return;
218
219 sg = c->inline_sg;
220 sge = &c->sge[1];
221
222 for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
223 if (sge->length)
224 ib_dma_unmap_page(ndev->device, sge->addr,
225 sge->length, DMA_FROM_DEVICE);
226 if (sg_page(sg))
227 __free_page(sg_page(sg));
228 }
229}
230
231static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
232 struct nvmet_rdma_cmd *c)
233{
234 struct scatterlist *sg;
235 struct ib_sge *sge;
236 struct page *pg;
237 int len;
238 int i;
239
240 if (!ndev->inline_data_size)
241 return 0;
242
243 sg = c->inline_sg;
244 sg_init_table(sg, ndev->inline_page_count);
245 sge = &c->sge[1];
246 len = ndev->inline_data_size;
247
248 for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
249 pg = alloc_page(GFP_KERNEL);
250 if (!pg)
251 goto out_err;
252 sg_assign_page(sg, pg);
253 sge->addr = ib_dma_map_page(ndev->device,
254 pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
255 if (ib_dma_mapping_error(ndev->device, sge->addr))
256 goto out_err;
257 sge->length = min_t(int, len, PAGE_SIZE);
258 sge->lkey = ndev->pd->local_dma_lkey;
259 len -= sge->length;
260 }
261
262 return 0;
263out_err:
264 for (; i >= 0; i--, sg--, sge--) {
265 if (sge->length)
266 ib_dma_unmap_page(ndev->device, sge->addr,
267 sge->length, DMA_FROM_DEVICE);
268 if (sg_page(sg))
269 __free_page(sg_page(sg));
270 }
271 return -ENOMEM;
272}
273
274static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
275 struct nvmet_rdma_cmd *c, bool admin)
276{
277
278 c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
279 if (!c->nvme_cmd)
280 goto out;
281
282 c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
283 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
284 if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
285 goto out_free_cmd;
286
287 c->sge[0].length = sizeof(*c->nvme_cmd);
288 c->sge[0].lkey = ndev->pd->local_dma_lkey;
289
290 if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
291 goto out_unmap_cmd;
292
293 c->cqe.done = nvmet_rdma_recv_done;
294
295 c->wr.wr_cqe = &c->cqe;
296 c->wr.sg_list = c->sge;
297 c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
298
299 return 0;
300
301out_unmap_cmd:
302 ib_dma_unmap_single(ndev->device, c->sge[0].addr,
303 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
304out_free_cmd:
305 kfree(c->nvme_cmd);
306
307out:
308 return -ENOMEM;
309}
310
311static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
312 struct nvmet_rdma_cmd *c, bool admin)
313{
314 if (!admin)
315 nvmet_rdma_free_inline_pages(ndev, c);
316 ib_dma_unmap_single(ndev->device, c->sge[0].addr,
317 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
318 kfree(c->nvme_cmd);
319}
320
321static struct nvmet_rdma_cmd *
322nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
323 int nr_cmds, bool admin)
324{
325 struct nvmet_rdma_cmd *cmds;
326 int ret = -EINVAL, i;
327
328 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
329 if (!cmds)
330 goto out;
331
332 for (i = 0; i < nr_cmds; i++) {
333 ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
334 if (ret)
335 goto out_free;
336 }
337
338 return cmds;
339
340out_free:
341 while (--i >= 0)
342 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
343 kfree(cmds);
344out:
345 return ERR_PTR(ret);
346}
347
348static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
349 struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
350{
351 int i;
352
353 for (i = 0; i < nr_cmds; i++)
354 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
355 kfree(cmds);
356}
357
358static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
359 struct nvmet_rdma_rsp *r)
360{
361
362 r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
363 if (!r->req.rsp)
364 goto out;
365
366 r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
367 sizeof(*r->req.rsp), DMA_TO_DEVICE);
368 if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
369 goto out_free_rsp;
370
371 r->send_sge.length = sizeof(*r->req.rsp);
372 r->send_sge.lkey = ndev->pd->local_dma_lkey;
373
374 r->send_cqe.done = nvmet_rdma_send_done;
375
376 r->send_wr.wr_cqe = &r->send_cqe;
377 r->send_wr.sg_list = &r->send_sge;
378 r->send_wr.num_sge = 1;
379 r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
382 r->read_cqe.done = nvmet_rdma_read_data_done;
383 return 0;
384
385out_free_rsp:
386 kfree(r->req.rsp);
387out:
388 return -ENOMEM;
389}
390
391static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
392 struct nvmet_rdma_rsp *r)
393{
394 ib_dma_unmap_single(ndev->device, r->send_sge.addr,
395 sizeof(*r->req.rsp), DMA_TO_DEVICE);
396 kfree(r->req.rsp);
397}
398
399static int
400nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
401{
402 struct nvmet_rdma_device *ndev = queue->dev;
403 int nr_rsps = queue->recv_queue_size * 2;
404 int ret = -EINVAL, i;
405
406 queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
407 GFP_KERNEL);
408 if (!queue->rsps)
409 goto out;
410
411 for (i = 0; i < nr_rsps; i++) {
412 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
413
414 ret = nvmet_rdma_alloc_rsp(ndev, rsp);
415 if (ret)
416 goto out_free;
417
418 list_add_tail(&rsp->free_list, &queue->free_rsps);
419 }
420
421 return 0;
422
423out_free:
424 while (--i >= 0) {
425 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
426
427 list_del(&rsp->free_list);
428 nvmet_rdma_free_rsp(ndev, rsp);
429 }
430 kfree(queue->rsps);
431out:
432 return ret;
433}
434
435static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
436{
437 struct nvmet_rdma_device *ndev = queue->dev;
438 int i, nr_rsps = queue->recv_queue_size * 2;
439
440 for (i = 0; i < nr_rsps; i++) {
441 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
442
443 list_del(&rsp->free_list);
444 nvmet_rdma_free_rsp(ndev, rsp);
445 }
446 kfree(queue->rsps);
447}
448
449static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
450 struct nvmet_rdma_cmd *cmd)
451{
452 int ret;
453
454 ib_dma_sync_single_for_device(ndev->device,
455 cmd->sge[0].addr, cmd->sge[0].length,
456 DMA_FROM_DEVICE);
457
458 if (ndev->srq)
459 ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
460 else
461 ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
462
463 if (unlikely(ret))
464 pr_err("post_recv cmd failed\n");
465
466 return ret;
467}
468
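/*
 * Retry commands that were parked because the send queue had no work
 * request credits left; called once completed responses have returned
 * credits to the queue.
 */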
469static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
470{
471 spin_lock(&queue->rsp_wr_wait_lock);
472 while (!list_empty(&queue->rsp_wr_wait_list)) {
473 struct nvmet_rdma_rsp *rsp;
474 bool ret;
475
476 rsp = list_entry(queue->rsp_wr_wait_list.next,
477 struct nvmet_rdma_rsp, wait_list);
478 list_del(&rsp->wait_list);
479
480 spin_unlock(&queue->rsp_wr_wait_lock);
481 ret = nvmet_rdma_execute_command(rsp);
482 spin_lock(&queue->rsp_wr_wait_lock);
483
484 if (!ret) {
485 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
486 break;
487 }
488 }
489 spin_unlock(&queue->rsp_wr_wait_lock);
490}
491
492
493static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
494{
495 struct nvmet_rdma_queue *queue = rsp->queue;
496
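	/* return the send WR credits taken in nvmet_rdma_execute_command() */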
497 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
498
499 if (rsp->n_rdma) {
500 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
501 queue->cm_id->port_num, rsp->req.sg,
502 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
503 }
504
505 if (rsp->req.sg != rsp->cmd->inline_sg)
506 sgl_free(rsp->req.sg);
507
508 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
509 nvmet_rdma_process_wr_wait_list(queue);
510
511 nvmet_rdma_put_rsp(rsp);
512}
513
514static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
515{
516 if (queue->nvme_sq.ctrl) {
517 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
518 } else {
		/*
		 * We did not set up a controller yet in case of an admin
		 * connect error, so just disconnect and clean up the queue.
		 */
524 nvmet_rdma_queue_disconnect(queue);
525 }
526}
527
528static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
529{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	/*
	 * Release the rsp first; it may be freed, so report any error
	 * against the queue taken from the CQ context instead.
	 */
	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
540 }
541}
542
543static void nvmet_rdma_queue_response(struct nvmet_req *req)
544{
545 struct nvmet_rdma_rsp *rsp =
546 container_of(req, struct nvmet_rdma_rsp, req);
547 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
548 struct ib_send_wr *first_wr;
549
550 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
551 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
552 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
553 } else {
554 rsp->send_wr.opcode = IB_WR_SEND;
555 }
556
557 if (nvmet_rdma_need_data_out(rsp))
558 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
559 cm_id->port_num, NULL, &rsp->send_wr);
560 else
561 first_wr = &rsp->send_wr;
562
563 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
564
565 ib_dma_sync_single_for_device(rsp->queue->dev->device,
566 rsp->send_sge.addr, rsp->send_sge.length,
567 DMA_TO_DEVICE);
568
569 if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
570 pr_err("sending cmd response failed\n");
571 nvmet_rdma_release_rsp(rsp);
572 }
573}
574
575static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
576{
577 struct nvmet_rdma_rsp *rsp =
578 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
579 struct nvmet_rdma_queue *queue = cq->cq_context;
580
581 WARN_ON(rsp->n_rdma <= 0);
582 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
583 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
584 queue->cm_id->port_num, rsp->req.sg,
585 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
586 rsp->n_rdma = 0;
587
588 if (unlikely(wc->status != IB_WC_SUCCESS)) {
589 nvmet_req_uninit(&rsp->req);
590 nvmet_rdma_release_rsp(rsp);
591 if (wc->status != IB_WC_WR_FLUSH_ERR) {
592 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
593 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
594 nvmet_rdma_error_comp(queue);
595 }
596 return;
597 }
598
599 nvmet_req_execute(&rsp->req);
600}
601
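/*
 * Build rsp->req.sg over the command's pre-mapped inline data pages,
 * applying the SGL offset within the first page.
 */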
602static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
603 u64 off)
604{
605 int sg_count = num_pages(len);
606 struct scatterlist *sg;
607 int i;
608
609 sg = rsp->cmd->inline_sg;
610 for (i = 0; i < sg_count; i++, sg++) {
611 if (i < sg_count - 1)
612 sg_unmark_end(sg);
613 else
614 sg_mark_end(sg);
615 sg->offset = off;
616 sg->length = min_t(int, len, PAGE_SIZE - off);
617 len -= sg->length;
618 if (!i)
619 off = 0;
620 }
621
622 rsp->req.sg = rsp->cmd->inline_sg;
623 rsp->req.sg_cnt = sg_count;
624}
625
626static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
627{
628 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
629 u64 off = le64_to_cpu(sgl->addr);
630 u32 len = le32_to_cpu(sgl->length);
631
632 if (!nvme_is_write(rsp->req.cmd))
633 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
634
635 if (off + len > rsp->queue->dev->inline_data_size) {
636 pr_err("invalid inline data offset!\n");
637 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
638 }
639
	/* no data to transfer */
641 if (!len)
642 return 0;
643
644 nvmet_rdma_use_inline_sg(rsp, len, off);
645 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
646 rsp->req.transfer_len += len;
647 return 0;
648}
649
650static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
651 struct nvme_keyed_sgl_desc *sgl, bool invalidate)
652{
653 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
654 u64 addr = le64_to_cpu(sgl->addr);
655 u32 len = get_unaligned_le24(sgl->length);
656 u32 key = get_unaligned_le32(sgl->key);
657 int ret;
658
	/* no data to transfer */
660 if (!len)
661 return 0;
662
663 rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
664 if (!rsp->req.sg)
665 return NVME_SC_INTERNAL;
666
667 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
668 rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
669 nvmet_data_dir(&rsp->req));
670 if (ret < 0)
671 return NVME_SC_INTERNAL;
672 rsp->req.transfer_len += len;
673 rsp->n_rdma += ret;
674
675 if (invalidate) {
676 rsp->invalidate_rkey = key;
677 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
678 }
679
680 return 0;
681}
682
683static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
684{
685 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
686
687 switch (sgl->type >> 4) {
688 case NVME_SGL_FMT_DATA_DESC:
689 switch (sgl->type & 0xf) {
690 case NVME_SGL_FMT_OFFSET:
691 return nvmet_rdma_map_sgl_inline(rsp);
692 default:
693 pr_err("invalid SGL subtype: %#x\n", sgl->type);
694 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
695 }
696 case NVME_KEY_SGL_FMT_DATA_DESC:
697 switch (sgl->type & 0xf) {
698 case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
699 return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
700 case NVME_SGL_FMT_ADDRESS:
701 return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
702 default:
703 pr_err("invalid SGL subtype: %#x\n", sgl->type);
704 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
705 }
706 default:
707 pr_err("invalid SGL type: %#x\n", sgl->type);
708 return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
709 }
710}
711
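/*
 * Reserve one send WR for the response plus n_rdma WRs for any RDMA
 * transfer, then either post the RDMA READ for host-to-target data or
 * execute the request directly.  Returns false, without consuming
 * credits, if the send queue is currently full.
 */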
712static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
713{
714 struct nvmet_rdma_queue *queue = rsp->queue;
715
716 if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
717 &queue->sq_wr_avail) < 0)) {
718 pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
719 1 + rsp->n_rdma, queue->idx,
720 queue->nvme_sq.ctrl->cntlid);
721 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
722 return false;
723 }
724
725 if (nvmet_rdma_need_data_in(rsp)) {
726 if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
727 queue->cm_id->port_num, &rsp->read_cqe, NULL))
728 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
729 } else {
730 nvmet_req_execute(&rsp->req);
731 }
732
733 return true;
734}
735
736static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
737 struct nvmet_rdma_rsp *cmd)
738{
739 u16 status;
740
741 ib_dma_sync_single_for_cpu(queue->dev->device,
742 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
743 DMA_FROM_DEVICE);
744 ib_dma_sync_single_for_cpu(queue->dev->device,
745 cmd->send_sge.addr, cmd->send_sge.length,
746 DMA_TO_DEVICE);
747
748 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
749 &queue->nvme_sq, &nvmet_rdma_ops))
750 return;
751
752 status = nvmet_rdma_map_sgl(cmd);
753 if (status)
754 goto out_err;
755
756 if (unlikely(!nvmet_rdma_execute_command(cmd))) {
757 spin_lock(&queue->rsp_wr_wait_lock);
758 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
759 spin_unlock(&queue->rsp_wr_wait_lock);
760 }
761
762 return;
763
764out_err:
765 nvmet_req_complete(&cmd->req, status);
766}
767
768static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
769{
770 struct nvmet_rdma_cmd *cmd =
771 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
772 struct nvmet_rdma_queue *queue = cq->cq_context;
773 struct nvmet_rdma_rsp *rsp;
774
775 if (unlikely(wc->status != IB_WC_SUCCESS)) {
776 if (wc->status != IB_WC_WR_FLUSH_ERR) {
777 pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
778 wc->wr_cqe, ib_wc_status_msg(wc->status),
779 wc->status);
780 nvmet_rdma_error_comp(queue);
781 }
782 return;
783 }
784
785 if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
786 pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
787 nvmet_rdma_error_comp(queue);
788 return;
789 }
790
791 cmd->queue = queue;
792 rsp = nvmet_rdma_get_rsp(queue);
793 if (unlikely(!rsp)) {
		/*
		 * We only get here under memory pressure, when no rsp could
		 * be allocated.  Repost the receive buffer and silently drop
		 * the command; the host will time out and retry it.
		 */
799 nvmet_rdma_post_recv(queue->dev, cmd);
800 return;
801 }
802 rsp->queue = queue;
803 rsp->cmd = cmd;
804 rsp->flags = 0;
805 rsp->req.cmd = cmd->nvme_cmd;
806 rsp->req.port = queue->port;
807 rsp->n_rdma = 0;
808
809 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
810 unsigned long flags;
811
812 spin_lock_irqsave(&queue->state_lock, flags);
813 if (queue->state == NVMET_RDMA_Q_CONNECTING)
814 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
815 else
816 nvmet_rdma_put_rsp(rsp);
817 spin_unlock_irqrestore(&queue->state_lock, flags);
818 return;
819 }
820
821 nvmet_rdma_handle_command(queue, rsp);
822}
823
824static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
825{
826 if (!ndev->srq)
827 return;
828
829 nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
830 ib_destroy_srq(ndev->srq);
831}
832
833static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
834{
835 struct ib_srq_init_attr srq_attr = { NULL, };
836 struct ib_srq *srq;
837 size_t srq_size;
838 int ret, i;
839
840 srq_size = 4095;
841
842 srq_attr.attr.max_wr = srq_size;
843 srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
844 srq_attr.attr.srq_limit = 0;
845 srq_attr.srq_type = IB_SRQT_BASIC;
846 srq = ib_create_srq(ndev->pd, &srq_attr);
847 if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
852 pr_info("SRQ requested but not supported.\n");
853 return 0;
854 }
855
856 ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
857 if (IS_ERR(ndev->srq_cmds)) {
858 ret = PTR_ERR(ndev->srq_cmds);
859 goto out_destroy_srq;
860 }
861
862 ndev->srq = srq;
863 ndev->srq_size = srq_size;
864
865 for (i = 0; i < srq_size; i++) {
866 ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
867 if (ret)
868 goto out_free_cmds;
869 }
870
871 return 0;
872
873out_free_cmds:
874 nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
875out_destroy_srq:
876 ib_destroy_srq(srq);
877 return ret;
878}
879
880static void nvmet_rdma_free_dev(struct kref *ref)
881{
882 struct nvmet_rdma_device *ndev =
883 container_of(ref, struct nvmet_rdma_device, ref);
884
885 mutex_lock(&device_list_mutex);
886 list_del(&ndev->entry);
887 mutex_unlock(&device_list_mutex);
888
889 nvmet_rdma_destroy_srq(ndev);
890 ib_dealloc_pd(ndev->pd);
891
892 kfree(ndev);
893}
894
895static struct nvmet_rdma_device *
896nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
897{
898 struct nvmet_port *port = cm_id->context;
899 struct nvmet_rdma_device *ndev;
900 int inline_page_count;
901 int inline_sge_count;
902 int ret;
903
904 mutex_lock(&device_list_mutex);
905 list_for_each_entry(ndev, &device_list, entry) {
906 if (ndev->device->node_guid == cm_id->device->node_guid &&
907 kref_get_unless_zero(&ndev->ref))
908 goto out_unlock;
909 }
910
911 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
912 if (!ndev)
913 goto out_err;
914
915 inline_page_count = num_pages(port->inline_data_size);
916 inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
917 cm_id->device->attrs.max_recv_sge) - 1;
918 if (inline_page_count > inline_sge_count) {
919 pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
920 port->inline_data_size, cm_id->device->name,
921 inline_sge_count * PAGE_SIZE);
922 port->inline_data_size = inline_sge_count * PAGE_SIZE;
923 inline_page_count = inline_sge_count;
924 }
925 ndev->inline_data_size = port->inline_data_size;
926 ndev->inline_page_count = inline_page_count;
927 ndev->device = cm_id->device;
928 kref_init(&ndev->ref);
929
930 ndev->pd = ib_alloc_pd(ndev->device, 0);
931 if (IS_ERR(ndev->pd))
932 goto out_free_dev;
933
934 if (nvmet_rdma_use_srq) {
935 ret = nvmet_rdma_init_srq(ndev);
936 if (ret)
937 goto out_free_pd;
938 }
939
940 list_add(&ndev->entry, &device_list);
941out_unlock:
942 mutex_unlock(&device_list_mutex);
943 pr_debug("added %s.\n", ndev->device->name);
944 return ndev;
945
946out_free_pd:
947 ib_dealloc_pd(ndev->pd);
948out_free_dev:
949 kfree(ndev);
950out_err:
951 mutex_unlock(&device_list_mutex);
952 return NULL;
953}
954
955static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
956{
957 struct ib_qp_init_attr qp_attr;
958 struct nvmet_rdma_device *ndev = queue->dev;
959 int comp_vector, nr_cqe, ret, i;
960
	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
965 comp_vector = !queue->host_qid ? 0 :
966 queue->idx % ndev->device->num_comp_vectors;
967

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
971 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
972
973 queue->cq = ib_alloc_cq(ndev->device, queue,
974 nr_cqe + 1, comp_vector,
975 IB_POLL_WORKQUEUE);
976 if (IS_ERR(queue->cq)) {
977 ret = PTR_ERR(queue->cq);
978 pr_err("failed to create CQ cqe= %d ret= %d\n",
979 nr_cqe + 1, ret);
980 goto out;
981 }
982
983 memset(&qp_attr, 0, sizeof(qp_attr));
984 qp_attr.qp_context = queue;
985 qp_attr.event_handler = nvmet_rdma_qp_event;
986 qp_attr.send_cq = queue->cq;
987 qp_attr.recv_cq = queue->cq;
988 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
989 qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
991 qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
992 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
993 qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
994 ndev->device->attrs.max_send_sge);
995
996 if (ndev->srq) {
997 qp_attr.srq = ndev->srq;
998 } else {
		/* +1 for drain */
1000 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
1001 qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
1002 }
1003
1004 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1005 if (ret) {
1006 pr_err("failed to create_qp ret= %d\n", ret);
1007 goto err_destroy_cq;
1008 }
1009
1010 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
1011
1012 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1013 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
1014 qp_attr.cap.max_send_wr, queue->cm_id);
1015
1016 if (!ndev->srq) {
1017 for (i = 0; i < queue->recv_queue_size; i++) {
1018 queue->cmds[i].queue = queue;
1019 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
1020 if (ret)
1021 goto err_destroy_qp;
1022 }
1023 }
1024
1025out:
1026 return ret;
1027
1028err_destroy_qp:
1029 rdma_destroy_qp(queue->cm_id);
1030err_destroy_cq:
1031 ib_free_cq(queue->cq);
1032 goto out;
1033}
1034
1035static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
1036{
1037 struct ib_qp *qp = queue->cm_id->qp;
1038
1039 ib_drain_qp(qp);
1040 rdma_destroy_id(queue->cm_id);
1041 ib_destroy_qp(qp);
1042 ib_free_cq(queue->cq);
1043}
1044
1045static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
1046{
1047 pr_debug("freeing queue %d\n", queue->idx);
1048
1049 nvmet_sq_destroy(&queue->nvme_sq);
1050
1051 nvmet_rdma_destroy_queue_ib(queue);
1052 if (!queue->dev->srq) {
1053 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1054 queue->recv_queue_size,
1055 !queue->host_qid);
1056 }
1057 nvmet_rdma_free_rsps(queue);
1058 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1059 kfree(queue);
1060}
1061
1062static void nvmet_rdma_release_queue_work(struct work_struct *w)
1063{
1064 struct nvmet_rdma_queue *queue =
1065 container_of(w, struct nvmet_rdma_queue, release_work);
1066 struct nvmet_rdma_device *dev = queue->dev;
1067
1068 nvmet_rdma_free_queue(queue);
1069
1070 kref_put(&dev->ref, nvmet_rdma_free_dev);
1071}
1072
1073static int
1074nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
1075 struct nvmet_rdma_queue *queue)
1076{
1077 struct nvme_rdma_cm_req *req;
1078
1079 req = (struct nvme_rdma_cm_req *)conn->private_data;
1080 if (!req || conn->private_data_len == 0)
1081 return NVME_RDMA_CM_INVALID_LEN;
1082
1083 if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1084 return NVME_RDMA_CM_INVALID_RECFMT;
1085
1086 queue->host_qid = le16_to_cpu(req->qid);
1087
	/*
	 * req->hsqsize corresponds to our recv queue size plus 1,
	 * req->hrqsize corresponds to our send queue size.
	 */
1092 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1093 queue->send_queue_size = le16_to_cpu(req->hrqsize);
1094
1095 if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
1096 return NVME_RDMA_CM_INVALID_HSQSIZE;
1097
	/* XXX: should a maximum be enforced for I/O queue sizes? */

1100 return 0;
1101}
1102
1103static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1104 enum nvme_rdma_cm_status status)
1105{
1106 struct nvme_rdma_cm_rej rej;
1107
1108 pr_debug("rejecting connect request: status %d (%s)\n",
1109 status, nvme_rdma_cm_msg(status));
1110
1111 rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1112 rej.sts = cpu_to_le16(status);
1113
1114 return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
1115}
1116
1117static struct nvmet_rdma_queue *
1118nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1119 struct rdma_cm_id *cm_id,
1120 struct rdma_cm_event *event)
1121{
1122 struct nvmet_rdma_queue *queue;
1123 int ret;
1124
1125 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1126 if (!queue) {
1127 ret = NVME_RDMA_CM_NO_RSC;
1128 goto out_reject;
1129 }
1130
1131 ret = nvmet_sq_init(&queue->nvme_sq);
1132 if (ret) {
1133 ret = NVME_RDMA_CM_NO_RSC;
1134 goto out_free_queue;
1135 }
1136
1137 ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
1138 if (ret)
1139 goto out_destroy_sq;
1140
	/*
	 * Schedule the actual release from a workqueue: calling
	 * rdma_destroy_id() from inside a CM callback (we are called from
	 * the CM event handler) would deadlock.
	 */
1145 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1146 queue->dev = ndev;
1147 queue->cm_id = cm_id;
1148
1149 spin_lock_init(&queue->state_lock);
1150 queue->state = NVMET_RDMA_Q_CONNECTING;
1151 INIT_LIST_HEAD(&queue->rsp_wait_list);
1152 INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1153 spin_lock_init(&queue->rsp_wr_wait_lock);
1154 INIT_LIST_HEAD(&queue->free_rsps);
1155 spin_lock_init(&queue->rsps_lock);
1156 INIT_LIST_HEAD(&queue->queue_list);
1157
1158 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1159 if (queue->idx < 0) {
1160 ret = NVME_RDMA_CM_NO_RSC;
1161 goto out_destroy_sq;
1162 }
1163
1164 ret = nvmet_rdma_alloc_rsps(queue);
1165 if (ret) {
1166 ret = NVME_RDMA_CM_NO_RSC;
1167 goto out_ida_remove;
1168 }
1169
1170 if (!ndev->srq) {
1171 queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1172 queue->recv_queue_size,
1173 !queue->host_qid);
1174 if (IS_ERR(queue->cmds)) {
1175 ret = NVME_RDMA_CM_NO_RSC;
1176 goto out_free_responses;
1177 }
1178 }
1179
1180 ret = nvmet_rdma_create_queue_ib(queue);
1181 if (ret) {
1182 pr_err("%s: creating RDMA queue failed (%d).\n",
1183 __func__, ret);
1184 ret = NVME_RDMA_CM_NO_RSC;
1185 goto out_free_cmds;
1186 }
1187
1188 return queue;
1189
1190out_free_cmds:
1191 if (!ndev->srq) {
1192 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1193 queue->recv_queue_size,
1194 !queue->host_qid);
1195 }
1196out_free_responses:
1197 nvmet_rdma_free_rsps(queue);
1198out_ida_remove:
1199 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1200out_destroy_sq:
1201 nvmet_sq_destroy(&queue->nvme_sq);
1202out_free_queue:
1203 kfree(queue);
1204out_reject:
1205 nvmet_rdma_cm_reject(cm_id, ret);
1206 return NULL;
1207}
1208
1209static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1210{
1211 struct nvmet_rdma_queue *queue = priv;
1212
1213 switch (event->event) {
1214 case IB_EVENT_COMM_EST:
1215 rdma_notify(queue->cm_id, event->event);
1216 break;
1217 default:
1218 pr_err("received IB QP event: %s (%d)\n",
1219 ib_event_msg(event->event), event->event);
1220 break;
1221 }
1222}
1223
1224static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1225 struct nvmet_rdma_queue *queue,
1226 struct rdma_conn_param *p)
1227{
1228 struct rdma_conn_param param = { };
1229 struct nvme_rdma_cm_rep priv = { };
1230 int ret = -ENOMEM;
1231
1232 param.rnr_retry_count = 7;
1233 param.flow_control = 1;
1234 param.initiator_depth = min_t(u8, p->initiator_depth,
1235 queue->dev->device->attrs.max_qp_init_rd_atom);
1236 param.private_data = &priv;
1237 param.private_data_len = sizeof(priv);
1238 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1239 priv.crqsize = cpu_to_le16(queue->recv_queue_size);
1240
	ret = rdma_accept(cm_id, &param);
1242 if (ret)
1243 pr_err("rdma_accept failed (error code = %d)\n", ret);
1244
1245 return ret;
1246}
1247
1248static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1249 struct rdma_cm_event *event)
1250{
1251 struct nvmet_rdma_device *ndev;
1252 struct nvmet_rdma_queue *queue;
1253 int ret = -EINVAL;
1254
1255 ndev = nvmet_rdma_find_get_device(cm_id);
1256 if (!ndev) {
1257 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1258 return -ECONNREFUSED;
1259 }
1260
1261 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1262 if (!queue) {
1263 ret = -ENOMEM;
1264 goto put_device;
1265 }
1266 queue->port = cm_id->context;
1267
1268 if (queue->host_qid == 0) {
		/* Let inflight controller teardown complete */
1270 flush_scheduled_work();
1271 }
1272
1273 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1274 if (ret) {
1275 schedule_work(&queue->release_work);
		/* Destroying the rdma_cm id is not needed here; release_work takes care of it */
1277 return 0;
1278 }
1279
1280 mutex_lock(&nvmet_rdma_queue_mutex);
1281 list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
1282 mutex_unlock(&nvmet_rdma_queue_mutex);
1283
1284 return 0;
1285
1286put_device:
1287 kref_put(&ndev->ref, nvmet_rdma_free_dev);
1288
1289 return ret;
1290}
1291
1292static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
1293{
1294 unsigned long flags;
1295
1296 spin_lock_irqsave(&queue->state_lock, flags);
1297 if (queue->state != NVMET_RDMA_Q_CONNECTING) {
1298 pr_warn("trying to establish a connected queue\n");
1299 goto out_unlock;
1300 }
1301 queue->state = NVMET_RDMA_Q_LIVE;
1302
1303 while (!list_empty(&queue->rsp_wait_list)) {
1304 struct nvmet_rdma_rsp *cmd;
1305
1306 cmd = list_first_entry(&queue->rsp_wait_list,
1307 struct nvmet_rdma_rsp, wait_list);
1308 list_del(&cmd->wait_list);
1309
1310 spin_unlock_irqrestore(&queue->state_lock, flags);
1311 nvmet_rdma_handle_command(queue, cmd);
1312 spin_lock_irqsave(&queue->state_lock, flags);
1313 }
1314
1315out_unlock:
1316 spin_unlock_irqrestore(&queue->state_lock, flags);
1317}
1318
1319static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1320{
1321 bool disconnect = false;
1322 unsigned long flags;
1323
1324 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1325
1326 spin_lock_irqsave(&queue->state_lock, flags);
1327 switch (queue->state) {
1328 case NVMET_RDMA_Q_CONNECTING:
1329 case NVMET_RDMA_Q_LIVE:
1330 queue->state = NVMET_RDMA_Q_DISCONNECTING;
1331 disconnect = true;
1332 break;
1333 case NVMET_RDMA_Q_DISCONNECTING:
1334 break;
1335 }
1336 spin_unlock_irqrestore(&queue->state_lock, flags);
1337
1338 if (disconnect) {
1339 rdma_disconnect(queue->cm_id);
1340 schedule_work(&queue->release_work);
1341 }
1342}
1343
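/*
 * A queue is removed from nvmet_rdma_queue_list exactly once; finding it
 * already unlinked means another context has started the disconnect.
 */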
1344static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1345{
1346 bool disconnect = false;
1347
1348 mutex_lock(&nvmet_rdma_queue_mutex);
1349 if (!list_empty(&queue->queue_list)) {
1350 list_del_init(&queue->queue_list);
1351 disconnect = true;
1352 }
1353 mutex_unlock(&nvmet_rdma_queue_mutex);
1354
1355 if (disconnect)
1356 __nvmet_rdma_queue_disconnect(queue);
1357}
1358
1359static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1360 struct nvmet_rdma_queue *queue)
1361{
1362 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
1363
1364 mutex_lock(&nvmet_rdma_queue_mutex);
1365 if (!list_empty(&queue->queue_list))
1366 list_del_init(&queue->queue_list);
1367 mutex_unlock(&nvmet_rdma_queue_mutex);
1368
1369 pr_err("failed to connect queue %d\n", queue->idx);
1370 schedule_work(&queue->release_work);
1371}
1372
/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug.  This event can be generated on a normal queue cm_id
 * and/or on a device-bound listener cm_id (in which case queue is
 * NULL).
 *
 * Queue teardown on device removal is handled by our ib_client, so
 * only the listening port cm_ids need handling here: the port priv is
 * nullified to prevent a double cm_id destroy, and a non-zero return
 * tells the RDMA CM core to destroy the cm_id itself.
 */
1388static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1389 struct nvmet_rdma_queue *queue)
1390{
1391 struct nvmet_port *port;
1392
1393 if (queue) {
		/*
		 * This is a queue cm_id.  Queue removal is handled by the
		 * ib_client we registered, so there is nothing to do here;
		 * don't interfere with it.
		 */
1399 return 0;
1400 }
1401
1402 port = cm_id->context;
1403
	/*
	 * This is a listener cm_id.  Make sure that a future
	 * nvmet_rdma_remove_port() won't invoke a double cm_id
	 * destroy: use an atomic xchg on port->priv so we don't
	 * race with remove_port.
	 */
1410 if (xchg(&port->priv, NULL) != cm_id)
1411 return 0;
1412
	/*
	 * Returning non-zero tells the RDMA CM core to destroy
	 * this cm_id itself on our behalf.
	 */
1417 return 1;
1418}
1419
1420static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1421 struct rdma_cm_event *event)
1422{
1423 struct nvmet_rdma_queue *queue = NULL;
1424 int ret = 0;
1425
1426 if (cm_id->qp)
1427 queue = cm_id->qp->qp_context;
1428
1429 pr_debug("%s (%d): status %d id %p\n",
1430 rdma_event_msg(event->event), event->event,
1431 event->status, cm_id);
1432
1433 switch (event->event) {
1434 case RDMA_CM_EVENT_CONNECT_REQUEST:
1435 ret = nvmet_rdma_queue_connect(cm_id, event);
1436 break;
1437 case RDMA_CM_EVENT_ESTABLISHED:
1438 nvmet_rdma_queue_established(queue);
1439 break;
1440 case RDMA_CM_EVENT_ADDR_CHANGE:
1441 case RDMA_CM_EVENT_DISCONNECTED:
1442 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1443 nvmet_rdma_queue_disconnect(queue);
1444 break;
1445 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1446 ret = nvmet_rdma_device_removal(cm_id, queue);
1447 break;
1448 case RDMA_CM_EVENT_REJECTED:
1449 pr_debug("Connection rejected: %s\n",
1450 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
1452 case RDMA_CM_EVENT_UNREACHABLE:
1453 case RDMA_CM_EVENT_CONNECT_ERROR:
1454 nvmet_rdma_queue_connect_fail(cm_id, queue);
1455 break;
1456 default:
1457 pr_err("received unrecognized RDMA CM event %d\n",
1458 event->event);
1459 break;
1460 }
1461
1462 return ret;
1463}
1464
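/*
 * Disconnect every queue owned by @ctrl.  The list is re-scanned from the
 * start after each disconnect because the mutex is dropped while the
 * queue is torn down.
 */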
1465static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
1466{
1467 struct nvmet_rdma_queue *queue;
1468
1469restart:
1470 mutex_lock(&nvmet_rdma_queue_mutex);
1471 list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
1472 if (queue->nvme_sq.ctrl == ctrl) {
1473 list_del_init(&queue->queue_list);
1474 mutex_unlock(&nvmet_rdma_queue_mutex);
1475
1476 __nvmet_rdma_queue_disconnect(queue);
1477 goto restart;
1478 }
1479 }
1480 mutex_unlock(&nvmet_rdma_queue_mutex);
1481}
1482
1483static int nvmet_rdma_add_port(struct nvmet_port *port)
1484{
1485 struct rdma_cm_id *cm_id;
1486 struct sockaddr_storage addr = { };
1487 __kernel_sa_family_t af;
1488 int ret;
1489
1490 switch (port->disc_addr.adrfam) {
1491 case NVMF_ADDR_FAMILY_IP4:
1492 af = AF_INET;
1493 break;
1494 case NVMF_ADDR_FAMILY_IP6:
1495 af = AF_INET6;
1496 break;
1497 default:
1498 pr_err("address family %d not supported\n",
1499 port->disc_addr.adrfam);
1500 return -EINVAL;
1501 }
1502
1503 if (port->inline_data_size < 0) {
1504 port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
1505 } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
1506 pr_warn("inline_data_size %u is too large, reducing to %u\n",
1507 port->inline_data_size,
1508 NVMET_RDMA_MAX_INLINE_DATA_SIZE);
1509 port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
1510 }
1511
1512 ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
1513 port->disc_addr.trsvcid, &addr);
1514 if (ret) {
1515 pr_err("malformed ip/port passed: %s:%s\n",
1516 port->disc_addr.traddr, port->disc_addr.trsvcid);
1517 return ret;
1518 }
1519
1520 cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1521 RDMA_PS_TCP, IB_QPT_RC);
1522 if (IS_ERR(cm_id)) {
1523 pr_err("CM ID creation failed\n");
1524 return PTR_ERR(cm_id);
1525 }
1526
	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
1531 ret = rdma_set_afonly(cm_id, 1);
1532 if (ret) {
1533 pr_err("rdma_set_afonly failed (%d)\n", ret);
1534 goto out_destroy_id;
1535 }
1536
1537 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
1538 if (ret) {
1539 pr_err("binding CM ID to %pISpcs failed (%d)\n",
1540 (struct sockaddr *)&addr, ret);
1541 goto out_destroy_id;
1542 }
1543
1544 ret = rdma_listen(cm_id, 128);
1545 if (ret) {
1546 pr_err("listening to %pISpcs failed (%d)\n",
1547 (struct sockaddr *)&addr, ret);
1548 goto out_destroy_id;
1549 }
1550
1551 pr_info("enabling port %d (%pISpcs)\n",
1552 le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
1553 port->priv = cm_id;
1554 return 0;
1555
1556out_destroy_id:
1557 rdma_destroy_id(cm_id);
1558 return ret;
1559}
1560
1561static void nvmet_rdma_remove_port(struct nvmet_port *port)
1562{
1563 struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
1564
1565 if (cm_id)
1566 rdma_destroy_id(cm_id);
1567}
1568
1569static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1570 struct nvmet_port *port, char *traddr)
1571{
1572 struct rdma_cm_id *cm_id = port->priv;
1573
1574 if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
1575 struct nvmet_rdma_rsp *rsp =
1576 container_of(req, struct nvmet_rdma_rsp, req);
1577 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
1578 struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
1579
1580 sprintf(traddr, "%pISc", addr);
1581 } else {
1582 memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
1583 }
1584}
1585
1586static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
1587 .owner = THIS_MODULE,
1588 .type = NVMF_TRTYPE_RDMA,
1589 .msdbd = 1,
1590 .has_keyed_sgls = 1,
1591 .add_port = nvmet_rdma_add_port,
1592 .remove_port = nvmet_rdma_remove_port,
1593 .queue_response = nvmet_rdma_queue_response,
1594 .delete_ctrl = nvmet_rdma_delete_ctrl,
1595 .disc_traddr = nvmet_rdma_disc_port_addr,
1596};
1597
1598static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
1599{
1600 struct nvmet_rdma_queue *queue, *tmp;
1601 struct nvmet_rdma_device *ndev;
1602 bool found = false;
1603
1604 mutex_lock(&device_list_mutex);
1605 list_for_each_entry(ndev, &device_list, entry) {
1606 if (ndev->device == ib_device) {
1607 found = true;
1608 break;
1609 }
1610 }
1611 mutex_unlock(&device_list_mutex);
1612
1613 if (!found)
1614 return;
1615
	/*
	 * The IB device used by nvmet controllers is being removed,
	 * so delete all queues using this device.
	 */
1620 mutex_lock(&nvmet_rdma_queue_mutex);
1621 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
1622 queue_list) {
1623 if (queue->dev->device != ib_device)
1624 continue;
1625
1626 pr_info("Removing queue %d\n", queue->idx);
1627 list_del_init(&queue->queue_list);
1628 __nvmet_rdma_queue_disconnect(queue);
1629 }
1630 mutex_unlock(&nvmet_rdma_queue_mutex);
1631
1632 flush_scheduled_work();
1633}
1634
1635static struct ib_client nvmet_rdma_ib_client = {
1636 .name = "nvmet_rdma",
1637 .remove = nvmet_rdma_remove_one
1638};
1639
1640static int __init nvmet_rdma_init(void)
1641{
1642 int ret;
1643
1644 ret = ib_register_client(&nvmet_rdma_ib_client);
1645 if (ret)
1646 return ret;
1647
1648 ret = nvmet_register_transport(&nvmet_rdma_ops);
1649 if (ret)
1650 goto err_ib_client;
1651
1652 return 0;
1653
1654err_ib_client:
1655 ib_unregister_client(&nvmet_rdma_ib_client);
1656 return ret;
1657}
1658
1659static void __exit nvmet_rdma_exit(void)
1660{
1661 nvmet_unregister_transport(&nvmet_rdma_ops);
1662 ib_unregister_client(&nvmet_rdma_ib_client);
1663 WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
1664 ida_destroy(&nvmet_rdma_queue_ida);
1665}
1666
1667module_init(nvmet_rdma_init);
1668module_exit(nvmet_rdma_exit);
1669
1670MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
1672