linux/drivers/nvme/target/rdma.c
   1/*
   2 * NVMe over Fabrics RDMA target.
   3 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 */
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15#include <linux/atomic.h>
  16#include <linux/ctype.h>
  17#include <linux/delay.h>
  18#include <linux/err.h>
  19#include <linux/init.h>
  20#include <linux/module.h>
  21#include <linux/nvme.h>
  22#include <linux/slab.h>
  23#include <linux/string.h>
  24#include <linux/wait.h>
  25#include <linux/inet.h>
  26#include <asm/unaligned.h>
  27
  28#include <rdma/ib_verbs.h>
  29#include <rdma/rdma_cm.h>
  30#include <rdma/rw.h>
  31
  32#include <linux/nvme-rdma.h>
  33#include "nvmet.h"
  34
  35/*
  36 * We allow up to a page of inline data to go with the SQE
  37 */
  38#define NVMET_RDMA_INLINE_DATA_SIZE     PAGE_SIZE
  39
  40struct nvmet_rdma_cmd {
  41        struct ib_sge           sge[2];
  42        struct ib_cqe           cqe;
  43        struct ib_recv_wr       wr;
  44        struct scatterlist      inline_sg;
  45        struct page             *inline_page;
  46        struct nvme_command     *nvme_cmd;
  47        struct nvmet_rdma_queue *queue;
  48};
  49
  50enum {
  51        NVMET_RDMA_REQ_INLINE_DATA      = (1 << 0),
  52        NVMET_RDMA_REQ_INVALIDATE_RKEY  = (1 << 1),
  53};
  54
  55struct nvmet_rdma_rsp {
  56        struct ib_sge           send_sge;
  57        struct ib_cqe           send_cqe;
  58        struct ib_send_wr       send_wr;
  59
  60        struct nvmet_rdma_cmd   *cmd;
  61        struct nvmet_rdma_queue *queue;
  62
  63        struct ib_cqe           read_cqe;
  64        struct rdma_rw_ctx      rw;
  65
  66        struct nvmet_req        req;
  67
  68        u8                      n_rdma;
  69        u32                     flags;
  70        u32                     invalidate_rkey;
  71
  72        struct list_head        wait_list;
  73        struct list_head        free_list;
  74};
  75
  76enum nvmet_rdma_queue_state {
  77        NVMET_RDMA_Q_CONNECTING,
  78        NVMET_RDMA_Q_LIVE,
  79        NVMET_RDMA_Q_DISCONNECTING,
  80        NVMET_RDMA_IN_DEVICE_REMOVAL,
  81};
  82
  83struct nvmet_rdma_queue {
  84        struct rdma_cm_id       *cm_id;
  85        struct nvmet_port       *port;
  86        struct ib_cq            *cq;
  87        atomic_t                sq_wr_avail;
  88        struct nvmet_rdma_device *dev;
  89        spinlock_t              state_lock;
  90        enum nvmet_rdma_queue_state state;
  91        struct nvmet_cq         nvme_cq;
  92        struct nvmet_sq         nvme_sq;
  93
  94        struct nvmet_rdma_rsp   *rsps;
  95        struct list_head        free_rsps;
  96        spinlock_t              rsps_lock;
  97        struct nvmet_rdma_cmd   *cmds;
  98
  99        struct work_struct      release_work;
 100        struct list_head        rsp_wait_list;
 101        struct list_head        rsp_wr_wait_list;
 102        spinlock_t              rsp_wr_wait_lock;
 103
 104        int                     idx;
 105        int                     host_qid;
 106        int                     recv_queue_size;
 107        int                     send_queue_size;
 108
 109        struct list_head        queue_list;
 110};
 111
 112struct nvmet_rdma_device {
 113        struct ib_device        *device;
 114        struct ib_pd            *pd;
 115        struct ib_srq           *srq;
 116        struct nvmet_rdma_cmd   *srq_cmds;
 117        size_t                  srq_size;
 118        struct kref             ref;
 119        struct list_head        entry;
 120};
 121
 122static bool nvmet_rdma_use_srq;
 123module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 124MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
 125
 126static DEFINE_IDA(nvmet_rdma_queue_ida);
 127static LIST_HEAD(nvmet_rdma_queue_list);
 128static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
 129
 130static LIST_HEAD(device_list);
 131static DEFINE_MUTEX(device_list_mutex);
 132
 133static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
 134static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
 135static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 136static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 137static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 138static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
 139
 140static struct nvmet_fabrics_ops nvmet_rdma_ops;
 141
 142/* XXX: really should move to a generic header sooner or later.. */
 143static inline u32 get_unaligned_le24(const u8 *p)
 144{
 145        return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
 146}
 147
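/*
 * A command needs an RDMA READ ("data in") when it is a write from the
 * host's perspective, carries data, and that data was not sent inline
 * with the capsule.  It needs an RDMA WRITE ("data out") when it is a
 * read, carries data, and has not already failed.
 */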
 148static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
 149{
 150        return nvme_is_write(rsp->req.cmd) &&
 151                rsp->req.data_len &&
 152                !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
 153}
 154
 155static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
 156{
 157        return !nvme_is_write(rsp->req.cmd) &&
 158                rsp->req.data_len &&
 159                !rsp->req.rsp->status &&
 160                !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
 161}
 162
 163static inline struct nvmet_rdma_rsp *
 164nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 165{
 166        struct nvmet_rdma_rsp *rsp;
 167        unsigned long flags;
 168
 169        spin_lock_irqsave(&queue->rsps_lock, flags);
 170        rsp = list_first_entry(&queue->free_rsps,
 171                                struct nvmet_rdma_rsp, free_list);
 172        list_del(&rsp->free_list);
 173        spin_unlock_irqrestore(&queue->rsps_lock, flags);
 174
 175        return rsp;
 176}
 177
 178static inline void
 179nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 180{
 181        unsigned long flags;
 182
 183        spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 184        list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 185        spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
 186}
 187
 188static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
 189{
 190        struct scatterlist *sg;
 191        int count;
 192
 193        if (!sgl || !nents)
 194                return;
 195
 196        for_each_sg(sgl, sg, nents, count)
 197                __free_page(sg_page(sg));
 198        kfree(sgl);
 199}
 200
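/*
 * Allocate a scatterlist backed by individually allocated pages to hold
 * 'length' bytes transferred via RDMA READ/WRITE.  Note that this returns
 * an NVMe status code (NVME_SC_INTERNAL), not a negative errno, on failure.
 */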
 201static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
 202                u32 length)
 203{
 204        struct scatterlist *sg;
 205        struct page *page;
 206        unsigned int nent;
 207        int i = 0;
 208
 209        nent = DIV_ROUND_UP(length, PAGE_SIZE);
 210        sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
 211        if (!sg)
 212                goto out;
 213
 214        sg_init_table(sg, nent);
 215
 216        while (length) {
 217                u32 page_len = min_t(u32, length, PAGE_SIZE);
 218
 219                page = alloc_page(GFP_KERNEL);
 220                if (!page)
 221                        goto out_free_pages;
 222
 223                sg_set_page(&sg[i], page, page_len, 0);
 224                length -= page_len;
 225                i++;
 226        }
 227        *sgl = sg;
 228        *nents = nent;
 229        return 0;
 230
 231out_free_pages:
 232        while (i > 0) {
 233                i--;
 234                __free_page(sg_page(&sg[i]));
 235        }
 236        kfree(sg);
 237out:
 238        return NVME_SC_INTERNAL;
 239}
 240
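/*
 * Set up one RDMA RECV command buffer: sge[0] always maps the NVMe
 * command capsule itself; for I/O queues sge[1] additionally maps a
 * page used to receive inline data sent along with the capsule.
 */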
 241static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
 242                        struct nvmet_rdma_cmd *c, bool admin)
 243{
 244        /* NVMe command / RDMA RECV */
 245        c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
 246        if (!c->nvme_cmd)
 247                goto out;
 248
 249        c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
 250                        sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
 251        if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
 252                goto out_free_cmd;
 253
 254        c->sge[0].length = sizeof(*c->nvme_cmd);
 255        c->sge[0].lkey = ndev->pd->local_dma_lkey;
 256
 257        if (!admin) {
 258                c->inline_page = alloc_pages(GFP_KERNEL,
 259                                get_order(NVMET_RDMA_INLINE_DATA_SIZE));
 260                if (!c->inline_page)
 261                        goto out_unmap_cmd;
 262                c->sge[1].addr = ib_dma_map_page(ndev->device,
 263                                c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
 264                                DMA_FROM_DEVICE);
 265                if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
 266                        goto out_free_inline_page;
 267                c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
 268                c->sge[1].lkey = ndev->pd->local_dma_lkey;
 269        }
 270
 271        c->cqe.done = nvmet_rdma_recv_done;
 272
 273        c->wr.wr_cqe = &c->cqe;
 274        c->wr.sg_list = c->sge;
 275        c->wr.num_sge = admin ? 1 : 2;
 276
 277        return 0;
 278
 279out_free_inline_page:
 280        if (!admin) {
 281                __free_pages(c->inline_page,
 282                                get_order(NVMET_RDMA_INLINE_DATA_SIZE));
 283        }
 284out_unmap_cmd:
 285        ib_dma_unmap_single(ndev->device, c->sge[0].addr,
 286                        sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
 287out_free_cmd:
 288        kfree(c->nvme_cmd);
 289
 290out:
 291        return -ENOMEM;
 292}
 293
 294static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
 295                struct nvmet_rdma_cmd *c, bool admin)
 296{
 297        if (!admin) {
 298                ib_dma_unmap_page(ndev->device, c->sge[1].addr,
 299                                NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
 300                __free_pages(c->inline_page,
 301                                get_order(NVMET_RDMA_INLINE_DATA_SIZE));
 302        }
 303        ib_dma_unmap_single(ndev->device, c->sge[0].addr,
 304                                sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
 305        kfree(c->nvme_cmd);
 306}
 307
 308static struct nvmet_rdma_cmd *
 309nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
 310                int nr_cmds, bool admin)
 311{
 312        struct nvmet_rdma_cmd *cmds;
 313        int ret = -EINVAL, i;
 314
 315        cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
 316        if (!cmds)
 317                goto out;
 318
 319        for (i = 0; i < nr_cmds; i++) {
 320                ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
 321                if (ret)
 322                        goto out_free;
 323        }
 324
 325        return cmds;
 326
 327out_free:
 328        while (--i >= 0)
 329                nvmet_rdma_free_cmd(ndev, cmds + i, admin);
 330        kfree(cmds);
 331out:
 332        return ERR_PTR(ret);
 333}
 334
 335static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
 336                struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
 337{
 338        int i;
 339
 340        for (i = 0; i < nr_cmds; i++)
 341                nvmet_rdma_free_cmd(ndev, cmds + i, admin);
 342        kfree(cmds);
 343}
 344
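/*
 * Set up one response context: map the NVMe completion for the RDMA
 * SEND and initialize the CQE callbacks used for the send completion
 * and for the RDMA READ ("data in") completion.
 */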
 345static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
 346                struct nvmet_rdma_rsp *r)
 347{
 348        /* NVMe CQE / RDMA SEND */
 349        r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
 350        if (!r->req.rsp)
 351                goto out;
 352
 353        r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
 354                        sizeof(*r->req.rsp), DMA_TO_DEVICE);
 355        if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
 356                goto out_free_rsp;
 357
 358        r->send_sge.length = sizeof(*r->req.rsp);
 359        r->send_sge.lkey = ndev->pd->local_dma_lkey;
 360
 361        r->send_cqe.done = nvmet_rdma_send_done;
 362
 363        r->send_wr.wr_cqe = &r->send_cqe;
 364        r->send_wr.sg_list = &r->send_sge;
 365        r->send_wr.num_sge = 1;
 366        r->send_wr.send_flags = IB_SEND_SIGNALED;
 367
 368        /* Data In / RDMA READ */
 369        r->read_cqe.done = nvmet_rdma_read_data_done;
 370        return 0;
 371
 372out_free_rsp:
 373        kfree(r->req.rsp);
 374out:
 375        return -ENOMEM;
 376}
 377
 378static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
 379                struct nvmet_rdma_rsp *r)
 380{
 381        ib_dma_unmap_single(ndev->device, r->send_sge.addr,
 382                                sizeof(*r->req.rsp), DMA_TO_DEVICE);
 383        kfree(r->req.rsp);
 384}
 385
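/*
 * Allocate twice as many responses as the receive queue depth,
 * presumably because a response stays in use until its SEND completes,
 * which can be after the matching RECV buffer has already been reposted
 * (see nvmet_rdma_queue_response()).
 */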
 386static int
 387nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 388{
 389        struct nvmet_rdma_device *ndev = queue->dev;
 390        int nr_rsps = queue->recv_queue_size * 2;
 391        int ret = -EINVAL, i;
 392
 393        queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
 394                        GFP_KERNEL);
 395        if (!queue->rsps)
 396                goto out;
 397
 398        for (i = 0; i < nr_rsps; i++) {
 399                struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
 400
 401                ret = nvmet_rdma_alloc_rsp(ndev, rsp);
 402                if (ret)
 403                        goto out_free;
 404
 405                list_add_tail(&rsp->free_list, &queue->free_rsps);
 406        }
 407
 408        return 0;
 409
 410out_free:
 411        while (--i >= 0) {
 412                struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
 413
 414                list_del(&rsp->free_list);
 415                nvmet_rdma_free_rsp(ndev, rsp);
 416        }
 417        kfree(queue->rsps);
 418out:
 419        return ret;
 420}
 421
 422static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
 423{
 424        struct nvmet_rdma_device *ndev = queue->dev;
 425        int i, nr_rsps = queue->recv_queue_size * 2;
 426
 427        for (i = 0; i < nr_rsps; i++) {
 428                struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
 429
 430                list_del(&rsp->free_list);
 431                nvmet_rdma_free_rsp(ndev, rsp);
 432        }
 433        kfree(queue->rsps);
 434}
 435
 436static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 437                struct nvmet_rdma_cmd *cmd)
 438{
 439        struct ib_recv_wr *bad_wr;
 440
 441        if (ndev->srq)
 442                return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 443        return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
 444}
 445
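/*
 * Retry commands that were parked on the wait list because the send
 * queue had no work-request credits left.  Stop as soon as one of them
 * still cannot be executed.
 */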
 446static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
 447{
 448        spin_lock(&queue->rsp_wr_wait_lock);
 449        while (!list_empty(&queue->rsp_wr_wait_list)) {
 450                struct nvmet_rdma_rsp *rsp;
 451                bool ret;
 452
 453                rsp = list_entry(queue->rsp_wr_wait_list.next,
 454                                struct nvmet_rdma_rsp, wait_list);
 455                list_del(&rsp->wait_list);
 456
 457                spin_unlock(&queue->rsp_wr_wait_lock);
 458                ret = nvmet_rdma_execute_command(rsp);
 459                spin_lock(&queue->rsp_wr_wait_lock);
 460
 461                if (!ret) {
 462                        list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
 463                        break;
 464                }
 465        }
 466        spin_unlock(&queue->rsp_wr_wait_lock);
 467}
 468
 469
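/*
 * Return the send queue credits taken by this response, tear down the
 * RDMA R/W context and any page-backed scatterlist, kick commands
 * waiting for credits, and put the response back on the free list.
 */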
 470static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 471{
 472        struct nvmet_rdma_queue *queue = rsp->queue;
 473
 474        atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
 475
 476        if (rsp->n_rdma) {
 477                rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
 478                                queue->cm_id->port_num, rsp->req.sg,
 479                                rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
 480        }
 481
 482        if (rsp->req.sg != &rsp->cmd->inline_sg)
 483                nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
 484
 485        if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
 486                nvmet_rdma_process_wr_wait_list(queue);
 487
 488        nvmet_rdma_put_rsp(rsp);
 489}
 490
 491static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
 492{
 493        if (queue->nvme_sq.ctrl) {
 494                nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
 495        } else {
 496                /*
  497                 * We didn't set up the controller yet in case of
  498                 * an admin connect error, so just disconnect and
  499                 * clean up the queue.
 500                 */
 501                nvmet_rdma_queue_disconnect(queue);
 502        }
 503}
 504
 505static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 506{
 507        struct nvmet_rdma_rsp *rsp =
 508                container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
 509
 510        nvmet_rdma_release_rsp(rsp);
 511
 512        if (unlikely(wc->status != IB_WC_SUCCESS &&
 513                     wc->status != IB_WC_WR_FLUSH_ERR)) {
 514                pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
 515                        wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
 516                nvmet_rdma_error_comp(rsp->queue);
 517        }
 518}
 519
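/*
 * .queue_response fabrics callback: repost the RECV buffer, then post
 * the completion SEND, chaining any RDMA WRITE ("data out") work
 * requests in front of it.  SEND_WITH_INV is used when the host asked
 * us to invalidate its rkey.
 */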
 520static void nvmet_rdma_queue_response(struct nvmet_req *req)
 521{
 522        struct nvmet_rdma_rsp *rsp =
 523                container_of(req, struct nvmet_rdma_rsp, req);
 524        struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 525        struct ib_send_wr *first_wr, *bad_wr;
 526
 527        if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
 528                rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
 529                rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
 530        } else {
 531                rsp->send_wr.opcode = IB_WR_SEND;
 532        }
 533
 534        if (nvmet_rdma_need_data_out(rsp))
 535                first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
 536                                cm_id->port_num, NULL, &rsp->send_wr);
 537        else
 538                first_wr = &rsp->send_wr;
 539
 540        nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
 541        if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 542                pr_err("sending cmd response failed\n");
 543                nvmet_rdma_release_rsp(rsp);
 544        }
 545}
 546
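/*
 * Completion handler for the RDMA READ that fetched host data: release
 * the R/W context and its credits, then execute the command, or tear
 * things down if the read failed.
 */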
 547static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 548{
 549        struct nvmet_rdma_rsp *rsp =
 550                container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
 551        struct nvmet_rdma_queue *queue = cq->cq_context;
 552
 553        WARN_ON(rsp->n_rdma <= 0);
 554        atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
 555        rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
 556                        queue->cm_id->port_num, rsp->req.sg,
 557                        rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
 558        rsp->n_rdma = 0;
 559
 560        if (unlikely(wc->status != IB_WC_SUCCESS)) {
 561                nvmet_rdma_release_rsp(rsp);
 562                if (wc->status != IB_WC_WR_FLUSH_ERR) {
 563                        pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
 564                                wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
 565                        nvmet_rdma_error_comp(queue);
 566                }
 567                return;
 568        }
 569
 570        rsp->req.execute(&rsp->req);
 571}
 572
 573static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
 574                u64 off)
 575{
 576        sg_init_table(&rsp->cmd->inline_sg, 1);
 577        sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
 578        rsp->req.sg = &rsp->cmd->inline_sg;
 579        rsp->req.sg_cnt = 1;
 580}
 581
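/*
 * Map an offset/length (inline) SGL descriptor onto the inline data
 * page that was received together with the command capsule.
 */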
 582static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
 583{
 584        struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
 585        u64 off = le64_to_cpu(sgl->addr);
 586        u32 len = le32_to_cpu(sgl->length);
 587
 588        if (!nvme_is_write(rsp->req.cmd))
 589                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 590
 591        if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
 592                pr_err("invalid inline data offset!\n");
 593                return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
 594        }
 595
 596        /* no data command? */
 597        if (!len)
 598                return 0;
 599
 600        nvmet_rdma_use_inline_sg(rsp, len, off);
 601        rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
 602        return 0;
 603}
 604
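/*
 * Map a keyed SGL descriptor: allocate a local scatterlist and set up
 * an rdma_rw context targeting the host's address and rkey, recording
 * in n_rdma how many work requests the transfer will need.
 */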
 605static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 606                struct nvme_keyed_sgl_desc *sgl, bool invalidate)
 607{
 608        struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 609        u64 addr = le64_to_cpu(sgl->addr);
 610        u32 len = get_unaligned_le24(sgl->length);
 611        u32 key = get_unaligned_le32(sgl->key);
 612        int ret;
 613        u16 status;
 614
 615        /* no data command? */
 616        if (!len)
 617                return 0;
 618
 619        status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
 620                        len);
 621        if (status)
 622                return status;
 623
 624        ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 625                        rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
 626                        nvmet_data_dir(&rsp->req));
 627        if (ret < 0)
 628                return NVME_SC_INTERNAL;
 629        rsp->n_rdma += ret;
 630
 631        if (invalidate) {
 632                rsp->invalidate_rkey = key;
 633                rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
 634        }
 635
 636        return 0;
 637}
 638
 639static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
 640{
 641        struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
 642
 643        switch (sgl->type >> 4) {
 644        case NVME_SGL_FMT_DATA_DESC:
 645                switch (sgl->type & 0xf) {
 646                case NVME_SGL_FMT_OFFSET:
 647                        return nvmet_rdma_map_sgl_inline(rsp);
 648                default:
 649                        pr_err("invalid SGL subtype: %#x\n", sgl->type);
 650                        return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 651                }
 652        case NVME_KEY_SGL_FMT_DATA_DESC:
 653                switch (sgl->type & 0xf) {
 654                case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
 655                        return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
 656                case NVME_SGL_FMT_ADDRESS:
 657                        return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
 658                default:
 659                        pr_err("invalid SGL subtype: %#x\n", sgl->type);
 660                        return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 661                }
 662        default:
 663                pr_err("invalid SGL type: %#x\n", sgl->type);
 664                return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
 665        }
 666}
 667
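/*
 * Try to reserve the 1 + n_rdma send work requests this command needs.
 * Returns false if the send queue is currently full, in which case the
 * caller puts the command on the wait list instead.
 */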
 668static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
 669{
 670        struct nvmet_rdma_queue *queue = rsp->queue;
 671
 672        if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
 673                        &queue->sq_wr_avail) < 0)) {
 674                pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
 675                                1 + rsp->n_rdma, queue->idx,
 676                                queue->nvme_sq.ctrl->cntlid);
 677                atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
 678                return false;
 679        }
 680
 681        if (nvmet_rdma_need_data_in(rsp)) {
 682                if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
 683                                queue->cm_id->port_num, &rsp->read_cqe, NULL))
 684                        nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
 685        } else {
 686                rsp->req.execute(&rsp->req);
 687        }
 688
 689        return true;
 690}
 691
 692static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 693                struct nvmet_rdma_rsp *cmd)
 694{
 695        u16 status;
 696
 697        cmd->queue = queue;
 698        cmd->n_rdma = 0;
 699        cmd->req.port = queue->port;
 700
 701        if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 702                        &queue->nvme_sq, &nvmet_rdma_ops))
 703                return;
 704
 705        status = nvmet_rdma_map_sgl(cmd);
 706        if (status)
 707                goto out_err;
 708
 709        if (unlikely(!nvmet_rdma_execute_command(cmd))) {
 710                spin_lock(&queue->rsp_wr_wait_lock);
 711                list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
 712                spin_unlock(&queue->rsp_wr_wait_lock);
 713        }
 714
 715        return;
 716
 717out_err:
 718        nvmet_req_complete(&cmd->req, status);
 719}
 720
 721static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 722{
 723        struct nvmet_rdma_cmd *cmd =
 724                container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
 725        struct nvmet_rdma_queue *queue = cq->cq_context;
 726        struct nvmet_rdma_rsp *rsp;
 727
 728        if (unlikely(wc->status != IB_WC_SUCCESS)) {
 729                if (wc->status != IB_WC_WR_FLUSH_ERR) {
 730                        pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
 731                                wc->wr_cqe, ib_wc_status_msg(wc->status),
 732                                wc->status);
 733                        nvmet_rdma_error_comp(queue);
 734                }
 735                return;
 736        }
 737
 738        if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
 739                pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
 740                nvmet_rdma_error_comp(queue);
 741                return;
 742        }
 743
 744        cmd->queue = queue;
 745        rsp = nvmet_rdma_get_rsp(queue);
 746        rsp->cmd = cmd;
 747        rsp->flags = 0;
 748        rsp->req.cmd = cmd->nvme_cmd;
 749
 750        if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
 751                unsigned long flags;
 752
 753                spin_lock_irqsave(&queue->state_lock, flags);
 754                if (queue->state == NVMET_RDMA_Q_CONNECTING)
 755                        list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
 756                else
 757                        nvmet_rdma_put_rsp(rsp);
 758                spin_unlock_irqrestore(&queue->state_lock, flags);
 759                return;
 760        }
 761
 762        nvmet_rdma_handle_command(queue, rsp);
 763}
 764
 765static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
 766{
 767        if (!ndev->srq)
 768                return;
 769
 770        nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
 771        ib_destroy_srq(ndev->srq);
 772}
 773
 774static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
 775{
 776        struct ib_srq_init_attr srq_attr = { NULL, };
 777        struct ib_srq *srq;
 778        size_t srq_size;
 779        int ret, i;
 780
 781        srq_size = 4095;        /* XXX: tune */
 782
 783        srq_attr.attr.max_wr = srq_size;
 784        srq_attr.attr.max_sge = 2;
 785        srq_attr.attr.srq_limit = 0;
 786        srq_attr.srq_type = IB_SRQT_BASIC;
 787        srq = ib_create_srq(ndev->pd, &srq_attr);
 788        if (IS_ERR(srq)) {
 789                /*
 790                 * If SRQs aren't supported we just go ahead and use normal
 791                 * non-shared receive queues.
 792                 */
 793                pr_info("SRQ requested but not supported.\n");
 794                return 0;
 795        }
 796
 797        ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
 798        if (IS_ERR(ndev->srq_cmds)) {
 799                ret = PTR_ERR(ndev->srq_cmds);
 800                goto out_destroy_srq;
 801        }
 802
 803        ndev->srq = srq;
 804        ndev->srq_size = srq_size;
 805
 806        for (i = 0; i < srq_size; i++)
 807                nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
 808
 809        return 0;
 810
 811out_destroy_srq:
 812        ib_destroy_srq(srq);
 813        return ret;
 814}
 815
 816static void nvmet_rdma_free_dev(struct kref *ref)
 817{
 818        struct nvmet_rdma_device *ndev =
 819                container_of(ref, struct nvmet_rdma_device, ref);
 820
 821        mutex_lock(&device_list_mutex);
 822        list_del(&ndev->entry);
 823        mutex_unlock(&device_list_mutex);
 824
 825        nvmet_rdma_destroy_srq(ndev);
 826        ib_dealloc_pd(ndev->pd);
 827
 828        kfree(ndev);
 829}
 830
 831static struct nvmet_rdma_device *
 832nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
 833{
 834        struct nvmet_rdma_device *ndev;
 835        int ret;
 836
 837        mutex_lock(&device_list_mutex);
 838        list_for_each_entry(ndev, &device_list, entry) {
 839                if (ndev->device->node_guid == cm_id->device->node_guid &&
 840                    kref_get_unless_zero(&ndev->ref))
 841                        goto out_unlock;
 842        }
 843
 844        ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
 845        if (!ndev)
 846                goto out_err;
 847
 848        ndev->device = cm_id->device;
 849        kref_init(&ndev->ref);
 850
 851        ndev->pd = ib_alloc_pd(ndev->device);
 852        if (IS_ERR(ndev->pd))
 853                goto out_free_dev;
 854
 855        if (nvmet_rdma_use_srq) {
 856                ret = nvmet_rdma_init_srq(ndev);
 857                if (ret)
 858                        goto out_free_pd;
 859        }
 860
 861        list_add(&ndev->entry, &device_list);
 862out_unlock:
 863        mutex_unlock(&device_list_mutex);
 864        pr_debug("added %s.\n", ndev->device->name);
 865        return ndev;
 866
 867out_free_pd:
 868        ib_dealloc_pd(ndev->pd);
 869out_free_dev:
 870        kfree(ndev);
 871out_err:
 872        mutex_unlock(&device_list_mutex);
 873        return NULL;
 874}
 875
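/*
 * Create the CQ and RC QP for a queue.  The CQ is sized for RECV,
 * RDMA READ/WRITE and SEND completions; when an SRQ is in use the QP
 * has no receive queue of its own, otherwise the per-queue RECVs are
 * posted here.
 */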
 876static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 877{
 878        struct ib_qp_init_attr qp_attr;
 879        struct nvmet_rdma_device *ndev = queue->dev;
 880        int comp_vector, nr_cqe, ret, i;
 881
 882        /*
 883         * Spread the io queues across completion vectors,
 884         * but still keep all admin queues on vector 0.
 885         */
 886        comp_vector = !queue->host_qid ? 0 :
 887                queue->idx % ndev->device->num_comp_vectors;
 888
 889        /*
 890         * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
 891         */
 892        nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
 893
 894        queue->cq = ib_alloc_cq(ndev->device, queue,
 895                        nr_cqe + 1, comp_vector,
 896                        IB_POLL_WORKQUEUE);
 897        if (IS_ERR(queue->cq)) {
 898                ret = PTR_ERR(queue->cq);
 899                pr_err("failed to create CQ cqe= %d ret= %d\n",
 900                       nr_cqe + 1, ret);
 901                goto out;
 902        }
 903
 904        memset(&qp_attr, 0, sizeof(qp_attr));
 905        qp_attr.qp_context = queue;
 906        qp_attr.event_handler = nvmet_rdma_qp_event;
 907        qp_attr.send_cq = queue->cq;
 908        qp_attr.recv_cq = queue->cq;
 909        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 910        qp_attr.qp_type = IB_QPT_RC;
 911        /* +1 for drain */
 912        qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
 913        qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
 914        qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
 915                                        ndev->device->attrs.max_sge);
 916
 917        if (ndev->srq) {
 918                qp_attr.srq = ndev->srq;
 919        } else {
 920                /* +1 for drain */
 921                qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
 922                qp_attr.cap.max_recv_sge = 2;
 923        }
 924
 925        ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
 926        if (ret) {
 927                pr_err("failed to create_qp ret= %d\n", ret);
 928                goto err_destroy_cq;
 929        }
 930
 931        atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
 932
 933        pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
 934                 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
 935                 qp_attr.cap.max_send_wr, queue->cm_id);
 936
 937        if (!ndev->srq) {
 938                for (i = 0; i < queue->recv_queue_size; i++) {
 939                        queue->cmds[i].queue = queue;
 940                        nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
 941                }
 942        }
 943
 944out:
 945        return ret;
 946
 947err_destroy_cq:
 948        ib_free_cq(queue->cq);
 949        goto out;
 950}
 951
 952static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 953{
 954        rdma_destroy_qp(queue->cm_id);
 955        ib_free_cq(queue->cq);
 956}
 957
 958static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
 959{
 960        pr_info("freeing queue %d\n", queue->idx);
 961
 962        nvmet_sq_destroy(&queue->nvme_sq);
 963
 964        nvmet_rdma_destroy_queue_ib(queue);
 965        if (!queue->dev->srq) {
 966                nvmet_rdma_free_cmds(queue->dev, queue->cmds,
 967                                queue->recv_queue_size,
 968                                !queue->host_qid);
 969        }
 970        nvmet_rdma_free_rsps(queue);
 971        ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
 972        kfree(queue);
 973}
 974
 975static void nvmet_rdma_release_queue_work(struct work_struct *w)
 976{
 977        struct nvmet_rdma_queue *queue =
 978                container_of(w, struct nvmet_rdma_queue, release_work);
 979        struct rdma_cm_id *cm_id = queue->cm_id;
 980        struct nvmet_rdma_device *dev = queue->dev;
 981        enum nvmet_rdma_queue_state state = queue->state;
 982
 983        nvmet_rdma_free_queue(queue);
 984
 985        if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
 986                rdma_destroy_id(cm_id);
 987
 988        kref_put(&dev->ref, nvmet_rdma_free_dev);
 989}
 990
 991static int
 992nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
 993                                struct nvmet_rdma_queue *queue)
 994{
 995        struct nvme_rdma_cm_req *req;
 996
 997        req = (struct nvme_rdma_cm_req *)conn->private_data;
 998        if (!req || conn->private_data_len == 0)
 999                return NVME_RDMA_CM_INVALID_LEN;
1000
1001        if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1002                return NVME_RDMA_CM_INVALID_RECFMT;
1003
1004        queue->host_qid = le16_to_cpu(req->qid);
1005
1006        /*
 1007         * req->hsqsize is a 0's based value, so our recv queue size is
 1008         * req->hsqsize + 1; req->hrqsize corresponds to our send queue size
1009         */
1010        queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1011        queue->send_queue_size = le16_to_cpu(req->hrqsize);
1012
1013        if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
1014                return NVME_RDMA_CM_INVALID_HSQSIZE;
1015
1016        /* XXX: Should we enforce some kind of max for IO queues? */
1017
1018        return 0;
1019}
1020
1021static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1022                                enum nvme_rdma_cm_status status)
1023{
1024        struct nvme_rdma_cm_rej rej;
1025
1026        rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1027        rej.sts = cpu_to_le16(status);
1028
1029        return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
1030}
1031
1032static struct nvmet_rdma_queue *
1033nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1034                struct rdma_cm_id *cm_id,
1035                struct rdma_cm_event *event)
1036{
1037        struct nvmet_rdma_queue *queue;
1038        int ret;
1039
1040        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1041        if (!queue) {
1042                ret = NVME_RDMA_CM_NO_RSC;
1043                goto out_reject;
1044        }
1045
1046        ret = nvmet_sq_init(&queue->nvme_sq);
1047        if (ret)
1048                goto out_free_queue;
1049
1050        ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
1051        if (ret)
1052                goto out_destroy_sq;
1053
1054        /*
1055         * Schedules the actual release because calling rdma_destroy_id from
1056         * inside a CM callback would trigger a deadlock. (great API design..)
1057         */
1058        INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1059        queue->dev = ndev;
1060        queue->cm_id = cm_id;
1061
1062        spin_lock_init(&queue->state_lock);
1063        queue->state = NVMET_RDMA_Q_CONNECTING;
1064        INIT_LIST_HEAD(&queue->rsp_wait_list);
1065        INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1066        spin_lock_init(&queue->rsp_wr_wait_lock);
1067        INIT_LIST_HEAD(&queue->free_rsps);
1068        spin_lock_init(&queue->rsps_lock);
1069
1070        queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1071        if (queue->idx < 0) {
1072                ret = NVME_RDMA_CM_NO_RSC;
1073                goto out_free_queue;
1074        }
1075
1076        ret = nvmet_rdma_alloc_rsps(queue);
1077        if (ret) {
1078                ret = NVME_RDMA_CM_NO_RSC;
1079                goto out_ida_remove;
1080        }
1081
1082        if (!ndev->srq) {
1083                queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1084                                queue->recv_queue_size,
1085                                !queue->host_qid);
1086                if (IS_ERR(queue->cmds)) {
1087                        ret = NVME_RDMA_CM_NO_RSC;
1088                        goto out_free_responses;
1089                }
1090        }
1091
1092        ret = nvmet_rdma_create_queue_ib(queue);
1093        if (ret) {
1094                pr_err("%s: creating RDMA queue failed (%d).\n",
1095                        __func__, ret);
1096                ret = NVME_RDMA_CM_NO_RSC;
1097                goto out_free_cmds;
1098        }
1099
1100        return queue;
1101
1102out_free_cmds:
1103        if (!ndev->srq) {
1104                nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1105                                queue->recv_queue_size,
1106                                !queue->host_qid);
1107        }
1108out_free_responses:
1109        nvmet_rdma_free_rsps(queue);
1110out_ida_remove:
1111        ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1112out_destroy_sq:
1113        nvmet_sq_destroy(&queue->nvme_sq);
1114out_free_queue:
1115        kfree(queue);
1116out_reject:
1117        nvmet_rdma_cm_reject(cm_id, ret);
1118        return NULL;
1119}
1120
1121static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1122{
1123        struct nvmet_rdma_queue *queue = priv;
1124
1125        switch (event->event) {
1126        case IB_EVENT_COMM_EST:
1127                rdma_notify(queue->cm_id, event->event);
1128                break;
1129        default:
1130                pr_err("received unrecognized IB QP event %d\n", event->event);
1131                break;
1132        }
1133}
1134
1135static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1136                struct nvmet_rdma_queue *queue,
1137                struct rdma_conn_param *p)
1138{
1139        struct rdma_conn_param  param = { };
1140        struct nvme_rdma_cm_rep priv = { };
1141        int ret = -ENOMEM;
1142
1143        param.rnr_retry_count = 7;
1144        param.flow_control = 1;
1145        param.initiator_depth = min_t(u8, p->initiator_depth,
1146                queue->dev->device->attrs.max_qp_init_rd_atom);
1147        param.private_data = &priv;
1148        param.private_data_len = sizeof(priv);
1149        priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1150        priv.crqsize = cpu_to_le16(queue->recv_queue_size);
1151
1152        ret = rdma_accept(cm_id, &param);
1153        if (ret)
1154                pr_err("rdma_accept failed (error code = %d)\n", ret);
1155
1156        return ret;
1157}
1158
1159static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1160                struct rdma_cm_event *event)
1161{
1162        struct nvmet_rdma_device *ndev;
1163        struct nvmet_rdma_queue *queue;
1164        int ret = -EINVAL;
1165
1166        ndev = nvmet_rdma_find_get_device(cm_id);
1167        if (!ndev) {
1168                pr_err("no client data!\n");
1169                nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1170                return -ECONNREFUSED;
1171        }
1172
1173        queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1174        if (!queue) {
1175                ret = -ENOMEM;
1176                goto put_device;
1177        }
1178        queue->port = cm_id->context;
1179
1180        ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1181        if (ret)
1182                goto release_queue;
1183
1184        mutex_lock(&nvmet_rdma_queue_mutex);
1185        list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
1186        mutex_unlock(&nvmet_rdma_queue_mutex);
1187
1188        return 0;
1189
1190release_queue:
1191        nvmet_rdma_free_queue(queue);
1192put_device:
1193        kref_put(&ndev->ref, nvmet_rdma_free_dev);
1194
1195        return ret;
1196}
1197
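/*
 * RDMA_CM_EVENT_ESTABLISHED handler: move the queue to LIVE and process
 * any commands that arrived while it was still CONNECTING.
 */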
1198static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
1199{
1200        unsigned long flags;
1201
1202        spin_lock_irqsave(&queue->state_lock, flags);
1203        if (queue->state != NVMET_RDMA_Q_CONNECTING) {
1204                pr_warn("trying to establish a connected queue\n");
1205                goto out_unlock;
1206        }
1207        queue->state = NVMET_RDMA_Q_LIVE;
1208
1209        while (!list_empty(&queue->rsp_wait_list)) {
1210                struct nvmet_rdma_rsp *cmd;
1211
1212                cmd = list_first_entry(&queue->rsp_wait_list,
1213                                        struct nvmet_rdma_rsp, wait_list);
1214                list_del(&cmd->wait_list);
1215
1216                spin_unlock_irqrestore(&queue->state_lock, flags);
1217                nvmet_rdma_handle_command(queue, cmd);
1218                spin_lock_irqsave(&queue->state_lock, flags);
1219        }
1220
1221out_unlock:
1222        spin_unlock_irqrestore(&queue->state_lock, flags);
1223}
1224
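/*
 * Tear a queue down: CONNECTING and LIVE queues move to DISCONNECTING;
 * for those and for IN_DEVICE_REMOVAL we disconnect the cm_id, drain
 * the QP and schedule the release work.  Queues that are already
 * DISCONNECTING are left alone.
 */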
1225static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1226{
1227        bool disconnect = false;
1228        unsigned long flags;
1229
1230        pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1231
1232        spin_lock_irqsave(&queue->state_lock, flags);
1233        switch (queue->state) {
1234        case NVMET_RDMA_Q_CONNECTING:
1235        case NVMET_RDMA_Q_LIVE:
1236                queue->state = NVMET_RDMA_Q_DISCONNECTING;
1237        case NVMET_RDMA_IN_DEVICE_REMOVAL:
1238                disconnect = true;
1239                break;
1240        case NVMET_RDMA_Q_DISCONNECTING:
1241                break;
1242        }
1243        spin_unlock_irqrestore(&queue->state_lock, flags);
1244
1245        if (disconnect) {
1246                rdma_disconnect(queue->cm_id);
1247                ib_drain_qp(queue->cm_id->qp);
1248                schedule_work(&queue->release_work);
1249        }
1250}
1251
1252static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1253{
1254        bool disconnect = false;
1255
1256        mutex_lock(&nvmet_rdma_queue_mutex);
1257        if (!list_empty(&queue->queue_list)) {
1258                list_del_init(&queue->queue_list);
1259                disconnect = true;
1260        }
1261        mutex_unlock(&nvmet_rdma_queue_mutex);
1262
1263        if (disconnect)
1264                __nvmet_rdma_queue_disconnect(queue);
1265}
1266
1267static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1268                struct nvmet_rdma_queue *queue)
1269{
1270        WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
1271
1272        pr_err("failed to connect queue\n");
1273        schedule_work(&queue->release_work);
1274}
1275
 1276/**
 1277 * nvmet_rdma_device_removal() - Handle RDMA device removal
 1278 * @cm_id:      rdma_cm id of the queue or of the listening port
 1279 * @queue:      nvmet rdma queue (cm_id qp_context), NULL for a listener
 1280 *
 1281 * A DEVICE_REMOVAL event notifies us that the RDMA device is about
 1282 * to unplug, so we should take care of destroying our RDMA resources.
 1283 * This event will be generated for each allocated cm_id.
 1284 *
 1285 * Note that this event can be generated on a normal queue cm_id
 1286 * and/or a device bound listener cm_id (in which case
 1287 * queue will be NULL).
 1288 *
 1289 * We claim ownership of destroying the cm_id. For queues we move
 1290 * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL, and for the port
 1291 * we nullify the priv to prevent a double cm_id destruction and destroy
 1292 * the cm_id implicitly by returning a non-zero rc to the callout.
 1293 */
1294static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1295                struct nvmet_rdma_queue *queue)
1296{
1297        unsigned long flags;
1298
1299        if (!queue) {
1300                struct nvmet_port *port = cm_id->context;
1301
1302                /*
1303                 * This is a listener cm_id. Make sure that
1304                 * future remove_port won't invoke a double
 1305                 * cm_id destroy. Use atomic xchg to make sure
1306                 * we don't compete with remove_port.
1307                 */
1308                if (xchg(&port->priv, NULL) != cm_id)
1309                        return 0;
1310        } else {
1311                /*
1312                 * This is a queue cm_id. Make sure that
1313                 * release queue will not destroy the cm_id
1314                 * and schedule all ctrl queues removal (only
1315                 * if the queue is not disconnecting already).
1316                 */
1317                spin_lock_irqsave(&queue->state_lock, flags);
1318                if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
1319                        queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
1320                spin_unlock_irqrestore(&queue->state_lock, flags);
1321                nvmet_rdma_queue_disconnect(queue);
1322                flush_scheduled_work();
1323        }
1324
1325        /*
1326         * We need to return 1 so that the core will destroy
 1327         * its own ID.  What a great API design..
1328         */
1329        return 1;
1330}
1331
1332static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1333                struct rdma_cm_event *event)
1334{
1335        struct nvmet_rdma_queue *queue = NULL;
1336        int ret = 0;
1337
1338        if (cm_id->qp)
1339                queue = cm_id->qp->qp_context;
1340
1341        pr_debug("%s (%d): status %d id %p\n",
1342                rdma_event_msg(event->event), event->event,
1343                event->status, cm_id);
1344
1345        switch (event->event) {
1346        case RDMA_CM_EVENT_CONNECT_REQUEST:
1347                ret = nvmet_rdma_queue_connect(cm_id, event);
1348                break;
1349        case RDMA_CM_EVENT_ESTABLISHED:
1350                nvmet_rdma_queue_established(queue);
1351                break;
1352        case RDMA_CM_EVENT_ADDR_CHANGE:
1353        case RDMA_CM_EVENT_DISCONNECTED:
1354        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1355                nvmet_rdma_queue_disconnect(queue);
1356                break;
1357        case RDMA_CM_EVENT_DEVICE_REMOVAL:
1358                ret = nvmet_rdma_device_removal(cm_id, queue);
1359                break;
1360        case RDMA_CM_EVENT_REJECTED:
1361        case RDMA_CM_EVENT_UNREACHABLE:
1362        case RDMA_CM_EVENT_CONNECT_ERROR:
1363                nvmet_rdma_queue_connect_fail(cm_id, queue);
1364                break;
1365        default:
1366                pr_err("received unrecognized RDMA CM event %d\n",
1367                        event->event);
1368                break;
1369        }
1370
1371        return ret;
1372}
1373
1374static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
1375{
1376        struct nvmet_rdma_queue *queue;
1377
1378restart:
1379        mutex_lock(&nvmet_rdma_queue_mutex);
1380        list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
1381                if (queue->nvme_sq.ctrl == ctrl) {
1382                        list_del_init(&queue->queue_list);
1383                        mutex_unlock(&nvmet_rdma_queue_mutex);
1384
1385                        __nvmet_rdma_queue_disconnect(queue);
1386                        goto restart;
1387                }
1388        }
1389        mutex_unlock(&nvmet_rdma_queue_mutex);
1390}
1391
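/*
 * .add_port fabrics callback: only IPv4 discovery addresses are handled
 * here; parse the transport service id and address, then create an RDMA
 * CM id that listens for incoming connections on behalf of this port.
 */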
1392static int nvmet_rdma_add_port(struct nvmet_port *port)
1393{
1394        struct rdma_cm_id *cm_id;
1395        struct sockaddr_in addr_in;
1396        u16 port_in;
1397        int ret;
1398
1399        switch (port->disc_addr.adrfam) {
1400        case NVMF_ADDR_FAMILY_IP4:
1401                break;
1402        default:
1403                pr_err("address family %d not supported\n",
1404                                port->disc_addr.adrfam);
1405                return -EINVAL;
1406        }
1407
1408        ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in);
1409        if (ret)
1410                return ret;
1411
1412        addr_in.sin_family = AF_INET;
1413        addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr);
1414        addr_in.sin_port = htons(port_in);
1415
1416        cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1417                        RDMA_PS_TCP, IB_QPT_RC);
1418        if (IS_ERR(cm_id)) {
1419                pr_err("CM ID creation failed\n");
1420                return PTR_ERR(cm_id);
1421        }
1422
1423        ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in);
1424        if (ret) {
1425                pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret);
1426                goto out_destroy_id;
1427        }
1428
1429        ret = rdma_listen(cm_id, 128);
1430        if (ret) {
1431                pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret);
1432                goto out_destroy_id;
1433        }
1434
1435        pr_info("enabling port %d (%pISpc)\n",
1436                le16_to_cpu(port->disc_addr.portid), &addr_in);
1437        port->priv = cm_id;
1438        return 0;
1439
1440out_destroy_id:
1441        rdma_destroy_id(cm_id);
1442        return ret;
1443}
1444
1445static void nvmet_rdma_remove_port(struct nvmet_port *port)
1446{
1447        struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
1448
1449        if (cm_id)
1450                rdma_destroy_id(cm_id);
1451}
1452
1453static struct nvmet_fabrics_ops nvmet_rdma_ops = {
1454        .owner                  = THIS_MODULE,
1455        .type                   = NVMF_TRTYPE_RDMA,
1456        .sqe_inline_size        = NVMET_RDMA_INLINE_DATA_SIZE,
1457        .msdbd                  = 1,
1458        .has_keyed_sgls         = 1,
1459        .add_port               = nvmet_rdma_add_port,
1460        .remove_port            = nvmet_rdma_remove_port,
1461        .queue_response         = nvmet_rdma_queue_response,
1462        .delete_ctrl            = nvmet_rdma_delete_ctrl,
1463};
1464
1465static int __init nvmet_rdma_init(void)
1466{
1467        return nvmet_register_transport(&nvmet_rdma_ops);
1468}
1469
1470static void __exit nvmet_rdma_exit(void)
1471{
1472        struct nvmet_rdma_queue *queue;
1473
1474        nvmet_unregister_transport(&nvmet_rdma_ops);
1475
1476        flush_scheduled_work();
1477
1478        mutex_lock(&nvmet_rdma_queue_mutex);
1479        while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
1480                        struct nvmet_rdma_queue, queue_list))) {
1481                list_del_init(&queue->queue_list);
1482
1483                mutex_unlock(&nvmet_rdma_queue_mutex);
1484                __nvmet_rdma_queue_disconnect(queue);
1485                mutex_lock(&nvmet_rdma_queue_mutex);
1486        }
1487        mutex_unlock(&nvmet_rdma_queue_mutex);
1488
1489        flush_scheduled_work();
1490        ida_destroy(&nvmet_rdma_queue_ida);
1491}
1492
1493module_init(nvmet_rdma_init);
1494module_exit(nvmet_rdma_exit);
1495
1496MODULE_LICENSE("GPL v2");
1497MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
1498