linux/drivers/infiniband/ulp/isert/ib_isert.c
/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN          8
#define ISER_MAX_RX_CQ_LEN      (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN      (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct kmem_cache *isert_cmd_cache;

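/*
 * QP async event handler.  IB_EVENT_COMM_EST is forwarded to the RDMA CM
 * via rdma_notify(), presumably so connection establishment can still
 * complete when the first payload arrives before the CM sees the RTU.
 */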
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
        struct isert_conn *isert_conn = (struct isert_conn *)context;

        pr_err("isert_qp_event_callback event: %d\n", e->event);
        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
                pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
                break;
        default:
                break;
        }
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
        int ret;

        ret = ib_query_device(ib_dev, devattr);
        if (ret) {
                pr_err("ib_query_device() failed: %d\n", ret);
                return ret;
        }
        pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
        pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

        return 0;
}

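/*
 * Create the RC QP for a new connection: pick the TX/RX CQ pair with the
 * fewest active QPs (under device_list_mutex), then hand the selected CQs
 * and the ISERT_QP_MAX_* work request limits to rdma_create_qp().
 */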
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
        struct ib_device_attr devattr;
        int ret, index, min_index = 0;

        memset(&devattr, 0, sizeof(struct ib_device_attr));
        ret = isert_query_device(cma_id->device, &devattr);
        if (ret)
                return ret;

        mutex_lock(&device_list_mutex);
        for (index = 0; index < device->cqs_used; index++)
                if (device->cq_active_qps[index] <
                    device->cq_active_qps[min_index])
                        min_index = index;
        device->cq_active_qps[min_index]++;
        pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
        mutex_unlock(&device_list_mutex);

        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
        attr.send_cq = device->dev_tx_cq[min_index];
        attr.recv_cq = device->dev_rx_cq[min_index];
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READ..
         */
        attr.cap.max_send_sge = devattr.max_sge - 2;
        isert_conn->max_sge = attr.cap.max_send_sge;

        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;

        pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
                 cma_id->device);
        pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
                 isert_conn->conn_pd->device);

        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
                pr_err("rdma_create_qp() failed: %d\n", ret);
                return ret;
        }
        isert_conn->conn_qp = cma_id->qp;
        pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

        return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
        pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

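/*
 * Allocate the ISERT_QP_MAX_RECV_DTOS receive descriptors for a connection
 * and DMA-map each one for DMA_FROM_DEVICE.  On a mapping failure, unwind
 * only the descriptors mapped so far before freeing the array.
 */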
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        u64 dma_addr;
        int i, j;

        isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!isert_conn->conn_rx_descs)
                goto fail;

        rx_desc = isert_conn->conn_rx_descs;

        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ib_dev, dma_addr))
                        goto dma_map_fail;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = isert_conn->conn_mr->lkey;
        }

        isert_conn->conn_rx_desc_head = 0;
        return 0;

dma_map_fail:
        rx_desc = isert_conn->conn_rx_descs;
        for (j = 0; j < i; j++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }
        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
fail:
        return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        int i;

        if (!isert_conn->conn_rx_descs)
                return;

        rx_desc = isert_conn->conn_rx_descs;
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }

        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

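/*
 * Allocate per-IB-device resources shared by all connections on that
 * device: a PD, a local-write DMA MR, and one TX/RX CQ pair per
 * completion vector, capped at min(num_online_cpus(), num_comp_vectors,
 * ISERT_MAX_CQ).
 */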
static int
isert_create_device_ib_res(struct isert_device *device)
{
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
        int ret = 0, i, j;

        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
        pr_debug("Using %d CQs, device %s supports %d vectors\n",
                 device->cqs_used, device->ib_device->name,
                 device->ib_device->num_comp_vectors);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
                pr_err("Unable to allocate device->cq_desc\n");
                return -ENOMEM;
        }
        cq_desc = device->cq_desc;

        device->dev_pd = ib_alloc_pd(ib_dev);
        if (IS_ERR(device->dev_pd)) {
                ret = PTR_ERR(device->dev_pd);
                pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
                goto out_cq_desc;
        }

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;

                device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_RX_CQ_LEN, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
                        goto out_cq;
                }

                device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_TX_CQ_LEN, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
                        goto out_cq;
                }

                ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;

                ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;
        }

        device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(device->dev_mr)) {
                ret = PTR_ERR(device->dev_mr);
                pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
                goto out_cq;
        }

        return 0;

out_cq:
        for (j = 0; j < i; j++) {
                cq_desc = &device->cq_desc[j];

                if (device->dev_rx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_rx_work);
                        ib_destroy_cq(device->dev_rx_cq[j]);
                }
                if (device->dev_tx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_tx_work);
                        ib_destroy_cq(device->dev_tx_cq[j]);
                }
        }
        ib_dealloc_pd(device->dev_pd);

out_cq_desc:
        kfree(device->cq_desc);

        return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
        struct isert_cq_desc *cq_desc;
        int i;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc = &device->cq_desc[i];

                cancel_work_sync(&cq_desc->cq_rx_work);
                cancel_work_sync(&cq_desc->cq_tx_work);
                ib_destroy_cq(device->dev_rx_cq[i]);
                ib_destroy_cq(device->dev_tx_cq[i]);
                device->dev_rx_cq[i] = NULL;
                device->dev_tx_cq[i] = NULL;
        }

        ib_dereg_mr(device->dev_mr);
        ib_dealloc_pd(device->dev_pd);
        kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
        mutex_lock(&device_list_mutex);
        device->refcount--;
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
                kfree(device);
        }
        mutex_unlock(&device_list_mutex);
}

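/*
 * Find a cached isert_device for this CM ID's IB device (matched by node
 * GUID) under device_list_mutex, or allocate and initialize a new one.
 * The reference taken here is dropped via isert_device_try_release().
 */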
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
        struct isert_device *device;
        int ret;

        mutex_lock(&device_list_mutex);
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
        }

        device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
        if (!device) {
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&device->dev_node);

        device->ib_device = cma_id->device;
        ret = isert_create_device_ib_res(device);
        if (ret) {
                kfree(device);
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(ret);
        }

        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
        mutex_unlock(&device_list_mutex);

        return device;
}

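/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate and initialize the
 * isert_conn, DMA-map the single login buffer (request half mapped
 * FROM_DEVICE, response half TO_DEVICE), take a device reference, create
 * the QP, then queue the connection for iscsit accept.
 */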
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct iscsi_np *np = cma_id->context;
        struct isert_np *isert_np = np->np_context;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;

        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);

        isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
        if (!isert_conn) {
                pr_err("Unable to allocate isert_conn\n");
                return -ENOMEM;
        }
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
        init_waitqueue_head(&isert_conn->conn_wait);
        init_waitqueue_head(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);

        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
        isert_conn->responder_resources = event->param.conn.responder_resources;
        isert_conn->initiator_depth = event->param.conn.initiator_depth;
        pr_debug("Using responder_resources: %u initiator_depth: %u\n",
                 isert_conn->responder_resources, isert_conn->initiator_depth);

        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!isert_conn->login_buf) {
                pr_err("Unable to allocate isert_conn->login_buf\n");
                ret = -ENOMEM;
                goto out;
        }

        isert_conn->login_req_buf = isert_conn->login_buf;
        isert_conn->login_rsp_buf = isert_conn->login_buf +
                                    ISCSI_DEF_MAX_RECV_SEG_LEN;
        pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
                 isert_conn->login_buf, isert_conn->login_req_buf,
                 isert_conn->login_rsp_buf);

        isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
                                (void *)isert_conn->login_req_buf,
                                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
                       ret);
                isert_conn->login_req_dma = 0;
                goto out_login_buf;
        }

        isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                                        (void *)isert_conn->login_rsp_buf,
                                        ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
                       ret);
                isert_conn->login_rsp_dma = 0;
                goto out_req_dma_map;
        }

        device = isert_device_find_by_ib_dev(cma_id);
        if (IS_ERR(device)) {
                ret = PTR_ERR(device);
                goto out_rsp_dma_map;
        }

        isert_conn->conn_device = device;
        isert_conn->conn_pd = device->dev_pd;
        isert_conn->conn_mr = device->dev_mr;

        ret = isert_conn_setup_qp(isert_conn, cma_id);
        if (ret)
                goto out_conn_dev;

        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);

        pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
        wake_up(&isert_np->np_accept_wq);
        return 0;

out_conn_dev:
        isert_device_try_release(device);
out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
        kfree(isert_conn->login_buf);
out:
        kfree(isert_conn);
        return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_device *device = isert_conn->conn_device;
        int cq_index;

        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
                pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
                isert_conn->conn_device->cq_active_qps[cq_index]--;

                rdma_destroy_qp(isert_conn->conn_cm_id);
        }

        isert_free_rx_descriptors(isert_conn);
        rdma_destroy_id(isert_conn->conn_cm_id);

        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
                ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN,
                                    DMA_FROM_DEVICE);
                kfree(isert_conn->login_buf);
        }
        kfree(isert_conn);

        if (device)
                isert_device_try_release(device);

        pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
        return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, conn_kref);

        pr_debug("Calling isert_connect_release for final kref %s/%d\n",
                 current->comm, current->pid);

        isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

static void
isert_disconnect_work(struct work_struct *work)
{
        struct isert_conn *isert_conn = container_of(work,
                                struct isert_conn, conn_logout_work);

        pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

        isert_conn->state = ISER_CONN_DOWN;

        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
                pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
                wake_up(&isert_conn->conn_wait);
        }

        isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        int ret = 0;

        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
                ret = isert_connect_request(cma_id, event);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
                isert_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
                isert_disconnected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_ADDR_CHANGE:
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
                pr_err("Unknown RDMA CMA event: %d\n", event->event);
                break;
        }

        if (ret != 0) {
                pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
                       event->event, ret);
                dump_stack();
        }

        return ret;
}

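/*
 * Post 'count' receive work requests as a single chained list.  The rx
 * descriptor head is advanced with a power-of-two mask, so
 * ISERT_QP_MAX_RECV_DTOS must remain a power of two for this to work.
 */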
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
        struct ib_recv_wr *rx_wr, *rx_wr_failed;
        int i, ret;
        unsigned int rx_head = isert_conn->conn_rx_desc_head;
        struct iser_rx_desc *rx_desc;

        for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc         = &isert_conn->conn_rx_descs[rx_head];
                rx_wr->wr_id    = (unsigned long)rx_desc;
                rx_wr->sg_list  = &rx_desc->rx_sg;
                rx_wr->num_sge  = 1;
                rx_wr->next     = rx_wr + 1;
                rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
        }

        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        isert_conn->post_recv_buf_count += count;
        ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                                &rx_wr_failed);
        if (ret) {
                pr_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
                pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_send_wr send_wr, *send_wr_failed;
        int ret;

        ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);

        send_wr.next    = NULL;
        send_wr.wr_id   = (unsigned long)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode  = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        atomic_inc(&isert_conn->post_send_buf_count);

        ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
        if (ret) {
                pr_err("ib_post_send() failed, ret: %d\n", ret);
                atomic_dec(&isert_conn->post_send_buf_count);
        }

        return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

        ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                                   ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        tx_desc->num_sge = 1;
        tx_desc->isert_cmd = isert_cmd;

        if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
                tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
                pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        u64 dma_addr;

        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                        ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
                pr_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }

        tx_desc->dma_addr = dma_addr;
        tx_desc->tx_sg[0].addr  = tx_desc->dma_addr;
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

        pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
                 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
                 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

        return 0;
}

static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
        send_wr->send_flags = IB_SEND_SIGNALED;
        send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
        struct ib_recv_wr rx_wr, *rx_wr_fail;
        struct ib_sge sge;
        int ret;

        memset(&sge, 0, sizeof(struct ib_sge));
        sge.addr = isert_conn->login_req_dma;
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = isert_conn->conn_mr->lkey;

        pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
                sge.addr, sge.length, sge.lkey);

        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
        rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;

        isert_conn->post_recv_buf_count++;
        ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
        if (ret) {
                pr_err("ib_post_recv() failed: %d\n", ret);
                isert_conn->post_recv_buf_count--;
        } else {
                pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
        }

        return ret;
}

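/*
 * Build and post a login response: the iSCSI header rides in tx_sg[0],
 * any text payload is copied into the pre-mapped login_rsp buffer as
 * tx_sg[1].  On the final (login_complete) response, the full RX
 * descriptor ring is allocated and ISERT_MIN_POSTED_RX buffers are
 * posted before the connection is marked ISER_CONN_UP.
 */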
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
        int ret;

        isert_create_send_desc(isert_conn, NULL, tx_desc);

        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                tx_dsg->addr    = isert_conn->login_rsp_dma;
                tx_dsg->length  = length;
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
                        if (ret)
                                return ret;

                        isert_conn->state = ISER_CONN_UP;
                        goto post_send;
                }

                ret = isert_rdma_post_recvl(isert_conn);
                if (ret)
                        return ret;
        }
post_send:
        ret = isert_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                   struct isert_conn *isert_conn)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;

        if (!login) {
                pr_err("conn->conn_login is NULL\n");
                dump_stack();
                return;
        }

        if (login->first_request) {
                struct iscsi_login_req *login_req =
                        (struct iscsi_login_req *)&rx_desc->iscsi_header;
                /*
                 * Setup the initial iscsi_login values from the leading
                 * login request PDU.
                 */
                login->leading_connection = (!login_req->tsih) ? 1 : 0;
                login->current_stage =
                        (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
                         >> 2;
                login->version_min      = login_req->min_version;
                login->version_max      = login_req->max_version;
                memcpy(login->isid, login_req->isid, 6);
                login->cmd_sn           = be32_to_cpu(login_req->cmdsn);
                login->init_task_tag    = login_req->itt;
                login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
                login->cid              = be16_to_cpu(login_req->cid);
                login->tsih             = be16_to_cpu(login_req->tsih);
        }

        memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
        pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
                 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);

        complete(&isert_conn->conn_login_comp);
}

static void
isert_release_cmd(struct iscsi_cmd *cmd)
{
        struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
                                                   iscsi_cmd);

        pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);

        kfree(cmd->buf_ptr);
        kfree(cmd->tmr_req);

        kmem_cache_free(isert_cmd_cache, isert_cmd);
}

static struct iscsi_cmd *
isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_cmd *isert_cmd;

        isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
        if (!isert_cmd) {
                pr_err("Unable to allocate isert_cmd\n");
                return NULL;
        }
        isert_cmd->conn = isert_conn;
        isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;

        return &isert_cmd->iscsi_cmd;
}

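/*
 * Handle an inbound SCSI command PDU.  Any immediate data arrives inline
 * in the receive descriptor and is copied straight into the se_cmd
 * scatterlist via sg_copy_from_buffer() before the command is sequenced.
 */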
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
                      unsigned char *buf)
{
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        struct scatterlist *sg;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;

        rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
        if (rc < 0)
                return rc;

        imm_data = cmd->immediate_data;
        imm_data_len = cmd->first_burst_len;
        unsol_data = cmd->unsolicited_data;

        rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
        if (rc < 0) {
                return 0;
        } else if (rc > 0) {
                dump_payload = true;
                goto sequence_cmd;
        }

        if (!imm_data)
                return 0;

        sg = &cmd->se_cmd.t_data_sg[0];
        sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

        pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
                 sg, sg_nents, &rx_desc->data[0], imm_data_len);

        sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

        cmd->write_data_done += imm_data_len;

        if (cmd->write_data_done == cmd->se_cmd.data_length) {
                spin_lock_bh(&cmd->istate_lock);
                cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
                cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
                spin_unlock_bh(&cmd->istate_lock);
        }

sequence_cmd:
        rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);

        if (!rc && !dump_payload && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);

        if (rc == CMDSN_ERROR_CANNOT_RECOVER)
                return iscsit_add_reject_from_cmd(
                           ISCSI_REASON_PROTOCOL_ERROR,
                           1, 0, (unsigned char *)hdr, cmd);

        return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct scatterlist *sg_start;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;

        rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
        if (rc < 0)
                return rc;
        else if (!cmd)
                return 0;
        /*
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
                pr_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }

        pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
                 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
        page_off = cmd->write_data_done % PAGE_SIZE;
        /*
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
                pr_err("Received unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
        pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
                 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

        sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                            unsol_data_len);

        rc = iscsit_check_dataout_payload(cmd, hdr, false);
        if (rc < 0)
                return rc;

        return 0;
}

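/*
 * Demultiplex a received PDU on its iSCSI opcode.  For opcodes that need
 * an iscsi_cmd, one is allocated here and, for SCSI commands, annotated
 * with the remote stag/va pairs advertised in the iSER header so later
 * RDMA work requests can target the initiator's buffers.
 */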
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
        struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
                isert_cmd->read_stag = read_stag;
                isert_cmd->read_va = read_va;
                isert_cmd->write_stag = write_stag;
                isert_cmd->write_va = write_va;

                ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
                                        rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                if (ret > 0)
                        wait_for_completion_timeout(&conn->conn_logout_comp,
                                                    SECONDS_FOR_LOGOUT_COMP *
                                                    HZ);
                break;
        default:
                pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }

        return ret;
}

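/*
 * Parse the iSER header in front of the iSCSI PDU: ISER_RSV and ISER_WSV
 * flag the presence of remote read/write stag+va advertisements, which
 * are decoded from network byte order before opcode dispatch.
 */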
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
        struct iser_hdr *iser_hdr = &rx_desc->iser_header;
        uint64_t read_va = 0, write_va = 0;
        uint32_t read_stag = 0, write_stag = 0;
        int rc;

        switch (iser_hdr->flags & 0xF0) {
        case ISCSI_CTRL:
                if (iser_hdr->flags & ISER_RSV) {
                        read_stag = be32_to_cpu(iser_hdr->read_stag);
                        read_va = be64_to_cpu(iser_hdr->read_va);
                        pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
                                 read_stag, (unsigned long long)read_va);
                }
                if (iser_hdr->flags & ISER_WSV) {
                        write_stag = be32_to_cpu(iser_hdr->write_stag);
                        write_va = be64_to_cpu(iser_hdr->write_va);
                        pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
                                 write_stag, (unsigned long long)write_va);
                }

                pr_debug("ISER ISCSI_CTRL PDU\n");
                break;
        case ISER_HELLO:
                pr_err("iSER Hello message\n");
                break;
        default:
                pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
                break;
        }

        rc = isert_rx_opcode(isert_conn, rx_desc,
                             read_stag, read_va, write_stag, write_va);
}

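/*
 * RX completion path.  The login buffer is identified by comparing the
 * wr_id pointer against login_req_buf; everything else is a ring
 * descriptor.  After processing, the receive queue is replenished
 * whenever outstanding buffers drop to ISERT_MIN_POSTED_RX below the
 * ring size.
 */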
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                    unsigned long xfer_len)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_hdr *hdr;
        u64 rx_dma;
        int rx_buflen, outstanding;

        if ((char *)desc == isert_conn->login_req_buf) {
                rx_dma = isert_conn->login_req_dma;
                rx_buflen = ISER_RX_LOGIN_SIZE;
                pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        } else {
                rx_dma = desc->dma_addr;
                rx_buflen = ISER_RX_PAYLOAD_SIZE;
                pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        }

        ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

        hdr = &desc->iscsi_header;
        pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
                 hdr->opcode, hdr->itt, hdr->flags,
                 (int)(xfer_len - ISER_HEADERS_LEN));

        if ((char *)desc == isert_conn->login_req_buf)
                isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
                                   isert_conn);
        else
                isert_rx_do_work(desc, isert_conn);

        ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                      DMA_FROM_DEVICE);

        isert_conn->post_recv_buf_count--;
        pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
                 isert_conn->post_recv_buf_count);

        if ((char *)desc == isert_conn->login_req_buf)
                return;

        outstanding = isert_conn->post_recv_buf_count;
        if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
                int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
                                ISERT_MIN_POSTED_RX);
                err = isert_post_recv(isert_conn, count);
                if (err) {
                        pr_err("isert_post_recv() count: %d failed, %d\n",
                               count, err);
                }
        }
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

        pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

        if (wr->sge) {
                ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
                wr->sge = NULL;
        }

        kfree(wr->send_wr);
        wr->send_wr = NULL;

        kfree(isert_cmd->ib_sge);
        isert_cmd->ib_sge = NULL;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct iscsi_conn *conn;

        pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
                conn = isert_conn->conn;

                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                if (cmd->data_direction == DMA_TO_DEVICE)
                        iscsit_stop_dataout_timer(cmd);

                isert_unmap_cmd(isert_cmd, isert_conn);
                /*
                 * Fall-through
                 */
        case ISCSI_OP_SCSI_TMFUNC:
                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        case ISCSI_OP_REJECT:
        case ISCSI_OP_NOOP_OUT:
                conn = isert_conn->conn;

                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                /*
                 * Handle special case for REJECT when iscsi_add_reject*() has
                 * overwritten the original iscsi_opcode assignment, and the
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
                        break;
                }
                /*
                 * Fall-through
                 */
        default:
                isert_release_cmd(cmd);
                break;
        }
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
        if (tx_desc->dma_addr != 0) {
                pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
                ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
                                    ISER_HEADERS_LEN, DMA_TO_DEVICE);
                tx_desc->dma_addr = 0;
        }
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
                     struct ib_device *ib_dev)
{
        if (isert_cmd->sense_buf_dma != 0) {
                pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
                ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
                                    isert_cmd->sense_buf_len, DMA_TO_DEVICE);
                isert_cmd->sense_buf_dma = 0;
        }

        isert_unmap_tx_desc(tx_desc, ib_dev);
        isert_put_cmd(isert_cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
                           struct isert_cmd *isert_cmd)
{
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

        iscsit_stop_dataout_timer(cmd);

        if (wr->sge) {
                pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
                ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
                wr->sge = NULL;
        }

        if (isert_cmd->ib_sge) {
                pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
                kfree(isert_cmd->ib_sge);
                isert_cmd->ib_sge = NULL;
        }

        cmd->write_data_done = se_cmd->data_length;

        pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
        spin_lock_bh(&cmd->istate_lock);
        cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);

        target_execute_cmd(se_cmd);
}

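/*
 * Deferred completion handling for control PDUs (TMR, reject and logout
 * responses), run from isert_comp_wq so the iscsit post-handlers may
 * sleep.
 */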
static void
isert_do_control_comp(struct work_struct *work)
{
        struct isert_cmd *isert_cmd = container_of(work,
                        struct isert_cmd, comp_work);
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

        switch (cmd->i_state) {
        case ISTATE_SEND_TASKMGTRSP:
                pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

                atomic_dec(&isert_conn->post_send_buf_count);
                iscsit_tmr_post_handler(cmd, cmd->conn);

                cmd->i_state = ISTATE_SENT_STATUS;
                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
                break;
        case ISTATE_SEND_REJECT:
                pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
                atomic_dec(&isert_conn->post_send_buf_count);

                cmd->i_state = ISTATE_SENT_STATUS;
                complete(&cmd->reject_comp);
                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
                break;
        case ISTATE_SEND_LOGOUTRSP:
                pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
                /*
                 * Call atomic_dec(&isert_conn->post_send_buf_count)
                 * from isert_free_conn()
                 */
                isert_conn->logout_posted = true;
                iscsit_logout_post_handler(cmd, cmd->conn);
                break;
        default:
                pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
                dump_stack();
                break;
        }
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
                          struct isert_cmd *isert_cmd,
                          struct isert_conn *isert_conn,
                          struct ib_device *ib_dev)
{
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

        if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
            cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
                isert_unmap_tx_desc(tx_desc, ib_dev);

                INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
                queue_work(isert_comp_wq, &isert_cmd->comp_work);
                return;
        }
        atomic_dec(&isert_conn->post_send_buf_count);

        cmd->i_state = ISTATE_SENT_STATUS;
        isert_completion_put(tx_desc, isert_cmd, ib_dev);
}

static void
isert_send_completion(struct iser_tx_desc *tx_desc,
                      struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
        struct isert_rdma_wr *wr;

        if (!isert_cmd) {
                atomic_dec(&isert_conn->post_send_buf_count);
                isert_unmap_tx_desc(tx_desc, ib_dev);
                return;
        }
        wr = &isert_cmd->rdma_wr;

        switch (wr->iser_ib_op) {
        case ISER_IB_RECV:
                pr_err("isert_send_completion: Got ISER_IB_RECV\n");
                dump_stack();
                break;
        case ISER_IB_SEND:
                pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
                isert_response_completion(tx_desc, isert_cmd,
                                          isert_conn, ib_dev);
                break;
        case ISER_IB_RDMA_WRITE:
                pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
                dump_stack();
                break;
        case ISER_IB_RDMA_READ:
                pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

                atomic_dec(&isert_conn->post_send_buf_count);
                isert_completion_rdma_read(tx_desc, isert_cmd);
                break;
        default:
                pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
                dump_stack();
                break;
        }
}

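/*
 * Flush/error completion: release the descriptor, and once both the recv
 * and send buffer counts drain to zero, move the connection to
 * ISER_CONN_TERMINATING and wake anyone waiting in conn_wait_comp_err.
 */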
static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

        if (tx_desc) {
                struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

                if (!isert_cmd)
                        isert_unmap_tx_desc(tx_desc, ib_dev);
                else
                        isert_completion_put(tx_desc, isert_cmd, ib_dev);
        }

        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
                pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
                pr_debug("Calling wake_up from isert_cq_comp_err\n");

                isert_conn->state = ISER_CONN_TERMINATING;
                wake_up(&isert_conn->conn_wait_comp_err);
        }
}

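/*
 * TX CQ drain, run from isert_comp_wq.  Work completions are polled one
 * at a time; the owning connection is recovered from wc.qp->qp_context
 * and the CQ is re-armed with IB_CQ_NEXT_COMP once the queue is empty.
 */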
1427static void
1428isert_cq_tx_work(struct work_struct *work)
1429{
1430        struct isert_cq_desc *cq_desc = container_of(work,
1431                                struct isert_cq_desc, cq_tx_work);
1432        struct isert_device *device = cq_desc->device;
1433        int cq_index = cq_desc->cq_index;
1434        struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1435        struct isert_conn *isert_conn;
1436        struct iser_tx_desc *tx_desc;
1437        struct ib_wc wc;
1438
1439        while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1440                tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1441                isert_conn = wc.qp->qp_context;
1442
1443                if (wc.status == IB_WC_SUCCESS) {
1444                        isert_send_completion(tx_desc, isert_conn);
1445                } else {
1446                        pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1447                        pr_debug("TX wc.status: 0x%08x\n", wc.status);
1448                        atomic_dec(&isert_conn->post_send_buf_count);
1449                        isert_cq_comp_err(tx_desc, isert_conn);
1450                }
1451        }
1452
1453        ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1454}
1455
1456static void
1457isert_cq_tx_callback(struct ib_cq *cq, void *context)
1458{
1459        struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1460
1461        INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1462        queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1463}
1464
1465static void
1466isert_cq_rx_work(struct work_struct *work)
1467{
1468        struct isert_cq_desc *cq_desc = container_of(work,
1469                        struct isert_cq_desc, cq_rx_work);
1470        struct isert_device *device = cq_desc->device;
1471        int cq_index = cq_desc->cq_index;
1472        struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1473        struct isert_conn *isert_conn;
1474        struct iser_rx_desc *rx_desc;
1475        struct ib_wc wc;
1476        unsigned long xfer_len;
1477
1478        while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1479                rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1480                isert_conn = wc.qp->qp_context;
1481
1482                if (wc.status == IB_WC_SUCCESS) {
1483                        xfer_len = (unsigned long)wc.byte_len;
1484                        isert_rx_completion(rx_desc, isert_conn, xfer_len);
1485                } else {
1486                        pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1487                        if (wc.status != IB_WC_WR_FLUSH_ERR)
1488                                pr_debug("RX wc.status: 0x%08x\n", wc.status);
1489
1490                        isert_conn->post_recv_buf_count--;
1491                        isert_cq_comp_err(NULL, isert_conn);
1492                }
1493        }
1494
1495        ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1496}
1497
1498static void
1499isert_cq_rx_callback(struct ib_cq *cq, void *context)
1500{
1501        struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1502
1503        INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1504        queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1505}
1506
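/*
 * Post a single response SEND.  post_send_buf_count is bumped before
 * ib_post_send() and rolled back on failure; on success the matching
 * decrement happens in the TX completion path above.
 */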
1507static int
1508isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1509{
1510        struct ib_send_wr *wr_failed;
1511        int ret;
1512
1513        atomic_inc(&isert_conn->post_send_buf_count);
1514
1515        ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1516                           &wr_failed);
1517        if (ret) {
1518                pr_err("ib_post_send failed with %d\n", ret);
1519                atomic_dec(&isert_conn->post_send_buf_count);
1520                return ret;
1521        }
1522        return ret;
1523}
1524
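/*
 * Queue a SCSI Response PDU.  Sense data, when present, rides as a
 * second SGE behind the iSCSI header.  The sense blob is prefixed by
 * a 2-byte length field and padded to a 4-byte boundary with
 * padding = -len & 3.  Worked example: 18 bytes of sense + 2 bytes of
 * length = 20, so padding = -20 & 3 = 0 and sense_len = 20; for 19
 * bytes of sense, -21 & 3 = 3 and sense_len = 24.
 */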
1525static int
1526isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1527{
1528        struct isert_cmd *isert_cmd = container_of(cmd,
1529                                        struct isert_cmd, iscsi_cmd);
1530        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1531        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1532        struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1533                                &isert_cmd->tx_desc.iscsi_header;
1534
1535        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1536        iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1537        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1538        /*
1539         * Attach SENSE DATA payload to iSCSI Response PDU
1540         */
1541        if (cmd->se_cmd.sense_buffer &&
1542            ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1543            (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1544                struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1545                struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1546                u32 padding, sense_len;
1547
1548                put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1549                                   cmd->sense_buffer);
1550                cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1551
1552                padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1553                hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1554                sense_len = cmd->se_cmd.scsi_sense_length + padding;
1555
1556                isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
1557                                (void *)cmd->sense_buffer, sense_len,
1558                                DMA_TO_DEVICE);
1559
1560                isert_cmd->sense_buf_len = sense_len;
1561                tx_dsg->addr    = isert_cmd->sense_buf_dma;
1562                tx_dsg->length  = sense_len;
1563                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1564                isert_cmd->tx_desc.num_sge = 2;
1565        }
1566
1567        isert_init_send_wr(isert_cmd, send_wr);
1568
1569        pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1570
1571        return isert_post_response(isert_conn, isert_cmd);
1572}
1573
1574static int
1575isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1576                bool nopout_response)
1577{
1578        struct isert_cmd *isert_cmd = container_of(cmd,
1579                                struct isert_cmd, iscsi_cmd);
1580        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1581        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1582
1583        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1584        iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1585                               &isert_cmd->tx_desc.iscsi_header,
1586                               nopout_response);
1587        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1588        isert_init_send_wr(isert_cmd, send_wr);
1589
1590        pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1591
1592        return isert_post_response(isert_conn, isert_cmd);
1593}
1594
1595static int
1596isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1597{
1598        struct isert_cmd *isert_cmd = container_of(cmd,
1599                                struct isert_cmd, iscsi_cmd);
1600        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1601        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1602
1603        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1604        iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1605                                &isert_cmd->tx_desc.iscsi_header);
1606        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1607        isert_init_send_wr(isert_cmd, send_wr);
1608
1609        pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1610
1611        return isert_post_response(isert_conn, isert_cmd);
1612}
1613
1614static int
1615isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1616{
1617        struct isert_cmd *isert_cmd = container_of(cmd,
1618                                struct isert_cmd, iscsi_cmd);
1619        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1620        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1621
1622        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1623        iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1624                                  &isert_cmd->tx_desc.iscsi_header);
1625        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1626        isert_init_send_wr(isert_cmd, send_wr);
1627
1628        pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1629
1630        return isert_post_response(isert_conn, isert_cmd);
1631}
1632
1633static int
1634isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1635{
1636        struct isert_cmd *isert_cmd = container_of(cmd,
1637                                struct isert_cmd, iscsi_cmd);
1638        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1639        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1640
1641        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1642        iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
1643                                &isert_cmd->tx_desc.iscsi_header);
1644        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1645        isert_init_send_wr(isert_cmd, send_wr);
1646
1647        pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1648
1649        return isert_post_response(isert_conn, isert_cmd);
1650}
1651
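/*
 * Fill one RDMA work request with up to max_sge ib_sge entries cut
 * from the command's scatterlist, starting at a byte offset into the
 * payload; returns the number of SGEs consumed so the caller can
 * advance its ib_sge cursor.  Illustrative sizing (assumed numbers,
 * not from this file): with max_sge = 32 and 4 KiB pages, each WR
 * moves at most 32 * 4096 = 128 KiB, so a 1 MiB transfer splits into
 * DIV_ROUND_UP(256, 32) = 8 work requests.
 */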
1652static int
1653isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1654                    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1655                    u32 data_left, u32 offset)
1656{
1657        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1658        struct scatterlist *sg_start, *tmp_sg;
1659        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1660        u32 sg_off, page_off;
1661        int i = 0, sg_nents;
1662
1663        sg_off = offset / PAGE_SIZE;
1664        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1665        sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
1666        page_off = offset % PAGE_SIZE;
1667
1668        send_wr->sg_list = ib_sge;
1669        send_wr->num_sge = sg_nents;
1670        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
1671        /*
1672         * Map TCM scatterlist memory into ib_sge dma_addr/length entries.
1673         */
1674        for_each_sg(sg_start, tmp_sg, sg_nents, i) {
1675                pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
1676                         (unsigned long long)tmp_sg->dma_address,
1677                         tmp_sg->length, page_off);
1678
1679                ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
1680                ib_sge->length = min_t(u32, data_left,
1681                                ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
1682                ib_sge->lkey = isert_conn->conn_mr->lkey;
1683
1684                pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u\n",
1685                         ib_sge->addr, ib_sge->length);
1686                page_off = 0;
1687                data_left -= ib_sge->length;
1688                ib_sge++;
1689                pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
1690        }
1691
1692        pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
1693                 send_wr->sg_list, send_wr->num_sge);
1694
1695        return sg_nents;
1696}
1697
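/*
 * iscsit_queue_data_in() backend: answer a SCSI READ by chaining
 * unsignaled RDMA_WRITEs into the initiator's read buffer, with the
 * SCSI Response SEND linked as the final work request:
 *
 *   send_wr[0] -> send_wr[1] -> ... -> tx_desc.send_wr (IB_WR_SEND)
 *
 * A single ib_post_send() then pushes both the data and the status,
 * and only the trailing SEND generates a TX completion.
 */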
1698static int
1699isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1700{
1701        struct se_cmd *se_cmd = &cmd->se_cmd;
1702        struct isert_cmd *isert_cmd = container_of(cmd,
1703                                        struct isert_cmd, iscsi_cmd);
1704        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1705        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1706        struct ib_send_wr *wr_failed, *send_wr;
1707        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1708        struct ib_sge *ib_sge;
1709        struct scatterlist *sg;
1710        u32 offset = 0, data_len, data_left, rdma_write_max;
1711        int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;
1712
1713        pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);
1714
1715        sg = &se_cmd->t_data_sg[0];
1716        sg_nents = se_cmd->t_data_nents;
1717
1718        count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
1719        if (unlikely(!count)) {
1720                pr_err("Unable to map put_datain SGs\n");
1721                return -EINVAL;
1722        }
1723        wr->sge = sg;
1724        wr->num_sge = sg_nents;
1725        pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
1726                 count, sg, sg_nents);
1727
1728        ib_sge = kcalloc(sg_nents, sizeof(struct ib_sge), GFP_KERNEL);
1729        if (!ib_sge) {
1730                pr_warn("Unable to allocate datain ib_sge\n");
1731                ret = -ENOMEM;
1732                goto unmap_sg;
1733        }
1734        isert_cmd->ib_sge = ib_sge;
1735
1736        pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
1737                 ib_sge, se_cmd->t_data_nents);
1738
1739        wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
1740        wr->send_wr = kcalloc(wr->send_wr_num, sizeof(struct ib_send_wr),
1741                                GFP_KERNEL);
1742        if (!wr->send_wr) {
1743                pr_err("Unable to allocate wr->send_wr\n");
1744                ret = -ENOMEM;
1745                goto unmap_sg;
1746        }
1747        pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
1748                 wr->send_wr, wr->send_wr_num);
1749
1750        iscsit_increment_maxcmdsn(cmd, conn->sess);
1751        cmd->stat_sn = conn->stat_sn++;
1752
1753        wr->isert_cmd = isert_cmd;
1754        rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
1755        data_left = se_cmd->data_length;
1756
1757        for (i = 0; i < wr->send_wr_num; i++) {
1758                send_wr = &isert_cmd->rdma_wr.send_wr[i];
1759                data_len = min(data_left, rdma_write_max);
1760
1761                send_wr->opcode = IB_WR_RDMA_WRITE;
1762                send_wr->send_flags = 0;
1763                send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
1764                send_wr->wr.rdma.rkey = isert_cmd->read_stag;
1765
1766                ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
1767                                        send_wr, data_len, offset);
1768                ib_sge += ib_sge_cnt;
1769
1770                if (i + 1 == wr->send_wr_num)
1771                        send_wr->next = &isert_cmd->tx_desc.send_wr;
1772                else
1773                        send_wr->next = &wr->send_wr[i + 1];
1774
1775                offset += data_len;
1776                data_left -= data_len;
1777        }
1778        /*
1779         * Build isert_cmd->tx_desc for the iSCSI response PDU and attach it.
1780         */
1781        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1782        iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
1783                             &isert_cmd->tx_desc.iscsi_header);
1784        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1785        isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
1786
1787        atomic_inc(&isert_conn->post_send_buf_count);
1788
1789        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1790        if (rc) {
1791                pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
1792                atomic_dec(&isert_conn->post_send_buf_count);
1793        }
1794        pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
1795        return 1;
1796
1797unmap_sg:
1798        ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
1799        return ret;
1800}
1801
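/*
 * iscsit_get_dataout() backend: fetch WRITE payload from the
 * initiator with chained RDMA_READs.  Two cursors advance together:
 * 'offset' indexes the local scatterlist (resuming at write_data_done
 * for recovery), while 'va_offset' walks the remote buffer from
 * write_va.  Only the last READ is signaled, so a single TX
 * completion (ISER_IB_RDMA_READ) fires once all the data has landed.
 */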
1802static int
1803isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
1804{
1805        struct se_cmd *se_cmd = &cmd->se_cmd;
1806        struct isert_cmd *isert_cmd = container_of(cmd,
1807                                        struct isert_cmd, iscsi_cmd);
1808        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1809        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1810        struct ib_send_wr *wr_failed, *send_wr;
1811        struct ib_sge *ib_sge;
1812        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1813        struct scatterlist *sg_start;
1814        u32 sg_off, sg_nents, page_off, va_offset = 0;
1815        u32 offset = 0, data_len, data_left, rdma_write_max;
1816        int rc, ret = 0, count, i, ib_sge_cnt;
1817
1818        pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
1819                 se_cmd->data_length, cmd->write_data_done);
1820
1821        sg_off = cmd->write_data_done / PAGE_SIZE;
1822        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1823        page_off = cmd->write_data_done % PAGE_SIZE;
1824
1825        pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
1826                 sg_off, sg_start, page_off);
1827
1828        data_left = se_cmd->data_length - cmd->write_data_done;
1829        sg_nents = se_cmd->t_data_nents - sg_off;
1830
1831        pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
1832                 data_left, sg_nents);
1833
1834        count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
1835        if (unlikely(!count)) {
1836                pr_err("Unable to map get_dataout SGs\n");
1837                return -EINVAL;
1838        }
1839        wr->sge = sg_start;
1840        wr->num_sge = sg_nents;
1841        pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
1842                 count, sg_start, sg_nents);
1843
1844        ib_sge = kcalloc(sg_nents, sizeof(struct ib_sge), GFP_KERNEL);
1845        if (!ib_sge) {
1846                pr_warn("Unable to allocate dataout ib_sge\n");
1847                ret = -ENOMEM;
1848                goto unmap_sg;
1849        }
1850        isert_cmd->ib_sge = ib_sge;
1851
1852        pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
1853                 ib_sge, sg_nents);
1854
1855        wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
1856        wr->send_wr = kcalloc(wr->send_wr_num, sizeof(struct ib_send_wr),
1857                                GFP_KERNEL);
1858        if (!wr->send_wr) {
1859                pr_err("Unable to allocate wr->send_wr\n");
1860                ret = -ENOMEM;
1861                goto unmap_sg;
1862        }
1863        pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
1864                 wr->send_wr, wr->send_wr_num);
1865
1866        isert_cmd->tx_desc.isert_cmd = isert_cmd;
1867
1868        wr->iser_ib_op = ISER_IB_RDMA_READ;
1869        wr->isert_cmd = isert_cmd;
1870        rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
1871        offset = cmd->write_data_done;
1872
1873        for (i = 0; i < wr->send_wr_num; i++) {
1874                send_wr = &isert_cmd->rdma_wr.send_wr[i];
1875                data_len = min(data_left, rdma_write_max);
1876
1877                send_wr->opcode = IB_WR_RDMA_READ;
1878                send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
1879                send_wr->wr.rdma.rkey = isert_cmd->write_stag;
1880
1881                ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
1882                                        send_wr, data_len, offset);
1883                ib_sge += ib_sge_cnt;
1884
1885                if (i + 1 == wr->send_wr_num)
1886                        send_wr->send_flags = IB_SEND_SIGNALED;
1887                else
1888                        send_wr->next = &wr->send_wr[i + 1];
1889
1890                offset += data_len;
1891                va_offset += data_len;
1892                data_left -= data_len;
1893        }
1894
1895        atomic_inc(&isert_conn->post_send_buf_count);
1896
1897        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1898        if (rc) {
1899                pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
1900                atomic_dec(&isert_conn->post_send_buf_count);
1901        }
1902        pr_debug("Posted RDMA_READ memory for iSER Data WRITE\n");
1903        return 0;
1904
1905unmap_sg:
1906        ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
1907        return ret;
1908}
1909
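/*
 * iscsit_transport queue hooks: dispatch on the command state handed
 * down by the core.  Note the logout special case below, where a
 * successful post is converted to -EAGAIN so the core defers its
 * logout post-handling until the send actually completes (cf. the
 * isert_do_control_comp() note in isert_free_conn() below).
 */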
1910static int
1911isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1912{
1913        int ret;
1914
1915        switch (state) {
1916        case ISTATE_SEND_NOPIN_WANT_RESPONSE:
1917                ret = isert_put_nopin(cmd, conn, false);
1918                break;
1919        default:
1920                pr_err("Unknown immediate state: 0x%02x\n", state);
1921                ret = -EINVAL;
1922                break;
1923        }
1924
1925        return ret;
1926}
1927
1928static int
1929isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1930{
1931        int ret;
1932
1933        switch (state) {
1934        case ISTATE_SEND_LOGOUTRSP:
1935                ret = isert_put_logout_rsp(cmd, conn);
1936                if (!ret) {
1937                        pr_debug("Returning iSER Logout -EAGAIN\n");
1938                        ret = -EAGAIN;
1939                }
1940                break;
1941        case ISTATE_SEND_NOPIN:
1942                ret = isert_put_nopin(cmd, conn, true);
1943                break;
1944        case ISTATE_SEND_TASKMGTRSP:
1945                ret = isert_put_tm_rsp(cmd, conn);
1946                break;
1947        case ISTATE_SEND_REJECT:
1948                ret = isert_put_reject(cmd, conn);
1949                break;
1950        case ISTATE_SEND_STATUS:
1951                /*
1952                 * Special case for sending non-GOOD SCSI status from TX thread
1953                 * context during a pre se_cmd execution failure.
1954                 */
1955                ret = isert_put_response(conn, cmd);
1956                break;
1957        default:
1958                pr_err("Unknown response state: 0x%02x\n", state);
1959                ret = -EINVAL;
1960                break;
1961        }
1962
1963        return ret;
1964}
1965
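/*
 * Bring up the RDMA listener for an iSCSI network portal: allocate
 * the isert_np accept context, then rdma_create_id() +
 * rdma_bind_addr() + rdma_listen() on the portal address.
 * RDMA_PS_TCP selects the RDMA CM TCP port space to match the
 * portal's TCP-style addressing.
 */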
1966static int
1967isert_setup_np(struct iscsi_np *np,
1968               struct __kernel_sockaddr_storage *ksockaddr)
1969{
1970        struct isert_np *isert_np;
1971        struct rdma_cm_id *isert_lid;
1972        struct sockaddr *sa;
1973        int ret;
1974
1975        isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
1976        if (!isert_np) {
1977                pr_err("Unable to allocate struct isert_np\n");
1978                return -ENOMEM;
1979        }
1980        init_waitqueue_head(&isert_np->np_accept_wq);
1981        mutex_init(&isert_np->np_accept_mutex);
1982        INIT_LIST_HEAD(&isert_np->np_accept_list);
1983        init_completion(&isert_np->np_login_comp);
1984
1985        sa = (struct sockaddr *)ksockaddr;
1986        pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
1987        /*
1988         * Set up np->np_sockaddr from the sockaddr passed in from
1989         * the iscsi_target_configfs.c code.
1990         */
1991        memcpy(&np->np_sockaddr, ksockaddr,
1992               sizeof(struct __kernel_sockaddr_storage));
1993
1994        isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
1995                                IB_QPT_RC);
1996        if (IS_ERR(isert_lid)) {
1997                pr_err("rdma_create_id() for isert_cma_handler failed: %ld\n",
1998                       PTR_ERR(isert_lid));
1999                ret = PTR_ERR(isert_lid);
2000                goto out;
2001        }
2002
2003        ret = rdma_bind_addr(isert_lid, sa);
2004        if (ret) {
2005                pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
2006                goto out_lid;
2007        }
2008
2009        ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
2010        if (ret) {
2011                pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
2012                goto out_lid;
2013        }
2014
2015        isert_np->np_cm_id = isert_lid;
2016        np->np_context = isert_np;
2017        pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
2018
2019        return 0;
2020
2021out_lid:
2022        rdma_destroy_id(isert_lid);
2023out:
2024        kfree(isert_np);
2025        return ret;
2026}
2027
2028static int
2029isert_check_accept_queue(struct isert_np *isert_np)
2030{
2031        int empty;
2032
2033        mutex_lock(&isert_np->np_accept_mutex);
2034        empty = list_empty(&isert_np->np_accept_list);
2035        mutex_unlock(&isert_np->np_accept_mutex);
2036
2037        return empty;
2038}
2039
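/*
 * Accept the queued CM connect request.  retry_count 7 is the
 * protocol maximum for transport retries, and rnr_retry_count 7 is
 * the special "retry forever" RNR value, so an initiator that races
 * ahead of our posted receives stalls instead of breaking the QP.
 */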
2040static int
2041isert_rdma_accept(struct isert_conn *isert_conn)
2042{
2043        struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2044        struct rdma_conn_param cp;
2045        int ret;
2046
2047        memset(&cp, 0, sizeof(struct rdma_conn_param));
2048        cp.responder_resources = isert_conn->responder_resources;
2049        cp.initiator_depth = isert_conn->initiator_depth;
2050        cp.retry_count = 7;
2051        cp.rnr_retry_count = 7;
2052
2053        pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2054
2055        ret = rdma_accept(cm_id, &cp);
2056        if (ret) {
2057                pr_err("rdma_accept() failed with: %d\n", ret);
2058                return ret;
2059        }
2060
2061        pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2062
2063        return 0;
2064}
2065
2066static int
2067isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2068{
2069        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2070        int ret;
2071
2072        pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2073
2074        ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2075        if (ret)
2076                return ret;
2077
2078        pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2079        return 0;
2080}
2081
2082static void
2083isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2084                    struct isert_conn *isert_conn)
2085{
2086        struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2087        struct rdma_route *cm_route = &cm_id->route;
2088        struct sockaddr_in *sock_in;
2089        struct sockaddr_in6 *sock_in6;
2090
2091        conn->login_family = np->np_sockaddr.ss_family;
2092
2093        if (np->np_sockaddr.ss_family == AF_INET6) {
2094                sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2095                snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2096                         &sock_in6->sin6_addr.in6_u);
2097                conn->login_port = ntohs(sock_in6->sin6_port);
2098
2099                sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2100                snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2101                         &sock_in6->sin6_addr.in6_u);
2102                conn->local_port = ntohs(sock_in6->sin6_port);
2103        } else {
2104                sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2105                snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
2106                         &sock_in->sin_addr.s_addr);
2107                conn->login_port = ntohs(sock_in->sin_port);
2108
2109                sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2110                snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
2111                         &sock_in->sin_addr.s_addr);
2112                conn->local_port = ntohs(sock_in->sin_port);
2113        }
2114}
2115
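/*
 * np accept thread entry point: sleep until a login arrives on the
 * accept list (or the thread is reset), claim the oldest pending
 * isert_conn, post the first login receive, then fire rdma_accept().
 * max_accept bounds consecutive empty wakeups (> 5 gives up with
 * -ENODEV) and is reset whenever a connection is claimed.
 */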
2116static int
2117isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2118{
2119        struct isert_np *isert_np = (struct isert_np *)np->np_context;
2120        struct isert_conn *isert_conn;
2121        int max_accept = 0, ret;
2122
2123accept_wait:
2124        ret = wait_event_interruptible(isert_np->np_accept_wq,
2125                        !isert_check_accept_queue(isert_np) ||
2126                        np->np_thread_state == ISCSI_NP_THREAD_RESET);
2127        if (max_accept > 5)
2128                return -ENODEV;
2129
2130        spin_lock_bh(&np->np_thread_lock);
2131        if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
2132                spin_unlock_bh(&np->np_thread_lock);
2133                pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2134                return -ENODEV;
2135        }
2136        spin_unlock_bh(&np->np_thread_lock);
2137
2138        mutex_lock(&isert_np->np_accept_mutex);
2139        if (list_empty(&isert_np->np_accept_list)) {
2140                mutex_unlock(&isert_np->np_accept_mutex);
2141                max_accept++;
2142                goto accept_wait;
2143        }
2144        isert_conn = list_first_entry(&isert_np->np_accept_list,
2145                        struct isert_conn, conn_accept_node);
2146        list_del_init(&isert_conn->conn_accept_node);
2147        mutex_unlock(&isert_np->np_accept_mutex);
2148
2149        conn->context = isert_conn;
2150        isert_conn->conn = conn;
2151        max_accept = 0;
2152
2153        ret = isert_rdma_post_recvl(isert_conn);
2154        if (ret)
2155                return ret;
2156
2157        ret = isert_rdma_accept(isert_conn);
2158        if (ret)
2159                return ret;
2160
2161        isert_set_conn_info(np, conn, isert_conn);
2162
2163        pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2164        return 0;
2165}
2166
2167static void
2168isert_free_np(struct iscsi_np *np)
2169{
2170        struct isert_np *isert_np = (struct isert_np *)np->np_context;
2171
2172        rdma_destroy_id(isert_np->np_cm_id);
2173
2174        np->np_context = NULL;
2175        kfree(isert_np);
2176}
2177
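/*
 * Connection teardown: rdma_disconnect() flushes all outstanding work
 * requests, the resulting error completions funnel through
 * isert_cq_comp_err() until both buffer counts drain and the state
 * moves to TERMINATING, and only then does this wait chain fall
 * through to the final put.
 */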
2178static void isert_free_conn(struct iscsi_conn *conn)
2179{
2180        struct isert_conn *isert_conn = conn->context;
2181
2182        pr_debug("isert_free_conn: Starting\n");
2183        /*
2184         * Decrement post_send_buf_count for special case when called
2185         * from isert_do_control_comp() -> iscsit_logout_post_handler()
2186         */
2187        if (isert_conn->logout_posted)
2188                atomic_dec(&isert_conn->post_send_buf_count);
2189
2190        if (isert_conn->conn_cm_id)
2191                rdma_disconnect(isert_conn->conn_cm_id);
2192        /*
2193         * Only wait for conn_wait_comp_err if the isert_conn made it
2194         * into full feature phase.
2195         */
2196        if (isert_conn->state > ISER_CONN_INIT) {
2197                pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2198                         isert_conn->state);
2199                wait_event(isert_conn->conn_wait_comp_err,
2200                           isert_conn->state == ISER_CONN_TERMINATING);
2201                pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
2202        }
2203
2204        pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
2205        wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
2206        pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
2207
2208        isert_put_conn(isert_conn);
2209}
2210
2211static struct iscsit_transport iser_target_transport = {
2212        .name                   = "IB/iSER",
2213        .transport_type         = ISCSI_INFINIBAND,
2214        .owner                  = THIS_MODULE,
2215        .iscsit_setup_np        = isert_setup_np,
2216        .iscsit_accept_np       = isert_accept_np,
2217        .iscsit_free_np         = isert_free_np,
2218        .iscsit_free_conn       = isert_free_conn,
2219        .iscsit_alloc_cmd       = isert_alloc_cmd,
2220        .iscsit_get_login_rx    = isert_get_login_rx,
2221        .iscsit_put_login_tx    = isert_put_login_tx,
2222        .iscsit_immediate_queue = isert_immediate_queue,
2223        .iscsit_response_queue  = isert_response_queue,
2224        .iscsit_get_dataout     = isert_get_dataout,
2225        .iscsit_queue_data_in   = isert_put_datain,
2226        .iscsit_queue_status    = isert_put_response,
2227};
2228
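/*
 * Module bring-up order matters here: the RX/TX workqueues and the
 * command cache must exist before iscsit_register_transport() exposes
 * the callbacks above, since a login can arrive as soon as the
 * transport is registered.
 */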
2229static int __init isert_init(void)
2230{
2231        int ret;
2232
2233        isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2234        if (!isert_rx_wq) {
2235                pr_err("Unable to allocate isert_rx_wq\n");
2236                return -ENOMEM;
2237        }
2238
2239        isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2240        if (!isert_comp_wq) {
2241                pr_err("Unable to allocate isert_comp_wq\n");
2242                ret = -ENOMEM;
2243                goto destroy_rx_wq;
2244        }
2245
2246        isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
2247                        sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
2248                        0, NULL);
2249        if (!isert_cmd_cache) {
2250                pr_err("Unable to create isert_cmd_cache\n");
2251                ret = -ENOMEM;
2252                goto destroy_comp_wq;
2253        }
2254
2255        iscsit_register_transport(&iser_target_transport);
2256        pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2257        return 0;
2258
2259destroy_comp_wq:
2260        destroy_workqueue(isert_comp_wq);
2261destroy_rx_wq:
2262        destroy_workqueue(isert_rx_wq);
2263        return ret;
2264}
2265
2266static void __exit isert_exit(void)
2267{
2268        kmem_cache_destroy(isert_cmd_cache);
2269        destroy_workqueue(isert_comp_wq);
2270        destroy_workqueue(isert_rx_wq);
2271        iscsit_unregister_transport(&iser_target_transport);
2272        pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2273}
2274
2275MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2276MODULE_VERSION("0.1");
2277MODULE_AUTHOR("nab@Linux-iSCSI.org");
2278MODULE_LICENSE("GPL");
2279
2280module_init(isert_init);
2281module_exit(isert_exit);
2282