linux/drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "0.2"
#define DRV_RELDATE     "November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

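/*
 * Example (illustrative only): the parameters above are read-only (0444)
 * module parameters, so they are set at load time, e.g.
 *
 *     modprobe ib_srp cmd_sg_entries=64 allow_ext_sg=1
 *
 * The values shown are arbitrary; see the parameter descriptions above
 * for the defaults and limits.
 */
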
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

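/*
 * Identify Topspin/Cisco targets by the OUI in the top three bytes of
 * the (big-endian) IOC GUID; this gates the workarounds below.
 */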
static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

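/*
 * Allocate an information unit and DMA-map its buffer.  Returns NULL on
 * failure; release with srp_free_iu(), which unmaps and frees the buffer.
 */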
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_pkey(target->srp_host->srp_dev->dev,
                           target->srp_host->port,
                           be16_to_cpu(target->path.pkey),
                           &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, target);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (target->cm_id)
                ib_destroy_cm_id(target->cm_id);
        target->cm_id = new_cm_id;

        return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
        struct ib_qp_init_attr *init_attr;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                                       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
        if (IS_ERR(target->recv_cq)) {
                ret = PTR_ERR(target->recv_cq);
                goto err;
        }

        target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                                       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
        if (IS_ERR(target->send_cq)) {
                ret = PTR_ERR(target->send_cq);
                goto err_recv_cq;
        }

        ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
        init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = target->send_cq;
        init_attr->recv_cq             = target->recv_cq;

        target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
        if (IS_ERR(target->qp)) {
                ret = PTR_ERR(target->qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, target->qp);
        if (ret)
                goto err_qp;

        kfree(init_attr);
        return 0;

err_qp:
        ib_destroy_qp(target->qp);

err_send_cq:
        ib_destroy_cq(target->send_cq);

err_recv_cq:
        ib_destroy_cq(target->recv_cq);

err:
        kfree(init_attr);
        return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
        int i;

        ib_destroy_qp(target->qp);
        ib_destroy_cq(target->send_cq);
        ib_destroy_cq(target->recv_cq);

        for (i = 0; i < SRP_RQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->rx_ring[i]);
        for (i = 0; i < SRP_SQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *target_ptr)
{
        struct srp_target_port *target = target_ptr;

        target->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                target->path = *pathrec;
        complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
        target->path.numb_path = 1;

        init_completion(&target->done);

        target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                                   target->srp_host->srp_dev->dev,
                                                   target->srp_host->port,
                                                   &target->path,
                                                   IB_SA_PATH_REC_SERVICE_ID    |
                                                   IB_SA_PATH_REC_DGID          |
                                                   IB_SA_PATH_REC_SGID          |
                                                   IB_SA_PATH_REC_NUMB_PATH     |
                                                   IB_SA_PATH_REC_PKEY,
                                                   SRP_PATH_REC_TIMEOUT_MS,
                                                   GFP_KERNEL,
                                                   srp_path_rec_completion,
                                                   target, &target->path_query);
        if (target->path_query_id < 0)
                return target->path_query_id;

        wait_for_completion(&target->done);

        if (target->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path               = &target->path;
        req->param.alternate_path             = NULL;
        req->param.service_id                 = target->service_id;
        req->param.qp_num                     = target->qp->qp_num;
        req->param.qp_type                    = target->qp->qp_type;
        req->param.private_data               = &req->priv;
        req->param.private_data_len           = sizeof req->priv;
        req->param.flow_control               = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn              &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = 7;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
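        /*
         * Layout sketch for the 16-byte initiator port ID, as built by
         * the rev. 16a ("else") branch below:
         *
         *   bytes 0..7  : initiator extension (target->initiator_ext)
         *   bytes 8..15 : port GUID (target->path.sgid.global.interface_id)
         *
         * SRP_REV10_IB_IO_CLASS targets expect the two halves swapped.
         */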
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             (unsigned long long) be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(target->cm_id, &req->param);

        kfree(req);

        return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        /* XXX should send SRP_I_LOGOUT request */

        init_completion(&target->done);
        if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Sending CM DREQ failed\n");
                return;
        }
        wait_for_completion(&target->done);
}

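/*
 * Atomically transition the target from state @old to @new.  Returns
 * true if the transition was performed, false if the target was not in
 * the expected state.
 */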
static bool srp_change_state(struct srp_target_port *target,
                            enum srp_target_state old,
                            enum srp_target_state new)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state == old) {
                target->state = new;
                changed = true;
        }
        spin_unlock_irq(&target->lock);
        return changed;
}

static void srp_free_req_data(struct srp_target_port *target)
{
        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
        struct srp_request *req;
        int i;

        for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
                kfree(req->fmr_list);
                kfree(req->map_page);
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, work);

        if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
                return;

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        ib_destroy_cm_id(target->cm_id);
        srp_free_target_ib(target);
        srp_free_req_data(target);
        scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
        int retries = 3;
        int ret;

        ret = srp_lookup_path(target);
        if (ret)
                return ret;

        while (1) {
                init_completion(&target->done);
                ret = srp_send_req(target);
                if (ret)
                        return ret;
                wait_for_completion(&target->done);

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (target->status) {
                case 0:
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(target);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        /* Our current CM id was stale, and is now in timewait.
                         * Try to reconnect with a new one.
                         */
                        if (!retries-- || srp_new_cm_id(target)) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                             "giving up on stale connection\n");
                                target->status = -ECONNRESET;
                                return target->status;
                        }

                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "retrying stale connection\n");
                        break;

                default:
                        return target->status;
                }
        }
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_target_port *target,
                           struct srp_request *req)
{
        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
        struct ib_pool_fmr **pfmr;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        pfmr = req->fmr_list;
        while (req->nfmr--)
                ib_fmr_pool_unmap(*pfmr++);

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target,
                           struct srp_request *req, s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(req->scmnd, target, req);
        spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_lim_delta;
        req->scmnd = NULL;
        list_add_tail(&req->list, &target->free_reqs);
        spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
        req->scmnd->result = DID_RESET << 16;
        req->scmnd->scsi_done(req->scmnd);
        srp_remove_req(target, req, 0);
}

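/*
 * Tear down and re-establish the connection: disconnect, grab a fresh
 * CM ID, reset and re-initialize the QP, drain both CQs, fail any
 * outstanding requests and then reconnect.
 */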
static int srp_reconnect_target(struct srp_target_port *target)
{
        struct ib_qp_attr qp_attr;
        struct ib_wc wc;
        int i, ret;

        if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
                return -EAGAIN;

        srp_disconnect_target(target);
        /*
         * Now get a new local CM ID so that we avoid confusing the
         * target in case things are really fouled up.
         */
        ret = srp_new_cm_id(target);
        if (ret)
                goto err;

        qp_attr.qp_state = IB_QPS_RESET;
        ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
        if (ret)
                goto err;

        ret = srp_init_qp(target, target->qp);
        if (ret)
                goto err;

        while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
                ; /* nothing */
        while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
                ; /* nothing */

        for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
                struct srp_request *req = &target->req_ring[i];
                if (req->scmnd)
                        srp_reset_req(target, req);
        }

        INIT_LIST_HEAD(&target->free_tx);
        for (i = 0; i < SRP_SQ_SIZE; ++i)
                list_add(&target->tx_ring[i]->list, &target->free_tx);

        target->qp_in_error = 0;
        ret = srp_connect_target(target);
        if (ret)
                goto err;

        if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
                ret = -EAGAIN;

        return ret;

err:
        shost_printk(KERN_ERR, target->scsi_host,
                     PFX "reconnect failed (%d), removing target port.\n", ret);

        /*
         * We couldn't reconnect, so kill our target port off.
         * However, we have to defer the real removal because we
         * are in the context of the SCSI error handler now, which
         * will deadlock if we call scsi_remove_host().
         *
         * Schedule our work inside the lock to avoid a race with
         * the flush_scheduled_work() in srp_remove_one().
         */
        spin_lock_irq(&target->lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
                INIT_WORK(&target->work, srp_remove_work);
                queue_work(ib_wq, &target->work);
        }
        spin_unlock_irq(&target->lock);

        return ret;
}

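/*
 * Append one direct descriptor to the mapping state and account for its
 * length.
 */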
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
                              struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_pool_fmr *fmr;
        u64 io_addr = 0;

        if (!state->npages)
                return 0;

        if (state->npages == 1) {
                srp_map_desc(state, state->base_dma_addr, state->fmr_len,
                             target->rkey);
                state->npages = state->fmr_len = 0;
                return 0;
        }

        fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->next_fmr++ = fmr;
        state->nfmr++;

        srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
        state->npages = state->fmr_len = 0;
        return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
                                 struct scatterlist *sg, int sg_index,
                                 dma_addr_t dma_addr)
{
        state->unmapped_sg = sg;
        state->unmapped_index = sg_index;
        state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
                            struct srp_target_port *target,
                            struct scatterlist *sg, int sg_index,
                            int use_fmr)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
        unsigned int len;
        int ret;

        if (!dma_len)
                return 0;

        if (use_fmr == SRP_MAP_NO_FMR) {
                /* Once we're in direct map mode for a request, we don't
                 * go back to FMR mode, so no need to update anything
                 * other than the descriptor.
                 */
                srp_map_desc(state, dma_addr, dma_len, target->rkey);
                return 0;
        }

        /* If we start at an offset into the FMR page, don't merge into
         * the current FMR. Finish it out, and use the kernel's MR for this
         * sg entry. This is to avoid potential bugs on some SRP targets
         * that were never quite defined, but went away when the initiator
         * avoided using FMR on such page fragments.
         */
        if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
                ret = srp_map_finish_fmr(state, target);
                if (ret)
                        return ret;

                srp_map_desc(state, dma_addr, dma_len, target->rkey);
                srp_map_update_start(state, NULL, 0, 0);
                return 0;
        }

        /* If this is the first sg to go into the FMR, save our position.
         * We need to know the first unmapped entry, its index, and the
         * first unmapped address within that entry to be able to restart
         * mapping after an error.
         */
        if (!state->unmapped_sg)
                srp_map_update_start(state, sg, sg_index, dma_addr);

        while (dma_len) {
                if (state->npages == SRP_FMR_SIZE) {
                        ret = srp_map_finish_fmr(state, target);
                        if (ret)
                                return ret;

                        srp_map_update_start(state, sg, sg_index, dma_addr);
                }

                len = min_t(unsigned int, dma_len, dev->fmr_page_size);

                if (!state->npages)
                        state->base_dma_addr = dma_addr;
                state->pages[state->npages++] = dma_addr;
                state->fmr_len += len;
                dma_addr += len;
                dma_len -= len;
        }

        /* If the last entry of the FMR wasn't a full page, then we need to
         * close it out and start a new one -- we can only merge at page
         * boundaries.
         */
        ret = 0;
        if (len != dev->fmr_page_size) {
                ret = srp_map_finish_fmr(state, target);
                if (!ret)
                        srp_map_update_start(state, NULL, 0, 0);
        }
        return ret;
}
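
/*
 * Worked example (assuming 4 KiB FMR pages): a 10 KiB S/G entry that
 * starts on a page boundary contributes three page addresses to
 * state->pages; the final, partial page makes len != fmr_page_size, so
 * the FMR is closed out above and the next entry starts a fresh one --
 * merging is only possible at page boundaries.
 */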

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                        struct srp_request *req)
{
        struct scatterlist *scat, *sg;
        struct srp_cmd *cmd = req->cmd->buf;
        int i, len, nents, count, use_fmr;
        struct srp_device *dev;
        struct ib_device *ibdev;
        struct srp_map_state state;
        struct srp_indirect_buf *indirect_hdr;
        u32 table_len;
        u8 fmt;

        if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
                return sizeof (struct srp_cmd);

        if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
            scmnd->sc_data_direction != DMA_TO_DEVICE) {
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled data direction %d\n",
                             scmnd->sc_data_direction);
                return -EINVAL;
        }

        nents = scsi_sg_count(scmnd);
        scat  = scsi_sglist(scmnd);

        dev = target->srp_host->srp_dev;
        ibdev = dev->dev;

        count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
        if (unlikely(count == 0))
                return -EIO;

        fmt = SRP_DATA_DESC_DIRECT;
        len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

        if (count == 1) {
                /*
                 * The midlayer only generated a single gather/scatter
                 * entry, or DMA mapping coalesced everything to a
                 * single entry.  So a direct descriptor along with
                 * the DMA MR suffices.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
                buf->key = cpu_to_be32(target->rkey);
                buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

                req->nfmr = 0;
                goto map_complete;
        }

        /* We have more than one scatter/gather entry, so build our indirect
         * descriptor table, trying to merge as many entries with FMR as we
         * can.
         */
        indirect_hdr = (void *) cmd->add_data;

        ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
                                   target->indirect_size, DMA_TO_DEVICE);

        memset(&state, 0, sizeof(state));
        state.desc      = req->indirect_desc;
        state.pages     = req->map_page;
        state.next_fmr  = req->fmr_list;

        use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

        for_each_sg(scat, sg, count, i) {
                if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
                        /* FMR mapping failed, so backtrack to the first
                         * unmapped entry and continue on without using FMR.
                         */
                        dma_addr_t dma_addr;
                        unsigned int dma_len;

backtrack:
                        sg = state.unmapped_sg;
                        i = state.unmapped_index;

                        dma_addr = ib_sg_dma_address(ibdev, sg);
                        dma_len = ib_sg_dma_len(ibdev, sg);
                        dma_len -= (state.unmapped_addr - dma_addr);
                        dma_addr = state.unmapped_addr;
                        use_fmr = SRP_MAP_NO_FMR;
                        srp_map_desc(&state, dma_addr, dma_len, target->rkey);
                }
        }

        if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
                goto backtrack;

        /* We've mapped the request, now pull as much of the indirect
         * descriptor table as we can into the command buffer. If this
         * target is not using an external indirect table, we are
         * guaranteed to fit into the command, as the SCSI layer won't
         * give us more S/G entries than we allow.
         */
        req->nfmr = state.nfmr;
        if (state.ndesc == 1) {
                /* FMR mapping was able to collapse this to one entry,
                 * so use a direct descriptor.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                *buf = req->indirect_desc[0];
                goto map_complete;
        }

        if (unlikely(target->cmd_sg_cnt < state.ndesc &&
                     !target->allow_ext_sg)) {
                shost_printk(KERN_ERR, target->scsi_host,
                             "Could not fit S/G list into SRP_CMD\n");
                return -EIO;
        }

        count = min(state.ndesc, target->cmd_sg_cnt);
        table_len = state.ndesc * sizeof (struct srp_direct_buf);

        fmt = SRP_DATA_DESC_INDIRECT;
        len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
        len += count * sizeof (struct srp_direct_buf);

        memcpy(indirect_hdr->desc_list, req->indirect_desc,
               count * sizeof (struct srp_direct_buf));

        indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
        indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
        indirect_hdr->table_desc.len = cpu_to_be32(table_len);
        indirect_hdr->len = cpu_to_be32(state.total_len);

        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->data_out_desc_cnt = count;
        else
                cmd->data_in_desc_cnt = count;

        ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
                                      DMA_TO_DEVICE);

map_complete:
        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->buf_fmt = fmt << 4;
        else
                cmd->buf_fmt = fmt;

        return len;
}

/*
 * Return an IU, and possibly a credit, to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
                          enum srp_iu_type iu_type)
{
        unsigned long flags;

        spin_lock_irqsave(&target->lock, flags);
        list_add(&iu->list, &target->free_tx);
        if (iu_type != SRP_IU_RSP)
                ++target->req_lim;
        spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
                                      enum srp_iu_type iu_type)
{
        s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
        struct srp_iu *iu;

        srp_send_completion(target->send_cq, target);

        if (list_empty(&target->free_tx))
                return NULL;

        /* Initiator responses to target requests do not consume credits */
        if (iu_type != SRP_IU_RSP) {
                if (target->req_lim <= rsv) {
                        ++target->zero_req_lim;
                        return NULL;
                }

                --target->req_lim;
        }

        iu = list_first_entry(&target->free_tx, struct srp_iu, list);
        list_del(&iu->list);
        return iu;
}
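
/*
 * Illustrative sketch (not part of the driver): the calling convention
 * for the TX IU helpers above, mirroring srp_queuecommand() below.  The
 * function name is hypothetical.
 */
static int __maybe_unused srp_tx_iu_example(struct srp_target_port *target)
{
        struct srp_iu *iu;
        unsigned long flags;

        spin_lock_irqsave(&target->lock, flags);
        iu = __srp_get_tx_iu(target, SRP_IU_CMD);       /* consumes a credit */
        spin_unlock_irqrestore(&target->lock, flags);
        if (!iu)
                return SCSI_MLQUEUE_HOST_BUSY;

        /* ... fill in and post the IU; if posting fails: */
        srp_put_tx_iu(target, iu, SRP_IU_CMD);          /* returns the credit */
        return 0;
}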
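/*
 * Post a send work request for @iu.  The IU must have been obtained
 * with __srp_get_tx_iu(); if posting fails, the caller is expected to
 * return it with srp_put_tx_iu().
 */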
static int srp_post_send(struct srp_target_port *target,
                         struct srp_iu *iu, int len)
{
        struct ib_sge list;
        struct ib_send_wr wr, *bad_wr;

        list.addr   = iu->dma;
        list.length = len;
        list.lkey   = target->lkey;

        wr.next       = NULL;
        wr.wr_id      = (uintptr_t) iu;
        wr.sg_list    = &list;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
        struct ib_recv_wr wr, *bad_wr;
        struct ib_sge list;

        list.addr   = iu->dma;
        list.length = iu->size;
        list.lkey   = target->lkey;

        wr.next     = NULL;
        wr.wr_id    = (uintptr_t) iu;
        wr.sg_list  = &list;
        wr.num_sge  = 1;

        return ib_post_recv(target->qp, &wr, &bad_wr);
}

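/*
 * Process an SRP_RSP information unit.  Task-management responses
 * complete target->tsk_mgmt_done; command responses complete the SCSI
 * command whose request-ring index is carried in the tag.
 */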
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
        struct srp_request *req;
        struct scsi_cmnd *scmnd;
        unsigned long flags;

        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
                spin_lock_irqsave(&target->lock, flags);
                target->req_lim += be32_to_cpu(rsp->req_lim_delta);
                spin_unlock_irqrestore(&target->lock, flags);

                target->tsk_mgmt_status = -1;
                if (be32_to_cpu(rsp->resp_data_len) >= 4)
                        target->tsk_mgmt_status = rsp->data[3];
                complete(&target->tsk_mgmt_done);
        } else {
                req = &target->req_ring[rsp->tag];
                scmnd = req->scmnd;
                if (!scmnd) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     "Null scmnd for RSP w/tag %016llx\n",
                                     (unsigned long long) rsp->tag);
                        return;
                }
                scmnd->result = rsp->status;

                if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
                        memcpy(scmnd->sense_buffer, rsp->data +
                               be32_to_cpu(rsp->resp_data_len),
                               min_t(int, be32_to_cpu(rsp->sense_data_len),
                                     SCSI_SENSE_BUFFERSIZE));
                }

                if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
                else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

                srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
                scmnd->host_scribble = NULL;
                scmnd->scsi_done(scmnd);
        }
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
                               void *rsp, int len)
{
        struct ib_device *dev = target->srp_host->srp_dev->dev;
        unsigned long flags;
        struct srp_iu *iu;
        int err;

        spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_delta;
        iu = __srp_get_tx_iu(target, SRP_IU_RSP);
        spin_unlock_irqrestore(&target->lock, flags);

        if (!iu) {
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "no IU available to send response\n");
                return 1;
        }

        ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
        memcpy(iu->buf, rsp, len);
        ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

        err = srp_post_send(target, iu, len);
        if (err) {
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "unable to post response: %d\n", err);
                srp_put_tx_iu(target, iu, SRP_IU_RSP);
        }

        return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
                                 struct srp_cred_req *req)
{
        struct srp_cred_rsp rsp = {
                .opcode = SRP_CRED_RSP,
                .tag = req->tag,
        };
        s32 delta = be32_to_cpu(req->req_lim_delta);

        if (srp_response_common(target, delta, &rsp, sizeof rsp))
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
                                struct srp_aer_req *req)
{
        struct srp_aer_rsp rsp = {
                .opcode = SRP_AER_RSP,
                .tag = req->tag,
        };
        s32 delta = be32_to_cpu(req->req_lim_delta);

        shost_printk(KERN_ERR, target->scsi_host, PFX
                     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

        if (srp_response_common(target, delta, &rsp, sizeof rsp))
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "problems processing SRP_AER_REQ\n");
}

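/*
 * Dispatch one received IU by SRP opcode, then repost the receive
 * buffer so the receive queue stays full.
 */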
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
        struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
        int res;
        u8 opcode;

        ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
                                   DMA_FROM_DEVICE);

        opcode = *(u8 *) iu->buf;

        if (0) {
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "recv completion, opcode 0x%02x\n", opcode);
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
                               iu->buf, wc->byte_len, true);
        }

        switch (opcode) {
        case SRP_RSP:
                srp_process_rsp(target, iu->buf);
                break;

        case SRP_CRED_REQ:
                srp_process_cred_req(target, iu->buf);
                break;

        case SRP_AER_REQ:
                srp_process_aer_req(target, iu->buf);
                break;

        case SRP_T_LOGOUT:
                /* XXX Handle target logout */
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Got target logout request\n");
                break;

        default:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled SRP opcode 0x%02x\n", opcode);
                break;
        }

        ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
                                      DMA_FROM_DEVICE);

        res = srp_post_recv(target, iu);
        if (res != 0)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Recv failed with error code %d\n", res);
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
        struct srp_target_port *target = target_ptr;
        struct ib_wc wc;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "failed receive status %d\n",
                                     wc.status);
                        target->qp_in_error = 1;
                        break;
                }

                srp_handle_recv(target, &wc);
        }
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
        struct srp_target_port *target = target_ptr;
        struct ib_wc wc;
        struct srp_iu *iu;

        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "failed send status %d\n",
                                     wc.status);
                        target->qp_in_error = 1;
                        break;
                }

                iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
                list_add(&iu->list, &target->free_tx);
        }
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
        struct srp_target_port *target = host_to_target(shost);
        struct srp_request *req;
        struct srp_iu *iu;
        struct srp_cmd *cmd;
        struct ib_device *dev;
        unsigned long flags;
        int len;

        if (target->state == SRP_TARGET_CONNECTING)
                goto err;

        if (target->state == SRP_TARGET_DEAD ||
            target->state == SRP_TARGET_REMOVED) {
                scmnd->result = DID_BAD_TARGET << 16;
                scmnd->scsi_done(scmnd);
                return 0;
        }

        spin_lock_irqsave(&target->lock, flags);
        iu = __srp_get_tx_iu(target, SRP_IU_CMD);
        if (!iu)
                goto err_unlock;

        req = list_first_entry(&target->free_reqs, struct srp_request, list);
        list_del(&req->list);
        spin_unlock_irqrestore(&target->lock, flags);

        dev = target->srp_host->srp_dev->dev;
        ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
                                   DMA_TO_DEVICE);

        scmnd->result        = 0;
        scmnd->host_scribble = (void *) req;

        cmd = iu->buf;
        memset(cmd, 0, sizeof *cmd);

        cmd->opcode = SRP_CMD;
        cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
        cmd->tag    = req->index;
        memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

        req->scmnd    = scmnd;
        req->cmd      = iu;

        len = srp_map_data(scmnd, target, req);
        if (len < 0) {
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Failed to map data\n");
                goto err_iu;
        }

        ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
                                      DMA_TO_DEVICE);

        if (srp_post_send(target, iu, len)) {
                shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
                goto err_unmap;
        }

        return 0;

err_unmap:
        srp_unmap_data(scmnd, target, req);

err_iu:
        srp_put_tx_iu(target, iu, SRP_IU_CMD);

        spin_lock_irqsave(&target->lock, flags);
        list_add(&req->list, &target->free_reqs);

err_unlock:
        spin_unlock_irqrestore(&target->lock, flags);

err:
        return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
        int i;

        for (i = 0; i < SRP_RQ_SIZE; ++i) {
                target->rx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  target->max_ti_iu_len,
                                                  GFP_KERNEL, DMA_FROM_DEVICE);
                if (!target->rx_ring[i])
                        goto err;
        }

        for (i = 0; i < SRP_SQ_SIZE; ++i) {
                target->tx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  target->max_iu_len,
                                                  GFP_KERNEL, DMA_TO_DEVICE);
                if (!target->tx_ring[i])
                        goto err;

                list_add(&target->tx_ring[i]->list, &target->free_tx);
        }

        return 0;

err:
        for (i = 0; i < SRP_RQ_SIZE; ++i) {
                srp_free_iu(target->srp_host, target->rx_ring[i]);
                target->rx_ring[i] = NULL;
        }

        for (i = 0; i < SRP_SQ_SIZE; ++i) {
                srp_free_iu(target->srp_host, target->tx_ring[i]);
                target->tx_ring[i] = NULL;
        }

        return -ENOMEM;
}

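/*
 * Handle a connection REP: consume the SRP_LOGIN_RSP, clamp can_queue
 * so credits stay reserved for task management, allocate the IU rings
 * on the first connect, move the QP through RTR and RTS, post all
 * receive buffers and finally send the RTU.
 */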
1357static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1358                               struct srp_login_rsp *lrsp,
1359                               struct srp_target_port *target)
1360{
1361        struct ib_qp_attr *qp_attr = NULL;
1362        int attr_mask = 0;
1363        int ret;
1364        int i;
1365
1366        if (lrsp->opcode == SRP_LOGIN_RSP) {
1367                target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1368                target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
1369
1370                /*
1371                 * Reserve credits for task management so we don't
1372                 * bounce requests back to the SCSI mid-layer.
1373                 */
1374                target->scsi_host->can_queue
1375                        = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1376                              target->scsi_host->can_queue);
1377        } else {
1378                shost_printk(KERN_WARNING, target->scsi_host,
1379                             PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1380                ret = -ECONNRESET;
1381                goto error;
1382        }
1383
1384        if (!target->rx_ring[0]) {
1385                ret = srp_alloc_iu_bufs(target);
1386                if (ret)
1387                        goto error;
1388        }
1389
1390        ret = -ENOMEM;
1391        qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1392        if (!qp_attr)
1393                goto error;
1394
1395        qp_attr->qp_state = IB_QPS_RTR;
1396        ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1397        if (ret)
1398                goto error_free;
1399
1400        ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1401        if (ret)
1402                goto error_free;
1403
1404        for (i = 0; i < SRP_RQ_SIZE; i++) {
1405                struct srp_iu *iu = target->rx_ring[i];
1406                ret = srp_post_recv(target, iu);
1407                if (ret)
1408                        goto error_free;
1409        }
1410
1411        qp_attr->qp_state = IB_QPS_RTS;
1412        ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1413        if (ret)
1414                goto error_free;
1415
1416        ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1417        if (ret)
1418                goto error_free;
1419
1420        ret = ib_send_cm_rtu(cm_id, NULL, 0);
1421
1422error_free:
1423        kfree(qp_attr);
1424
1425error:
1426        target->status = ret;
1427}
1428
1429static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1430                               struct ib_cm_event *event,
1431                               struct srp_target_port *target)
1432{
1433        struct Scsi_Host *shost = target->scsi_host;
1434        struct ib_class_port_info *cpi;
1435        int opcode;
1436
1437        switch (event->param.rej_rcvd.reason) {
1438        case IB_CM_REJ_PORT_CM_REDIRECT:
1439                cpi = event->param.rej_rcvd.ari;
1440                target->path.dlid = cpi->redirect_lid;
1441                target->path.pkey = cpi->redirect_pkey;
1442                cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1443                memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1444
1445                target->status = target->path.dlid ?
1446                        SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1447                break;
1448
1449        case IB_CM_REJ_PORT_REDIRECT:
1450                if (srp_target_is_topspin(target)) {
1451                        /*
1452                         * Topspin/Cisco SRP gateways incorrectly send
1453                         * reject reason code 25 when they mean 24
1454                         * (port redirect).
1455                         */
1456                        memcpy(target->path.dgid.raw,
1457                               event->param.rej_rcvd.ari, 16);
1458
1459                        shost_printk(KERN_DEBUG, shost,
1460                                     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1461                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1462                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1463
1464                        target->status = SRP_PORT_REDIRECT;
1465                } else {
1466                        shost_printk(KERN_WARNING, shost,
1467                                     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1468                        target->status = -ECONNRESET;
1469                }
1470                break;
1471
1472        case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1473                shost_printk(KERN_WARNING, shost,
1474                             "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1475                target->status = -ECONNRESET;
1476                break;
1477
1478        case IB_CM_REJ_CONSUMER_DEFINED:
1479                opcode = *(u8 *) event->private_data;
1480                if (opcode == SRP_LOGIN_REJ) {
1481                        struct srp_login_rej *rej = event->private_data;
1482                        u32 reason = be32_to_cpu(rej->reason);
1483
1484                        if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
1485                                shost_printk(KERN_WARNING, shost,
1486                                             PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1487                        else
1488                                shost_printk(KERN_WARNING, shost,
1489                                             PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
1490                } else {
1491                        shost_printk(KERN_WARNING, shost,
1492                                     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n", opcode);
1493                }
1494                target->status = -ECONNRESET;
1495                break;
1496
1497        case IB_CM_REJ_STALE_CONN:
1498                shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
1499                target->status = SRP_STALE_CONN;
1500                break;
1501
1502        default:
1503                shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
1504                             event->param.rej_rcvd.reason);
1505                target->status = -ECONNRESET;
1506        }
1507}
1508
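    /*
     * Connection manager callback.  Most events just record a result in
     * target->status; 'comp' marks the events that complete a wait in
     * the connect path, which sleeps on target->done.
     */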
1509static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1510{
1511        struct srp_target_port *target = cm_id->context;
1512        int comp = 0;
1513
1514        switch (event->event) {
1515        case IB_CM_REQ_ERROR:
1516                shost_printk(KERN_DEBUG, target->scsi_host,
1517                             PFX "Sending CM REQ failed\n");
1518                comp = 1;
1519                target->status = -ECONNRESET;
1520                break;
1521
1522        case IB_CM_REP_RECEIVED:
1523                comp = 1;
1524                srp_cm_rep_handler(cm_id, event->private_data, target);
1525                break;
1526
1527        case IB_CM_REJ_RECEIVED:
1528                shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
1529                comp = 1;
1530
1531                srp_cm_rej_handler(cm_id, event, target);
1532                break;
1533
1534        case IB_CM_DREQ_RECEIVED:
1535                shost_printk(KERN_WARNING, target->scsi_host,
1536                             PFX "DREQ received - connection closed\n");
1537                if (ib_send_cm_drep(cm_id, NULL, 0))
1538                        shost_printk(KERN_ERR, target->scsi_host,
1539                                     PFX "Sending CM DREP failed\n");
1540                break;
1541
1542        case IB_CM_TIMEWAIT_EXIT:
1543                shost_printk(KERN_ERR, target->scsi_host,
1544                             PFX "connection closed\n");
1545
1546                comp = 1;
1547                target->status = 0;
1548                break;
1549
1550        case IB_CM_MRA_RECEIVED:
1551        case IB_CM_DREQ_ERROR:
1552        case IB_CM_DREP_RECEIVED:
1553                break;
1554
1555        default:
1556                shost_printk(KERN_WARNING, target->scsi_host,
1557                             PFX "Unhandled CM event %d\n", event->event);
1558                break;
1559        }
1560
1561        if (comp)
1562                complete(&target->done);
1563
1564        return 0;
1565}
1566
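    /*
     * Build and post an SRP TSK_MGMT information unit and wait up to
     * SRP_ABORT_TIMEOUT_MS for the response.  Setting SRP_TAG_TSK_MGMT
     * in the tag keeps management responses distinguishable from normal
     * command responses; the LUN is encoded in the top 16 bits of the
     * eight-byte SCSI LUN field (single-level peripheral addressing).
     * Returns 0 on success and -1 on any failure.
     */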
1567static int srp_send_tsk_mgmt(struct srp_target_port *target,
1568                             u64 req_tag, unsigned int lun, u8 func)
1569{
1570        struct ib_device *dev = target->srp_host->srp_dev->dev;
1571        struct srp_iu *iu;
1572        struct srp_tsk_mgmt *tsk_mgmt;
1573
1574        if (target->state == SRP_TARGET_DEAD ||
1575            target->state == SRP_TARGET_REMOVED)
1576                return -1;
1577
1578        init_completion(&target->tsk_mgmt_done);
1579
1580        spin_lock_irq(&target->lock);
1581        iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1582        spin_unlock_irq(&target->lock);
1583
1584        if (!iu)
1585                return -1;
1586
1587        ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1588                                   DMA_TO_DEVICE);
1589        tsk_mgmt = iu->buf;
1590        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1591
1592        tsk_mgmt->opcode        = SRP_TSK_MGMT;
1593        tsk_mgmt->lun           = cpu_to_be64((u64) lun << 48);
1594        tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
1595        tsk_mgmt->tsk_mgmt_func = func;
1596        tsk_mgmt->task_tag      = req_tag;
1597
1598        ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1599                                      DMA_TO_DEVICE);
1600        if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1601                srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1602                return -1;
1603        }
1604
1605        if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
1606                                         msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1607                return -1;
1608
1609        return 0;
1610}
1611
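    /*
     * SCSI error-handler entry points, called by the midlayer in order
     * of escalation: abort a single task, reset one LUN, then reset the
     * whole host by reconnecting to the target.
     */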
1612static int srp_abort(struct scsi_cmnd *scmnd)
1613{
1614        struct srp_target_port *target = host_to_target(scmnd->device->host);
1615        struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
1616        int ret = SUCCESS;
1617
1618        shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
1619
1620        if (!req || target->qp_in_error)
1621                return FAILED;
1622        if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1623                              SRP_TSK_ABORT_TASK))
1624                return FAILED;
1625
1626        if (req->scmnd) {
1627                if (!target->tsk_mgmt_status) {
1628                        srp_remove_req(target, req, 0);
1629                        scmnd->result = DID_ABORT << 16;
1630                } else
1631                        ret = FAILED;
1632        }
1633
1634        return ret;
1635}
1636
1637static int srp_reset_device(struct scsi_cmnd *scmnd)
1638{
1639        struct srp_target_port *target = host_to_target(scmnd->device->host);
1640        int i;
1641
1642        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1643
1644        if (target->qp_in_error)
1645                return FAILED;
1646        if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1647                              SRP_TSK_LUN_RESET))
1648                return FAILED;
1649        if (target->tsk_mgmt_status)
1650                return FAILED;
1651
1652        for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1653                struct srp_request *req = &target->req_ring[i];
1654                if (req->scmnd && req->scmnd->device == scmnd->device)
1655                        srp_reset_req(target, req);
1656        }
1657
1658        return SUCCESS;
1659}
1660
1661static int srp_reset_host(struct scsi_cmnd *scmnd)
1662{
1663        struct srp_target_port *target = host_to_target(scmnd->device->host);
1664        int ret = FAILED;
1665
1666        shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
1667
1668        if (!srp_reconnect_target(target))
1669                ret = SUCCESS;
1670
1671        return ret;
1672}
1673
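    /*
     * Read-only sysfs attributes of the SCSI host.  Most of these
     * refuse reads with -ENODEV once the target port is dead or has
     * been removed.
     */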
1674static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1675                           char *buf)
1676{
1677        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1678
1679        if (target->state == SRP_TARGET_DEAD ||
1680            target->state == SRP_TARGET_REMOVED)
1681                return -ENODEV;
1682
1683        return sprintf(buf, "0x%016llx\n",
1684                       (unsigned long long) be64_to_cpu(target->id_ext));
1685}
1686
1687static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1688                             char *buf)
1689{
1690        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1691
1692        if (target->state == SRP_TARGET_DEAD ||
1693            target->state == SRP_TARGET_REMOVED)
1694                return -ENODEV;
1695
1696        return sprintf(buf, "0x%016llx\n",
1697                       (unsigned long long) be64_to_cpu(target->ioc_guid));
1698}
1699
1700static ssize_t show_service_id(struct device *dev,
1701                               struct device_attribute *attr, char *buf)
1702{
1703        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1704
1705        if (target->state == SRP_TARGET_DEAD ||
1706            target->state == SRP_TARGET_REMOVED)
1707                return -ENODEV;
1708
1709        return sprintf(buf, "0x%016llx\n",
1710                       (unsigned long long) be64_to_cpu(target->service_id));
1711}
1712
1713static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1714                         char *buf)
1715{
1716        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1717
1718        if (target->state == SRP_TARGET_DEAD ||
1719            target->state == SRP_TARGET_REMOVED)
1720                return -ENODEV;
1721
1722        return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1723}
1724
1725static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1726                         char *buf)
1727{
1728        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1729
1730        if (target->state == SRP_TARGET_DEAD ||
1731            target->state == SRP_TARGET_REMOVED)
1732                return -ENODEV;
1733
1734        return sprintf(buf, "%pI6\n", target->path.dgid.raw);
1735}
1736
1737static ssize_t show_orig_dgid(struct device *dev,
1738                              struct device_attribute *attr, char *buf)
1739{
1740        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1741
1742        if (target->state == SRP_TARGET_DEAD ||
1743            target->state == SRP_TARGET_REMOVED)
1744                return -ENODEV;
1745
1746        return sprintf(buf, "%pI6\n", target->orig_dgid);
1747}
1748
1749static ssize_t show_req_lim(struct device *dev,
1750                            struct device_attribute *attr, char *buf)
1751{
1752        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1753
1754        if (target->state == SRP_TARGET_DEAD ||
1755            target->state == SRP_TARGET_REMOVED)
1756                return -ENODEV;
1757
1758        return sprintf(buf, "%d\n", target->req_lim);
1759}
1760
1761static ssize_t show_zero_req_lim(struct device *dev,
1762                                 struct device_attribute *attr, char *buf)
1763{
1764        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1765
1766        if (target->state == SRP_TARGET_DEAD ||
1767            target->state == SRP_TARGET_REMOVED)
1768                return -ENODEV;
1769
1770        return sprintf(buf, "%d\n", target->zero_req_lim);
1771}
1772
1773static ssize_t show_local_ib_port(struct device *dev,
1774                                  struct device_attribute *attr, char *buf)
1775{
1776        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1777
1778        return sprintf(buf, "%d\n", target->srp_host->port);
1779}
1780
1781static ssize_t show_local_ib_device(struct device *dev,
1782                                    struct device_attribute *attr, char *buf)
1783{
1784        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1785
1786        return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
1787}
1788
1789static ssize_t show_cmd_sg_entries(struct device *dev,
1790                                   struct device_attribute *attr, char *buf)
1791{
1792        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1793
1794        return sprintf(buf, "%u\n", target->cmd_sg_cnt);
1795}
1796
1797static ssize_t show_allow_ext_sg(struct device *dev,
1798                                 struct device_attribute *attr, char *buf)
1799{
1800        struct srp_target_port *target = host_to_target(class_to_shost(dev));
1801
1802        return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
1803}
1804
1805static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
1806static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
1807static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
1808static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
1809static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
1810static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
1811static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
1812static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
1813static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
1814static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
1815static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
1816static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
1817
1818static struct device_attribute *srp_host_attrs[] = {
1819        &dev_attr_id_ext,
1820        &dev_attr_ioc_guid,
1821        &dev_attr_service_id,
1822        &dev_attr_pkey,
1823        &dev_attr_dgid,
1824        &dev_attr_orig_dgid,
1825        &dev_attr_req_lim,
1826        &dev_attr_zero_req_lim,
1827        &dev_attr_local_ib_port,
1828        &dev_attr_local_ib_device,
1829        &dev_attr_cmd_sg_entries,
1830        &dev_attr_allow_ext_sg,
1831        NULL
1832};
1833
1834static struct scsi_host_template srp_template = {
1835        .module                         = THIS_MODULE,
1836        .name                           = "InfiniBand SRP initiator",
1837        .proc_name                      = DRV_NAME,
1838        .info                           = srp_target_info,
1839        .queuecommand                   = srp_queuecommand,
1840        .eh_abort_handler               = srp_abort,
1841        .eh_device_reset_handler        = srp_reset_device,
1842        .eh_host_reset_handler          = srp_reset_host,
1843        .sg_tablesize                   = SRP_DEF_SG_TABLESIZE,
1844        .can_queue                      = SRP_CMD_SQ_SIZE,
1845        .this_id                        = -1,
1846        .cmd_per_lun                    = SRP_CMD_SQ_SIZE,
1847        .use_clustering                 = ENABLE_CLUSTERING,
1848        .shost_attrs                    = srp_host_attrs
1849};
1850
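    /*
     * Register a newly connected target port with the SCSI midlayer and
     * the SRP transport class (the rport ID is id_ext followed by
     * ioc_guid), then mark it live and scan it for LUNs.
     */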
1851static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
1852{
1853        struct srp_rport_identifiers ids;
1854        struct srp_rport *rport;
1855
1856        sprintf(target->target_name, "SRP.T10:%016llX",
1857                 (unsigned long long) be64_to_cpu(target->id_ext));
1858
1859        if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
1860                return -ENODEV;
1861
1862        memcpy(ids.port_id, &target->id_ext, 8);
1863        memcpy(ids.port_id + 8, &target->ioc_guid, 8);
1864        ids.roles = SRP_RPORT_ROLE_TARGET;
1865        rport = srp_rport_add(target->scsi_host, &ids);
1866        if (IS_ERR(rport)) {
1867                scsi_remove_host(target->scsi_host);
1868                return PTR_ERR(rport);
1869        }
1870
1871        spin_lock(&host->target_lock);
1872        list_add_tail(&target->list, &host->target_list);
1873        spin_unlock(&host->target_lock);
1874
1875        target->state = SRP_TARGET_LIVE;
1876
1877        scsi_scan_target(&target->scsi_host->shost_gendev,
1878                         0, target->scsi_id, SCAN_WILD_CARD, 0);
1879
1880        return 0;
1881}
1882
1883static void srp_release_dev(struct device *dev)
1884{
1885        struct srp_host *host =
1886                container_of(dev, struct srp_host, dev);
1887
1888        complete(&host->released);
1889}
1890
1891static struct class srp_class = {
1892        .name    = "infiniband_srp",
1893        .dev_release = srp_release_dev
1894};
1895
1896/*
1897 * Target ports are added by writing
1898 *
1899 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
1900 *     pkey=<P_Key>,service_id=<service ID>
1901 *
1902 * to the add_target sysfs attribute.
1903 */
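    /*
     * For example (all identifiers below are placeholders, and the
     * "srp-mlx4_0-1" device name is only illustrative; the real name is
     * assigned in srp_add_port() below):
     *
     *     echo -n "id_ext=<ext>,ioc_guid=<guid>,dgid=<gid>,pkey=<pkey>,service_id=<id>" \
     *         > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
     */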
1904enum {
1905        SRP_OPT_ERR             = 0,
1906        SRP_OPT_ID_EXT          = 1 << 0,
1907        SRP_OPT_IOC_GUID        = 1 << 1,
1908        SRP_OPT_DGID            = 1 << 2,
1909        SRP_OPT_PKEY            = 1 << 3,
1910        SRP_OPT_SERVICE_ID      = 1 << 4,
1911        SRP_OPT_MAX_SECT        = 1 << 5,
1912        SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
1913        SRP_OPT_IO_CLASS        = 1 << 7,
1914        SRP_OPT_INITIATOR_EXT   = 1 << 8,
1915        SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
1916        SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
1917        SRP_OPT_SG_TABLESIZE    = 1 << 11,
1918        SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
1919                                   SRP_OPT_IOC_GUID     |
1920                                   SRP_OPT_DGID         |
1921                                   SRP_OPT_PKEY         |
1922                                   SRP_OPT_SERVICE_ID),
1923};
1924
1925static const match_table_t srp_opt_tokens = {
1926        { SRP_OPT_ID_EXT,               "id_ext=%s"             },
1927        { SRP_OPT_IOC_GUID,             "ioc_guid=%s"           },
1928        { SRP_OPT_DGID,                 "dgid=%s"               },
1929        { SRP_OPT_PKEY,                 "pkey=%x"               },
1930        { SRP_OPT_SERVICE_ID,           "service_id=%s"         },
1931        { SRP_OPT_MAX_SECT,             "max_sect=%d"           },
1932        { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
1933        { SRP_OPT_IO_CLASS,             "io_class=%x"           },
1934        { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
1935        { SRP_OPT_CMD_SG_ENTRIES,       "cmd_sg_entries=%u"     },
1936        { SRP_OPT_ALLOW_EXT_SG,         "allow_ext_sg=%u"       },
1937        { SRP_OPT_SG_TABLESIZE,         "sg_tablesize=%u"       },
1938        { SRP_OPT_ERR,                  NULL                    }
1939};
1940
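    /*
     * Parse the comma-separated option string written to add_target.
     * Each option is matched against srp_opt_tokens; opt_mask records
     * which ones were seen so that the mandatory SRP_OPT_ALL set can be
     * checked at the end.
     */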
1941static int srp_parse_options(const char *buf, struct srp_target_port *target)
1942{
1943        char *options, *sep_opt;
1944        char *p;
1945        char dgid[3];
1946        substring_t args[MAX_OPT_ARGS];
1947        int opt_mask = 0;
1948        int token;
1949        int ret = -EINVAL;
1950        int i;
1951
1952        options = kstrdup(buf, GFP_KERNEL);
1953        if (!options)
1954                return -ENOMEM;
1955
1956        sep_opt = options;
1957        while ((p = strsep(&sep_opt, ",")) != NULL) {
1958                if (!*p)
1959                        continue;
1960
1961                token = match_token(p, srp_opt_tokens, args);
1962                opt_mask |= token;
1963
1964                switch (token) {
1965                case SRP_OPT_ID_EXT:
1966                        p = match_strdup(args);
1967                        if (!p) {
1968                                ret = -ENOMEM;
1969                                goto out;
1970                        }
1971                        target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1972                        kfree(p);
1973                        break;
1974
1975                case SRP_OPT_IOC_GUID:
1976                        p = match_strdup(args);
1977                        if (!p) {
1978                                ret = -ENOMEM;
1979                                goto out;
1980                        }
1981                        target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
1982                        kfree(p);
1983                        break;
1984
1985                case SRP_OPT_DGID:
1986                        p = match_strdup(args);
1987                        if (!p) {
1988                                ret = -ENOMEM;
1989                                goto out;
1990                        }
1991                        if (strlen(p) != 32) {
1992                                printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
1993                                kfree(p);
1994                                goto out;
1995                        }
1996
1997                        for (i = 0; i < 16; ++i) {
1998                                strlcpy(dgid, p + i * 2, 3);
1999                                target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2000                        }
2001                        kfree(p);
2002                        memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2003                        break;
2004
2005                case SRP_OPT_PKEY:
2006                        if (match_hex(args, &token)) {
2007                                printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
2008                                goto out;
2009                        }
2010                        target->path.pkey = cpu_to_be16(token);
2011                        break;
2012
2013                case SRP_OPT_SERVICE_ID:
2014                        p = match_strdup(args);
2015                        if (!p) {
2016                                ret = -ENOMEM;
2017                                goto out;
2018                        }
2019                        target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2020                        target->path.service_id = target->service_id;
2021                        kfree(p);
2022                        break;
2023
2024                case SRP_OPT_MAX_SECT:
2025                        if (match_int(args, &token)) {
2026                                printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
2027                                goto out;
2028                        }
2029                        target->scsi_host->max_sectors = token;
2030                        break;
2031
2032                case SRP_OPT_MAX_CMD_PER_LUN:
2033                        if (match_int(args, &token)) {
2034                                printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
2035                                goto out;
2036                        }
2037                        target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
2038                        break;
2039
2040                case SRP_OPT_IO_CLASS:
2041                        if (match_hex(args, &token)) {
2042                                printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
2043                                goto out;
2044                        }
2045                        if (token != SRP_REV10_IB_IO_CLASS &&
2046                            token != SRP_REV16A_IB_IO_CLASS) {
2047                                printk(KERN_WARNING PFX
2048                                       "unknown IO class parameter value %x specified (use %x or %x).\n",
2049                                       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
2050                                goto out;
2051                        }
2052                        target->io_class = token;
2053                        break;
2054
2055                case SRP_OPT_INITIATOR_EXT:
2056                        p = match_strdup(args);
2057                        if (!p) {
2058                                ret = -ENOMEM;
2059                                goto out;
2060                        }
2061                        target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2062                        kfree(p);
2063                        break;
2064
2065                case SRP_OPT_CMD_SG_ENTRIES:
2066                        if (match_int(args, &token) || token < 1 || token > 255) {
2067                                printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p);
2068                                goto out;
2069                        }
2070                        target->cmd_sg_cnt = token;
2071                        break;
2072
2073                case SRP_OPT_ALLOW_EXT_SG:
2074                        if (match_int(args, &token)) {
2075                                printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p);
2076                                goto out;
2077                        }
2078                        target->allow_ext_sg = !!token;
2079                        break;
2080
2081                case SRP_OPT_SG_TABLESIZE:
2082                        if (match_int(args, &token) || token < 1 ||
2083                                        token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
2084                                printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p);
2085                                goto out;
2086                        }
2087                        target->sg_tablesize = token;
2088                        break;
2089
2090                default:
2091                        printk(KERN_WARNING PFX
2092                               "unknown parameter or missing value '%s' in target creation request\n", p);
2093                        goto out;
2094                }
2095        }
2096
2097        if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2098                ret = 0;
2099        else
2100                for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2101                        if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2102                            !(srp_opt_tokens[i].token & opt_mask))
2103                                printk(KERN_WARNING PFX
2104                                       "target creation request is missing parameter '%s'\n",
2105                                       srp_opt_tokens[i].pattern);
2106
2107out:
2108        kfree(options);
2109        return ret;
2110}
2111
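    /*
     * add_target store method: allocate a SCSI host and target port,
     * parse the options, set up the per-request FMR lists and indirect
     * descriptor buffers, create the IB resources and CM ID, connect,
     * and finally register the target with the midlayer.
     */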
2112static ssize_t srp_create_target(struct device *dev,
2113                                 struct device_attribute *attr,
2114                                 const char *buf, size_t count)
2115{
2116        struct srp_host *host =
2117                container_of(dev, struct srp_host, dev);
2118        struct Scsi_Host *target_host;
2119        struct srp_target_port *target;
2120        struct ib_device *ibdev = host->srp_dev->dev;
2121        dma_addr_t dma_addr;
2122        int i, ret;
2123
2124        target_host = scsi_host_alloc(&srp_template,
2125                                      sizeof (struct srp_target_port));
2126        if (!target_host)
2127                return -ENOMEM;
2128
2129        target_host->transportt  = ib_srp_transport_template;
2130        target_host->max_channel = 0;
2131        target_host->max_id      = 1;
2132        target_host->max_lun     = SRP_MAX_LUN;
2133        target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
2134
2135        target = host_to_target(target_host);
2136
2137        target->io_class        = SRP_REV16A_IB_IO_CLASS;
2138        target->scsi_host       = target_host;
2139        target->srp_host        = host;
2140        target->lkey            = host->srp_dev->mr->lkey;
2141        target->rkey            = host->srp_dev->mr->rkey;
2142        target->cmd_sg_cnt      = cmd_sg_entries;
2143        target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
2144        target->allow_ext_sg    = allow_ext_sg;
2145
2146        ret = srp_parse_options(buf, target);
2147        if (ret)
2148                goto err;
2149
2150        if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2151                                target->cmd_sg_cnt < target->sg_tablesize) {
2152                printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2153                target->sg_tablesize = target->cmd_sg_cnt;
2154        }
2155
2156        target_host->sg_tablesize = target->sg_tablesize;
2157        target->indirect_size = target->sg_tablesize *
2158                                sizeof (struct srp_direct_buf);
2159        target->max_iu_len = sizeof (struct srp_cmd) +
2160                             sizeof (struct srp_indirect_buf) +
2161                             target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2162
2163        spin_lock_init(&target->lock);
2164        INIT_LIST_HEAD(&target->free_tx);
2165        INIT_LIST_HEAD(&target->free_reqs);
2166        for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
2167                struct srp_request *req = &target->req_ring[i];
2168
2169                req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
2170                                        GFP_KERNEL);
2171                req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
2172                                        GFP_KERNEL);
2173                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
2174                if (!req->fmr_list || !req->map_page || !req->indirect_desc)
2175                        goto err_free_mem;
2176
2177                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
2178                                             target->indirect_size,
2179                                             DMA_TO_DEVICE);
2180                if (ib_dma_mapping_error(ibdev, dma_addr))
2181                        goto err_free_mem;
2182
2183                req->indirect_dma_addr = dma_addr;
2184                req->index = i;
2185                list_add_tail(&req->list, &target->free_reqs);
2186        }
2187
2188        ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
2189
2190        shost_printk(KERN_DEBUG, target->scsi_host, PFX
2191                     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2192                     "service_id %016llx dgid %pI6\n",
2193                     (unsigned long long) be64_to_cpu(target->id_ext),
2194                     (unsigned long long) be64_to_cpu(target->ioc_guid),
2195                     be16_to_cpu(target->path.pkey),
2196                     (unsigned long long) be64_to_cpu(target->service_id),
2197                     target->path.dgid.raw);
2198
2199        ret = srp_create_target_ib(target);
2200        if (ret)
2201                goto err_free_mem;
2202
2203        ret = srp_new_cm_id(target);
2204        if (ret)
2205                goto err_free_ib;
2206
2207        target->qp_in_error = 0;
2208        ret = srp_connect_target(target);
2209        if (ret) {
2210                shost_printk(KERN_ERR, target->scsi_host,
2211                             PFX "Connection failed\n");
2212                goto err_cm_id;
2213        }
2214
2215        ret = srp_add_target(host, target);
2216        if (ret)
2217                goto err_disconnect;
2218
2219        return count;
2220
2221err_disconnect:
2222        srp_disconnect_target(target);
2223
2224err_cm_id:
2225        ib_destroy_cm_id(target->cm_id);
2226
2227err_free_ib:
2228        srp_free_target_ib(target);
2229
2230err_free_mem:
2231        srp_free_req_data(target);
2232
2233err:
2234        scsi_host_put(target_host);
2235
2236        return ret;
2237}
2238
2239static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2240
2241static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2242                          char *buf)
2243{
2244        struct srp_host *host = container_of(dev, struct srp_host, dev);
2245
2246        return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2247}
2248
2249static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2250
2251static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2252                         char *buf)
2253{
2254        struct srp_host *host = container_of(dev, struct srp_host, dev);
2255
2256        return sprintf(buf, "%d\n", host->port);
2257}
2258
2259static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
2260
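    /*
     * Create one srp_host per HCA port and expose it as a class device
     * with the add_target, ibdev and port attributes.
     */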
2261static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2262{
2263        struct srp_host *host;
2264
2265        host = kzalloc(sizeof *host, GFP_KERNEL);
2266        if (!host)
2267                return NULL;
2268
2269        INIT_LIST_HEAD(&host->target_list);
2270        spin_lock_init(&host->target_lock);
2271        init_completion(&host->released);
2272        host->srp_dev = device;
2273        host->port = port;
2274
2275        host->dev.class = &srp_class;
2276        host->dev.parent = device->dev->dma_device;
2277        dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2278
2279        if (device_register(&host->dev))
2280                goto free_host;
2281        if (device_create_file(&host->dev, &dev_attr_add_target))
2282                goto err_class;
2283        if (device_create_file(&host->dev, &dev_attr_ibdev))
2284                goto err_class;
2285        if (device_create_file(&host->dev, &dev_attr_port))
2286                goto err_class;
2287
2288        return host;
2289
2290err_class:
2291        device_unregister(&host->dev);
2292
2293free_host:
2294        kfree(host);
2295
2296        return NULL;
2297}
2298
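    /*
     * IB client "add" callback: set up per-device state (PD, DMA MR
     * and, when possible, an FMR pool, halving max_pages_per_fmr until
     * pool creation succeeds), then add an srp_host for each port.
     * Switches only expose the management port 0.
     */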
2299static void srp_add_one(struct ib_device *device)
2300{
2301        struct srp_device *srp_dev;
2302        struct ib_device_attr *dev_attr;
2303        struct ib_fmr_pool_param fmr_param;
2304        struct srp_host *host;
2305        int max_pages_per_fmr, fmr_page_shift, s, e, p;
2306
2307        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2308        if (!dev_attr)
2309                return;
2310
2311        if (ib_query_device(device, dev_attr)) {
2312                printk(KERN_WARNING PFX "Query device failed for %s\n",
2313                       device->name);
2314                goto free_attr;
2315        }
2316
2317        srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2318        if (!srp_dev)
2319                goto free_attr;
2320
2321        /*
2322         * Use the smallest page size supported by the HCA, down to a
2323         * minimum of 4096 bytes. We're unlikely to build large sglists
2324         * out of smaller entries.
2325         */
2326        fmr_page_shift          = max(12, ffs(dev_attr->page_size_cap) - 1);
2327        srp_dev->fmr_page_size  = 1 << fmr_page_shift;
2328        srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
2329        srp_dev->fmr_max_size   = srp_dev->fmr_page_size * SRP_FMR_SIZE;
2330
2331        INIT_LIST_HEAD(&srp_dev->dev_list);
2332
2333        srp_dev->dev = device;
2334        srp_dev->pd  = ib_alloc_pd(device);
2335        if (IS_ERR(srp_dev->pd))
2336                goto free_dev;
2337
2338        srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2339                                    IB_ACCESS_LOCAL_WRITE |
2340                                    IB_ACCESS_REMOTE_READ |
2341                                    IB_ACCESS_REMOTE_WRITE);
2342        if (IS_ERR(srp_dev->mr))
2343                goto err_pd;
2344
2345        for (max_pages_per_fmr = SRP_FMR_SIZE;
2346                        max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2347                        max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2348                memset(&fmr_param, 0, sizeof fmr_param);
2349                fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
2350                fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
2351                fmr_param.cache             = 1;
2352                fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2353                fmr_param.page_shift        = fmr_page_shift;
2354                fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
2355                                               IB_ACCESS_REMOTE_WRITE |
2356                                               IB_ACCESS_REMOTE_READ);
2357
2358                srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2359                if (!IS_ERR(srp_dev->fmr_pool))
2360                        break;
2361        }
2362
2363        if (IS_ERR(srp_dev->fmr_pool))
2364                srp_dev->fmr_pool = NULL;
2365
2366        if (device->node_type == RDMA_NODE_IB_SWITCH) {
2367                s = 0;
2368                e = 0;
2369        } else {
2370                s = 1;
2371                e = device->phys_port_cnt;
2372        }
2373
2374        for (p = s; p <= e; ++p) {
2375                host = srp_add_port(srp_dev, p);
2376                if (host)
2377                        list_add_tail(&host->list, &srp_dev->dev_list);
2378        }
2379
2380        ib_set_client_data(device, &srp_client, srp_dev);
2381
2382        goto free_attr;
2383
2384err_pd:
2385        ib_dealloc_pd(srp_dev->pd);
2386
2387free_dev:
2388        kfree(srp_dev);
2389
2390free_attr:
2391        kfree(dev_attr);
2392}
2393
2394static void srp_remove_one(struct ib_device *device)
2395{
2396        struct srp_device *srp_dev;
2397        struct srp_host *host, *tmp_host;
2398        LIST_HEAD(target_list);
2399        struct srp_target_port *target, *tmp_target;
2400
2401        srp_dev = ib_get_client_data(device, &srp_client);
2402
2403        list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2404                device_unregister(&host->dev);
2405                /*
2406                 * Wait for the sysfs entry to go away, so that no new
2407                 * target ports can be created.
2408                 */
2409                wait_for_completion(&host->released);
2410
2411                /*
2412                 * Mark all target ports as removed, so we stop queueing
2413                 * commands and don't try to reconnect.
2414                 */
2415                spin_lock(&host->target_lock);
2416                list_for_each_entry(target, &host->target_list, list) {
2417                        spin_lock_irq(&target->lock);
2418                        target->state = SRP_TARGET_REMOVED;
2419                        spin_unlock_irq(&target->lock);
2420                }
2421                spin_unlock(&host->target_lock);
2422
2423                /*
2424                 * Wait for any reconnection tasks that may have
2425                 * started before we marked our target ports as
2426                 * removed, and any target port removal tasks.
2427                 */
2428                flush_workqueue(ib_wq);
2429
2430                list_for_each_entry_safe(target, tmp_target,
2431                                         &host->target_list, list) {
2432                        srp_remove_host(target->scsi_host);
2433                        scsi_remove_host(target->scsi_host);
2434                        srp_disconnect_target(target);
2435                        ib_destroy_cm_id(target->cm_id);
2436                        srp_free_target_ib(target);
2437                        srp_free_req_data(target);
2438                        scsi_host_put(target->scsi_host);
2439                }
2440
2441                kfree(host);
2442        }
2443
2444        if (srp_dev->fmr_pool)
2445                ib_destroy_fmr_pool(srp_dev->fmr_pool);
2446        ib_dereg_mr(srp_dev->mr);
2447        ib_dealloc_pd(srp_dev->pd);
2448
2449        kfree(srp_dev);
2450}
2451
2452static struct srp_function_template ib_srp_transport_functions = {
2453};
2454
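    /*
     * Sanitize the module parameters (the deprecated srp_sg_tablesize
     * alias, the 255-entry limit on cmd_sg_entries, and the requirement
     * that indirect_sg_entries >= cmd_sg_entries), then register the
     * SRP transport template, the sysfs class and the SA and IB
     * clients.
     */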
2455static int __init srp_init_module(void)
2456{
2457        int ret;
2458
2459        BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
2460
2461        if (srp_sg_tablesize) {
2462                printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
2463                if (!cmd_sg_entries)
2464                        cmd_sg_entries = srp_sg_tablesize;
2465        }
2466
2467        if (!cmd_sg_entries)
2468                cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2469
2470        if (cmd_sg_entries > 255) {
2471                printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
2472                cmd_sg_entries = 255;
2473        }
2474
2475        if (!indirect_sg_entries)
2476                indirect_sg_entries = cmd_sg_entries;
2477        else if (indirect_sg_entries < cmd_sg_entries) {
2478                printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries);
2479                indirect_sg_entries = cmd_sg_entries;
2480        }
2481
2482        ib_srp_transport_template =
2483                srp_attach_transport(&ib_srp_transport_functions);
2484        if (!ib_srp_transport_template)
2485                return -ENOMEM;
2486
2487        ret = class_register(&srp_class);
2488        if (ret) {
2489                printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
2490                srp_release_transport(ib_srp_transport_template);
2491                return ret;
2492        }
2493
2494        ib_sa_register_client(&srp_sa_client);
2495
2496        ret = ib_register_client(&srp_client);
2497        if (ret) {
2498                printk(KERN_ERR PFX "couldn't register IB client\n");
2499                srp_release_transport(ib_srp_transport_template);
2500                ib_sa_unregister_client(&srp_sa_client);
2501                class_unregister(&srp_class);
2502                return ret;
2503        }
2504
2505        return 0;
2506}
2507
2508static void __exit srp_cleanup_module(void)
2509{
2510        ib_unregister_client(&srp_client);
2511        ib_sa_unregister_client(&srp_sa_client);
2512        class_unregister(&srp_class);
2513        srp_release_transport(ib_srp_transport_template);
2514}
2515
2516module_init(srp_init_module);
2517module_exit(srp_cleanup_module);
2518