linux/drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "1.0"
#define DRV_RELDATE     "July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate transport layer errors. After this time has been"
                 " exceeded the SCSI host is removed. Should be"
                 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");
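
/*
 * Illustration of how the three parameters above interact (the values
 * are the defaults from this file): after a transport layer error,
 * reconnect attempts are made every reconnect_delay (10) seconds, all
 * I/O is failed after fast_io_fail_tmo (15) seconds and the SCSI host
 * is removed after dev_loss_tmo (600) seconds. Since these parameters
 * are created writable (S_IWUSR), they can be changed at runtime
 * through sysfs, e.g.:
 *
 *   echo 60  > /sys/module/ib_srp/parameters/dev_loss_tmo
 *   echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 */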

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        if (strncmp(val, "off", 3) != 0) {
                res = kstrtoint(val, 0, &tmo);
                if (res)
                        goto out;
        } else {
                tmo = -1;
        }
        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

static struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
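
/*
 * Example (GUID value made up for illustration): a big-endian ioc_guid
 * of 0x0005ad0000001234 starts with the Topspin OUI 00:05:ad, so the
 * memcmp() against its first three bytes above matches and the
 * workarounds are applied.
 */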

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}
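
/*
 * Minimal usage sketch (size and flags chosen for illustration): an IU
 * allocated here must be released with srp_free_iu() against the same
 * host so that its DMA mapping is torn down:
 *
 *   struct srp_iu *iu;
 *
 *   iu = srp_alloc_iu(host, 4096, GFP_KERNEL, DMA_TO_DEVICE);
 *   if (iu)
 *           srp_free_iu(host, iu);
 */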

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_pkey(target->srp_host->srp_dev->dev,
                           target->srp_host->port,
                           be16_to_cpu(target->path.pkey),
                           &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}
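
/*
 * Note: srp_init_qp() only performs the RESET -> INIT transition of
 * the IB QP state machine. The INIT -> RTR -> RTS transitions are
 * driven later from the CM event handling path, once the connection
 * parameters have been negotiated with the target.
 */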

static int srp_new_cm_id(struct srp_target_port *target)
{
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, target);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (target->cm_id)
                ib_destroy_cm_id(target->cm_id);
        target->cm_id = new_cm_id;

        return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                               srp_recv_completion, NULL, target,
                               target->queue_size, target->comp_vector);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                               srp_send_completion, NULL, target,
                               target->queue_size, target->comp_vector);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = target->queue_size;
        init_attr->cap.max_recv_wr     = target->queue_size;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = send_cq;
        init_attr->recv_cq             = recv_cq;

        qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (target->qp)
                ib_destroy_qp(target->qp);
        if (target->recv_cq)
                ib_destroy_cq(target->recv_cq);
        if (target->send_cq)
                ib_destroy_cq(target->send_cq);

        target->qp = qp;
        target->recv_cq = recv_cq;
        target->send_cq = send_cq;

        kfree(init_attr);
        return 0;

err_qp:
        ib_destroy_qp(qp);

err_send_cq:
        ib_destroy_cq(send_cq);

err_recv_cq:
        ib_destroy_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
        int i;

        ib_destroy_qp(target->qp);
        ib_destroy_cq(target->send_cq);
        ib_destroy_cq(target->recv_cq);

        target->qp = NULL;
        target->send_cq = target->recv_cq = NULL;

        if (target->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, target->rx_ring[i]);
                kfree(target->rx_ring);
                target->rx_ring = NULL;
        }
        if (target->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, target->tx_ring[i]);
                kfree(target->tx_ring);
                target->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *target_ptr)
{
        struct srp_target_port *target = target_ptr;

        target->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                target->path = *pathrec;
        complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
        int ret;

        target->path.numb_path = 1;

        init_completion(&target->done);

        target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                                   target->srp_host->srp_dev->dev,
                                                   target->srp_host->port,
                                                   &target->path,
                                                   IB_SA_PATH_REC_SERVICE_ID    |
                                                   IB_SA_PATH_REC_DGID          |
                                                   IB_SA_PATH_REC_SGID          |
                                                   IB_SA_PATH_REC_NUMB_PATH     |
                                                   IB_SA_PATH_REC_PKEY,
                                                   SRP_PATH_REC_TIMEOUT_MS,
                                                   GFP_KERNEL,
                                                   srp_path_rec_completion,
                                                   target, &target->path_query);
        if (target->path_query_id < 0)
                return target->path_query_id;

        ret = wait_for_completion_interruptible(&target->done);
        if (ret < 0)
                return ret;

        if (target->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path               = &target->path;
        req->param.alternate_path             = NULL;
        req->param.service_id                 = target->service_id;
        req->param.qp_num                     = target->qp->qp_num;
        req->param.qp_type                    = target->qp->qp_type;
        req->param.private_data               = &req->priv;
        req->param.private_data_len           = sizeof req->priv;
        req->param.flow_control               = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn              &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
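        /*
         * Resulting 16-byte layouts (rev. 16a format, i.e. the else
         * branch below; the rev. 10 branch swaps the two halves):
         *
         *   initiator_port_id = [ initiator_ext (8) | sgid.interface_id (8) ]
         *   target_port_id    = [ id_ext (8)        | ioc_guid (8)          ]
         */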
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             (unsigned long long) be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(target->cm_id, &req->param);

        kfree(req);

        return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(system_long_wq, &target->remove_work);

        return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
                                  bool connected)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->connected != connected) {
                target->connected = connected;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        if (srp_change_conn_state(target, false)) {
                /* XXX should send SRP_I_LOGOUT request */

                if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
                        shost_printk(KERN_DEBUG, target->scsi_host,
                                     PFX "Sending CM DREQ failed\n");
                }
        }
}

static void srp_free_req_data(struct srp_target_port *target)
{
        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
        struct srp_request *req;
        int i;

        if (!target->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &target->req_ring[i];
                kfree(req->fmr_list);
                kfree(req->map_page);
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(target->req_ring);
        target->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_target_port *target)
{
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        INIT_LIST_HEAD(&target->free_reqs);

        target->req_ring = kzalloc(target->req_ring_size *
                                   sizeof(*target->req_ring), GFP_KERNEL);
        if (!target->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &target->req_ring[i];
                req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
                                        GFP_KERNEL);
                req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
                                        GFP_KERNEL);
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->fmr_list || !req->map_page || !req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
                req->index = i;
                list_add_tail(&req->list, &target->free_reqs);
        }
        ret = 0;

out:
        return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist before
 * this function was invoked are ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        ib_destroy_cm_id(target->cm_id);
        srp_free_target_ib(target);
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        srp_free_req_data(target);

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}

static int srp_connect_target(struct srp_target_port *target)
{
        int retries = 3;
        int ret;

        WARN_ON_ONCE(target->connected);

        target->qp_in_error = false;

        ret = srp_lookup_path(target);
        if (ret)
                return ret;

        while (1) {
                init_completion(&target->done);
                ret = srp_send_req(target);
                if (ret)
                        return ret;
                ret = wait_for_completion_interruptible(&target->done);
                if (ret < 0)
                        return ret;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (target->status) {
                case 0:
                        srp_change_conn_state(target, true);
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(target);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        /* Our current CM id was stale, and is now in timewait.
                         * Try to reconnect with a new one.
                         */
                        if (!retries-- || srp_new_cm_id(target)) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                             "giving up on stale connection\n");
                                target->status = -ECONNRESET;
                                return target->status;
                        }

                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "retrying stale connection\n");
                        break;

                default:
                        return target->status;
                }
        }
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_target_port *target,
                           struct srp_request *req)
{
        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
        struct ib_pool_fmr **pfmr;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        pfmr = req->fmr_list;
        while (req->nfmr--)
                ib_fmr_pool_unmap(*pfmr++);

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

/**
 * srp_claim_req() - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&target->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&target->lock, flags);

        return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @target: SRP target port.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_target_port *target,
                         struct srp_request *req, struct scsi_cmnd *scmnd,
                         s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, target, req);

        spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_lim_delta;
        list_add_tail(&req->list, &target->free_reqs);
        spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_finish_req(struct srp_target_port *target,
                           struct srp_request *req, struct scsi_device *sdev,
                           int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(target, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}

static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->req_ring_size; ++i) {
                struct srp_request *req = &target->req_ring[i];
                srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16);
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        int i, ret;

        srp_disconnect_target(target);
        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        ret = srp_new_cm_id(target);
        /*
         * Whether or not creating a new CM ID succeeded, create a new
         * QP. This guarantees that all completion callback function
         * invocations have finished before request resetting starts.
         */
        if (ret == 0)
                ret = srp_create_target_ib(target);
        else
                srp_create_target_ib(target);

        for (i = 0; i < target->req_ring_size; ++i) {
                struct srp_request *req = &target->req_ring[i];
                srp_finish_req(target, req, NULL, DID_RESET << 16);
        }

        INIT_LIST_HEAD(&target->free_tx);
        for (i = 0; i < target->queue_size; ++i)
                list_add(&target->tx_ring[i]->list, &target->free_tx);

        if (ret == 0)
                ret = srp_connect_target(target);

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
                              struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_pool_fmr *fmr;
        u64 io_addr = 0;

        if (!state->npages)
                return 0;

        if (state->npages == 1) {
                srp_map_desc(state, state->base_dma_addr, state->fmr_len,
                             target->rkey);
                state->npages = state->fmr_len = 0;
                return 0;
        }

        fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->next_fmr++ = fmr;
        state->nfmr++;

        srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
        state->npages = state->fmr_len = 0;
        return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
                                 struct scatterlist *sg, int sg_index,
                                 dma_addr_t dma_addr)
{
        state->unmapped_sg = sg;
        state->unmapped_index = sg_index;
        state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
                            struct srp_target_port *target,
                            struct scatterlist *sg, int sg_index,
                            int use_fmr)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
        unsigned int len;
        int ret;

        if (!dma_len)
                return 0;

        if (use_fmr == SRP_MAP_NO_FMR) {
                /* Once we're in direct map mode for a request, we don't
                 * go back to FMR mode, so no need to update anything
                 * other than the descriptor.
                 */
                srp_map_desc(state, dma_addr, dma_len, target->rkey);
                return 0;
        }

        /* If we start at an offset into the FMR page, don't merge into
         * the current FMR. Finish it out, and use the kernel's MR for this
         * sg entry. This is to avoid potential bugs on some SRP targets
         * that were never quite defined, but went away when the initiator
         * avoided using FMR on such page fragments.
         */
        if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
                ret = srp_map_finish_fmr(state, target);
                if (ret)
                        return ret;

                srp_map_desc(state, dma_addr, dma_len, target->rkey);
                srp_map_update_start(state, NULL, 0, 0);
                return 0;
        }

        /* If this is the first sg to go into the FMR, save our position.
         * We need to know the first unmapped entry, its index, and the
         * first unmapped address within that entry to be able to restart
         * mapping after an error.
         */
        if (!state->unmapped_sg)
                srp_map_update_start(state, sg, sg_index, dma_addr);

        while (dma_len) {
                if (state->npages == SRP_FMR_SIZE) {
                        ret = srp_map_finish_fmr(state, target);
                        if (ret)
                                return ret;

                        srp_map_update_start(state, sg, sg_index, dma_addr);
                }

                len = min_t(unsigned int, dma_len, dev->fmr_page_size);

                if (!state->npages)
                        state->base_dma_addr = dma_addr;
                state->pages[state->npages++] = dma_addr;
                state->fmr_len += len;
                dma_addr += len;
                dma_len -= len;
        }

        /* If the last entry of the FMR wasn't a full page, then we need to
         * close it out and start a new one -- we can only merge at page
         * boundaries.
         */
        ret = 0;
        if (len != dev->fmr_page_size) {
                ret = srp_map_finish_fmr(state, target);
                if (!ret)
                        srp_map_update_start(state, NULL, 0, 0);
        }
        return ret;
}
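
/*
 * Worked example for the alignment check in srp_map_sg_entry() (all
 * numbers chosen for illustration): with a 4 KiB FMR page size,
 * fmr_page_mask clears the low 12 address bits, so an s/g entry
 * starting at bus address 0x10200 has an in-page offset of 0x200.
 * Such an entry is never merged into the current FMR; the FMR under
 * construction is finished and the entry is emitted as a separate
 * direct descriptor instead.
 */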

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                        struct srp_request *req)
{
        struct scatterlist *scat, *sg;
        struct srp_cmd *cmd = req->cmd->buf;
        int i, len, nents, count, use_fmr;
        struct srp_device *dev;
        struct ib_device *ibdev;
        struct srp_map_state state;
        struct srp_indirect_buf *indirect_hdr;
        u32 table_len;
        u8 fmt;

        if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
                return sizeof (struct srp_cmd);

        if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
            scmnd->sc_data_direction != DMA_TO_DEVICE) {
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled data direction %d\n",
                             scmnd->sc_data_direction);
                return -EINVAL;
        }

        nents = scsi_sg_count(scmnd);
        scat  = scsi_sglist(scmnd);

        dev = target->srp_host->srp_dev;
        ibdev = dev->dev;

        count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
        if (unlikely(count == 0))
                return -EIO;

        fmt = SRP_DATA_DESC_DIRECT;
        len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

        if (count == 1) {
                /*
                 * The midlayer only generated a single gather/scatter
                 * entry, or DMA mapping coalesced everything to a
                 * single entry.  So a direct descriptor along with
                 * the DMA MR suffices.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
                buf->key = cpu_to_be32(target->rkey);
                buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

                req->nfmr = 0;
                goto map_complete;
        }

        /* We have more than one scatter/gather entry, so build our indirect
         * descriptor table, trying to merge as many entries with FMR as we
         * can.
         */
        indirect_hdr = (void *) cmd->add_data;

        ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
                                   target->indirect_size, DMA_TO_DEVICE);

        memset(&state, 0, sizeof(state));
        state.desc      = req->indirect_desc;
        state.pages     = req->map_page;
        state.next_fmr  = req->fmr_list;

        use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

        for_each_sg(scat, sg, count, i) {
                if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
                        /* FMR mapping failed, so backtrack to the first
                         * unmapped entry and continue on without using FMR.
                         */
                        dma_addr_t dma_addr;
                        unsigned int dma_len;

backtrack:
                        sg = state.unmapped_sg;
                        i = state.unmapped_index;

                        dma_addr = ib_sg_dma_address(ibdev, sg);
                        dma_len = ib_sg_dma_len(ibdev, sg);
                        dma_len -= (state.unmapped_addr - dma_addr);
                        dma_addr = state.unmapped_addr;
                        use_fmr = SRP_MAP_NO_FMR;
                        srp_map_desc(&state, dma_addr, dma_len, target->rkey);
                }
        }

        if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
                goto backtrack;

        /* We've mapped the request, now pull as much of the indirect
         * descriptor table as we can into the command buffer. If this
         * target is not using an external indirect table, we are
         * guaranteed to fit into the command, as the SCSI layer won't
         * give us more S/G entries than we allow.
         */
        req->nfmr = state.nfmr;
        if (state.ndesc == 1) {
                /* FMR mapping was able to collapse this to one entry,
                 * so use a direct descriptor.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                *buf = req->indirect_desc[0];
                goto map_complete;
        }

        if (unlikely(target->cmd_sg_cnt < state.ndesc &&
                                                !target->allow_ext_sg)) {
                shost_printk(KERN_ERR, target->scsi_host,
                             "Could not fit S/G list into SRP_CMD\n");
                return -EIO;
        }

        count = min(state.ndesc, target->cmd_sg_cnt);
        table_len = state.ndesc * sizeof (struct srp_direct_buf);

        fmt = SRP_DATA_DESC_INDIRECT;
        len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
        len += count * sizeof (struct srp_direct_buf);

        memcpy(indirect_hdr->desc_list, req->indirect_desc,
               count * sizeof (struct srp_direct_buf));

        indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
        indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
        indirect_hdr->table_desc.len = cpu_to_be32(table_len);
        indirect_hdr->len = cpu_to_be32(state.total_len);

        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->data_out_desc_cnt = count;
        else
                cmd->data_in_desc_cnt = count;

        ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
                                      DMA_TO_DEVICE);

map_complete:
        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->buf_fmt = fmt << 4;
        else
                cmd->buf_fmt = fmt;

        return len;
}
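
/*
 * Summary of the srp_map_data() outcomes (restating the code above): a
 * request that maps to a single DMA segment is described by one
 * srp_direct_buf inside the SRP_CMD and the function returns
 * sizeof(struct srp_cmd) + sizeof(struct srp_direct_buf). Multi-segment
 * requests get an srp_indirect_buf header with at most cmd_sg_cnt
 * descriptors inlined in the command; the full descriptor table stays
 * in the externally mapped buffer referenced by table_desc.
 */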

/*
 * Return an IU and possibly a credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
                          enum srp_iu_type iu_type)
{
        unsigned long flags;

        spin_lock_irqsave(&target->lock, flags);
        list_add(&iu->list, &target->free_tx);
        if (iu_type != SRP_IU_RSP)
                ++target->req_lim;
        spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
                                      enum srp_iu_type iu_type)
{
        s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
        struct srp_iu *iu;

        srp_send_completion(target->send_cq, target);

        if (list_empty(&target->free_tx))
                return NULL;

        /* Initiator responses to target requests do not consume credits */
        if (iu_type != SRP_IU_RSP) {
                if (target->req_lim <= rsv) {
                        ++target->zero_req_lim;
                        return NULL;
                }

                --target->req_lim;
        }

        iu = list_first_entry(&target->free_tx, struct srp_iu, list);
        list_del(&iu->list);
        return iu;
}
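
/*
 * Example of the credit reservation above: for iu_type == SRP_IU_CMD,
 * rsv == SRP_TSK_MGMT_SQ_SIZE, so once req_lim has dropped to that
 * value no further commands are issued, while a task management IU
 * (rsv == 0) can still be allocated. This keeps credits in reserve
 * for aborts and resets.
 */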

static int srp_post_send(struct srp_target_port *target,
                         struct srp_iu *iu, int len)
{
        struct ib_sge list;
        struct ib_send_wr wr, *bad_wr;

        list.addr   = iu->dma;
        list.length = len;
        list.lkey   = target->lkey;

        wr.next       = NULL;
        wr.wr_id      = (uintptr_t) iu;
        wr.sg_list    = &list;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
        struct ib_recv_wr wr, *bad_wr;
        struct ib_sge list;

        list.addr   = iu->dma;
        list.length = iu->size;
        list.lkey   = target->lkey;

        wr.next     = NULL;
        wr.wr_id    = (uintptr_t) iu;
        wr.sg_list  = &list;
        wr.num_sge  = 1;

        return ib_post_recv(target->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
        struct srp_request *req;
        struct scsi_cmnd *scmnd;
        unsigned long flags;

        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
                spin_lock_irqsave(&target->lock, flags);
                target->req_lim += be32_to_cpu(rsp->req_lim_delta);
                spin_unlock_irqrestore(&target->lock, flags);

                target->tsk_mgmt_status = -1;
                if (be32_to_cpu(rsp->resp_data_len) >= 4)
                        target->tsk_mgmt_status = rsp->data[3];
                complete(&target->tsk_mgmt_done);
        } else {
                req = &target->req_ring[rsp->tag];
                scmnd = srp_claim_req(target, req, NULL, NULL);
                if (!scmnd) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     "Null scmnd for RSP w/tag %016llx\n",
                                     (unsigned long long) rsp->tag);

                        spin_lock_irqsave(&target->lock, flags);
                        target->req_lim += be32_to_cpu(rsp->req_lim_delta);
                        spin_unlock_irqrestore(&target->lock, flags);

                        return;
                }
                scmnd->result = rsp->status;

                if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
                        memcpy(scmnd->sense_buffer, rsp->data +
                               be32_to_cpu(rsp->resp_data_len),
                               min_t(int, be32_to_cpu(rsp->sense_data_len),
                                     SCSI_SENSE_BUFFERSIZE));
                }

                if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
                else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

                srp_free_req(target, req, scmnd,
                             be32_to_cpu(rsp->req_lim_delta));

                scmnd->host_scribble = NULL;
                scmnd->scsi_done(scmnd);
        }
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
                               void *rsp, int len)
{
        struct ib_device *dev = target->srp_host->srp_dev->dev;
        unsigned long flags;
        struct srp_iu *iu;
        int err;

        spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_delta;
        iu = __srp_get_tx_iu(target, SRP_IU_RSP);
        spin_unlock_irqrestore(&target->lock, flags);

        if (!iu) {
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "no IU available to send response\n");
                return 1;
        }

        ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
        memcpy(iu->buf, rsp, len);
        ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

        err = srp_post_send(target, iu, len);
        if (err) {
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "unable to post response: %d\n", err);
                srp_put_tx_iu(target, iu, SRP_IU_RSP);
        }

        return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
                                 struct srp_cred_req *req)
{
        struct srp_cred_rsp rsp = {
                .opcode = SRP_CRED_RSP,
                .tag = req->tag,
        };
        s32 delta = be32_to_cpu(req->req_lim_delta);

        if (srp_response_common(target, delta, &rsp, sizeof rsp))
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
                                struct srp_aer_req *req)
{
        struct srp_aer_rsp rsp = {
                .opcode = SRP_AER_RSP,
                .tag = req->tag,
        };
        s32 delta = be32_to_cpu(req->req_lim_delta);

        shost_printk(KERN_ERR, target->scsi_host, PFX
                     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

        if (srp_response_common(target, delta, &rsp, sizeof rsp))
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
        struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
        int res;
        u8 opcode;

        ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
                                   DMA_FROM_DEVICE);

        opcode = *(u8 *) iu->buf;

        if (0) {
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "recv completion, opcode 0x%02x\n", opcode);
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
                               iu->buf, wc->byte_len, true);
        }

        switch (opcode) {
        case SRP_RSP:
                srp_process_rsp(target, iu->buf);
                break;

        case SRP_CRED_REQ:
                srp_process_cred_req(target, iu->buf);
                break;

        case SRP_AER_REQ:
                srp_process_aer_req(target, iu->buf);
                break;

        case SRP_T_LOGOUT:
                /* XXX Handle target logout */
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Got target logout request\n");
                break;

        default:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled SRP opcode 0x%02x\n", opcode);
                break;
        }

        ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
                                      DMA_FROM_DEVICE);

        res = srp_post_recv(target, iu);
        if (res != 0)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
1462static void srp_tl_err_work(struct work_struct *work)
1463{
1464        struct srp_target_port *target;
1465
1466        target = container_of(work, struct srp_target_port, tl_err_work);
1467        if (target->rport)
1468                srp_start_tl_fail_timers(target->rport);
1469}
1470
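    /*
     * Log a QP error only once per connection and schedule tl_err_work so
     * that the transport layer failure timers get started; any further
     * error completions merely (re)set qp_in_error.
     */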
1471static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
1472                              struct srp_target_port *target)
1473{
1474        if (target->connected && !target->qp_in_error) {
1475                shost_printk(KERN_ERR, target->scsi_host,
1476                             PFX "failed %s status %d\n",
1477                             send_err ? "send" : "receive",
1478                             wc_status);
1479                queue_work(system_long_wq, &target->tl_err_work);
1480        }
1481        target->qp_in_error = true;
1482}
1483
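    /*
     * A note on the two completion handlers below: the receive handler
     * re-arms the completion notification before draining the CQ so that
     * no event is missed. The send handler only recycles IUs onto the
     * free_tx list; it is also called directly when a TX IU is allocated,
     * so send completions are reaped even without an interrupt.
     */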
1484static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
1485{
1486        struct srp_target_port *target = target_ptr;
1487        struct ib_wc wc;
1488
1489        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1490        while (ib_poll_cq(cq, 1, &wc) > 0) {
1491                if (likely(wc.status == IB_WC_SUCCESS)) {
1492                        srp_handle_recv(target, &wc);
1493                } else {
1494                        srp_handle_qp_err(wc.status, false, target);
1495                }
1496        }
1497}
1498
1499static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1500{
1501        struct srp_target_port *target = target_ptr;
1502        struct ib_wc wc;
1503        struct srp_iu *iu;
1504
1505        while (ib_poll_cq(cq, 1, &wc) > 0) {
1506                if (likely(wc.status == IB_WC_SUCCESS)) {
1507                        iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1508                        list_add(&iu->list, &target->free_tx);
1509                } else {
1510                        srp_handle_qp_err(wc.status, true, target);
1511                }
1512        }
1513}
1514
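    /*
     * Fast-path command submission: take a TX IU and a free request slot
     * under target->lock, build an SRP_CMD IU (the single-level LUN format
     * explains the shift by 48), map the data buffers and post the send.
     * Any failure returns SCSI_MLQUEUE_HOST_BUSY so that the SCSI midlayer
     * requeues the command later.
     */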
1515static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1516{
1517        struct srp_target_port *target = host_to_target(shost);
1518        struct srp_rport *rport = target->rport;
1519        struct srp_request *req;
1520        struct srp_iu *iu;
1521        struct srp_cmd *cmd;
1522        struct ib_device *dev;
1523        unsigned long flags;
1524        int len, result;
1525        const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1526
1527        /*
1528         * The SCSI EH thread is the only context from which srp_queuecommand()
1529         * can get invoked for blocked devices (SDEV_BLOCK /
1530         * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1531         * locking the rport mutex if invoked from inside the SCSI EH.
1532         */
1533        if (in_scsi_eh)
1534                mutex_lock(&rport->mutex);
1535
1536        result = srp_chkready(target->rport);
1537        if (unlikely(result)) {
1538                scmnd->result = result;
1539                scmnd->scsi_done(scmnd);
1540                goto unlock_rport;
1541        }
1542
1543        spin_lock_irqsave(&target->lock, flags);
1544        iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1545        if (!iu)
1546                goto err_unlock;
1547
1548        req = list_first_entry(&target->free_reqs, struct srp_request, list);
1549        list_del(&req->list);
1550        spin_unlock_irqrestore(&target->lock, flags);
1551
1552        dev = target->srp_host->srp_dev->dev;
1553        ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1554                                   DMA_TO_DEVICE);
1555
1556        scmnd->result        = 0;
1557        scmnd->host_scribble = (void *) req;
1558
1559        cmd = iu->buf;
1560        memset(cmd, 0, sizeof *cmd);
1561
1562        cmd->opcode = SRP_CMD;
1563        cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
1564        cmd->tag    = req->index;
1565        memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1566
1567        req->scmnd    = scmnd;
1568        req->cmd      = iu;
1569
1570        len = srp_map_data(scmnd, target, req);
1571        if (len < 0) {
1572                shost_printk(KERN_ERR, target->scsi_host,
1573                             PFX "Failed to map data\n");
1574                goto err_iu;
1575        }
1576
1577        ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1578                                      DMA_TO_DEVICE);
1579
1580        if (srp_post_send(target, iu, len)) {
1581                shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1582                goto err_unmap;
1583        }
1584
1585unlock_rport:
1586        if (in_scsi_eh)
1587                mutex_unlock(&rport->mutex);
1588
1589        return 0;
1590
1591err_unmap:
1592        srp_unmap_data(scmnd, target, req);
1593
1594err_iu:
1595        srp_put_tx_iu(target, iu, SRP_IU_CMD);
1596
1597        spin_lock_irqsave(&target->lock, flags);
1598        list_add(&req->list, &target->free_reqs);
1599
1600err_unlock:
1601        spin_unlock_irqrestore(&target->lock, flags);
1602
1603        if (in_scsi_eh)
1604                mutex_unlock(&rport->mutex);
1605
1606        return SCSI_MLQUEUE_HOST_BUSY;
1607}
1608
1609/*
1610 * Note: the resources allocated in this function are freed in
1611 * srp_free_target_ib().
1612 */
1613static int srp_alloc_iu_bufs(struct srp_target_port *target)
1614{
1615        int i;
1616
1617        target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
1618                                  GFP_KERNEL);
1619        if (!target->rx_ring)
1620                goto err_no_ring;
1621        target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
1622                                  GFP_KERNEL);
1623        if (!target->tx_ring)
1624                goto err_no_ring;
1625
1626        for (i = 0; i < target->queue_size; ++i) {
1627                target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1628                                                  target->max_ti_iu_len,
1629                                                  GFP_KERNEL, DMA_FROM_DEVICE);
1630                if (!target->rx_ring[i])
1631                        goto err;
1632        }
1633
1634        for (i = 0; i < target->queue_size; ++i) {
1635                target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1636                                                  target->max_iu_len,
1637                                                  GFP_KERNEL, DMA_TO_DEVICE);
1638                if (!target->tx_ring[i])
1639                        goto err;
1640
1641                list_add(&target->tx_ring[i]->list, &target->free_tx);
1642        }
1643
1644        return 0;
1645
1646err:
1647        for (i = 0; i < target->queue_size; ++i) {
1648                srp_free_iu(target->srp_host, target->rx_ring[i]);
1649                srp_free_iu(target->srp_host, target->tx_ring[i]);
1650        }
1651
1653err_no_ring:
1654        kfree(target->tx_ring);
1655        target->tx_ring = NULL;
1656        kfree(target->rx_ring);
1657        target->rx_ring = NULL;
1658
1659        return -ENOMEM;
1660}
1661
1662static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1663{
1664        uint64_t T_tr_ns, max_compl_time_ms;
1665        uint32_t rq_tmo_jiffies;
1666
1667        /*
1668         * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1669         * table 91), both the QP timeout and the retry count have to be set
1670         * for RC QP's during the RTR to RTS transition.
1671         */
1672        WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1673                     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1674
1675        /*
1676         * Set target->rq_tmo_jiffies to one second more than the largest time
1677         * it can take before an error completion is generated. See also
1678         * C9-140..142 in the IBTA spec for more information about how to
1679         * convert the QP Local ACK Timeout value to nanoseconds.
1680         */
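            /*
             * Worked example with illustrative values: for timeout == 14
             * and retry_cnt == 7, T_tr = 4096 * 2^14 ns ~= 67 ms, the
             * largest completion time is 7 * 4 * 67 ms ~= 1.9 s, and the
             * resulting rq_tmo_jiffies corresponds to roughly 2.9 seconds.
             */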
1681        T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1682        max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1683        do_div(max_compl_time_ms, NSEC_PER_MSEC);
1684        rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1685
1686        return rq_tmo_jiffies;
1687}
1688
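    /*
     * Handle a CM REP: record the target's maximum IU length and initial
     * request limit, reserve SRP_TSK_MGMT_SQ_SIZE credits for task
     * management, allocate the IU rings on the first login, transition the
     * QP through RTR and RTS, post the initial receives and acknowledge
     * with an RTU. target->status carries the result back to the caller.
     */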
1689static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1690                               struct srp_login_rsp *lrsp,
1691                               struct srp_target_port *target)
1692{
1693        struct ib_qp_attr *qp_attr = NULL;
1694        int attr_mask = 0;
1695        int ret;
1696        int i;
1697
1698        if (lrsp->opcode == SRP_LOGIN_RSP) {
1699                target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1700                target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
1701
1702                /*
1703                 * Reserve credits for task management so we don't
1704                 * bounce requests back to the SCSI mid-layer.
1705                 */
1706                target->scsi_host->can_queue
1707                        = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1708                              target->scsi_host->can_queue);
1709                target->scsi_host->cmd_per_lun
1710                        = min_t(int, target->scsi_host->can_queue,
1711                                target->scsi_host->cmd_per_lun);
1712        } else {
1713                shost_printk(KERN_WARNING, target->scsi_host,
1714                             PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1715                ret = -ECONNRESET;
1716                goto error;
1717        }
1718
1719        if (!target->rx_ring) {
1720                ret = srp_alloc_iu_bufs(target);
1721                if (ret)
1722                        goto error;
1723        }
1724
1725        ret = -ENOMEM;
1726        qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1727        if (!qp_attr)
1728                goto error;
1729
1730        qp_attr->qp_state = IB_QPS_RTR;
1731        ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1732        if (ret)
1733                goto error_free;
1734
1735        ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1736        if (ret)
1737                goto error_free;
1738
1739        for (i = 0; i < target->queue_size; i++) {
1740                struct srp_iu *iu = target->rx_ring[i];
1741                ret = srp_post_recv(target, iu);
1742                if (ret)
1743                        goto error_free;
1744        }
1745
1746        qp_attr->qp_state = IB_QPS_RTS;
1747        ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1748        if (ret)
1749                goto error_free;
1750
1751        target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1752
1753        ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1754        if (ret)
1755                goto error_free;
1756
1757        ret = ib_send_cm_rtu(cm_id, NULL, 0);
1758
1759error_free:
1760        kfree(qp_attr);
1761
1762error:
1763        target->status = ret;
1764}
1765
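    /*
     * Decode a CM REJ: redirect rejects update the path and set
     * SRP_DLID_REDIRECT or SRP_PORT_REDIRECT in target->status so that the
     * login can be retried; most other reject reasons map to -ECONNRESET.
     */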
1766static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1767                               struct ib_cm_event *event,
1768                               struct srp_target_port *target)
1769{
1770        struct Scsi_Host *shost = target->scsi_host;
1771        struct ib_class_port_info *cpi;
1772        int opcode;
1773
1774        switch (event->param.rej_rcvd.reason) {
1775        case IB_CM_REJ_PORT_CM_REDIRECT:
1776                cpi = event->param.rej_rcvd.ari;
1777                target->path.dlid = cpi->redirect_lid;
1778                target->path.pkey = cpi->redirect_pkey;
1779                cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1780                memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1781
1782                target->status = target->path.dlid ?
1783                        SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1784                break;
1785
1786        case IB_CM_REJ_PORT_REDIRECT:
1787                if (srp_target_is_topspin(target)) {
1788                        /*
1789                         * Topspin/Cisco SRP gateways incorrectly send
1790                         * reject reason code 25 when they mean 24
1791                         * (port redirect).
1792                         */
1793                        memcpy(target->path.dgid.raw,
1794                               event->param.rej_rcvd.ari, 16);
1795
1796                        shost_printk(KERN_DEBUG, shost,
1797                                     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1798                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1799                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1800
1801                        target->status = SRP_PORT_REDIRECT;
1802                } else {
1803                        shost_printk(KERN_WARNING, shost,
1804                                     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1805                        target->status = -ECONNRESET;
1806                }
1807                break;
1808
1809        case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1810                shost_printk(KERN_WARNING, shost,
1811                            "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1812                target->status = -ECONNRESET;
1813                break;
1814
1815        case IB_CM_REJ_CONSUMER_DEFINED:
1816                opcode = *(u8 *) event->private_data;
1817                if (opcode == SRP_LOGIN_REJ) {
1818                        struct srp_login_rej *rej = event->private_data;
1819                        u32 reason = be32_to_cpu(rej->reason);
1820
1821                        if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
1822                                shost_printk(KERN_WARNING, shost,
1823                                             PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1824                        else
1825                                shost_printk(KERN_WARNING, shost, PFX
1826                                             "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
1827                                             target->path.sgid.raw,
1828                                             target->orig_dgid, reason);
1829                } else {
1830                        shost_printk(KERN_WARNING, shost,
1831                                     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
1832                                     opcode);
                    }
1833                target->status = -ECONNRESET;
1834                break;
1835
1836        case IB_CM_REJ_STALE_CONN:
1837                shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
1838                target->status = SRP_STALE_CONN;
1839                break;
1840
1841        default:
1842                shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
1843                             event->param.rej_rcvd.reason);
1844                target->status = -ECONNRESET;
1845        }
1846}
1847
1848static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1849{
1850        struct srp_target_port *target = cm_id->context;
1851        int comp = 0;
1852
1853        switch (event->event) {
1854        case IB_CM_REQ_ERROR:
1855                shost_printk(KERN_DEBUG, target->scsi_host,
1856                             PFX "Sending CM REQ failed\n");
1857                comp = 1;
1858                target->status = -ECONNRESET;
1859                break;
1860
1861        case IB_CM_REP_RECEIVED:
1862                comp = 1;
1863                srp_cm_rep_handler(cm_id, event->private_data, target);
1864                break;
1865
1866        case IB_CM_REJ_RECEIVED:
1867                shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
1868                comp = 1;
1869
1870                srp_cm_rej_handler(cm_id, event, target);
1871                break;
1872
1873        case IB_CM_DREQ_RECEIVED:
1874                shost_printk(KERN_WARNING, target->scsi_host,
1875                             PFX "DREQ received - connection closed\n");
1876                srp_change_conn_state(target, false);
1877                if (ib_send_cm_drep(cm_id, NULL, 0))
1878                        shost_printk(KERN_ERR, target->scsi_host,
1879                                     PFX "Sending CM DREP failed\n");
1880                queue_work(system_long_wq, &target->tl_err_work);
1881                break;
1882
1883        case IB_CM_TIMEWAIT_EXIT:
1884                shost_printk(KERN_ERR, target->scsi_host,
1885                             PFX "connection closed\n");
1886                comp = 1;
1887
1888                target->status = 0;
1889                break;
1890
1891        case IB_CM_MRA_RECEIVED:
1892        case IB_CM_DREQ_ERROR:
1893        case IB_CM_DREP_RECEIVED:
1894                break;
1895
1896        default:
1897                shost_printk(KERN_WARNING, target->scsi_host,
1898                             PFX "Unhandled CM event %d\n", event->event);
1899                break;
1900        }
1901
1902        if (comp)
1903                complete(&target->done);
1904
1905        return 0;
1906}
1907
1908/**
1909 * srp_change_queue_type - change the queue tag type of a device
1910 * @sdev: scsi device struct
1911 * @tag_type: requested tag type
1912 *
1913 * Returns queue tag type.
1914 */
1915static int
1916srp_change_queue_type(struct scsi_device *sdev, int tag_type)
1917{
1918        if (sdev->tagged_supported) {
1919                scsi_set_tag_type(sdev, tag_type);
1920                if (tag_type)
1921                        scsi_activate_tcq(sdev, sdev->queue_depth);
1922                else
1923                        scsi_deactivate_tcq(sdev, sdev->queue_depth);
1924        } else {
1925                tag_type = 0;
            }
1926
1927        return tag_type;
1928}
1929
1930/**
1931 * srp_change_queue_depth - set the queue depth of a device
1932 * @sdev: scsi device struct
1933 * @qdepth: requested queue depth
1934 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1935 * (see include/scsi/scsi_host.h for definition)
1936 *
1937 * Returns queue depth.
1938 */
1939static int
1940srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1941{
1942        struct Scsi_Host *shost = sdev->host;
1943        int max_depth;

1944        if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
1945                max_depth = shost->can_queue;
1946                if (!sdev->tagged_supported)
1947                        max_depth = 1;
1948                if (qdepth > max_depth)
1949                        qdepth = max_depth;
1950                scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1951        } else if (reason == SCSI_QDEPTH_QFULL) {
1952                scsi_track_queue_full(sdev, qdepth);
1953        } else {
1954                return -EOPNOTSUPP;
            }
1955
1956        return sdev->queue_depth;
1957}
1958
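    /*
     * Send a task management function and wait for its completion. The
     * IU tag is req_tag with SRP_TAG_TSK_MGMT or'ed in, while task_tag
     * names the request that the function operates on.
     */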
1959static int srp_send_tsk_mgmt(struct srp_target_port *target,
1960                             u64 req_tag, unsigned int lun, u8 func)
1961{
1962        struct srp_rport *rport = target->rport;
1963        struct ib_device *dev = target->srp_host->srp_dev->dev;
1964        struct srp_iu *iu;
1965        struct srp_tsk_mgmt *tsk_mgmt;
1966
1967        if (!target->connected || target->qp_in_error)
1968                return -1;
1969
1970        init_completion(&target->tsk_mgmt_done);
1971
1972        /*
1973         * Lock the rport mutex to prevent srp_create_target_ib() from being
1974         * invoked while a task management function is being sent.
1975         */
1976        mutex_lock(&rport->mutex);
1977        spin_lock_irq(&target->lock);
1978        iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1979        spin_unlock_irq(&target->lock);
1980
1981        if (!iu) {
1982                mutex_unlock(&rport->mutex);
1983
1984                return -1;
1985        }
1986
1987        ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1988                                   DMA_TO_DEVICE);
1989        tsk_mgmt = iu->buf;
1990        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1991
1992        tsk_mgmt->opcode        = SRP_TSK_MGMT;
1993        tsk_mgmt->lun           = cpu_to_be64((u64) lun << 48);
1994        tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
1995        tsk_mgmt->tsk_mgmt_func = func;
1996        tsk_mgmt->task_tag      = req_tag;
1997
1998        ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1999                                      DMA_TO_DEVICE);
2000        if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
2001                srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
2002                mutex_unlock(&rport->mutex);
2003
2004                return -1;
2005        }
2006        mutex_unlock(&rport->mutex);
2007
2008        if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
2009                                         msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2010                return -1;
2011
2012        return 0;
2013}
2014
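    /*
     * SCSI EH abort handler. srp_claim_req() takes ownership of the
     * request so that a concurrently arriving SRP_RSP cannot complete it;
     * FAST_IO_FAIL is returned once the rport state is SRP_RPORT_LOST.
     */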
2015static int srp_abort(struct scsi_cmnd *scmnd)
2016{
2017        struct srp_target_port *target = host_to_target(scmnd->device->host);
2018        struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2019        int ret;
2020
2021        shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2022
2023        if (!req || !srp_claim_req(target, req, NULL, scmnd))
2024                return SUCCESS;
2025        if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
2026                              SRP_TSK_ABORT_TASK) == 0)
2027                ret = SUCCESS;
2028        else if (target->rport->state == SRP_RPORT_LOST)
2029                ret = FAST_IO_FAIL;
2030        else
2031                ret = FAILED;
2032        srp_free_req(target, req, scmnd, 0);
2033        scmnd->result = DID_ABORT << 16;
2034        scmnd->scsi_done(scmnd);
2035
2036        return ret;
2037}
2038
2039static int srp_reset_device(struct scsi_cmnd *scmnd)
2040{
2041        struct srp_target_port *target = host_to_target(scmnd->device->host);
2042        int i;
2043
2044        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2045
2046        if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
2047                              SRP_TSK_LUN_RESET))
2048                return FAILED;
2049        if (target->tsk_mgmt_status)
2050                return FAILED;
2051
2052        for (i = 0; i < target->req_ring_size; ++i) {
2053                struct srp_request *req = &target->req_ring[i];
2054                srp_finish_req(target, req, scmnd->device, DID_RESET << 16);
2055        }
2056
2057        return SUCCESS;
2058}
2059
2060static int srp_reset_host(struct scsi_cmnd *scmnd)
2061{
2062        struct srp_target_port *target = host_to_target(scmnd->device->host);
2063
2064        shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2065
2066        return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2067}
2068
2069static int srp_slave_configure(struct scsi_device *sdev)
2070{
2071        struct Scsi_Host *shost = sdev->host;
2072        struct srp_target_port *target = host_to_target(shost);
2073        struct request_queue *q = sdev->request_queue;
2074        unsigned long timeout;
2075
2076        if (sdev->type == TYPE_DISK) {
2077                timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2078                blk_queue_rq_timeout(q, timeout);
2079        }
2080
2081        return 0;
2082}
2083
2084static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2085                           char *buf)
2086{
2087        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2088
2089        return sprintf(buf, "0x%016llx\n",
2090                       (unsigned long long) be64_to_cpu(target->id_ext));
2091}
2092
2093static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2094                             char *buf)
2095{
2096        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2097
2098        return sprintf(buf, "0x%016llx\n",
2099                       (unsigned long long) be64_to_cpu(target->ioc_guid));
2100}
2101
2102static ssize_t show_service_id(struct device *dev,
2103                               struct device_attribute *attr, char *buf)
2104{
2105        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2106
2107        return sprintf(buf, "0x%016llx\n",
2108                       (unsigned long long) be64_to_cpu(target->service_id));
2109}
2110
2111static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2112                         char *buf)
2113{
2114        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2115
2116        return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
2117}
2118
2119static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2120                         char *buf)
2121{
2122        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2123
2124        return sprintf(buf, "%pI6\n", target->path.sgid.raw);
2125}
2126
2127static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2128                         char *buf)
2129{
2130        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2131
2132        return sprintf(buf, "%pI6\n", target->path.dgid.raw);
2133}
2134
2135static ssize_t show_orig_dgid(struct device *dev,
2136                              struct device_attribute *attr, char *buf)
2137{
2138        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2139
2140        return sprintf(buf, "%pI6\n", target->orig_dgid);
2141}
2142
2143static ssize_t show_req_lim(struct device *dev,
2144                            struct device_attribute *attr, char *buf)
2145{
2146        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2147
2148        return sprintf(buf, "%d\n", target->req_lim);
2149}
2150
2151static ssize_t show_zero_req_lim(struct device *dev,
2152                                 struct device_attribute *attr, char *buf)
2153{
2154        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2155
2156        return sprintf(buf, "%d\n", target->zero_req_lim);
2157}
2158
2159static ssize_t show_local_ib_port(struct device *dev,
2160                                  struct device_attribute *attr, char *buf)
2161{
2162        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2163
2164        return sprintf(buf, "%d\n", target->srp_host->port);
2165}
2166
2167static ssize_t show_local_ib_device(struct device *dev,
2168                                    struct device_attribute *attr, char *buf)
2169{
2170        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2171
2172        return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2173}
2174
2175static ssize_t show_comp_vector(struct device *dev,
2176                                struct device_attribute *attr, char *buf)
2177{
2178        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2179
2180        return sprintf(buf, "%d\n", target->comp_vector);
2181}
2182
2183static ssize_t show_tl_retry_count(struct device *dev,
2184                                   struct device_attribute *attr, char *buf)
2185{
2186        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2187
2188        return sprintf(buf, "%d\n", target->tl_retry_count);
2189}
2190
2191static ssize_t show_cmd_sg_entries(struct device *dev,
2192                                   struct device_attribute *attr, char *buf)
2193{
2194        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2195
2196        return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2197}
2198
2199static ssize_t show_allow_ext_sg(struct device *dev,
2200                                 struct device_attribute *attr, char *buf)
2201{
2202        struct srp_target_port *target = host_to_target(class_to_shost(dev));
2203
2204        return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2205}
2206
2207static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
2208static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
2209static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
2210static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
2211static DEVICE_ATTR(sgid,            S_IRUGO, show_sgid,            NULL);
2212static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
2213static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
2214static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2215static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
2216static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2217static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2218static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2219static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2220static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2221static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2222
2223static struct device_attribute *srp_host_attrs[] = {
2224        &dev_attr_id_ext,
2225        &dev_attr_ioc_guid,
2226        &dev_attr_service_id,
2227        &dev_attr_pkey,
2228        &dev_attr_sgid,
2229        &dev_attr_dgid,
2230        &dev_attr_orig_dgid,
2231        &dev_attr_req_lim,
2232        &dev_attr_zero_req_lim,
2233        &dev_attr_local_ib_port,
2234        &dev_attr_local_ib_device,
2235        &dev_attr_comp_vector,
2236        &dev_attr_tl_retry_count,
2237        &dev_attr_cmd_sg_entries,
2238        &dev_attr_allow_ext_sg,
2239        NULL
2240};
2241
2242static struct scsi_host_template srp_template = {
2243        .module                         = THIS_MODULE,
2244        .name                           = "InfiniBand SRP initiator",
2245        .proc_name                      = DRV_NAME,
2246        .slave_configure                = srp_slave_configure,
2247        .info                           = srp_target_info,
2248        .queuecommand                   = srp_queuecommand,
2249        .change_queue_depth             = srp_change_queue_depth,
2250        .change_queue_type              = srp_change_queue_type,
2251        .eh_abort_handler               = srp_abort,
2252        .eh_device_reset_handler        = srp_reset_device,
2253        .eh_host_reset_handler          = srp_reset_host,
2254        .skip_settle_delay              = true,
2255        .sg_tablesize                   = SRP_DEF_SG_TABLESIZE,
2256        .can_queue                      = SRP_DEFAULT_CMD_SQ_SIZE,
2257        .this_id                        = -1,
2258        .cmd_per_lun                    = SRP_DEFAULT_CMD_SQ_SIZE,
2259        .use_clustering                 = ENABLE_CLUSTERING,
2260        .shost_attrs                    = srp_host_attrs
2261};
2262
2263static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2264{
2265        struct srp_rport_identifiers ids;
2266        struct srp_rport *rport;
2267
2268        sprintf(target->target_name, "SRP.T10:%016llX",
2269                 (unsigned long long) be64_to_cpu(target->id_ext));
2270
2271        if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2272                return -ENODEV;
2273
2274        memcpy(ids.port_id, &target->id_ext, 8);
2275        memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2276        ids.roles = SRP_RPORT_ROLE_TARGET;
2277        rport = srp_rport_add(target->scsi_host, &ids);
2278        if (IS_ERR(rport)) {
2279                scsi_remove_host(target->scsi_host);
2280                return PTR_ERR(rport);
2281        }
2282
2283        rport->lld_data = target;
2284        target->rport = rport;
2285
2286        spin_lock(&host->target_lock);
2287        list_add_tail(&target->list, &host->target_list);
2288        spin_unlock(&host->target_lock);
2289
2290        target->state = SRP_TARGET_LIVE;
2291
2292        scsi_scan_target(&target->scsi_host->shost_gendev,
2293                         0, target->scsi_id, SCAN_WILD_CARD, 0);
2294
2295        return 0;
2296}
2297
2298static void srp_release_dev(struct device *dev)
2299{
2300        struct srp_host *host =
2301                container_of(dev, struct srp_host, dev);
2302
2303        complete(&host->released);
2304}
2305
2306static struct class srp_class = {
2307        .name    = "infiniband_srp",
2308        .dev_release = srp_release_dev
2309};
2310
2311/**
2312 * srp_conn_unique() - check whether the connection to a target is unique
     * @host: SRP host.
     * @target: SRP target port.
2313 */
2314static bool srp_conn_unique(struct srp_host *host,
2315                            struct srp_target_port *target)
2316{
2317        struct srp_target_port *t;
2318        bool ret = false;
2319
2320        if (target->state == SRP_TARGET_REMOVED)
2321                goto out;
2322
2323        ret = true;
2324
2325        spin_lock(&host->target_lock);
2326        list_for_each_entry(t, &host->target_list, list) {
2327                if (t != target &&
2328                    target->id_ext == t->id_ext &&
2329                    target->ioc_guid == t->ioc_guid &&
2330                    target->initiator_ext == t->initiator_ext) {
2331                        ret = false;
2332                        break;
2333                }
2334        }
2335        spin_unlock(&host->target_lock);
2336
2337out:
2338        return ret;
2339}
2340
2341/*
2342 * Target ports are added by writing
2343 *
2344 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2345 *     pkey=<P_Key>,service_id=<service ID>
2346 *
2347 * to the add_target sysfs attribute.
2348 */
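    /*
     * Illustrative example (device name and all identifiers below are
     * placeholders, not real values):
     *
     *     echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" \
     *         > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
     */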
2349enum {
2350        SRP_OPT_ERR             = 0,
2351        SRP_OPT_ID_EXT          = 1 << 0,
2352        SRP_OPT_IOC_GUID        = 1 << 1,
2353        SRP_OPT_DGID            = 1 << 2,
2354        SRP_OPT_PKEY            = 1 << 3,
2355        SRP_OPT_SERVICE_ID      = 1 << 4,
2356        SRP_OPT_MAX_SECT        = 1 << 5,
2357        SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2358        SRP_OPT_IO_CLASS        = 1 << 7,
2359        SRP_OPT_INITIATOR_EXT   = 1 << 8,
2360        SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
2361        SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
2362        SRP_OPT_SG_TABLESIZE    = 1 << 11,
2363        SRP_OPT_COMP_VECTOR     = 1 << 12,
2364        SRP_OPT_TL_RETRY_COUNT  = 1 << 13,
2365        SRP_OPT_QUEUE_SIZE      = 1 << 14,
2366        SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
2367                                   SRP_OPT_IOC_GUID     |
2368                                   SRP_OPT_DGID         |
2369                                   SRP_OPT_PKEY         |
2370                                   SRP_OPT_SERVICE_ID),
2371};
2372
2373static const match_table_t srp_opt_tokens = {
2374        { SRP_OPT_ID_EXT,               "id_ext=%s"             },
2375        { SRP_OPT_IOC_GUID,             "ioc_guid=%s"           },
2376        { SRP_OPT_DGID,                 "dgid=%s"               },
2377        { SRP_OPT_PKEY,                 "pkey=%x"               },
2378        { SRP_OPT_SERVICE_ID,           "service_id=%s"         },
2379        { SRP_OPT_MAX_SECT,             "max_sect=%d"           },
2380        { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
2381        { SRP_OPT_IO_CLASS,             "io_class=%x"           },
2382        { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
2383        { SRP_OPT_CMD_SG_ENTRIES,       "cmd_sg_entries=%u"     },
2384        { SRP_OPT_ALLOW_EXT_SG,         "allow_ext_sg=%u"       },
2385        { SRP_OPT_SG_TABLESIZE,         "sg_tablesize=%u"       },
2386        { SRP_OPT_COMP_VECTOR,          "comp_vector=%u"        },
2387        { SRP_OPT_TL_RETRY_COUNT,       "tl_retry_count=%u"     },
2388        { SRP_OPT_QUEUE_SIZE,           "queue_size=%d"         },
2389        { SRP_OPT_ERR,                  NULL                    }
2390};
2391
2392static int srp_parse_options(const char *buf, struct srp_target_port *target)
2393{
2394        char *options, *sep_opt;
2395        char *p;
2396        char dgid[3];
2397        substring_t args[MAX_OPT_ARGS];
2398        int opt_mask = 0;
2399        int token;
2400        int ret = -EINVAL;
2401        int i;
2402
2403        options = kstrdup(buf, GFP_KERNEL);
2404        if (!options)
2405                return -ENOMEM;
2406
2407        sep_opt = options;
2408        while ((p = strsep(&sep_opt, ",")) != NULL) {
2409                if (!*p)
2410                        continue;
2411
2412                token = match_token(p, srp_opt_tokens, args);
2413                opt_mask |= token;
2414
2415                switch (token) {
2416                case SRP_OPT_ID_EXT:
2417                        p = match_strdup(args);
2418                        if (!p) {
2419                                ret = -ENOMEM;
2420                                goto out;
2421                        }
2422                        target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2423                        kfree(p);
2424                        break;
2425
2426                case SRP_OPT_IOC_GUID:
2427                        p = match_strdup(args);
2428                        if (!p) {
2429                                ret = -ENOMEM;
2430                                goto out;
2431                        }
2432                        target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2433                        kfree(p);
2434                        break;
2435
2436                case SRP_OPT_DGID:
2437                        p = match_strdup(args);
2438                        if (!p) {
2439                                ret = -ENOMEM;
2440                                goto out;
2441                        }
2442                        if (strlen(p) != 32) {
2443                                pr_warn("bad dest GID parameter '%s'\n", p);
2444                                kfree(p);
2445                                goto out;
2446                        }
2447
2448                        for (i = 0; i < 16; ++i) {
2449                                strlcpy(dgid, p + i * 2, 3);
2450                                target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2451                        }
2452                        kfree(p);
2453                        memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2454                        break;
2455
2456                case SRP_OPT_PKEY:
2457                        if (match_hex(args, &token)) {
2458                                pr_warn("bad P_Key parameter '%s'\n", p);
2459                                goto out;
2460                        }
2461                        target->path.pkey = cpu_to_be16(token);
2462                        break;
2463
2464                case SRP_OPT_SERVICE_ID:
2465                        p = match_strdup(args);
2466                        if (!p) {
2467                                ret = -ENOMEM;
2468                                goto out;
2469                        }
2470                        target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2471                        target->path.service_id = target->service_id;
2472                        kfree(p);
2473                        break;
2474
2475                case SRP_OPT_MAX_SECT:
2476                        if (match_int(args, &token)) {
2477                                pr_warn("bad max sect parameter '%s'\n", p);
2478                                goto out;
2479                        }
2480                        target->scsi_host->max_sectors = token;
2481                        break;
2482
2483                case SRP_OPT_QUEUE_SIZE:
2484                        if (match_int(args, &token) || token < 1) {
2485                                pr_warn("bad queue_size parameter '%s'\n", p);
2486                                goto out;
2487                        }
2488                        target->scsi_host->can_queue = token;
2489                        target->queue_size = token + SRP_RSP_SQ_SIZE +
2490                                             SRP_TSK_MGMT_SQ_SIZE;
2491                        if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2492                                target->scsi_host->cmd_per_lun = token;
2493                        break;
2494
2495                case SRP_OPT_MAX_CMD_PER_LUN:
2496                        if (match_int(args, &token) || token < 1) {
2497                                pr_warn("bad max cmd_per_lun parameter '%s'\n",
2498                                        p);
2499                                goto out;
2500                        }
2501                        target->scsi_host->cmd_per_lun = token;
2502                        break;
2503
2504                case SRP_OPT_IO_CLASS:
2505                        if (match_hex(args, &token)) {
2506                                pr_warn("bad IO class parameter '%s'\n", p);
2507                                goto out;
2508                        }
2509                        if (token != SRP_REV10_IB_IO_CLASS &&
2510                            token != SRP_REV16A_IB_IO_CLASS) {
2511                                pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2512                                        token, SRP_REV10_IB_IO_CLASS,
2513                                        SRP_REV16A_IB_IO_CLASS);
2514                                goto out;
2515                        }
2516                        target->io_class = token;
2517                        break;
2518
2519                case SRP_OPT_INITIATOR_EXT:
2520                        p = match_strdup(args);
2521                        if (!p) {
2522                                ret = -ENOMEM;
2523                                goto out;
2524                        }
2525                        target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2526                        kfree(p);
2527                        break;
2528
2529                case SRP_OPT_CMD_SG_ENTRIES:
2530                        if (match_int(args, &token) || token < 1 || token > 255) {
2531                                pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2532                                        p);
2533                                goto out;
2534                        }
2535                        target->cmd_sg_cnt = token;
2536                        break;
2537
2538                case SRP_OPT_ALLOW_EXT_SG:
2539                        if (match_int(args, &token)) {
2540                                pr_warn("bad allow_ext_sg parameter '%s'\n", p);
2541                                goto out;
2542                        }
2543                        target->allow_ext_sg = !!token;
2544                        break;
2545
2546                case SRP_OPT_SG_TABLESIZE:
2547                        if (match_int(args, &token) || token < 1 ||
2548                                        token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
2549                                pr_warn("bad max sg_tablesize parameter '%s'\n",
2550                                        p);
2551                                goto out;
2552                        }
2553                        target->sg_tablesize = token;
2554                        break;
2555
2556                case SRP_OPT_COMP_VECTOR:
2557                        if (match_int(args, &token) || token < 0) {
2558                                pr_warn("bad comp_vector parameter '%s'\n", p);
2559                                goto out;
2560                        }
2561                        target->comp_vector = token;
2562                        break;
2563
2564                case SRP_OPT_TL_RETRY_COUNT:
2565                        if (match_int(args, &token) || token < 2 || token > 7) {
2566                                pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2567                                        p);
2568                                goto out;
2569                        }
2570                        target->tl_retry_count = token;
2571                        break;
2572
2573                default:
2574                        pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2575                                p);
2576                        goto out;
2577                }
2578        }
2579
2580        if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2581                ret = 0;
2582        else
2583                for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2584                        if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2585                            !(srp_opt_tokens[i].token & opt_mask))
2586                                pr_warn("target creation request is missing parameter '%s'\n",
2587                                        srp_opt_tokens[i].pattern);
2588
2589        if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue &&
2590            (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2591                pr_warn("cmd_per_lun = %d > queue_size = %d\n",
2592                        target->scsi_host->cmd_per_lun,
2593                        target->scsi_host->can_queue);
2594
2595out:
2596        kfree(options);
2597        return ret;
2598}
2599
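    /*
     * Store method for the add_target sysfs attribute: allocate a SCSI
     * host plus target port, parse the option string written by userspace,
     * create the IB resources, connect, and register the target with the
     * SCSI and SRP transport layers. On failure everything is torn down in
     * reverse order through the error labels.
     */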
2600static ssize_t srp_create_target(struct device *dev,
2601                                 struct device_attribute *attr,
2602                                 const char *buf, size_t count)
2603{
2604        struct srp_host *host =
2605                container_of(dev, struct srp_host, dev);
2606        struct Scsi_Host *target_host;
2607        struct srp_target_port *target;
2608        struct ib_device *ibdev = host->srp_dev->dev;
2609        int ret;
2610
2611        target_host = scsi_host_alloc(&srp_template,
2612                                      sizeof (struct srp_target_port));
2613        if (!target_host)
2614                return -ENOMEM;
2615
2616        target_host->transportt  = ib_srp_transport_template;
2617        target_host->max_channel = 0;
2618        target_host->max_id      = 1;
2619        target_host->max_lun     = SRP_MAX_LUN;
2620        target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
2621
2622        target = host_to_target(target_host);
2623
2624        target->io_class        = SRP_REV16A_IB_IO_CLASS;
2625        target->scsi_host       = target_host;
2626        target->srp_host        = host;
2627        target->lkey            = host->srp_dev->mr->lkey;
2628        target->rkey            = host->srp_dev->mr->rkey;
2629        target->cmd_sg_cnt      = cmd_sg_entries;
2630        target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
2631        target->allow_ext_sg    = allow_ext_sg;
2632        target->tl_retry_count  = 7;
2633        target->queue_size      = SRP_DEFAULT_QUEUE_SIZE;
2634
2635        mutex_lock(&host->add_target_mutex);
2636
2637        ret = srp_parse_options(buf, target);
2638        if (ret)
2639                goto err;
2640
2641        target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
2642
2643        if (!srp_conn_unique(target->srp_host, target)) {
2644                shost_printk(KERN_INFO, target->scsi_host,
2645                             PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
2646                             be64_to_cpu(target->id_ext),
2647                             be64_to_cpu(target->ioc_guid),
2648                             be64_to_cpu(target->initiator_ext));
2649                ret = -EEXIST;
2650                goto err;
2651        }
2652
2653        if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2654                                target->cmd_sg_cnt < target->sg_tablesize) {
2655                pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2656                target->sg_tablesize = target->cmd_sg_cnt;
2657        }
2658
2659        target_host->sg_tablesize = target->sg_tablesize;
2660        target->indirect_size = target->sg_tablesize *
2661                                sizeof (struct srp_direct_buf);
2662        target->max_iu_len = sizeof (struct srp_cmd) +
2663                             sizeof (struct srp_indirect_buf) +
2664                             target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2665
2666        INIT_WORK(&target->tl_err_work, srp_tl_err_work);
2667        INIT_WORK(&target->remove_work, srp_remove_work);
2668        spin_lock_init(&target->lock);
2669        INIT_LIST_HEAD(&target->free_tx);
2670        ret = srp_alloc_req_data(target);
2671        if (ret)
2672                goto err_free_mem;
2673
2674        ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
2675        if (ret)
2676                goto err_free_mem;
2677
2678        ret = srp_create_target_ib(target);
2679        if (ret)
2680                goto err_free_mem;
2681
2682        ret = srp_new_cm_id(target);
2683        if (ret)
2684                goto err_free_ib;
2685
2686        ret = srp_connect_target(target);
2687        if (ret) {
2688                shost_printk(KERN_ERR, target->scsi_host,
2689                             PFX "Connection failed\n");
2690                goto err_cm_id;
2691        }
2692
2693        ret = srp_add_target(host, target);
2694        if (ret)
2695                goto err_disconnect;
2696
2697        shost_printk(KERN_DEBUG, target->scsi_host, PFX
2698                     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
2699                     be64_to_cpu(target->id_ext),
2700                     be64_to_cpu(target->ioc_guid),
2701                     be16_to_cpu(target->path.pkey),
2702                     be64_to_cpu(target->service_id),
2703                     target->path.sgid.raw, target->path.dgid.raw);
2704
2705        ret = count;
2706
2707out:
2708        mutex_unlock(&host->add_target_mutex);
2709        return ret;
2710
2711err_disconnect:
2712        srp_disconnect_target(target);
2713
2714err_cm_id:
2715        ib_destroy_cm_id(target->cm_id);
2716
2717err_free_ib:
2718        srp_free_target_ib(target);
2719
2720err_free_mem:
2721        srp_free_req_data(target);
2722
2723err:
2724        scsi_host_put(target_host);
2725        goto out;
2726}
2727
2728static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2729
2730static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2731                          char *buf)
2732{
2733        struct srp_host *host = container_of(dev, struct srp_host, dev);
2734
2735        return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2736}
2737
2738static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2739
2740static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2741                         char *buf)
2742{
2743        struct srp_host *host = container_of(dev, struct srp_host, dev);
2744
2745        return sprintf(buf, "%d\n", host->port);
2746}
2747
2748static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
2749
2750static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2751{
2752        struct srp_host *host;
2753
2754        host = kzalloc(sizeof *host, GFP_KERNEL);
2755        if (!host)
2756                return NULL;
2757
2758        INIT_LIST_HEAD(&host->target_list);
2759        spin_lock_init(&host->target_lock);
2760        init_completion(&host->released);
2761        mutex_init(&host->add_target_mutex);
2762        host->srp_dev = device;
2763        host->port = port;
2764
2765        host->dev.class = &srp_class;
2766        host->dev.parent = device->dev->dma_device;
2767        dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2768
2769        if (device_register(&host->dev))
2770                goto free_host;
2771        if (device_create_file(&host->dev, &dev_attr_add_target))
2772                goto err_class;
2773        if (device_create_file(&host->dev, &dev_attr_ibdev))
2774                goto err_class;
2775        if (device_create_file(&host->dev, &dev_attr_port))
2776                goto err_class;
2777
2778        return host;
2779
2780err_class:
2781        device_unregister(&host->dev);
2782
2783free_host:
2784        kfree(host);
2785
2786        return NULL;
2787}
2788
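    /*
     * IB client "add" callback, called once per HCA. Note the FMR pool
     * fallback loop below: if pool creation with SRP_FMR_SIZE pages per
     * FMR fails, the size is halved repeatedly, down to SRP_FMR_MIN_SIZE,
     * before the driver settles for running without an FMR pool.
     */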
2789static void srp_add_one(struct ib_device *device)
2790{
2791        struct srp_device *srp_dev;
2792        struct ib_device_attr *dev_attr;
2793        struct ib_fmr_pool_param fmr_param;
2794        struct srp_host *host;
2795        int max_pages_per_fmr, fmr_page_shift, s, e, p;
2796
2797        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2798        if (!dev_attr)
2799                return;
2800
2801        if (ib_query_device(device, dev_attr)) {
2802                pr_warn("Query device failed for %s\n", device->name);
2803                goto free_attr;
2804        }
2805
2806        srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2807        if (!srp_dev)
2808                goto free_attr;
2809
2810        /*
2811         * Use the smallest page size supported by the HCA, down to a
2812         * minimum of 4096 bytes. We're unlikely to build large sglists
2813         * out of smaller entries.
2814         */
2815        fmr_page_shift          = max(12, ffs(dev_attr->page_size_cap) - 1);
2816        srp_dev->fmr_page_size  = 1 << fmr_page_shift;
2817        srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
2818        srp_dev->fmr_max_size   = srp_dev->fmr_page_size * SRP_FMR_SIZE;
2819
2820        INIT_LIST_HEAD(&srp_dev->dev_list);
2821
2822        srp_dev->dev = device;
2823        srp_dev->pd  = ib_alloc_pd(device);
2824        if (IS_ERR(srp_dev->pd))
2825                goto free_dev;
2826
2827        srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2828                                    IB_ACCESS_LOCAL_WRITE |
2829                                    IB_ACCESS_REMOTE_READ |
2830                                    IB_ACCESS_REMOTE_WRITE);
2831        if (IS_ERR(srp_dev->mr))
2832                goto err_pd;
2833
2834        for (max_pages_per_fmr = SRP_FMR_SIZE;
2835                        max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2836                        max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2837                memset(&fmr_param, 0, sizeof fmr_param);
2838                fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
2839                fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
2840                fmr_param.cache             = 1;
2841                fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2842                fmr_param.page_shift        = fmr_page_shift;
2843                fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
2844                                               IB_ACCESS_REMOTE_WRITE |
2845                                               IB_ACCESS_REMOTE_READ);
2846
2847                srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2848                if (!IS_ERR(srp_dev->fmr_pool))
2849                        break;
2850        }
2851
2852        if (IS_ERR(srp_dev->fmr_pool))
2853                srp_dev->fmr_pool = NULL;
2854
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                host = srp_add_port(srp_dev, p);
                if (host)
                        list_add_tail(&host->list, &srp_dev->dev_list);
        }

        ib_set_client_data(device, &srp_client, srp_dev);

        goto free_attr;

err_pd:
        ib_dealloc_pd(srp_dev->pd);

free_dev:
        kfree(srp_dev);

free_attr:
        kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
        struct srp_device *srp_dev;
        struct srp_host *host, *tmp_host;
        struct srp_target_port *target;

        srp_dev = ib_get_client_data(device, &srp_client);
        if (!srp_dev)
                return;

        list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
                device_unregister(&host->dev);
                /*
                 * Wait for the sysfs entry to go away, so that no new
                 * target ports can be created.
                 */
                wait_for_completion(&host->released);

                /*
                 * Remove all target ports.
                 */
                spin_lock(&host->target_lock);
                list_for_each_entry(target, &host->target_list, list)
                        srp_queue_remove_work(target);
                spin_unlock(&host->target_lock);

                /*
                 * Wait for target port removal tasks.
                 */
                flush_workqueue(system_long_wq);

                kfree(host);
        }

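        /*
         * All ports and their targets are gone; release the device-wide
         * FMR pool, DMA MR and protection domain.
         */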
        if (srp_dev->fmr_pool)
                ib_destroy_fmr_pool(srp_dev->fmr_pool);
        ib_dereg_mr(srp_dev->mr);
        ib_dealloc_pd(srp_dev->pd);

        kfree(srp_dev);
}

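/*
 * Callbacks through which the SCSI SRP transport layer drives
 * reconnects, fast I/O failure and rport removal for this driver.
 */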
static struct srp_function_template ib_srp_transport_functions = {
        .has_rport_state         = true,
        .reset_timer_if_blocked  = true,
        .reconnect_delay         = &srp_reconnect_delay,
        .fast_io_fail_tmo        = &srp_fast_io_fail_tmo,
        .dev_loss_tmo            = &srp_dev_loss_tmo,
        .reconnect               = srp_rport_reconnect,
        .rport_delete            = srp_rport_delete,
        .terminate_rport_io      = srp_terminate_io,
};

static int __init srp_init_module(void)
{
        int ret;

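        /*
         * The driver stashes request pointers in the 64-bit wr_id field
         * of each work completion; fail the build if a pointer does not
         * fit.
         */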
        BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

        if (srp_sg_tablesize) {
                pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
                if (!cmd_sg_entries)
                        cmd_sg_entries = srp_sg_tablesize;
        }

        if (!cmd_sg_entries)
                cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

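        /*
         * An SRP_CMD carries its data-out/data-in descriptor counts in
         * single-byte fields, so more than 255 S/G entries cannot be
         * expressed on the wire.
         */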
        if (cmd_sg_entries > 255) {
                pr_warn("Clamping cmd_sg_entries to 255\n");
                cmd_sg_entries = 255;
        }

        if (!indirect_sg_entries)
                indirect_sg_entries = cmd_sg_entries;
        else if (indirect_sg_entries < cmd_sg_entries) {
                pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
                        cmd_sg_entries);
                indirect_sg_entries = cmd_sg_entries;
        }

        ib_srp_transport_template =
                srp_attach_transport(&ib_srp_transport_functions);
        if (!ib_srp_transport_template)
                return -ENOMEM;

        ret = class_register(&srp_class);
        if (ret) {
                pr_err("couldn't register class infiniband_srp\n");
                srp_release_transport(ib_srp_transport_template);
                return ret;
        }

        ib_sa_register_client(&srp_sa_client);

        ret = ib_register_client(&srp_client);
        if (ret) {
                pr_err("couldn't register IB client\n");
                srp_release_transport(ib_srp_transport_template);
                ib_sa_unregister_client(&srp_sa_client);
                class_unregister(&srp_class);
                return ret;
        }

        return 0;
}

static void __exit srp_cleanup_module(void)
{
        ib_unregister_client(&srp_client);
        ib_sa_unregister_client(&srp_sa_client);
        class_unregister(&srp_class);
        srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);